From 646cdd90ee01d51990f55570df833d8832e44647 Mon Sep 17 00:00:00 2001
From: Joey Vagedes
Date: Tue, 1 Oct 2024 08:44:51 -0700
Subject: [PATCH] Fix ruff issues

Runs `ruff format .` on the workspace.
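For reference, a minimal sketch of how the formatting pass behind this commit
could be reproduced locally (the helper below is hypothetical and not part of
this patch; it assumes the `ruff` package is installed in the environment):

```python
# reformat_workspace.py - hypothetical helper, not included in this commit.
import subprocess
import sys


def format_workspace(root: str = ".") -> int:
    """Run ruff's formatter over every Python file under `root`, in place."""
    return subprocess.run([sys.executable, "-m", "ruff", "format", root]).returncode


if __name__ == "__main__":
    sys.exit(format_workspace())
```

`ruff format --check .` verifies formatting without rewriting files.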
---
 BasicDevTests.py                              |  19 +-
 ConfirmVersionAndTag.py                       |   3 +-
 docs/user/features/creating_invocable.md      |   4 +-
 docs/user/gen_api.py                          |  29 +-
 edk2toolext/base_abstract_invocable.py        |  31 +-
 edk2toolext/bin/nuget.py                      |   6 +-
 edk2toolext/capsule/capsule_helper.py         |  88 +-
 edk2toolext/capsule/capsule_tool.py           |  94 +-
 edk2toolext/capsule/pyopenssl_signer.py       |  41 +-
 edk2toolext/capsule/signing_helper.py         |  13 +-
 edk2toolext/capsule/signtool_signer.py        |  93 +-
 edk2toolext/codeql.py                         |  15 +-
 edk2toolext/edk2_invocable.py                 | 102 +-
 edk2toolext/edk2_logging.py                   |  44 +-
 edk2toolext/edk2_report.py                    |  35 +-
 edk2toolext/environment/conf_mgmt.py          |  22 +-
 .../environment_descriptor_files.py           |  59 +-
 .../az_cli_universal_dependency.py            |  28 +-
 .../environment/extdeptypes/git_dependency.py |  36 +-
 .../extdeptypes/nuget_dependency.py           |  59 +-
 .../environment/extdeptypes/web_dependency.py |  17 +-
 .../environment/external_dependency.py        |  68 +-
 edk2toolext/environment/multiple_workspace.py |  45 +-
 edk2toolext/environment/plugin_manager.py     |  24 +-
 .../plugintypes/ci_build_plugin.py            |  12 +-
 .../plugintypes/uefi_build_plugin.py          |   4 +-
 .../plugintypes/uefi_helper_plugin.py         |  20 +-
 edk2toolext/environment/repo_resolver.py      | 139 ++-
 .../environment/reporttypes/base_report.py    |   2 +
 .../reporttypes/component_report.py           |  64 +-
 .../reporttypes/coverage_report.py            | 180 ++--
 .../environment/reporttypes/usage_report.py   |  88 +-
 edk2toolext/environment/rust.py               |  24 +-
 .../self_describing_environment.py            |  99 +-
 edk2toolext/environment/shell_environment.py  |  45 +-
 edk2toolext/environment/uefi_build.py         | 271 +++--
 edk2toolext/environment/var_dict.py           |  72 +-
 edk2toolext/environment/version_aggregator.py |  21 +-
 edk2toolext/image_validation.py               | 209 ++--
 edk2toolext/invocables/edk2_ci_build.py       |  80 +-
 edk2toolext/invocables/edk2_ci_setup.py       |  49 +-
 .../edk2_multipkg_aware_invocable.py          |  43 +-
 edk2toolext/invocables/edk2_parse.py          |  86 +-
 edk2toolext/invocables/edk2_platform_build.py |  22 +-
 edk2toolext/invocables/edk2_pr_eval.py        |  88 +-
 edk2toolext/invocables/edk2_setup.py          |  35 +-
 edk2toolext/invocables/edk2_update.py         |  21 +-
 edk2toolext/nuget_publishing.py               | 339 ++++---
 edk2toolext/omnicache.py                      | 304 +++---
 edk2toolext/uefi/sig_db_tool.py               |  64 +-
 edk2toolext/versioninfo/versioninfo_helper.py | 366 ++++---
 edk2toolext/versioninfo/versioninfo_tool.py   |  40 +-
 .../windows/policy/firmware_policy_tool.py    |  97 +-
 .../windows/secureboot/secureboot_audit.py    |  62 +-
 tests.unit/capsule/test_capsule_helper.py     |  71 +-
 tests.unit/capsule/test_capsule_tool.py       |  88 +-
 tests.unit/capsule/test_signing_helper.py     |  12 +-
 .../test_az_cli_universal_dependency.py       |  82 +-
 tests.unit/test_ci_build_plugin.py            |   1 -
 tests.unit/test_codeql.py                     |  19 +-
 tests.unit/test_conf_mgmt.py                  |   9 +-
 tests.unit/test_edk2_ci_build.py              |  38 +-
 tests.unit/test_edk2_ci_setup.py              |   5 +-
 tests.unit/test_edk2_invocable.py             |  48 +-
 tests.unit/test_edk2_logging.py               | 186 ++--
 tests.unit/test_edk2_plat_build.py            |   5 +-
 tests.unit/test_edk2_setup.py                 |  56 +-
 tests.unit/test_edk2_update.py                | 127 +--
 tests.unit/test_external_dependency.py        |  66 +-
 tests.unit/test_git_dependency.py             |  30 +-
 tests.unit/test_image_validation.py           | 256 ++---
 tests.unit/test_nuget.py                      |   1 -
 tests.unit/test_nuget_dependency.py           |  15 +-
 tests.unit/test_nuget_publish.py              | 924 ++++++++++--------
 tests.unit/test_omnicache.py                  | 284 +++---
 tests.unit/test_pyopenssl_signer.py           |  53 +-
 tests.unit/test_repo_resolver.py              | 109 +--
 tests.unit/test_rust_environment.py           |  12 +-
 tests.unit/test_secureboot_audit.py           |  13 +-
 .../test_self_describing_environment.py       |  75 +-
 tests.unit/test_shell_environment.py          | 175 ++--
 tests.unit/test_signtool_signer.py            |  37 +-
 tests.unit/test_uefi_build.py                 |  48 +-
 tests.unit/test_var_dict.py                   |   2 +-
 tests.unit/test_version_aggregator.py         |   2 +-
 tests.unit/test_versioninfo.py                | 280 +++---
 tests.unit/test_web_dependency.py             |  80 +-
 tests.unit/uefi_tree.py                       |  36 +-
 88 files changed, 3901 insertions(+), 3265 deletions(-)

diff --git a/BasicDevTests.py b/BasicDevTests.py
index 517cc59a..230450c6 100644
--- a/BasicDevTests.py
+++ b/BasicDevTests.py
@@ -39,10 +39,11 @@ def TestFilenameLowercase(apath):
 
 def PackageAndModuleValidCharacters(apath):
     """Check pep8 recommendations for package and module names."""
-    match = re.match('^[a-z0-9_/.]+$', apath.replace("\\", "/"))
+    match = re.match("^[a-z0-9_/.]+$", apath.replace("\\", "/"))
     if match is None:
         logging.critical(
-            f"PackageAndModuleValidCharacters failure: package or module name {apath} has something invalid")
+            f"PackageAndModuleValidCharacters failure: package or module name {apath} has something invalid"
+        )
         return False
     return True
 
@@ -55,7 +56,9 @@ def TestNoSpaces(apath):
 
 
 def TestRequiredLicense(apath):
-    licenses = ["SPDX-License-Identifier: BSD-2-Clause-Patent", ]
+    licenses = [
+        "SPDX-License-Identifier: BSD-2-Clause-Patent",
+    ]
     try:
         with open(apath, "rb") as f_obj:
             contents = f_obj.read().decode()
@@ -79,15 +82,15 @@
     error = 0
     for a in py_files:
         aRelativePath = os.path.relpath(a, os.getcwd())
-        if (not TestEncodingOk(a, "ascii")):
+        if not TestEncodingOk(a, "ascii"):
             error += 1
-        if (not TestFilenameLowercase(aRelativePath)):
+        if not TestFilenameLowercase(aRelativePath):
             error += 1
-        if (not TestNoSpaces(aRelativePath)):
+        if not TestNoSpaces(aRelativePath):
             error += 1
-        if (not TestRequiredLicense(a)):
+        if not TestRequiredLicense(a):
             error += 1
-        if (not PackageAndModuleValidCharacters(aRelativePath)):  # use relative path so only test within package
+        if not PackageAndModuleValidCharacters(aRelativePath):  # use relative path so only test within package
             error += 1
 
     logging.critical(f"Found {error} error(s) in {len(py_files)} file(s)")
diff --git a/ConfirmVersionAndTag.py b/ConfirmVersionAndTag.py
index 6be5f227..d4d62c86 100644
--- a/ConfirmVersionAndTag.py
+++ b/ConfirmVersionAndTag.py
@@ -7,13 +7,14 @@
 # SPDX-License-Identifier: BSD-2-Clause-Patent
 ##
 """Script to check that the wheel/package created is aligned on a git tag."""
+
 import glob
 import os
 import sys
 
 p = os.path.join(os.getcwd(), "dist")
 whl_file = glob.glob(os.path.join(p, "*.whl"))
-if (len(whl_file) != 1):
+if len(whl_file) != 1:
     for filename in whl_file:
         print(filename)
     raise Exception("Too many wheel files")
diff --git a/docs/user/features/creating_invocable.md b/docs/user/features/creating_invocable.md
index d9754571..e0a71b6e 100644
--- a/docs/user/features/creating_invocable.md
+++ b/docs/user/features/creating_invocable.md
@@ -381,7 +381,7 @@ class SettingsManager(UpdateSettingsManager, CiSetupSettingsManager, BinaryBuild
             logging.warning(f"Deleting {output_dir}")
             shutil.rmtree(output_dir, ignore_errors=True)
             os.makedirs(output_dir)
-        except:
+        except Exception:
             pass
 
         self.nuget_version = self._GetNextVersion(self.nuget_version)
@@ -680,7 +680,7 @@
logging.warning(f"Deleting {output_dir}") shutil.rmtree(output_dir, ignore_errors=True) os.makedirs(output_dir) - except: + except Exception: pass self.nuget_version = self._GetNextVersion(self.nuget_version) diff --git a/docs/user/gen_api.py b/docs/user/gen_api.py index 63ff5e27..6a366062 100644 --- a/docs/user/gen_api.py +++ b/docs/user/gen_api.py @@ -10,6 +10,7 @@ Used in conjunction with mkdocs to generate static markdown files for each file inside the edk2toolext package for ReadTheDocs hosting. """ + import mkdocs_gen_files import glob import os @@ -19,11 +20,23 @@ def main(): """Entry into script that is executed.""" files = glob.glob("**/*.py", recursive=True, root_dir="edk2toolext") - excluded_files = ["__init__.py", "capsule_helper.py", "capsule_tool.py", "pyopenssl_signer.py", - "signing_helper.py", "signtool_signer.py", "sig_db_tool.py", - "firmware_policy_tool.py", "image_validation.py", "nuget_publishing.py", - "omnicache.py", "nuget.py", "versioninfo_tool.py", "versioninfo_helper.py", - "secureboot_audit.py"] + excluded_files = [ + "__init__.py", + "capsule_helper.py", + "capsule_tool.py", + "pyopenssl_signer.py", + "signing_helper.py", + "signtool_signer.py", + "sig_db_tool.py", + "firmware_policy_tool.py", + "image_validation.py", + "nuget_publishing.py", + "omnicache.py", + "nuget.py", + "versioninfo_tool.py", + "versioninfo_helper.py", + "secureboot_audit.py", + ] for file_path in files: edit_path = file_path @@ -40,7 +53,7 @@ def main(): filename = f"api{os.sep}{file_path}" with mkdocs_gen_files.open(filename, "w") as f: - ff = file_path.replace(os.sep, '.').replace('.md', '') + ff = file_path.replace(os.sep, ".").replace(".md", "") ff = f"edk2toolext.{ff}" print(f"::: {ff}", file=f) print(" handler: python", file=f) @@ -54,9 +67,9 @@ def main(): print(" show_source: False", file=f) # Point the "Edit on Github" button in the docs to point at the source code - edit_path = os.path.join('..', 'edk2toolext', edit_path) + edit_path = os.path.join("..", "edk2toolext", edit_path) mkdocs_gen_files.set_edit_path(filename, edit_path) - + with mkdocs_gen_files.open("api/.pages", "w") as f: print("title: API Reference", file=f) diff --git a/edk2toolext/base_abstract_invocable.py b/edk2toolext/base_abstract_invocable.py index c65af1e7..39a7faac 100644 --- a/edk2toolext/base_abstract_invocable.py +++ b/edk2toolext/base_abstract_invocable.py @@ -7,6 +7,7 @@ ## """The Base abstract Invocable that all other invocables should inherit from.""" + import logging import os import sys @@ -29,6 +30,7 @@ class BaseAbstractInvocable(object): plugin_manager (plugin_manager.PluginManager): the plugin manager helper (HelperFunctions): container for all helper functions """ + def __init__(self) -> None: """Init the Invocable.""" self.log_filename = None @@ -171,7 +173,7 @@ def ConfigureLogging(self) -> None: !!! tip Optional override in a subclass if new behavior is needed """ - logger = logging.getLogger('') + logger = logging.getLogger("") logger.setLevel(self.GetLoggingLevel("base")) # Adjust console mode depending on mode. 
@@ -182,10 +184,10 @@ def ConfigureLogging(self) -> None:
         log_directory = os.path.join(self.GetWorkspaceRoot(), self.GetLoggingFolderRelativeToRoot())
 
         txtlogfile = self.GetLoggingLevel("txt")
-        if (txtlogfile is not None):
-            logfile, filelogger = edk2_logging.setup_txt_logger(log_directory,
-                                                                self.GetLoggingFileName("txt"),
-                                                                txtlogfile)
+        if txtlogfile is not None:
+            logfile, filelogger = edk2_logging.setup_txt_logger(
+                log_directory, self.GetLoggingFileName("txt"), txtlogfile
+            )
             self.log_filename = logfile
 
         logging.info("Log Started: " + datetime.strftime(datetime.now(), "%A, %B %d, %Y %I:%M%p"))
@@ -206,20 +208,23 @@ def Invoke(self) -> None:
         # Next, get the environment set up.
         #
         (build_env, shell_env) = self_describing_environment.BootstrapEnvironment(
-            self.GetWorkspaceRoot(), self.GetActiveScopes(), self.GetSkippedDirectories())
+            self.GetWorkspaceRoot(), self.GetActiveScopes(), self.GetSkippedDirectories()
+        )
 
         # Make sure the environment verifies IF it is required for this invocation
         if self.GetVerifyCheckRequired() and not self_describing_environment.VerifyEnvironment(
-                self.GetWorkspaceRoot(), self.GetActiveScopes(), self.GetSkippedDirectories()):
-            raise RuntimeError("External Dependencies in the environment are out of date. "
-                               "Consider running stuart_update to possibly resolve this issue.")
+            self.GetWorkspaceRoot(), self.GetActiveScopes(), self.GetSkippedDirectories()
+        ):
+            raise RuntimeError(
+                "External Dependencies in the environment are out of date. "
+                "Consider running stuart_update to possibly resolve this issue."
+            )
 
         # Load plugins
         logging.log(edk2_logging.SECTION, "Loading Plugins")
         self.plugin_manager = plugin_manager.PluginManager()
-        failedPlugins = self.plugin_manager.SetListOfEnvironmentDescriptors(
-            build_env.plugins)
+        failedPlugins = self.plugin_manager.SetListOfEnvironmentDescriptors(build_env.plugins)
         if failedPlugins:
             logging.critical("One or more plugins failed to load. Halting build.")
Halting build.") for a in failedPlugins: @@ -227,13 +232,13 @@ def Invoke(self) -> None: raise Exception("One or more plugins failed to load.") self.helper = HelperFunctions() - if (self.helper.LoadFromPluginManager(self.plugin_manager) > 0): + if self.helper.LoadFromPluginManager(self.plugin_manager) > 0: raise Exception("One or more helper plugins failed to load.") logging.log(edk2_logging.SECTION, "Start Invocable Tool") retcode = self.Go() logging.log(edk2_logging.SECTION, "Summary") - if (retcode != 0): + if retcode != 0: logging.error("Error") else: edk2_logging.log_progress("Success") diff --git a/edk2toolext/bin/nuget.py b/edk2toolext/bin/nuget.py index f5fe087b..6450f244 100644 --- a/edk2toolext/bin/nuget.py +++ b/edk2toolext/bin/nuget.py @@ -6,6 +6,7 @@ # SPDX-License-Identifier: BSD-2-Clause-Patent ## """This module contains code that knows how to download nuget.""" + import logging import os import urllib.error @@ -36,7 +37,7 @@ def DownloadNuget(unpack_folder: str = None) -> str: (RuntimeError): Sha256 did not match """ if unpack_folder is None: - unpack_folder = resources.files('edk2toolext.bin') + unpack_folder = resources.files("edk2toolext.bin") out_file_name = Path(unpack_folder) / "NuGet.exe" # check if we have the nuget file already downloaded @@ -44,7 +45,7 @@ def DownloadNuget(unpack_folder: str = None) -> str: logging.debug(f"Attempting to download NuGet to: {out_file_name}") try: # Download the file and save it locally under `temp_file_name` - with urllib.request.urlopen(URL) as response, open(out_file_name, 'wb') as out_file: + with urllib.request.urlopen(URL) as response, open(out_file_name, "wb") as out_file: out_file.write(response.read()) except urllib.error.HTTPError as e: logging.error("We ran into an issue when getting NuGet") @@ -53,6 +54,7 @@ def DownloadNuget(unpack_folder: str = None) -> str: # do the hash to make sure the file is good with open(out_file_name, "rb") as file: import hashlib + temp_file_sha256 = hashlib.sha256(file.read()).hexdigest() if temp_file_sha256.lower() != SHA256.lower(): os.remove(out_file_name) diff --git a/edk2toolext/capsule/capsule_helper.py b/edk2toolext/capsule/capsule_helper.py index 3f6bd51b..bcf0ed1e 100644 --- a/edk2toolext/capsule/capsule_helper.py +++ b/edk2toolext/capsule/capsule_helper.py @@ -14,6 +14,7 @@ binary payloads, along with the functions to standardize the creation of the Windows driver installation files """ + import datetime import os import struct @@ -28,7 +29,7 @@ from edk2toollib.windows.capsule import cat_generator, inf_generator2 # https://docs.microsoft.com/en-us/dotnet/api/system.security.cryptography.pkcs.contentinfo.-ctor?view=netframework-4.8 -PKCS7_SIGNED_DATA_OID = '1.2.840.113549.1.7.2' +PKCS7_SIGNED_DATA_OID = "1.2.840.113549.1.7.2" @dataclass @@ -46,6 +47,7 @@ class CapsulePayload: integrity_data (bytes): integrity data for this payload. optional. integrity_filename (str): integrity filename. optional if integrity_data is None, required otherwise. """ + payload: UefiCapsuleHeaderClass payload_filename: str esrt_guid: uuid.UUID @@ -71,6 +73,7 @@ class Capsule: date (datetime.date): when the capsule was built. optional, defaults to datetime.date.today(). payloads (List[CapsulePayload]): a list of capsule payloads. 
     """
+
     version_string: str
     name: str
     provider_name: str
@@ -89,23 +92,24 @@ def get_capsule_file_name(capsule_options: dict) -> str:
 
 def get_normalized_version_string(version_string: str) -> str:
     """Normalizes a version string that is compatible with INF and CAT files."""
     # 19H1 HLK requires a 4 digit version string, or it will fail
-    while (version_string.count('.') < 3):
-        version_string += '.0'
+    while version_string.count(".") < 3:
+        version_string += ".0"
     return version_string
 
 
 def get_default_arch() -> str:
     """Consistently return the default architecture for windows files."""
-    return 'amd64'
+    return "amd64"
 
 
 def get_default_os_string() -> str:
     """Consistently return the default os for windows files."""
-    return 'Win10'
+    return "Win10"
 
 
-def build_capsule(capsule_data: bytes, capsule_options: dict, signer_module: object,
-                  signer_options: dict) -> UefiCapsuleHeaderClass:
+def build_capsule(
+    capsule_data: bytes, capsule_options: dict, signer_module: object, signer_options: dict
+) -> UefiCapsuleHeaderClass:
     """Goes through all steps of capsule generation for a single-payload FMP capsule.
 
     takes in capsule_data as a byte string, a signer module, and capsule and signer options,
@@ -129,8 +133,8 @@ def build_capsule(capsule_data: bytes, capsule_options: dict, signer_module: obj
     # Start building the capsule as we go.
     # Create the FMP Payload and set all the necessary options.
     fmp_payload_header = FmpPayloadHeaderClass()
-    fmp_payload_header.FwVersion = int(capsule_options['fw_version'], 16)
-    fmp_payload_header.LowestSupportedVersion = int(capsule_options['lsv_version'], 16)
+    fmp_payload_header.FwVersion = int(capsule_options["fw_version"], 16)
+    fmp_payload_header.LowestSupportedVersion = int(capsule_options["lsv_version"], 16)
     fmp_payload_header.Payload = capsule_data
 
     # Create the auth header and get ready to sign the data.
@@ -142,16 +146,13 @@ def build_capsule(capsule_data: bytes, capsule_options: dict, signer_module: obj
     data_to_sign = data_to_sign + struct.pack(" str:
     os.makedirs(save_path, exist_ok=True)
 
     for capsule_payload in capsule.payloads:
         payload_file_path = os.path.join(save_path, capsule_payload.payload_filename)
-        with open(payload_file_path, 'wb') as payload_file:
+        with open(payload_file_path, "wb") as payload_file:
             payload_file.write(capsule_payload.payload.Encode())
 
         if capsule_payload.integrity_data is not None:
-            if (capsule_payload.integrity_filename is None):
+            if capsule_payload.integrity_filename is None:
                 raise ValueError("Integrity data specified, but no integrity filename specified.")
             integrity_file_path = os.path.join(save_path, capsule_payload.integrity_filename)
-            with open(integrity_file_path, 'wb') as integrity_file:
+            with open(integrity_file_path, "wb") as integrity_file:
                 integrity_file.write(capsule_payload.integrity_data)
     return save_path
@@ -237,30 +238,30 @@ def create_inf_file(capsule_options: dict, save_path: str) -> str:
     NOTE: will save the final file to the save_path with a name determined from the capsule_options
     """
     # Expand the version string prior to creating INF file.
-    capsule_options['fw_version_string'] = get_normalized_version_string(capsule_options['fw_version_string'])
+    capsule_options["fw_version_string"] = get_normalized_version_string(capsule_options["fw_version_string"])
 
     # Deal with optional parameters when creating the INF file.
- capsule_options['is_rollback'] = capsule_options.get('is_rollback', False) - capsule_options['arch'] = capsule_options.get('arch', get_default_arch()) - capsule_options['mfg_name'] = capsule_options.get('mfg_name', capsule_options['provider_name']) + capsule_options["is_rollback"] = capsule_options.get("is_rollback", False) + capsule_options["arch"] = capsule_options.get("arch", get_default_arch()) + capsule_options["mfg_name"] = capsule_options.get("mfg_name", capsule_options["provider_name"]) inf_file = inf_generator2.InfFile( - capsule_options['fw_name'], - capsule_options['fw_version_string'], + capsule_options["fw_name"], + capsule_options["fw_version_string"], datetime.date.today().strftime("%m/%d/%Y"), - capsule_options['provider_name'], - capsule_options['mfg_name'], - capsule_options['arch'] + capsule_options["provider_name"], + capsule_options["mfg_name"], + capsule_options["arch"], ) inf_file.AddFirmware( "Firmware", - capsule_options['fw_description'], - capsule_options['esrt_guid'], - capsule_options['fw_version'], + capsule_options["fw_description"], + capsule_options["esrt_guid"], + capsule_options["fw_version"], get_capsule_file_name(capsule_options), - Rollback=capsule_options['is_rollback'], - IntegrityFile=capsule_options.get('fw_integrity_file', None) + Rollback=capsule_options["is_rollback"], + IntegrityFile=capsule_options.get("fw_integrity_file", None), ) inf_file_path = os.path.join(save_path, f"{capsule_options['fw_name']}.inf") @@ -286,9 +287,9 @@ def create_multinode_inf_file(capsule: Capsule, save_path: str) -> str: capsule.version_string = get_normalized_version_string(capsule.version_string) # set defaults for non-specified fields - if (capsule.arch is None): + if capsule.arch is None: capsule.arch = get_default_arch() - if (capsule.manufacturer_name is None): + if capsule.manufacturer_name is None: capsule.manufacturer_name = capsule.provider_name inf_file = inf_generator2.InfFile( @@ -297,7 +298,7 @@ def create_multinode_inf_file(capsule: Capsule, save_path: str) -> str: capsule.date.strftime("%m/%d/%Y"), capsule.provider_name, capsule.manufacturer_name, - capsule.arch + capsule.arch, ) idx = 0 @@ -312,7 +313,7 @@ def create_multinode_inf_file(capsule: Capsule, save_path: str) -> str: str(payload.version), payload.payload_filename, Rollback=payload.rollback, - IntegrityFile=payload.integrity_filename + IntegrityFile=payload.integrity_filename, ) inf_file_path = os.path.join(save_path, f"{capsule.name}.inf") @@ -338,17 +339,14 @@ def create_cat_file(capsule_options: dict, save_path: str) -> str: NOTE: will save the final file to the save_path with a name determined from the capsule_options """ # Deal with optional parameters when creating the CAT file. - capsule_options['arch'] = capsule_options.get('arch', get_default_arch()) - capsule_options['os_string'] = capsule_options.get('os_string', get_default_os_string()) + capsule_options["arch"] = capsule_options.get("arch", get_default_arch()) + capsule_options["os_string"] = capsule_options.get("os_string", get_default_os_string()) # Create the CAT. - catgenerator = cat_generator.CatGenerator( - capsule_options['arch'], - capsule_options['os_string'] - ) + catgenerator = cat_generator.CatGenerator(capsule_options["arch"], capsule_options["os_string"]) cat_file_path = os.path.join(save_path, f"{capsule_options['fw_name']}.cat") ret = catgenerator.MakeCat(cat_file_path) - if (ret != 0): + if ret != 0: raise RuntimeError("MakeCat Failed with errorcode %d!" 
 
     return cat_file_path
diff --git a/edk2toolext/capsule/capsule_tool.py b/edk2toolext/capsule/capsule_tool.py
index 97e146c4..2534a737 100644
--- a/edk2toolext/capsule/capsule_tool.py
+++ b/edk2toolext/capsule/capsule_tool.py
@@ -32,7 +32,7 @@
 """ % (os.path.basename(sys.argv[0]),)
 
 
-def get_cli_options(args: Any=None) -> argparse.Namespace:  # noqa: ANN401
+def get_cli_options(args: Any = None) -> argparse.Namespace:  # noqa: ANN401
     """Parse the primary options from the command line."""
     parser = argparse.ArgumentParser(description=TOOL_DESCRIPTION, formatter_class=argparse.RawDescriptionHelpFormatter)
@@ -40,32 +40,47 @@
     # NOTE: At least one signer type is required!
     signer_group = parser.add_mutually_exclusive_group(required=True)
     signer_group.add_argument(
-        '--builtin_signer', choices=[signing_helper.PYOPENSSL_SIGNER, signing_helper.SIGNTOOL_SIGNER])
+        "--builtin_signer", choices=[signing_helper.PYOPENSSL_SIGNER, signing_helper.SIGNTOOL_SIGNER]
+    )
     signer_group.add_argument(
-        '--local_signer', help='a filesystem path to a python module that can be loaded as the active signer')
+        "--local_signer", help="a filesystem path to a python module that can be loaded as the active signer"
+    )
     signer_group.add_argument(
-        '--module_signer', help='a python dot-path to a signer module that can be loaded from the current pypath')
-
-    options_help = 'add an option to the corresponding set. format is <option_name>=<option_value>'
-    parser.add_argument('-dc', action='append', dest='capsule_options', type=str, default=[], help=options_help)
-    parser.add_argument('-ds', action='append', dest='signer_options', type=str, default=[], help=options_help)
-
-    parser.add_argument('-o', dest='options_file', type=argparse.FileType('r'),
-                        help='a filesystem path to a json/yaml file to load with default options. will be overriden by any options parameters')  # noqa
-    parser.add_argument('-f', dest='save_final_options', default=False, action='store_true',
-                        help='optional flag to request that final tool options be saved in a file in the output directory')  # noqa
+        "--module_signer", help="a python dot-path to a signer module that can be loaded from the current pypath"
+    )
 
-    parser.add_argument('capsule_payload', type=argparse.FileType('rb'),
-                        help='a filesystem path to the binary payload for the capsule')
-    parser.add_argument('output_dir',
-                        help='a filesystem path to the directory to save output files. if directory does not exist, entire directory path will be created. if directory does exist, contents will be updated based on the capsule_options')  # noqa
+    options_help = "add an option to the corresponding set. format is <option_name>=<option_value>"
+    parser.add_argument("-dc", action="append", dest="capsule_options", type=str, default=[], help=options_help)
+    parser.add_argument("-ds", action="append", dest="signer_options", type=str, default=[], help=options_help)
+
+    parser.add_argument(
+        "-o",
+        dest="options_file",
+        type=argparse.FileType("r"),
+        help="a filesystem path to a json/yaml file to load with default options. will be overriden by any options parameters",
+    )  # noqa
+    parser.add_argument(
+        "-f",
+        dest="save_final_options",
+        default=False,
+        action="store_true",
+        help="optional flag to request that final tool options be saved in a file in the output directory",
+    )  # noqa
+
+    parser.add_argument(
+        "capsule_payload", type=argparse.FileType("rb"), help="a filesystem path to the binary payload for the capsule"
+    )
+    parser.add_argument(
+        "output_dir",
+        help="a filesystem path to the directory to save output files. if directory does not exist, entire directory path will be created. if directory does exist, contents will be updated based on the capsule_options",
+    )  # noqa
 
     return parser.parse_args(args=args)
 
 
 def load_options_file(in_file: IO) -> Optional[dict]:
     """Loads a yaml file into a dictionary and returns it."""
-    if not hasattr(in_file, 'read'):
+    if not hasattr(in_file, "read"):
         return None
 
     return yaml.safe_load(in_file)
@@ -81,17 +96,17 @@ def update_options(file_options: dict, capsule_options: list[str], signer_option
     if file_options is not None:
         updated_options = copy.copy(file_options)
     else:
-        updated_options = {'capsule': {}, 'signer': {}}
+        updated_options = {"capsule": {}, "signer": {}}
 
     # Update all the capsule options.
     for option in capsule_options:
-        (key, value) = option.split('=')
-        updated_options['capsule'][key] = value
+        (key, value) = option.split("=")
+        updated_options["capsule"][key] = value
 
     # Update all the signer options.
     for option in signer_options:
-        (key, value) = option.split('=')
-        updated_options['signer'][key] = value
+        (key, value) = option.split("=")
+        updated_options["signer"][key] = value
 
     return updated_options
@@ -102,10 +117,18 @@ def main() -> None:
     final_options = update_options(load_options_file(args.options_file), args.capsule_options, args.signer_options)
 
     # Verify minimum capsule options.
-    required_capsule_options = ('fw_name', 'fw_version', 'lsv_version', 'fw_version_string',
-                                'provider_name', 'fw_description', 'esrt_guid')
-    missing_capsule_options = tuple(option for option in required_capsule_options
-                                    if option not in final_options['capsule'])
+    required_capsule_options = (
+        "fw_name",
+        "fw_version",
+        "lsv_version",
+        "fw_version_string",
+        "provider_name",
+        "fw_description",
+        "esrt_guid",
+    )
+    missing_capsule_options = tuple(
+        option for option in required_capsule_options if option not in final_options["capsule"]
+    )
     if len(missing_capsule_options) > 0:
         logging.error("Missing required capsule options: " + ", ".join(missing_capsule_options) + "!")
         logging.error("Options MUST be provided in either the options file or on the command line.")
@@ -121,27 +144,24 @@ def main() -> None:
 
     # Now, build the capsule.
     uefi_capsule_header = capsule_helper.build_capsule(
-        args.capsule_payload.read(),
-        final_options['capsule'],
-        signer,
-        final_options['signer']
+        args.capsule_payload.read(), final_options["capsule"], signer, final_options["signer"]
     )
 
     # Save the capsule.
-    capsule_helper.save_capsule(uefi_capsule_header, final_options['capsule'], args.output_dir)
+    capsule_helper.save_capsule(uefi_capsule_header, final_options["capsule"], args.output_dir)
 
     # Build the INF file.
-    capsule_helper.create_inf_file(final_options['capsule'], args.output_dir)
+    capsule_helper.create_inf_file(final_options["capsule"], args.output_dir)
 
     # Build the CAT file.
-    capsule_helper.create_cat_file(final_options['capsule'], args.output_dir)
+    capsule_helper.create_cat_file(final_options["capsule"], args.output_dir)
 
     # If requested, save the final options for provenance.
     if args.save_final_options:
-        final_options_file = os.path.join(args.output_dir, 'Final_Capsule_Options.yaml')
-        with open(final_options_file, 'w') as options_file:
+        final_options_file = os.path.join(args.output_dir, "Final_Capsule_Options.yaml")
+        with open(final_options_file, "w") as options_file:
             yaml.dump(final_options, options_file, indent=2)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/edk2toolext/capsule/pyopenssl_signer.py b/edk2toolext/capsule/pyopenssl_signer.py
index 7f0273b0..2416a261 100644
--- a/edk2toolext/capsule/pyopenssl_signer.py
+++ b/edk2toolext/capsule/pyopenssl_signer.py
@@ -13,6 +13,7 @@
 This interface abstraction takes in the signature_options and signer_options
 dictionaries that are used by capsule_tool and capsule_helper.
 """
+
 import logging
 import warnings
@@ -39,47 +40,47 @@ def sign(data: bytes, signature_options: dict, signer_options: dict) -> bytes:
     """
     # The following _if_ clause handles the deprecated signature_option 'sign_alg' for backwards compatibility
     # when the deprecated option is supplied, this code adds the new, required options based on prior code behavior
-    if 'sign_alg' in signature_options:
+    if "sign_alg" in signature_options:
         warnings.warn('Signature_option "sign_alg" is deprecated, use "type"', DeprecationWarning)
-        if signature_options['sign_alg'] == 'pkcs12':
+        if signature_options["sign_alg"] == "pkcs12":
             # map legacy behavior to new options and backwards-compatible values
-            signature_options['type'] = 'bare'
-            signature_options['encoding'] = 'binary'
-            signer_options['key_file_format'] = 'pkcs12'
+            signature_options["type"] = "bare"
+            signature_options["encoding"] = "binary"
+            signer_options["key_file_format"] = "pkcs12"
         else:
             raise ValueError(f"Unsupported signature algorithm: {signature_options['sign_alg']}!")
 
-    ''' signature type 'bare' is just a binary signed digest, no PEM headers/footers or ASN '''
-    if signature_options['type'] != 'bare':
+    """ signature type 'bare' is just a binary signed digest, no PEM headers/footers or ASN """
+    if signature_options["type"] != "bare":
         raise ValueError(f"Unsupported signature type: {signature_options['type']}")
 
-    if 'type_options' in signature_options:
+    if "type_options" in signature_options:
         raise ValueError("Signature type options not supported")
 
-    if signature_options['encoding'] != 'binary':
+    if signature_options["encoding"] != "binary":
         raise ValueError(f"Unsupported signature encoding: {signature_options['encoding']}")
 
-    if signature_options['hash_alg'] != 'sha256':
+    if signature_options["hash_alg"] != "sha256":
         raise ValueError(f"Unsupported hashing algorithm: {signature_options['hash_alg']}")
 
-    if signer_options['key_file_format'] != 'pkcs12':
+    if signer_options["key_file_format"] != "pkcs12":
         raise ValueError(f"Unsupported signer key file format: {signer_options['key_file_format']}")
 
     logging.debug("Executing PKCS1 Signing")
 
     # If a key file is provided, use it for signing.
- if 'key_file' in signer_options: - with open(signer_options['key_file'], 'rb') as key_file: - signer_options['key_data'] = key_file.read() + if "key_file" in signer_options: + with open(signer_options["key_file"], "rb") as key_file: + signer_options["key_data"] = key_file.read() - if not isinstance(signer_options['key_data'], bytes): - signer_options['key_data'] = signer_options['key_data'].encode() + if not isinstance(signer_options["key_data"], bytes): + signer_options["key_data"] = signer_options["key_data"].encode() password = None - if 'key_file_password' in signer_options: - password = signer_options['key_file_password'] + if "key_file_password" in signer_options: + password = signer_options["key_file_password"] if not isinstance(password, bytes): password = password.encode() # TODO: Figure out OIDs. # TODO: Figure out EKU. - pkcs12 = load_pkcs12(signer_options['key_data'], password) + pkcs12 = load_pkcs12(signer_options["key_data"], password) pkey = crypto.PKey.from_cryptography_key(pkcs12.key) - return crypto.sign(pkey, data, signature_options['hash_alg']) + return crypto.sign(pkey, data, signature_options["hash_alg"]) diff --git a/edk2toolext/capsule/signing_helper.py b/edk2toolext/capsule/signing_helper.py index 4ee20de3..40468e32 100644 --- a/edk2toolext/capsule/signing_helper.py +++ b/edk2toolext/capsule/signing_helper.py @@ -24,13 +24,13 @@ from edk2toolext.capsule import signtool_signer # Valid types. -PYOPENSSL_SIGNER = 'pyopenssl' -SIGNTOOL_SIGNER = 'signtool' -PYPATH_MODULE_SIGNER = 'pymodule' -LOCAL_MODULE_SIGNER = 'local_module' +PYOPENSSL_SIGNER = "pyopenssl" +SIGNTOOL_SIGNER = "signtool" +PYPATH_MODULE_SIGNER = "pymodule" +LOCAL_MODULE_SIGNER = "local_module" -def get_signer(type: str, specifier: Optional[str]=None) -> Any: # noqa: ANN401 +def get_signer(type: str, specifier: Optional[str] = None) -> Any: # noqa: ANN401 """Load a signer module based on the arguments. if type is PYPATH_MODULE_SIGNER, the specifier should be the python module. @@ -46,9 +46,10 @@ def get_signer(type: str, specifier: Optional[str]=None) -> Any: # noqa: ANN401 if type == PYOPENSSL_SIGNER: try: from edk2toolext.capsule import pyopenssl_signer + return pyopenssl_signer except ModuleNotFoundError: - raise RuntimeError('PyOpenSsl Signer failed to load. Do you have pyopenssl installed?') + raise RuntimeError("PyOpenSsl Signer failed to load. Do you have pyopenssl installed?") elif type == SIGNTOOL_SIGNER: return signtool_signer elif type == PYPATH_MODULE_SIGNER: diff --git a/edk2toolext/capsule/signtool_signer.py b/edk2toolext/capsule/signtool_signer.py index 8281311a..e52ee98d 100644 --- a/edk2toolext/capsule/signtool_signer.py +++ b/edk2toolext/capsule/signtool_signer.py @@ -19,6 +19,7 @@ Will attempt to locate a valid installation of Windows Signtool using the utility_functions provided by edk2toollib. 
""" + import os import tempfile import warnings @@ -27,9 +28,7 @@ from edk2toollib.windows import locate_tools GLOBAL_SIGNTOOL_PATH = None -SUPPORTED_SIGNATURE_TYPE_OPTIONS = { - 'pkcs7': {'detachedSignedData', 'embedded', 'pkcs7DetachedSignedData'} -} +SUPPORTED_SIGNATURE_TYPE_OPTIONS = {"pkcs7": {"detachedSignedData", "embedded", "pkcs7DetachedSignedData"}} def get_signtool_path() -> None: @@ -41,7 +40,7 @@ def get_signtool_path() -> None: global GLOBAL_SIGNTOOL_PATH if GLOBAL_SIGNTOOL_PATH is None: - GLOBAL_SIGNTOOL_PATH = locate_tools.FindToolInWinSdk('signtool.exe') + GLOBAL_SIGNTOOL_PATH = locate_tools.FindToolInWinSdk("signtool.exe") return GLOBAL_SIGNTOOL_PATH @@ -65,39 +64,39 @@ def sign(data: bytes, signature_options: dict, signer_options: dict) -> bytes: """ # The following _if_ clause handles the deprecated signature_option 'sign_alg' for backwards compatibility # when the deprecated option is supplied, this code adds the new, required options based on prior code behavior - if 'sign_alg' in signature_options: + if "sign_alg" in signature_options: warnings.warn('Signature_option "sign_alg" is deprecated, use "type"', DeprecationWarning) - if signature_options['sign_alg'] == 'pkcs12': + if signature_options["sign_alg"] == "pkcs12": # map legacy behavior to new options and backwards-compatible values - signature_options['type'] = 'pkcs7' - signature_options['type_options'] = {'detachedSignedData'} - signature_options['encoding'] = 'DER' - signer_options['key_file_format'] = 'pkcs12' + signature_options["type"] = "pkcs7" + signature_options["type_options"] = {"detachedSignedData"} + signature_options["encoding"] = "DER" + signer_options["key_file_format"] = "pkcs12" else: raise ValueError(f"Unsupported signature algorithm: {signature_options['sign_alg']}!") - if signature_options['type'] != 'pkcs7': + if signature_options["type"] != "pkcs7": raise ValueError(f"Unsupported signature type: {signature_options['type']}!") - for opt in signature_options['type_options']: - if opt not in SUPPORTED_SIGNATURE_TYPE_OPTIONS[signature_options['type']]: + for opt in signature_options["type_options"]: + if opt not in SUPPORTED_SIGNATURE_TYPE_OPTIONS[signature_options["type"]]: raise ValueError(f"Unsupported type option: {opt}! Ensure you have provied a set") - mutually_exclusive_options = ('embedded', 'detachedSignedData', 'pkcs7DetachedSignedData') + mutually_exclusive_options = ("embedded", "detachedSignedData", "pkcs7DetachedSignedData") option_found = None for option in mutually_exclusive_options: - if option in signature_options['type_options']: + if option in signature_options["type_options"]: if option_found is None: option_found = option else: raise ValueError("type_options '%s' and '%s' are mutually exclusive" % (option_found, option)) - if signature_options['encoding'] != 'DER': + if signature_options["encoding"] != "DER": raise ValueError(f"Unsupported signature encoding: {signature_options['type']}!") - if signature_options['hash_alg'] != 'sha256': + if signature_options["hash_alg"] != "sha256": raise ValueError(f"Unsupported hashing algorithm: {signature_options['hash_alg']}!") - if 'key_file' not in signer_options: + if "key_file" not in signer_options: raise ValueError("Must supply a key_file in signer_options for Signtool!") - if signer_options['key_file_format'] != 'pkcs12': + if signer_options["key_file_format"] != "pkcs12": raise ValueError(f"Unsupported key file format: {signer_options['key_file_format']}!") # Set up a temp directory to hold input and output files. 
@@ -105,31 +104,31 @@ def sign(data: bytes, signature_options: dict, signer_options: dict) -> bytes:
     in_file_path = os.path.join(temp_folder, "data_to_sign.bin")
 
     # Create the input file for Signtool.
-    in_file = open(in_file_path, 'wb')
+    in_file = open(in_file_path, "wb")
     in_file.write(data)
     in_file.close()
 
     # Start building the parameters for the call.
-    signtool_params = ['sign']
-    signtool_params += ['/fd', signature_options['hash_alg']]
-    if 'detachedSignedData' in signature_options['type_options']:
-        signtool_params += ['/p7ce', 'DetachedSignedData']
-    elif 'pkcs7DetachedSignedData' in signature_options['type_options']:
-        signtool_params += ['/p7ce', 'PKCS7DetachedSignedData']
-    elif 'embedded' in signature_options['type_options']:
-        signtool_params += ['/p7ce', 'Embedded']
+    signtool_params = ["sign"]
+    signtool_params += ["/fd", signature_options["hash_alg"]]
+    if "detachedSignedData" in signature_options["type_options"]:
+        signtool_params += ["/p7ce", "DetachedSignedData"]
+    elif "pkcs7DetachedSignedData" in signature_options["type_options"]:
+        signtool_params += ["/p7ce", "PKCS7DetachedSignedData"]
+    elif "embedded" in signature_options["type_options"]:
+        signtool_params += ["/p7ce", "Embedded"]
     else:
         raise ValueError("For pkcs7, type_options must include either embedded or detachedSignedData")
-    signtool_params += ['/p7', f'"{temp_folder}"']
-    signtool_params += ['/f', f"\"{signer_options['key_file']}\""]
-    if 'oid' in signer_options:
-        signtool_params += ['/p7co', signer_options['oid']]
-    if 'eku' in signer_options:
-        signtool_params += ['/u', signer_options['eku']]
-    if 'key_pass' in signer_options:
-        signtool_params += ['/p', signer_options['key_pass']]
+    signtool_params += ["/p7", f'"{temp_folder}"']
+    signtool_params += ["/f", f"\"{signer_options['key_file']}\""]
+    if "oid" in signer_options:
+        signtool_params += ["/p7co", signer_options["oid"]]
+    if "eku" in signer_options:
+        signtool_params += ["/u", signer_options["eku"]]
+    if "key_pass" in signer_options:
+        signtool_params += ["/p", signer_options["key_pass"]]
 
     # Add basic options.
-    signtool_params += ['/debug', '/v', f'"{in_file_path}"']
+    signtool_params += ["/debug", "/v", f'"{in_file_path}"']
 
     # Make the call to Signtool.
     ret = RunCmd(get_signtool_path(), " ".join(signtool_params))
@@ -138,7 +137,7 @@ def sign(data: bytes, signature_options: dict, signer_options: dict) -> bytes:
 
     # Load the data from the output file and return it.
     out_file_path = os.path.join(temp_folder, "data_to_sign.bin.p7")
-    out_file = open(out_file_path, 'rb')
+    out_file = open(out_file_path, "rb")
     out_data = out_file.read()
     out_file.close()
 
@@ -163,25 +162,25 @@ def sign_in_place(sign_file_path: str, signature_options: dict, signer_options:
     """
     # NOTE: Currently, we only support the necessary algorithms for capsules.
-    if signature_options['sign_alg'] != 'pkcs12':
+    if signature_options["sign_alg"] != "pkcs12":
         raise ValueError(f"Unsupported signature algorithm: {signature_options['sign_alg']}!")
 
-    if signature_options['hash_alg'] != 'sha256':
+    if signature_options["hash_alg"] != "sha256":
         raise ValueError(f"Unsupported hashing algorithm: {signature_options['hash_alg']}!")
 
-    if 'key_file' not in signer_options:
+    if "key_file" not in signer_options:
         raise ValueError("Must supply a key_file in signer_options for Signtool!")
 
     # Start building the parameters for the call.
- signtool_params = ['sign', '/a'] - signtool_params += ['/fd', signature_options['hash_alg']] - signtool_params += ['/f', f"\"{signer_options['key_file']}\""] + signtool_params = ["sign", "/a"] + signtool_params += ["/fd", signature_options["hash_alg"]] + signtool_params += ["/f", f"\"{signer_options['key_file']}\""] # if 'oid' in signer_options: # signtool_params += ['/p7co', signer_options['oid']] # if 'eku' in signer_options: # signtool_params += ['/u', signer_options['eku']] - if 'key_pass' in signer_options: - signtool_params += ['/p', signer_options['key_pass']] + if "key_pass" in signer_options: + signtool_params += ["/p", signer_options["key_pass"]] # Add basic options. - signtool_params += ['/debug', '/v', f'"{sign_file_path}"'] + signtool_params += ["/debug", "/v", f'"{sign_file_path}"'] # Make the call to Signtool. ret = RunCmd(get_signtool_path(), " ".join(signtool_params)) diff --git a/edk2toolext/codeql.py b/edk2toolext/codeql.py index d749f25a..4a92205f 100644 --- a/edk2toolext/codeql.py +++ b/edk2toolext/codeql.py @@ -12,6 +12,7 @@ consistent command-line usage across repos, and define standard scopes that other plugins and tools can depend on for CodeQL operations. """ + from argparse import ArgumentParser, Namespace from typing import Tuple @@ -28,11 +29,12 @@ def add_command_line_option(parser: ArgumentParser) -> None: """ parser.add_argument( - '--codeql', - dest='codeql', - action='store_true', + "--codeql", + dest="codeql", + action="store_true", default=False, - help="Optional - Produces CodeQL results from the build.") + help="Optional - Produces CodeQL results from the build.", + ) def get_scopes(codeql_enabled: bool) -> Tuple[str]: @@ -78,7 +80,4 @@ def set_audit_only_mode(uefi_builder: UefiBuilder) -> None: build. """ - uefi_builder.env.SetValue( - "STUART_CODEQL_AUDIT_ONLY", - "true", - "Platform Defined") + uefi_builder.env.SetValue("STUART_CODEQL_AUDIT_ONLY", "true", "Platform Defined") diff --git a/edk2toolext/edk2_invocable.py b/edk2toolext/edk2_invocable.py index 73a6a39f..48c6c926 100644 --- a/edk2toolext/edk2_invocable.py +++ b/edk2toolext/edk2_invocable.py @@ -16,6 +16,7 @@ should be platform agnostic and work for any platform. Platform specific data is provided via the Edk2InvocableSettingsInterface. """ + import argparse import inspect import logging @@ -34,7 +35,7 @@ from edk2toolext.environment import shell_environment, version_aggregator -class Edk2InvocableSettingsInterface(): +class Edk2InvocableSettingsInterface: """Settings APIs to support an Edk2Invocable. This is an interface definition only to show which functions are @@ -185,13 +186,14 @@ class Edk2Invocable(BaseAbstractInvocable): !!! warning This Invocable should only be subclassed if creating a new invocable """ + def __init__(self) -> None: """Init the Invocable.""" super().__init__() self.PlatformSettings = None @classmethod - def collect_python_pip_info(cls: 'Edk2Invocable') -> None: + def collect_python_pip_info(cls: "Edk2Invocable") -> None: """Class method to collect all pip packages names and versions. Reports them to the global version_aggregator as well as print them to the screen. @@ -209,7 +211,7 @@ def collect_python_pip_info(cls: 'Edk2Invocable') -> None: ver_agg.ReportVersion(package.project_name, version, version_aggregator.VersionTypes.PIP) @classmethod - def collect_rust_info(cls: 'Edk2Invocable') -> None: + def collect_rust_info(cls: "Edk2Invocable") -> None: """Class method to collect Rust tool versions. 
         Reports them to the global version_aggregator as well as print them to the screen.
@@ -225,20 +227,17 @@ def get_rust_tool_version(tool_name: str, tool_params: str = "--version") -> str
             else:
                 return "N/A"
 
-        tools = {
-            "cargo": ("cargo",),
-            "cargo make": ("cargo", "make --version"),
-            "rustc": ("rustc",)
-        }
+        tools = {"cargo": ("cargo",), "cargo make": ("cargo", "make --version"), "rustc": ("rustc",)}
 
         for tool_name, tool_cmd in tools.items():
             ver = get_rust_tool_version(*tool_cmd)
-            match = re.search(r'(\d+\.\d+\.\d+)', ver)
+            match = re.search(r"(\d+\.\d+\.\d+)", ver)
             if match:
                 ver = match.group(1)
             elif ver != "N/A":
-                raise Exception("A Rust tool is installed, but its version "
-                                "format is unexpected and cannot be parsed.")
+                raise Exception(
+                    "A Rust tool is installed, but its version " "format is unexpected and cannot be parsed."
+                )
 
             logging.info(f"{tool_name} version: {ver}")
             ver_agg = version_aggregator.GetVersionAggregator()
@@ -290,11 +289,11 @@ def GetActiveScopes(self) -> tuple[str]:
 
         # Add any OS-specific scope.
         if GetHostInfo().os == "Windows":
-            scopes += ('global-win',)
+            scopes += ("global-win",)
         elif GetHostInfo().os == "Linux":
-            scopes += ('global-nix',)
+            scopes += ("global-nix",)
         # Add the global scope. To be deprecated.
-        scopes += ('global',)
+        scopes += ("global",)
 
         return scopes
 
     def GetLoggingLevel(self, loggerType: str) -> int:
@@ -372,7 +371,7 @@ def AddParserEpilog(self) -> str:
         Returns:
             (str): The string to be added to the end of the argument parser.
         """
-        epilog = dedent('''\
+        epilog = dedent("""\
     CLI Env Guide:
       <key>=<value>              - Set an env variable for the pre/post build process
      <key>                      - Set a non-valued env variable for the pre/post build process
      BLD_*_<key>=<value>        - Set a build flag for all build types (key=value will get passed to build process)
      BLD_*_<key>                - Set a non-valued build flag for all build types
      BLD_<TARGET>_<key>=<value> - Set a build flag for build type of <target> (key=value will get passed to build process for given build type)
      BLD_<TARGET>_<key>         - Set a non-valued build flag for a build type of <target>
-        ''')
+        """)
         return epilog
 
     def ParseCommandLineOptions(self) -> None:
@@ -399,20 +398,25 @@ def ParseCommandLineOptions(self) -> None:
             epilog=self.AddParserEpilog(),
         )
 
-        settingsParserObj.add_argument('-h', '--help', dest="help", action="store_true",
-                                       help='show this help message and exit')
-        settingsParserObj.add_argument('-c', '--platform_module', dest='platform_module',
-                                       default="PlatformBuild.py", type=str,
-                                       help='Provide the Platform Module relative to the current working directory.'
-                                       f'This should contain a {self.GetSettingsClass().__name__} instance.')
+        settingsParserObj.add_argument(
+            "-h", "--help", dest="help", action="store_true", help="show this help message and exit"
+        )
+        settingsParserObj.add_argument(
+            "-c",
+            "--platform_module",
+            dest="platform_module",
+            default="PlatformBuild.py",
+            type=str,
+            help="Provide the Platform Module relative to the current working directory."
+ f"This should contain a {self.GetSettingsClass().__name__} instance.", + ) # get the settings manager from the provided file and load an instance settingsArg, unknown_args = settingsParserObj.parse_known_args() try: self.PlatformModule = import_module_by_file_name(os.path.abspath(settingsArg.platform_module)) - self.PlatformSettings = locate_class_in_module( - self.PlatformModule, self.GetSettingsClass())() - except (TypeError): + self.PlatformSettings = locate_class_in_module(self.PlatformModule, self.GetSettingsClass())() + except TypeError: # Gracefully exit if the file we loaded isn't the right type class_name = self.GetSettingsClass().__name__ print(f"Unable to use {settingsArg.platform_module} as a {class_name}") @@ -436,11 +440,13 @@ def ParseCommandLineOptions(self) -> None: settingsParserObj.print_help() sys.exit(1) - except (FileNotFoundError): + except FileNotFoundError: if settingsArg.help: try: - print("WARNING: Some command line arguments and possible values for arguments may be missing. " - "Provide a PLATFORM_MODULE file to ensure all command line arguments are present.\n") + print( + "WARNING: Some command line arguments and possible values for arguments may be missing. " + "Provide a PLATFORM_MODULE file to ensure all command line arguments are present.\n" + ) self.AddCommandLineOptions(settingsParserObj) except Exception: pass @@ -461,7 +467,9 @@ def ParseCommandLineOptions(self) -> None: warnings.filterwarnings("default", category=DeprecationWarning, module=self.PlatformModule.__name__) # instantiate the second argparser that will get passed around - parserObj = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,) + parserObj = argparse.ArgumentParser( + formatter_class=argparse.RawDescriptionHelpFormatter, + ) # first pass it to the subclass self.AddCommandLineOptions(parserObj) @@ -472,10 +480,22 @@ def ParseCommandLineOptions(self) -> None: default_build_config_path = os.path.join(self.GetWorkspaceRoot(), "BuildConfig.conf") # add the common stuff that everyone will need - parserObj.add_argument('--build-config', dest='build_config', default=default_build_config_path, type=str, - help='Provide shell variables in a file') - parserObj.add_argument('--verbose', '--VERBOSE', '-v', dest="verbose", action='store_true', default=False, - help='Overrides platform settings and sets all loggers to verbose (logging.DEBUG).') + parserObj.add_argument( + "--build-config", + dest="build_config", + default=default_build_config_path, + type=str, + help="Provide shell variables in a file", + ) + parserObj.add_argument( + "--verbose", + "--VERBOSE", + "-v", + dest="verbose", + action="store_true", + default=False, + help="Overrides platform settings and sets all loggers to verbose (logging.DEBUG).", + ) # set the epilog to display with --help, -h parserObj.epilog = self.AddParserEpilog() @@ -510,9 +530,11 @@ def ParseCommandLineOptions(self) -> None: tokens = argument.strip().split("=") env.SetValue(tokens[0].strip().upper(), tokens[1].strip(), "From CmdLine") elif argument.count("=") == 0 and not argument.startswith(("-", "/")): - env.SetValue(argument.strip().upper(), - ''.join(choice(ascii_letters) for _ in range(20)), - "Non valued variable set From cmdLine") + env.SetValue( + argument.strip().upper(), + "".join(choice(ascii_letters) for _ in range(20)), + "Non valued variable set From cmdLine", + ) else: print(f"error: unexpected argument: [{argument}]. 
Pass --help for command information.") sys.exit(-1) @@ -532,8 +554,10 @@ def ParseCommandLineOptions(self) -> None: tokens = argument.strip().split("=") env.SetValue(tokens[0].strip().upper(), tokens[1].strip(), "From BuildConf") elif argument.count("=") == 0: - env.SetValue(argument.strip().upper(), - ''.join(choice(ascii_letters) for _ in range(20)), - "Non valued variable set from BuildConfig") + env.SetValue( + argument.strip().upper(), + "".join(choice(ascii_letters) for _ in range(20)), + "Non valued variable set from BuildConfig", + ) else: raise RuntimeError(f"Unknown variable passed in via BuildConfig: {argument}") diff --git a/edk2toolext/edk2_logging.py b/edk2toolext/edk2_logging.py index 2da2ac9f..5afbec4b 100644 --- a/edk2toolext/edk2_logging.py +++ b/edk2toolext/edk2_logging.py @@ -15,6 +15,7 @@ Splits logs into a master log and per package log. """ + import logging import os import re @@ -47,7 +48,7 @@ # sub_directory is relative to ws argument -def clean_build_logs(ws: str, sub_directory: Optional[str]=None) -> None: +def clean_build_logs(ws: str, sub_directory: Optional[str] = None) -> None: """Removes all build logs.""" # Make sure that we have a clean environment. if sub_directory is None: @@ -71,7 +72,7 @@ def get_progress_level() -> int: return PROGRESS -def get_edk2_filter(verbose: bool=False) -> logging.Filter: +def get_edk2_filter(verbose: bool = False) -> logging.Filter: """Returns an edk2 filter.""" gEdk2Filter = Edk2LogFilter() if verbose: @@ -102,11 +103,11 @@ def setup_section_level() -> None: # creates the the plaintext logger def setup_txt_logger( directory: str, - filename: str="log", - logging_level: int=logging.INFO, - formatter: Optional[logging.Formatter]=None, - logging_namespace: Optional[str]='', - isVerbose: bool=False + filename: str = "log", + logging_level: int = logging.INFO, + formatter: Optional[logging.Formatter] = None, + logging_namespace: Optional[str] = "", + isVerbose: bool = False, ) -> tuple: """Configures a text logger.""" logger = logging.getLogger(logging_namespace) @@ -124,7 +125,7 @@ def setup_txt_logger( os.remove(logfile_path) # Create file logger - filelogger = file_handler.FileHandler(filename=(logfile_path), mode='a') + filelogger = file_handler.FileHandler(filename=(logfile_path), mode="a") filelogger.setLevel(logging_level) filelogger.setFormatter(log_formatter) logger.addHandler(filelogger) @@ -136,12 +137,12 @@ def setup_txt_logger( # sets up a colored console logger def setup_console_logging( - logging_level: int=logging.INFO, - formatter: Optional[logging.Formatter]=None, - logging_namespace: Optional[str]='', - isVerbose: bool=False, - use_azure_colors: bool=False, - use_color: bool=True + logging_level: int = logging.INFO, + formatter: Optional[logging.Formatter] = None, + logging_namespace: Optional[str] = "", + isVerbose: bool = False, + use_azure_colors: bool = False, + use_color: bool = True, ) -> logging.Handler: """Configures a console logger. 
@@ -180,7 +181,9 @@
     return safeHandler
 
 
-def stop_logging(loghandle: Union[list[logging.Handler], logging.Handler], logging_namespace: Optional[str]='') -> None:
+def stop_logging(
+    loghandle: Union[list[logging.Handler], logging.Handler], logging_namespace: Optional[str] = ""
+) -> None:
     """Stops logging on a log handle."""
     logger = logging.getLogger(logging_namespace)
     if loghandle is None:
@@ -195,7 +198,7 @@
         logger.removeHandler(loghandle)
 
 
-def create_output_stream(level: int=logging.INFO, logging_namespace: Optional[str]='') -> logging.Handler:
+def create_output_stream(level: int = logging.INFO, logging_namespace: Optional[str] = "") -> logging.Handler:
     """Creates an output stream to log to."""
     # creates an output stream that is in memory
     if string_handler:
@@ -208,7 +211,7 @@
     return handler
 
 
-def remove_output_stream(handler: logging.Handler, logging_namespace: Optional[str]='') -> None:
+def remove_output_stream(handler: logging.Handler, logging_namespace: Optional[str] = "") -> None:
     """Removes an output stream to log to."""
     logger = logging.getLogger(logging_namespace)
     if isinstance(handler, list):
@@ -225,13 +228,15 @@ def scan_compiler_output(output_stream: TextIO) -> list[tuple]:
         (list[tuple[logging.Type, str]]): list of tuples containing the type
         of issue (Error, warning) and the description.
     """
+
     # seek to the start of the output stream
-    def output_compiler_error(match: re.Match, line: str, start_txt:str="Compiler") ->str:
+    def output_compiler_error(match: re.Match, line: str, start_txt: str = "Compiler") -> str:
         start, end = match.span()
         source = line[:start].strip()
         error = line[end:].strip()
         num = match.group(1)
         return f"{start_txt} #{num} from {source} {error}"
+
     problems = []
     output_stream.seek(0, 0)
     error_exp = re.compile(r"error [A-EG-Z]?(\d+):")
@@ -282,6 +287,7 @@
 
 class Edk2LogFilter(logging.Filter):
     """Subclass of logging.Filter."""
+
     _allowedLoggers = ["root", "git.cmd", "edk2toolext.environment.repo_resolver"]
 
     def __init__(self) -> None:
@@ -303,7 +309,7 @@ def __init__(self) -> None:
 
         self.secrets_regex = re.compile(r"{}".format("|".join(secrets_regex_strings)), re.IGNORECASE)
 
-    def setVerbose(self, isVerbose: bool=True) -> None:
+    def setVerbose(self, isVerbose: bool = True) -> None:
         """Sets the filter verbosity."""
         self._verbose = isVerbose
diff --git a/edk2toolext/edk2_report.py b/edk2toolext/edk2_report.py
index 7ef8e85a..8fe80164 100644
--- a/edk2toolext/edk2_report.py
+++ b/edk2toolext/edk2_report.py
@@ -6,6 +6,7 @@
 # SPDX-License-Identifier: BSD-2-Clause-Patent
 ##
 """An executable that allows a user to select a report and execute it on a given database."""
+
 import glob
 import logging
 import pathlib
@@ -23,7 +24,7 @@
 
 def setup_logging(verbose: bool) -> None:
     """Setup logging for the tool."""
-    logger = logging.getLogger('')
+    logger = logging.getLogger("")
     logger.setLevel(logging.DEBUG if verbose else logging.INFO)
     edk2_logging.setup_section_level()
     edk2_logging.setup_console_logging(logging.DEBUG if verbose else logging.INFO)
@@ -33,15 +34,21 @@
 def parse_args() -> Namespace:
     """Parse the arguments for the tool."""
     parser = ArgumentParser("A tool to generate reports on a edk2 workspace.")
-    parser.add_argument('--verbose', '--VERBOSE', '-v', dest="verbose", action='store_true', default=False,
dest="verbose", action='store_true', default=False, - help='verbose') - parser.add_argument('-db', '--database', '--DATABASE', dest='database', - default=str(pathlib.Path("Build","DATABASE.db")), - help="The database to use when generating reports. Can be a comma separated list of db's to " - "merge. Globbing is supported.") + parser.add_argument( + "--verbose", "--VERBOSE", "-v", dest="verbose", action="store_true", default=False, help="verbose" + ) + parser.add_argument( + "-db", + "--database", + "--DATABASE", + dest="database", + default=str(pathlib.Path("Build", "DATABASE.db")), + help="The database to use when generating reports. Can be a comma separated list of db's to " + "merge. Globbing is supported.", + ) # Register the report arguments as subparser - subparsers = parser.add_subparsers(dest='cmd', required=[]) + subparsers = parser.add_subparsers(dest="cmd", required=[]) for report in REPORTS: name, description = report.report_info() report_parser = subparsers.add_parser(name, description=description) @@ -49,13 +56,15 @@ def parse_args() -> Namespace: return parser.parse_args() + def main() -> int: """Main functionality of the executable.""" args = parse_args() setup_logging(args.verbose) - logging.warning("stuart_report is in active development. Please report any issues to the edk2-pytool-extensions " - "repo.") + logging.warning( + "stuart_report is in active development. Please report any issues to the edk2-pytool-extensions " "repo." + ) # Verify arguments to_merge = [] database_list = args.database.split(",") @@ -86,7 +95,7 @@ def main() -> int: cmd = args.cmd del args.cmd - db = Edk2DB(db_path = db_path) + db = Edk2DB(db_path=db_path) for report in REPORTS: name, _ = report.report_info() if name == cmd: @@ -100,7 +109,7 @@ def go() -> None: Sets up the logger for the tool and then runs the tool. 
""" # setup main console as logger - logger = logging.getLogger('') + logger = logging.getLogger("") logger.setLevel(logging.DEBUG) formatter = logging.Formatter("%(levelname)s - %(message)s") console = logging.StreamHandler() @@ -120,5 +129,5 @@ def go() -> None: sys.exit(retcode) -if __name__ == '__main__': +if __name__ == "__main__": go() diff --git a/edk2toolext/environment/conf_mgmt.py b/edk2toolext/environment/conf_mgmt.py index ec4821e3..d0eec816 100644 --- a/edk2toolext/environment/conf_mgmt.py +++ b/edk2toolext/environment/conf_mgmt.py @@ -10,6 +10,7 @@ Customized for edk2-pytool-extensions based build and support dynamic Visual studio support 2017++ """ + import logging import os import shutil @@ -18,8 +19,9 @@ from edk2toolext.environment import version_aggregator -class ConfMgmt(): +class ConfMgmt: """Handles Edk2 Conf Management.""" + def __init__(self) -> None: """Init an empty ConfMgmt object.""" self.Logger = logging.getLogger("ConfMgmt") @@ -65,9 +67,8 @@ def populate_conf_dir(self, conf_folder_path: str, override_conf: bool, conf_tem template_file_path = p break - if (template_file_path is None): - self.Logger.critical( - "Failed to find Template file for %s" % outfiles[x]) + if template_file_path is None: + self.Logger.critical("Failed to find Template file for %s" % outfiles[x]) raise Exception("Template File Missing", outfiles[x]) else: self.Logger.debug(f"Conf file template: {template_file_path}") @@ -76,8 +77,9 @@ def populate_conf_dir(self, conf_folder_path: str, override_conf: bool, conf_tem self._copy_conf_file_if_necessary(outfiles[x], template_file_path, override_conf) # Log Version for reporting - version_aggregator.GetVersionAggregator().ReportVersion(outfiles[x], self._get_version(outfiles[x]), - version_aggregator.VersionTypes.INFO) + version_aggregator.GetVersionAggregator().ReportVersion( + outfiles[x], self._get_version(outfiles[x]), version_aggregator.VersionTypes.INFO + ) def _get_version(self, conf_file: str) -> str: """Parse the version from the conf_file. @@ -90,7 +92,7 @@ def _get_version(self, conf_file: str) -> str: version = "0.0" with open(conf_file, "r") as f: for line in f.readlines(): - if (line.startswith("#!VERSION=")): + if line.startswith("#!VERSION="): try: version = str(float(line.split("=")[1].split()[0].strip())) break @@ -120,7 +122,7 @@ def _is_older_version(self, conf_file: str, template_file: str) -> bool: except Exception: logging.error("Failed to get version from file") finally: - return (conf < template) + return conf < template def _copy_conf_file_if_necessary(self, conf_file: str, template_file: str, override_conf: bool) -> None: """Copy template_file to conf_file if policy applies. @@ -135,14 +137,14 @@ def _copy_conf_file_if_necessary(self, conf_file: str, template_file: str, overr self.Logger.debug(f"{conf_file} file not found. Creating from Template file {template_file}") shutil.copy2(template_file, conf_file) - elif (override_conf): + elif override_conf: # caller requested override even for existing file self.Logger.debug(f"{conf_file} file replaced as requested") shutil.copy2(template_file, conf_file) else: # Both file exists. Do a quick version check - if (self._is_older_version(conf_file, template_file)): + if self._is_older_version(conf_file, template_file): # Conf dir file is older. Warn user. self.Logger.critical(f"{conf_file} file is out-of-date. 
Please update your conf files!") self.Logger.critical("Sleeping 30 seconds to encourage update....") diff --git a/edk2toolext/environment/environment_descriptor_files.py b/edk2toolext/environment/environment_descriptor_files.py index f3468fe6..413e6f93 100644 --- a/edk2toolext/environment/environment_descriptor_files.py +++ b/edk2toolext/environment/environment_descriptor_files.py @@ -12,6 +12,7 @@ It can parse the files, validate them, and return objects representing their contents. """ + import os import yaml @@ -27,6 +28,7 @@ class PathEnv(object): descriptor_location (string): location of the PathEnv published_path (string): location of the PathEnv """ + def __init__(self, descriptor: dict) -> None: """Init with the descriptor information.""" super(PathEnv, self).__init__() @@ -34,12 +36,11 @@ def __init__(self, descriptor: dict) -> None: # # Set the data for this object. # - self.scope = descriptor['scope'] - self.flags = descriptor['flags'] - self.var_name = descriptor.get('var_name', None) + self.scope = descriptor["scope"] + self.flags = descriptor["flags"] + self.var_name = descriptor.get("var_name", None) - self.descriptor_location = os.path.dirname( - descriptor['descriptor_file']) + self.descriptor_location = os.path.dirname(descriptor["descriptor_file"]) self.published_path = self.descriptor_location @@ -50,6 +51,7 @@ class DescriptorFile(object): file_path (str): descriptor file path descriptor_contents (Dict): Contents of the descriptor file """ + def __init__(self, file_path: str) -> None: """Loads the contents of the descriptor file and validates. @@ -64,7 +66,7 @@ def __init__(self, file_path: str) -> None: self.file_path = file_path self.descriptor_contents = None - with open(file_path, 'r') as file: + with open(file_path, "r") as file: try: self.descriptor_contents = yaml.safe_load(file) except Exception: @@ -74,29 +76,28 @@ def __init__(self, file_path: str) -> None: # Make sure that we loaded the file successfully. # if self.descriptor_contents is None: - raise ValueError( - "Could not load contents of descriptor file '%s'!" % file_path) + raise ValueError("Could not load contents of descriptor file '%s'!" % file_path) # The file path is an implicit descriptor field. - self.descriptor_contents['descriptor_file'] = self.file_path + self.descriptor_contents["descriptor_file"] = self.file_path # All files require a scope. - if 'scope' not in self.descriptor_contents: - raise ValueError("File '%s' missing required field '%s'!" % - (self.file_path, 'scope')) + if "scope" not in self.descriptor_contents: + raise ValueError("File '%s' missing required field '%s'!" % (self.file_path, "scope")) # If a file has flags, make sure they're sane. - if 'flags' in self.descriptor_contents: + if "flags" in self.descriptor_contents: # If a flag requires a name, make sure a name is provided. - for name_required in ('set_shell_var', 'set_build_var'): - if name_required in self.descriptor_contents['flags']: - if 'var_name' not in self.descriptor_contents: + for name_required in ("set_shell_var", "set_build_var"): + if name_required in self.descriptor_contents["flags"]: + if "var_name" not in self.descriptor_contents: raise ValueError( - "File '%s' has a flag requesting a var, but does not provide 'var_name'!" % self.file_path) + "File '%s' has a flag requesting a var, but does not provide 'var_name'!" 
% self.file_path + ) # clean up each string item for more reliable processing - for (k, v) in self.descriptor_contents.items(): - if (isinstance(v, str)): + for k, v in self.descriptor_contents.items(): + if isinstance(v, str): self.descriptor_contents[k] = self.sanitize_string(v) def sanitize_string(self, s: str) -> str: @@ -107,6 +108,7 @@ def sanitize_string(self, s: str) -> str: class PathEnvDescriptor(DescriptorFile): """Descriptor File for a PATH ENV.""" + def __init__(self, file_path: str) -> None: """Inits the descriptor as a PathEnvDescriptor from the provided path. @@ -121,10 +123,9 @@ def __init__(self, file_path: str) -> None: # Validate file contents. # # Make sure that the required fields are present. - for required_field in ('flags',): + for required_field in ("flags",): if required_field not in self.descriptor_contents: - raise ValueError("File '%s' missing required field '%s'!" % ( - self.file_path, required_field)) + raise ValueError("File '%s' missing required field '%s'!" % (self.file_path, required_field)) class ExternDepDescriptor(DescriptorFile): @@ -134,6 +135,7 @@ class ExternDepDescriptor(DescriptorFile): descriptor_contents (Dict): Contents of the Descriptor yaml file file_path (PathLike): path to the descriptor file """ + def __init__(self, file_path: str) -> None: """Inits the descriptor as a ExternDepDescriptor from the provided path. @@ -148,10 +150,9 @@ def __init__(self, file_path: str) -> None: # Validate file contents. # # Make sure that the required fields are present. - for required_field in ('scope', 'type', 'name', 'source', 'version'): + for required_field in ("scope", "type", "name", "source", "version"): if required_field not in self.descriptor_contents: - raise ValueError("File '%s' missing required field '%s'!" % ( - self.file_path, required_field)) + raise ValueError("File '%s' missing required field '%s'!" % (self.file_path, required_field)) class PluginDescriptor(DescriptorFile): @@ -161,6 +162,7 @@ class PluginDescriptor(DescriptorFile): descriptor_contents (Dict): Contents of the Descriptor yaml file file_path (PathLike): path to the descriptor file """ + def __init__(self, file_path: str) -> None: """Inits the descriptor as a PluginDescriptor from the provided path. @@ -175,12 +177,11 @@ def __init__(self, file_path: str) -> None: # Validate file contents. # # Make sure that the required fields are present. - for required_field in ('scope', 'name', 'module'): + for required_field in ("scope", "name", "module"): if required_field not in self.descriptor_contents: - raise ValueError("File '%s' missing required field '%s'!" % ( - self.file_path, required_field)) + raise ValueError("File '%s' missing required field '%s'!" 
% (self.file_path, required_field)) # Make sure the module item doesn't have .py on the end - if (self.descriptor_contents["module"].lower().endswith(".py")): + if self.descriptor_contents["module"].lower().endswith(".py"): # remove last 3 chars self.descriptor_contents["module"] = self.descriptor_contents["module"][:-3] diff --git a/edk2toolext/environment/extdeptypes/az_cli_universal_dependency.py b/edk2toolext/environment/extdeptypes/az_cli_universal_dependency.py index 0ede24da..babc8038 100644 --- a/edk2toolext/environment/extdeptypes/az_cli_universal_dependency.py +++ b/edk2toolext/environment/extdeptypes/az_cli_universal_dependency.py @@ -9,6 +9,7 @@ # SPDX-License-Identifier: BSD-2-Clause-Patent ## """An ExternalDependency subclass able to download from an Azure feed.""" + import json import logging import os @@ -40,6 +41,7 @@ class AzureCliUniversalDependency(ExternalDependency): !!! tip The attributes are what must be described in the ext_dep yaml file! """ + TypeString = "az-universal" # https://docs.microsoft.com/en-us/azure/devops/cli/log-in-via-pat?view=azure-devops&tabs=windows @@ -48,7 +50,7 @@ class AzureCliUniversalDependency(ExternalDependency): VersionLogged = False @classmethod - def VerifyToolDependencies(cls: 'AzureCliUniversalDependency') -> None: + def VerifyToolDependencies(cls: "AzureCliUniversalDependency") -> None: """Verify any tool environment or dependencies requirements are met. Log to Version Aggregator the Tool Versions @@ -73,7 +75,7 @@ def VerifyToolDependencies(cls: 'AzureCliUniversalDependency') -> None: break # Log the versions found - for (k, v) in found.items(): + for k, v in found.items(): version_aggregator.GetVersionAggregator().ReportVersion(k, v, version_aggregator.VersionTypes.TOOL) # Check requirements @@ -81,7 +83,7 @@ def VerifyToolDependencies(cls: 'AzureCliUniversalDependency') -> None: # 1 - az cli tool missing will raise exception on call to az --version earlier in function # 2 - Check for azure-devops extension - if 'azure-devops' not in found.keys(): + if "azure-devops" not in found.keys(): logging.critical("Missing required Azure-cli extension azure-devops") raise EnvironmentError("Missing required Azure-cli extension azure-devops") @@ -92,15 +94,15 @@ def __init__(self, descriptor: dict) -> None: super().__init__(descriptor) self.global_cache_path = None self.organization = self.source - self.feed = descriptor.get('feed') - self.project = descriptor.get('project', None) - self.file_filter = descriptor.get('file-filter', None) - self.compression_type = descriptor.get('compression_type', None) - self.internal_path = descriptor.get('internal_path', "/") + self.feed = descriptor.get("feed") + self.project = descriptor.get("project", None) + self.file_filter = descriptor.get("file-filter", None) + self.compression_type = descriptor.get("compression_type", None) + self.internal_path = descriptor.get("internal_path", "/") if self.internal_path: self.internal_path = os.path.normpath(self.internal_path) self.internal_path = self.internal_path.strip(os.path.sep) - _pat_var = descriptor.get('pat_var', None) + _pat_var = descriptor.get("pat_var", None) self._pat = None if _pat_var is not None: @@ -144,11 +146,13 @@ def _attempt_universal_install(self, install_dir: str) -> None: # lets check it to double confirm result_data = json.loads(results.getvalue()) results.close() - downloaded_version = result_data['Version'] + downloaded_version = result_data["Version"] if self.version != downloaded_version: self.version = downloaded_version # set it 
so state file is accurate and will fail on verify - raise Exception("Download Universal Package version (%s) different than requested (%s)." % - (downloaded_version, self.version)) + raise Exception( + "Download Universal Package version (%s) different than requested (%s)." + % (downloaded_version, self.version) + ) def fetch(self) -> None: """Fetches the dependency using internal state from the init.""" diff --git a/edk2toolext/environment/extdeptypes/git_dependency.py b/edk2toolext/environment/extdeptypes/git_dependency.py index 23c97a61..fcd95e7b 100644 --- a/edk2toolext/environment/extdeptypes/git_dependency.py +++ b/edk2toolext/environment/extdeptypes/git_dependency.py @@ -8,6 +8,7 @@ # SPDX-License-Identifier: BSD-2-Clause-Patent ## """An ExternalDependency subclass able to clone from git.""" + import logging import os from urllib.parse import urlsplit, urlunsplit @@ -27,6 +28,7 @@ class GitDependency(ExternalDependency): !!! tip The attributes are what must be described in the ext_dep yaml file! """ + TypeString = "git" def __init__(self, descriptor: dict) -> None: @@ -34,7 +36,7 @@ def __init__(self, descriptor: dict) -> None: super().__init__(descriptor) # Check to see whether this URL should be patched. - url_creds_var = descriptor.get('url_creds_var', None) + url_creds_var = descriptor.get("url_creds_var", None) if url_creds_var is not None: env = shell_environment.GetEnvironment() url_creds = env.get_shell_var(url_creds_var) @@ -42,11 +44,13 @@ def __init__(self, descriptor: dict) -> None: # Break things up. source_parts = urlsplit(self.source) # Modify the URL host with the creds. - new_parts = (source_parts.scheme, - url_creds + '@' + source_parts.netloc, - source_parts.path, - source_parts.query, - source_parts.fragment) + new_parts = ( + source_parts.scheme, + url_creds + "@" + source_parts.netloc, + source_parts.path, + source_parts.query, + source_parts.fragment, + ) # Put things back together. self.source = urlunsplit(new_parts) @@ -67,7 +71,7 @@ def fetch(self) -> None: try: repo_resolver.resolve(self._local_repo_root_path, self._repo_resolver_dep_obj, update_ok=True) except repo_resolver.GitCommandError as e: - logging.debug(f'Cmd failed for git dependency: {self._local_repo_root_path}') + logging.debug(f"Cmd failed for git dependency: {self._local_repo_root_path}") logging.debug(e) # Add a file to track the state of the dependency. 
@@ -89,23 +93,23 @@ def verify(self) -> bool: result = True details = repo_resolver.repo_details(self._local_repo_root_path) - if not details['Path'].is_dir(): - self.logger.info('Not a directory') + if not details["Path"].is_dir(): + self.logger.info("Not a directory") result = False - elif not any(details['Path'].iterdir()): - self.logger.info('No files in directory') + elif not any(details["Path"].iterdir()): + self.logger.info("No files in directory") result = False - elif not details['Initialized']: - self.logger.info('Not Initialized') + elif not details["Initialized"]: + self.logger.info("Not Initialized") result = False - elif details['Dirty']: - self.logger.info('Dirty') + elif details["Dirty"]: + self.logger.info("Dirty") result = False - elif self.version.lower() not in [details['Head']['HexSha'], details['Head']['HexShaShort']]: + elif self.version.lower() not in [details["Head"]["HexSha"], details["Head"]["HexShaShort"]]: self.logger.info(f'Mismatched sha: [head: {details["Head"]["HexSha"]}], [expected: {self.version}]') result = False diff --git a/edk2toolext/environment/extdeptypes/nuget_dependency.py b/edk2toolext/environment/extdeptypes/nuget_dependency.py index 71f6fcaa..c3d95ee4 100644 --- a/edk2toolext/environment/extdeptypes/nuget_dependency.py +++ b/edk2toolext/environment/extdeptypes/nuget_dependency.py @@ -7,6 +7,7 @@ # SPDX-License-Identifier: BSD-2-Clause-Patent ## """An ExternalDependency subclass able to download from NuGet.""" + import logging import os import shutil @@ -30,6 +31,7 @@ class NugetDependency(ExternalDependency): !!! tip The attributes are what must be described in the ext_dep yaml file! """ + TypeString = "nuget" # Env variable name for path to folder containing NuGet.exe @@ -42,7 +44,7 @@ def __init__(self, descriptor: dict) -> None: self.nuget_cache_path = None @classmethod - def GetNugetCmd(cls: 'NugetDependency') -> list[str]: + def GetNugetCmd(cls: "NugetDependency") -> list[str]: """Appends mono to the command and resolves the full path of the exe for mono. Used to add nuget support on posix platforms. @@ -64,7 +66,7 @@ def GetNugetCmd(cls: 'NugetDependency') -> list[str]: if nuget_path is not None: nuget_path = os.path.join(nuget_path, "NuGet.exe") if not os.path.isfile(nuget_path): - logging.info(f'{cls.NUGET_ENV_VAR_NAME} set, but did not exist. Attempting to download.') + logging.info(f"{cls.NUGET_ENV_VAR_NAME} set, but did not exist. Attempting to download.") DownloadNuget(nuget_path) else: nuget_path = DownloadNuget() @@ -82,7 +84,7 @@ def GetNugetCmd(cls: 'NugetDependency') -> list[str]: return cmd @staticmethod - def normalize_version(version: str, nuget_name: Optional[str]="") -> str: + def normalize_version(version: str, nuget_name: Optional[str] = "") -> str: """Normalizes the version as NuGet versioning diverges from Semantic Versioning. https://learn.microsoft.com/en-us/nuget/concepts/package-versioning#where-nugetversion-diverges-from-semantic-versioning @@ -92,8 +94,7 @@ def normalize_version(version: str, nuget_name: Optional[str]="") -> str: """ # 1. NuGetVersion requires the major segment to be defined if not version: - raise ValueError("String is empty. At least major version is " - "required.") + raise ValueError("String is empty. At least major version is " "required.") # 2. NuGetVersion uses case insensitive string comparisons for # pre-release components @@ -110,8 +111,7 @@ def normalize_version(version: str, nuget_name: Optional[str]="") -> str: # 4. 
A maximum of 4 version segments are allowed if len(int_parts) > 4: - raise ValueError(f"Maximum of 4 version segments allowed: " - f"'{version}'!") + raise ValueError(f"Maximum of 4 version segments allowed: " f"'{version}'!") # 5. Allow a fourth version segment - "Revision" normally not # allowed in Semantic versions but allowed in NuGet versions. @@ -139,19 +139,20 @@ def normalize_version(version: str, nuget_name: Optional[str]="") -> str: nuget_ver = semantic_version.Version.coerce(reformed_ver) # A ValueError will be raised if the version string is invalid - major, minor, patch, prerelease, build = \ - semantic_version.Version.parse(str(nuget_ver)) + major, minor, patch, prerelease, build = semantic_version.Version.parse(str(nuget_ver)) else: # A ValueError will be raised if the version string is invalid nuget_ver = semantic_version.Version(reformed_ver) major, minor, patch, prerelease, build = tuple(nuget_ver) - logging.info(f"NuGet version parts:\n" - f" Major Version: {major}\n" - f" Minor Version: {minor}\n" - f" Patch Version: {patch}\n" - f" Pre-Release Version {prerelease}\n" - f" Revision (Build) Version: {build}") + logging.info( + f"NuGet version parts:\n" + f" Major Version: {major}\n" + f" Minor Version: {minor}\n" + f" Patch Version: {patch}\n" + f" Pre-Release Version {prerelease}\n" + f" Revision (Build) Version: {build}" + ) return reformed_ver @@ -166,7 +167,7 @@ def _fetch_from_nuget_cache(self, package_name: str) -> bool: cmd = NugetDependency.GetNugetCmd() cmd += ["locals", "global-packages", "-list"] return_buffer = StringIO() - if (RunCmd(cmd[0], " ".join(cmd[1:]), outstream=return_buffer) == 0): + if RunCmd(cmd[0], " ".join(cmd[1:]), outstream=return_buffer) == 0: # Seek to the beginning of the output buffer and capture the output. return_buffer.seek(0) return_string = return_buffer.read() @@ -188,24 +189,22 @@ def _fetch_from_nuget_cache(self, package_name: str) -> bool: try: nuget_version = NugetDependency.normalize_version(self.version) except ValueError: - logging.error(f"NuGet dependency {self.package} has an invalid " - f"version string: {self.version}") + logging.error(f"NuGet dependency {self.package} has an invalid " f"version string: {self.version}") - cache_search_path = os.path.join( - self.nuget_cache_path, package_name.lower(), nuget_version) + cache_search_path = os.path.join(self.nuget_cache_path, package_name.lower(), nuget_version) inner_cache_search_path = os.path.join(cache_search_path, package_name) if os.path.isdir(cache_search_path): # If we found a cache for this version, let's use it. if os.path.isdir(inner_cache_search_path): logging.info(self.nuget_cache_path) - logging.info( - "Local Cache found for Nuget package '%s'. Skipping fetch.", package_name) + logging.info("Local Cache found for Nuget package '%s'. Skipping fetch.", package_name) shutil.copytree(inner_cache_search_path, self.contents_dir) result = True # If this cache doesn't match our heuristic, let's warn the user. else: logging.warning( - "Local Cache found for Nuget package '%s', but could not find contents. Malformed?", package_name) + "Local Cache found for Nuget package '%s', but could not find contents. 
Malformed?", package_name + ) return result @@ -213,7 +212,7 @@ def __str__(self) -> str: """Return a string representation.""" return f"NugetDependecy: {self.package}@{self.version}" - def _attempt_nuget_install(self, install_dir: str, non_interactive: Optional[bool]=True) -> None: + def _attempt_nuget_install(self, install_dir: str, non_interactive: Optional[bool] = True) -> None: # # fetch the contents of the package. # @@ -252,10 +251,14 @@ def _attempt_nuget_install(self, install_dir: str, non_interactive: Optional[boo else: # Only provide this error message if they are not using a credential provider, but receive a 401 error if is_unauthorized and not found_cred_provider: - logging.warning("[Nuget] A package requires credentials, but you do not have a credential "\ - "provider installed.") - logging.warning("[Nuget] Please install a credential provider and try again or run the following "\ - "command in your terminal to install the package manually:") + logging.warning( + "[Nuget] A package requires credentials, but you do not have a credential " + "provider installed." + ) + logging.warning( + "[Nuget] Please install a credential provider and try again or run the following " + "command in your terminal to install the package manually:" + ) logging.warning(f"[{' '.join(cmd).replace(' -NonInteractive', '')}]") raise RuntimeError(f"[Nuget] We failed to install this version {self.version} of {package_name}") diff --git a/edk2toolext/environment/extdeptypes/web_dependency.py b/edk2toolext/environment/extdeptypes/web_dependency.py index 9b226a14..15767065 100644 --- a/edk2toolext/environment/extdeptypes/web_dependency.py +++ b/edk2toolext/environment/extdeptypes/web_dependency.py @@ -50,9 +50,9 @@ class WebDependency(ExternalDependency): def __init__(self, descriptor: dict) -> None: """Inits a web dependency based off the provided descriptor.""" super().__init__(descriptor) - self.internal_path = os.path.normpath(descriptor['internal_path']) - self.compression_type = descriptor.get('compression_type', None) - self.sha256 = descriptor.get('sha256', None) + self.internal_path = os.path.normpath(descriptor["internal_path"]) + self.compression_type = descriptor.get("compression_type", None) + self.sha256 = descriptor.get("sha256", None) # If the internal path starts with a / that means we are downloading a directory self.download_is_directory = self.internal_path.startswith(os.path.sep) @@ -90,7 +90,7 @@ def unpack(compressed_file_path: str, destination: str, internal_path: str, comp if compression_type == "zip": logging.info(f"{compressed_file_path} is a zip file, trying to unpack it.") - _ref = zipfile.ZipFile(compressed_file_path, 'r') + _ref = zipfile.ZipFile(compressed_file_path, "r") files_in_volume = _ref.namelist() elif compression_type and "tar" in compression_type: @@ -129,7 +129,7 @@ def fetch(self) -> None: try: # Download the file and save it locally under `temp_file_path` - with urllib.request.urlopen(url) as response, open(temp_file_path, 'wb') as out_file: + with urllib.request.urlopen(url) as response, open(temp_file_path, "wb") as out_file: out_file.write(response.read()) except urllib.error.HTTPError as e: logging.error(f"ran into an issue when resolving ext_dep {self.name} at {self.source}") @@ -139,12 +139,15 @@ def fetch(self) -> None: if self.sha256: with open(temp_file_path, "rb") as file: import hashlib + temp_file_sha256 = hashlib.sha256(file.read()).hexdigest() # compare sha256 hexdigests as lowercase to make case insensitive if temp_file_sha256.lower() != 
self.sha256.lower(): - raise RuntimeError(f"{self.name} - sha256 does not match\n\tdownloaded:" - f"\t{temp_file_sha256}\n\tin json:\t{self.sha256}") + raise RuntimeError( + f"{self.name} - sha256 does not match\n\tdownloaded:" + f"\t{temp_file_sha256}\n\tin json:\t{self.sha256}" + ) if os.path.isfile(temp_file_path) is False: raise RuntimeError(f"{self.name} did not download") diff --git a/edk2toolext/environment/external_dependency.py b/edk2toolext/environment/external_dependency.py index a383d4b4..b00e84a2 100644 --- a/edk2toolext/environment/external_dependency.py +++ b/edk2toolext/environment/external_dependency.py @@ -53,25 +53,22 @@ def __init__(self: str, descriptor: dict) -> None: # # Set the data for this object. # - self.scope = descriptor['scope'] - self.type = descriptor['type'] - self.name = descriptor['name'] - self.source = descriptor['source'] - self.version = descriptor['version'] - self.flags = descriptor.get('flags', None) - self.var_name = descriptor.get('var_name', None) - self.error_msg = descriptor.get('error_msg', None) + self.scope = descriptor["scope"] + self.type = descriptor["type"] + self.name = descriptor["name"] + self.source = descriptor["source"] + self.version = descriptor["version"] + self.flags = descriptor.get("flags", None) + self.var_name = descriptor.get("var_name", None) + self.error_msg = descriptor.get("error_msg", None) self.global_cache_path = None - self.descriptor_location = os.path.dirname( - descriptor['descriptor_file']) - self.contents_dir = os.path.join( - self.descriptor_location, self.name + "_extdep") - self.state_file_path = os.path.join( - self.contents_dir, "extdep_state.yaml") + self.descriptor_location = os.path.dirname(descriptor["descriptor_file"]) + self.contents_dir = os.path.join(self.descriptor_location, self.name + "_extdep") + self.state_file_path = os.path.join(self.contents_dir, "extdep_state.yaml") self.published_path = self.compute_published_path() - def set_global_cache_path(self, global_cache_path: str) -> 'ExternalDependency': + def set_global_cache_path(self, global_cache_path: str) -> "ExternalDependency": """Sets the global cache path to locate already downloaded dependencies. Arguments: @@ -87,9 +84,7 @@ def compute_published_path(self) -> str: if self.flags and "host_specific" in self.flags and self.verify(): host = GetHostInfo() - logging.info("Computing path for {0} located at {1} on {2}".format(self.name, - self.contents_dir, - str(host))) + logging.info("Computing path for {0} located at {1} on {2}".format(self.name, self.contents_dir, str(host))) acceptable_names = [] @@ -115,8 +110,9 @@ def compute_published_path(self) -> str: if new_published_path is None: logging.error(f"{self.name} is host specific, but does not appear to have support for {str(host)}.") - logging.error(f"Verify support for detected host: {str(host)} and contact dependency provider to add "\ - "support.") + logging.error( + f"Verify support for detected host: {str(host)} and contact dependency provider to add " "support." 
+ ) logging.error("Otherwise, delete the external dependency directory to reset.") new_published_path = self.contents_dir @@ -137,8 +133,8 @@ def determine_cache_path(self) -> Optional[str]: result = None if self.global_cache_path is not None and os.path.isdir(self.global_cache_path): subpath_calc = hashlib.sha1() - subpath_calc.update(self.version.encode('utf-8')) - subpath_calc.update(self.source.encode('utf-8')) + subpath_calc.update(self.version.encode("utf-8")) + subpath_calc.update(self.source.encode("utf-8")) subpath = subpath_calc.hexdigest() result = os.path.join(self.global_cache_path, self.type, self.name, subpath) return result @@ -192,7 +188,7 @@ def verify(self) -> int: # Attempt to load the state file. if result: - with open(self.state_file_path, 'r') as file: + with open(self.state_file_path, "r") as file: try: state_data = yaml.safe_load(file) except Exception: @@ -201,7 +197,7 @@ def verify(self) -> int: result = False # If loaded, check the version. - if result and state_data['version'] != self.version: + if result and state_data["version"] != self.version: result = False logging.debug("Verify '%s' returning '%s'." % (self.name, result)) @@ -209,18 +205,17 @@ def verify(self) -> int: def report_version(self) -> None: """Reports the version of the external dependency.""" - version_aggregator.GetVersionAggregator().ReportVersion(self.name, - self.version, - version_aggregator.VersionTypes.INFO, - self.descriptor_location) + version_aggregator.GetVersionAggregator().ReportVersion( + self.name, self.version, version_aggregator.VersionTypes.INFO, self.descriptor_location + ) def update_state_file(self) -> None: """Updates the file representing the state of the dependency.""" - with open(self.state_file_path, 'w+') as file: - yaml.dump({'version': self.version}, file) + with open(self.state_file_path, "w+") as file: + yaml.dump({"version": self.version}, file) -def ExtDepFactory(descriptor: dict) -> 'ExternalDependency': +def ExtDepFactory(descriptor: dict) -> "ExternalDependency": """External Dependency Factory capable of generating each type of dependency. !!! Note @@ -230,14 +225,15 @@ def ExtDepFactory(descriptor: dict) -> 'ExternalDependency': from edk2toolext.environment.extdeptypes.git_dependency import GitDependency from edk2toolext.environment.extdeptypes.nuget_dependency import NugetDependency from edk2toolext.environment.extdeptypes.web_dependency import WebDependency - if descriptor['type'] == NugetDependency.TypeString: + + if descriptor["type"] == NugetDependency.TypeString: return NugetDependency(descriptor) - elif descriptor['type'] == WebDependency.TypeString: + elif descriptor["type"] == WebDependency.TypeString: return WebDependency(descriptor) - elif descriptor['type'] == GitDependency.TypeString: + elif descriptor["type"] == GitDependency.TypeString: return GitDependency(descriptor) - elif descriptor['type'] == AzureCliUniversalDependency.TypeString: + elif descriptor["type"] == AzureCliUniversalDependency.TypeString: AzureCliUniversalDependency.VerifyToolDependencies() return AzureCliUniversalDependency(descriptor) - raise ValueError("Unknown extdep type '%s' requested!" % descriptor['type']) + raise ValueError("Unknown extdep type '%s' requested!" 
% descriptor["type"]) diff --git a/edk2toolext/environment/multiple_workspace.py b/edk2toolext/environment/multiple_workspace.py index be778376..558d6f0a 100644 --- a/edk2toolext/environment/multiple_workspace.py +++ b/edk2toolext/environment/multiple_workspace.py @@ -28,11 +28,12 @@ class MultipleWorkspace(object): WORKSPACE (str): defined the current workspace PACKAGES_PATH (str): defined the other WORKSPACE """ - WORKSPACE = '' + + WORKSPACE = "" PACKAGES_PATH = None @classmethod - def convertPackagePath(cls: 'MultipleWorkspace', Ws: str, Path: str) -> str: + def convertPackagePath(cls: "MultipleWorkspace", Ws: str, Path: str) -> str: """Convert path to match workspace. Args: @@ -44,11 +45,11 @@ def convertPackagePath(cls: 'MultipleWorkspace', Ws: str, Path: str) -> str: (str): Converted path. """ if str(os.path.normcase(Path)).startswith(Ws): - return os.path.join(Ws, Path[len(Ws) + 1:]) + return os.path.join(Ws, Path[len(Ws) + 1 :]) return Path @classmethod - def setWs(cls: 'MultipleWorkspace', Ws: str, PackagesPath: Optional[list[str]]=None) -> None: + def setWs(cls: "MultipleWorkspace", Ws: str, PackagesPath: Optional[list[str]] = None) -> None: """Set WORKSPACE and PACKAGES_PATH environment. Args: @@ -58,13 +59,14 @@ def setWs(cls: 'MultipleWorkspace', Ws: str, PackagesPath: Optional[list[str]]=N """ cls.WORKSPACE = Ws if PackagesPath: - cls.PACKAGES_PATH = [cls.convertPackagePath(Ws, os.path.normpath( - Path.strip())) for Path in PackagesPath.split(os.pathsep)] + cls.PACKAGES_PATH = [ + cls.convertPackagePath(Ws, os.path.normpath(Path.strip())) for Path in PackagesPath.split(os.pathsep) + ] else: cls.PACKAGES_PATH = [] @classmethod - def join(cls: 'MultipleWorkspace', Ws: str, *p: str) -> str: + def join(cls: "MultipleWorkspace", Ws: str, *p: str) -> str: """Rewrite os.path.join. Args: @@ -77,7 +79,9 @@ def join(cls: 'MultipleWorkspace', Ws: str, *p: str) -> str: """ warnings.warn( "MultipleWorkspace is deprecated. Use Edk2Path.GetAbsolutePathOnThisSystemFromEdk2RelativePath().", - DeprecationWarning, stacklevel=2) + DeprecationWarning, + stacklevel=2, + ) Path = os.path.join(Ws, *p) if not os.path.exists(Path): for Pkg in cls.PACKAGES_PATH: @@ -88,7 +92,7 @@ def join(cls: 'MultipleWorkspace', Ws: str, *p: str) -> str: return Path @classmethod - def relpath(cls: 'MultipleWorkspace', Path: str, Ws: str) -> str: + def relpath(cls: "MultipleWorkspace", Path: str, Ws: str) -> str: """Rewrite os.path.relpath. Args: @@ -101,7 +105,9 @@ def relpath(cls: 'MultipleWorkspace', Path: str, Ws: str) -> str: """ warnings.warn( "MultipleWorkspace is deprecated. use Edk2Path.GetEdk2RelativePathOnThisSystemFromAbsolutePath().", - DeprecationWarning, stacklevel=2) + DeprecationWarning, + stacklevel=2, + ) for Pkg in cls.PACKAGES_PATH: if Path.lower().startswith(Pkg.lower()): Path = os.path.relpath(Path, Pkg) @@ -111,7 +117,7 @@ def relpath(cls: 'MultipleWorkspace', Path: str, Ws: str) -> str: return Path @classmethod - def getWs(cls: 'MultipleWorkspace', Ws: str, Path: str) -> str: + def getWs(cls: "MultipleWorkspace", Ws: str, Path: str) -> str: """Get valid workspace for the path. Args: @@ -131,7 +137,7 @@ def getWs(cls: 'MultipleWorkspace', Ws: str, Path: str) -> str: return Ws @classmethod - def handleWsMacro(cls: 'MultipleWorkspace', PathStr: str) -> str: + def handleWsMacro(cls: "MultipleWorkspace", PathStr: str) -> str: """Handle the $(WORKSPACE) tag. If current workspace is an invalid path relative to the tool, replace it. 
@@ -143,10 +149,13 @@ def handleWsMacro(cls: 'MultipleWorkspace', PathStr: str) -> str: Returns: (Str): Path string including the $(WORKSPACE) """ - warnings.warn("MultipleWorkspace is deprecated. Manually replace the $(WORKSPACE). If you believe " - "this functionality needs a direct replacement, file an issue in edk2-pytool-extensions.", - DeprecationWarning, stacklevel=2) - TAB_WORKSPACE = '$(WORKSPACE)' + warnings.warn( + "MultipleWorkspace is deprecated. Manually replace the $(WORKSPACE). If you believe " + "this functionality needs a direct replacement, file an issue in edk2-pytool-extensions.", + DeprecationWarning, + stacklevel=2, + ) + TAB_WORKSPACE = "$(WORKSPACE)" if TAB_WORKSPACE in PathStr: PathList = PathStr.split() if PathList: @@ -161,11 +170,11 @@ def handleWsMacro(cls: 'MultipleWorkspace', PathStr: str) -> str: if os.path.exists(Path): break PathList[i] = str[0:MacroStartPos] + Path - PathStr = ' '.join(PathList) + PathStr = " ".join(PathList) return PathStr @classmethod - def getPkgPath(cls: 'MultipleWorkspace') -> list[str]: + def getPkgPath(cls: "MultipleWorkspace") -> list[str]: """Get all package paths. Args: diff --git a/edk2toolext/environment/plugin_manager.py b/edk2toolext/environment/plugin_manager.py index cf2bcf86..8704bc26 100644 --- a/edk2toolext/environment/plugin_manager.py +++ b/edk2toolext/environment/plugin_manager.py @@ -25,7 +25,8 @@ class PluginDescriptor(object): Name (str): name attribute from descriptor Module (obj): module attribute from descriptor """ - def __init__(self, t:dict) -> None: + + def __init__(self, t: dict) -> None: """Inits the Plugin descriptor with the Descriptor.""" self.descriptor = t self.Obj = None @@ -43,6 +44,7 @@ class PluginManager(object): Attributes: Descriptors (List[PluginDescriptor]): list of plugin descriptors """ + def __init__(self) -> None: """Inits an empty plugin manager.""" self.Descriptors = [] @@ -55,7 +57,7 @@ def SetListOfEnvironmentDescriptors(self, newlist: list) -> int: return [] for a in newlist: b = PluginDescriptor(a) - if (self._load(b) == 0): + if self._load(b) == 0: val = env.GetValue(b.Module.upper()) if val and val == "skip": logging.info(f"{b.Module} turned off by environment variable") @@ -73,7 +75,7 @@ def GetPluginsOfClass(self, classobj: type) -> list[object]: """ temp = [] for a in self.Descriptors: - if (isinstance(a.Obj, classobj)): + if isinstance(a.Obj, classobj): temp.append(a) return temp @@ -81,7 +83,7 @@ def GetAllPlugins(self) -> list[object]: """Return list of all plugins.""" return self.Descriptors - def _load(self, PluginDescriptor: 'PluginDescriptor') -> int: + def _load(self, PluginDescriptor: "PluginDescriptor") -> int: """Load and instantiate the plugin. 
Args: @@ -90,15 +92,15 @@ def _load(self, PluginDescriptor: 'PluginDescriptor') -> int: PluginDescriptor.Obj = None py_file_path = PluginDescriptor.descriptor["module"] + ".py" - py_module_path = os.path.join(os.path.dirname(os.path.abspath( - PluginDescriptor.descriptor["descriptor_file"])), py_file_path) + py_module_path = os.path.join( + os.path.dirname(os.path.abspath(PluginDescriptor.descriptor["descriptor_file"])), py_file_path + ) py_module_name = "UefiBuild_Plugin_" + PluginDescriptor.descriptor["module"] logging.debug("Loading Plugin from %s", py_module_path) try: - spec = importlib.util.spec_from_file_location( - py_module_name, py_module_path) + spec = importlib.util.spec_from_file_location(py_module_name, py_module_path) module = importlib.util.module_from_spec(spec) sys.modules[py_module_name] = module @@ -112,8 +114,7 @@ def _load(self, PluginDescriptor: 'PluginDescriptor') -> int: spec.loader.exec_module(module) except Exception: exc_info = sys.exc_info() - logging.error("Failed to import plugin: %s", - py_module_path, exc_info=exc_info) + logging.error("Failed to import plugin: %s", py_module_path, exc_info=exc_info) return -1 # Instantiate the plugin @@ -122,8 +123,7 @@ def _load(self, PluginDescriptor: 'PluginDescriptor') -> int: PluginDescriptor.Obj = obj() except AttributeError: exc_info = sys.exc_info() - logging.error("Failed to instantiate plugin: %s", - py_module_path, exc_info=exc_info) + logging.error("Failed to instantiate plugin: %s", py_module_path, exc_info=exc_info) return -1 return 0 diff --git a/edk2toolext/environment/plugintypes/ci_build_plugin.py b/edk2toolext/environment/plugintypes/ci_build_plugin.py index c00c80c4..1f2b4b2b 100644 --- a/edk2toolext/environment/plugintypes/ci_build_plugin.py +++ b/edk2toolext/environment/plugintypes/ci_build_plugin.py @@ -21,6 +21,7 @@ class ICiBuildPlugin(object): """Plugin that supports adding tests or operations to the ci environment.""" + def RunBuildPlugin( self, packagename: str, @@ -30,7 +31,7 @@ def RunBuildPlugin( PLM: PluginManager, PLMHelper: HelperFunctions, tc: JunitReportTestCase, - output_stream: TextIO + output_stream: TextIO, ) -> int: """External function of plugin. @@ -79,10 +80,7 @@ def RunsOnTargetList(self) -> list[str]: return ["NO-TARGET"] def WalkDirectoryForExtension( - self, - extensionlist: list[str], - directory: os.PathLike, - ignorelist: list[str] = None + self, extensionlist: list[str], directory: os.PathLike, ignorelist: list[str] = None ) -> list[os.PathLike]: """Walks a file directory recursively for all items ending in certain extension. @@ -129,9 +127,9 @@ def WalkDirectoryForExtension( for Extension in extensionlist_lower: if File.lower().endswith(Extension): ignoreIt = False - if (ignorelist is not None): + if ignorelist is not None: for c in ignorelist_lower: - if (File.lower().startswith(c)): + if File.lower().startswith(c): ignoreIt = True break if not ignoreIt: diff --git a/edk2toolext/environment/plugintypes/uefi_build_plugin.py b/edk2toolext/environment/plugintypes/uefi_build_plugin.py index 0c77d7d8..104afe45 100644 --- a/edk2toolext/environment/plugintypes/uefi_build_plugin.py +++ b/edk2toolext/environment/plugintypes/uefi_build_plugin.py @@ -11,7 +11,7 @@ class IUefiBuildPlugin(object): """Plugin that supports Pre and Post Build Steps.""" - def do_post_build(self, thebuilder: 'UefiBuilder') -> int: # noqa: F821 + def do_post_build(self, thebuilder: "UefiBuilder") -> int: # noqa: F821 """Runs Post Build Plugin Operations. 
Args: @@ -22,7 +22,7 @@ def do_post_build(self, thebuilder: 'UefiBuilder') -> int: # noqa: F821 """ return 0 - def do_pre_build(self, thebuilder: 'UefiBuilder') -> int: # noqa: F821 + def do_pre_build(self, thebuilder: "UefiBuilder") -> int: # noqa: F821 """Runs Pre Build Plugin Operations. Args: diff --git a/edk2toolext/environment/plugintypes/uefi_helper_plugin.py b/edk2toolext/environment/plugintypes/uefi_helper_plugin.py index 87dedf91..0432e816 100644 --- a/edk2toolext/environment/plugintypes/uefi_helper_plugin.py +++ b/edk2toolext/environment/plugintypes/uefi_helper_plugin.py @@ -7,6 +7,7 @@ # SPDX-License-Identifier: BSD-2-Clause-Patent ## """Plugin that supports adding Extension or helper methods to the build environment.""" + import logging from typing import Callable @@ -16,7 +17,7 @@ class IUefiHelperPlugin(object): """The class that should be subclassed when creating a UEFI Helper Plugin.""" - def RegisterHelpers(self, obj: 'HelperFunctions') -> None: + def RegisterHelpers(self, obj: "HelperFunctions") -> None: """Allows a plugin to register its functions. !!! tip @@ -33,6 +34,7 @@ class HelperFunctions(object): Attributes: RegisteredFunctions(dict): registered functions """ + def __init__(self) -> None: """Initializes instance.""" self.RegisteredFunctions = {} @@ -45,8 +47,7 @@ def DebugLogRegisteredFunctions(self) -> None: logging.debug("Logging all Registered Helper Functions:") for name, file in self.RegisteredFunctions.items(): logging.debug(" Function %s registered from file %s", name, file) - logging.debug("Finished logging %d functions", - len(self.RegisteredFunctions)) + logging.debug("Finished logging %d functions", len(self.RegisteredFunctions)) def Register(self, name: str, function: Callable, filepath: str) -> None: """Registers a plugin. @@ -62,9 +63,11 @@ def Register(self, name: str, function: Callable, filepath: str) -> None: !!! tip ```os.path.abspath(__file__)``` """ - if (name in self.RegisteredFunctions.keys()): - raise Exception("Function %s already registered from plugin file %s. Can't register again from %s" % ( - name, self.RegisteredFunctions[name], filepath)) + if name in self.RegisteredFunctions.keys(): + raise Exception( + "Function %s already registered from plugin file %s. Can't register again from %s" + % (name, self.RegisteredFunctions[name], filepath) + ) setattr(self, name, function) self.RegisteredFunctions[name] = filepath @@ -77,7 +80,7 @@ def HasFunction(self, name: str) -> bool: Returns: (bool): if the function is registered or not. """ - if (name in self.RegisteredFunctions.keys()): + if name in self.RegisteredFunctions.keys(): return True else: return False @@ -101,8 +104,7 @@ def LoadFromPluginManager(self, pm: PluginManager) -> int: try: Descriptor.Obj.RegisterHelpers(self) except Exception as e: - logging.warning( - "Unable to register {0}".format(Descriptor.Name)) + logging.warning("Unable to register {0}".format(Descriptor.Name)) logging.error(e) error += 1 return error diff --git a/edk2toolext/environment/repo_resolver.py b/edk2toolext/environment/repo_resolver.py index 966d2e2f..181673bf 100644 --- a/edk2toolext/environment/repo_resolver.py +++ b/edk2toolext/environment/repo_resolver.py @@ -14,6 +14,7 @@ The intent is to keep all git functionality consolidated in this module. Currently edk2_ci_setup.py, edk2_setup.py, and git_dependency.py use this module to perform git operations. 
""" + import logging import os from pathlib import Path @@ -31,11 +32,7 @@ def resolve( - file_system_path: os.PathLike, - dependency: dict, - force: bool=False, - ignore: bool=False, - update_ok: bool=False + file_system_path: os.PathLike, dependency: dict, force: bool = False, ignore: bool = False, update_ok: bool = False ) -> None: """Resolves a particular repo. @@ -88,22 +85,19 @@ def resolve( if not details["Initialized"]: if force: clear_folder(git_path) - logger.warning( - f"Folder {git_path} is not a git repo and is being overwritten!") + logger.warning(f"Folder {git_path} is not a git repo and is being overwritten!") clone_repo(git_path, dependency) checkout(git_path, dependency, True, False) return else: - if (ignore): + if ignore: logger.warning( - f"Folder {git_path} is not a git repo but Force parameter not used. " - "Ignore State Allowed.") + f"Folder {git_path} is not a git repo but Force parameter not used. " "Ignore State Allowed." + ) return else: - logger.critical( - f"Folder {git_path} is not a git repo and it is not empty.") - raise Exception( - f"Folder {git_path} is not a git repo and it is not empty") + logger.critical(f"Folder {git_path} is not a git repo and it is not empty.") + raise Exception(f"Folder {git_path} is not a git repo and it is not empty") ########################################################################## # 4. A git repo exists, but it is dirty. Only re-clone and checkout if # @@ -112,22 +106,20 @@ def resolve( if details["Dirty"]: if force: clear_folder(git_path) - logger.warning( - f"Folder {git_path} is a git repo but is dirty and is being overwritten as requested!") + logger.warning(f"Folder {git_path} is a git repo but is dirty and is being overwritten as requested!") clone_repo(git_path, dependency) checkout(git_path, dependency, True, False) return else: - if (ignore): + if ignore: logger.warning( f"Folder {git_path} is a git repo but is dirty and Force parameter not used. " - "Ignore State Allowed.") + "Ignore State Allowed." + ) return else: - logger.critical( - f"Folder {git_path} is a git repo and is dirty.") - raise Exception( - f"Folder {git_path} is a git repo and is dirty.") + logger.critical(f"Folder {git_path} is a git repo and is dirty.") + raise Exception(f"Folder {git_path} is a git repo and is dirty.") ########################################################################## # 5. The origin of the repo does not match. Only re-clone from the # @@ -138,21 +130,28 @@ def resolve( clear_folder(git_path) logger.warning( f"Folder {git_path} is a git repo but it is at a different repo and is " - "being overwritten as requested!") + "being overwritten as requested!" + ) clone_repo(git_path, dependency) checkout(git_path, dependency, True, False) return else: if ignore: logger.warning( - f"Folder {git_path} is a git repo pointed at a different remote. " - "Can't checkout or sync state") + f"Folder {git_path} is a git repo pointed at a different remote. 
" "Can't checkout or sync state" + ) return else: - logger.critical("The URL of the git Repo {2} in the folder {0} does not match {1}".format( - git_path, dependency["Url"], details["Url"])) - raise Exception("The URL of the git Repo {2} in the folder {0} does not match {1}".format( - git_path, dependency["Url"], details["Url"])) + logger.critical( + "The URL of the git Repo {2} in the folder {0} does not match {1}".format( + git_path, dependency["Url"], details["Url"] + ) + ) + raise Exception( + "The URL of the git Repo {2} in the folder {0} does not match {1}".format( + git_path, dependency["Url"], details["Url"] + ) + ) ########################################################################## # 6. The repo is normal, Perform a regular checkout. # @@ -164,10 +163,10 @@ def resolve( def resolve_all( workspace_path: os.PathLike, dependencies: list[dict], - force: bool=False, - ignore: bool=False, - update_ok: bool=False, - omnicache_dir: str=None + force: bool = False, + ignore: bool = False, + update_ok: bool = False, + omnicache_dir: str = None, ) -> list[str]: """Resolves all repos. @@ -206,8 +205,11 @@ def resolve_all( git_path = os.path.join(workspace_path, dependency["Path"]) details = repo_details(git_path) # print out details - logger.info("{3} = Git Details: Url: {0} Branch {1} Commit {2}".format( - details["Url"], details["Branch"], details["Head"]["HexSha"], dependency["Path"])) + logger.info( + "{3} = Git Details: Url: {0} Branch {1} Commit {2}".format( + details["Url"], details["Branch"], details["Head"]["HexSha"], dependency["Path"] + ) + ) return repos @@ -242,18 +244,18 @@ def repo_details(abs_file_system_path: os.PathLike) -> dict: "Branch": None, "Submodules": [], "Remotes": [], - "Worktrees": [] + "Worktrees": [], } try: with Repo(abs_file_system_path) as repo: # Active Branch - details["Branch"] = 'HEAD' if repo.head.is_detached else repo.active_branch.name + details["Branch"] = "HEAD" if repo.head.is_detached else repo.active_branch.name # Worktrees worktree_list = [] worktrees = repo.git.worktree("list", "--porcelain") - for worktree in filter(lambda worktree: worktree.startswith("worktree"), worktrees.split('\n')): + for worktree in filter(lambda worktree: worktree.startswith("worktree"), worktrees.split("\n")): worktree_list.append(Path(worktree.split(" ")[1])) details["Worktrees"] = worktree_list @@ -288,8 +290,7 @@ def clear_folder(abs_file_system_path: os.PathLike) -> None: Args: abs_file_system_path (os.PathLike): Directory to delete. 
""" - logger.warning("WARNING: Deleting contents of folder {0} to make way for Git repo".format( - abs_file_system_path)) + logger.warning("WARNING: Deleting contents of folder {0} to make way for Git repo".format(abs_file_system_path)) rmtree(abs_file_system_path) @@ -325,17 +326,17 @@ def clone_repo(abs_file_system_path: os.PathLike, DepObj: dict) -> tuple: reference = Path(DepObj["ReferencePath"]) # Used to generate clone params from flags - def _build_params_list(branch: str=None, shallow: str=None, reference: str=None) -> None: + def _build_params_list(branch: str = None, shallow: str = None, reference: str = None) -> None: params = [] if branch: shallow = True - params.append('--branch') + params.append("--branch") params.append(branch) - params.append('--single-branch') + params.append("--single-branch") if shallow: - params.append('--depth=5') + params.append("--depth=5") if reference: - params.append('--reference') + params.append("--reference") params.append(reference.as_posix()) else: params.append("--recurse-submodules") # if we don't have a reference we can just recurse the submodules @@ -361,7 +362,7 @@ def _build_params_list(branch: str=None, shallow: str=None, reference: str=None) # Repo cloned, perform submodule update if necessary if reference: - repo.git.submodule('update', '--init', '--recursive', '--reference', reference) + repo.git.submodule("update", "--init", "--recursive", "--reference", reference) repo.close() return (dest, True) @@ -369,9 +370,9 @@ def _build_params_list(branch: str=None, shallow: str=None, reference: str=None) def checkout( abs_file_system_path: str, dep: dict, - update_ok: bool=False, - ignore_dep_state_mismatch: bool=False, - force: bool=False + update_ok: bool = False, + ignore_dep_state_mismatch: bool = False, + force: bool = False, ) -> None: """Checks out a commit or branch. @@ -407,16 +408,14 @@ def checkout( else: head = details["Head"] if commit in [head["HexSha"], head["HexShaShort"]]: - logger.debug( - f"Dependency {dep['Path']} state ok without update") + logger.debug(f"Dependency {dep['Path']} state ok without update") elif ignore_dep_state_mismatch: logger.warning( - f"Dependency {dep['Path']} is not in sync with requested commit. Ignore state allowed") + f"Dependency {dep['Path']} is not in sync with requested commit. Ignore state allowed" + ) else: - logger.critical( - f"Dependency {dep['Path']} is not in sync with requested commit. Fail.") - raise Exception( - f"Dependency {dep['Path']} is not in sync with requested commit. Fail.") + logger.critical(f"Dependency {dep['Path']} is not in sync with requested commit. Fail.") + raise Exception(f"Dependency {dep['Path']} is not in sync with requested commit. Fail.") return elif "Branch" in dep: @@ -439,14 +438,15 @@ def checkout( repo.git.submodule("update", "--init", "--recursive") else: if details["Branch"] == dep["Branch"]: - logger.debug( - f"Dependency {dep['Path']} state ok without update") + logger.debug(f"Dependency {dep['Path']} state ok without update") elif ignore_dep_state_mismatch: logger.warning( - f"Dependency {dep['Path']} is not in sync with requested branch. Ignore state allowed") + f"Dependency {dep['Path']} is not in sync with requested branch. Ignore state allowed" + ) else: error = "Dependency {0} is not in sync with requested branch. Expected: {1}. 
Got {2} Fail.".format( - dep["Path"], dep["Branch"], details["Branch"]) + dep["Path"], dep["Branch"], details["Branch"] + ) logger.critical(error) raise Exception(error) return @@ -455,7 +455,7 @@ def checkout( raise Exception("Branch or Commit must be specified for {0}".format(dep["Path"])) -def clean(abs_file_system_path: os.PathLike, ignore_files: Optional[list]=[]) -> None: +def clean(abs_file_system_path: os.PathLike, ignore_files: Optional[list] = []) -> None: """Resets and cleans the repo. Args: @@ -500,9 +500,7 @@ def submodule_clean(abs_file_system_path: os.PathLike, submodule: dict) -> None: def submodule_resolve( - abs_file_system_path: os.PathLike, - submodule: dict, - omnicache_path: Optional[os.PathLike]=None + abs_file_system_path: os.PathLike, submodule: dict, omnicache_path: Optional[os.PathLike] = None ) -> None: """Resolves a submodule to the specified branch and commit in .gitmodules. @@ -519,19 +517,18 @@ def submodule_resolve( (NoSuchPathError): The path does not exist """ with Repo(abs_file_system_path) as repo: + logger.debug(f"Syncing {submodule.path}") + repo.git.submodule("sync", "--", submodule.path) - logger.debug(f'Syncing {submodule.path}') - repo.git.submodule('sync', '--', submodule.path) - - params = ['update', '--init'] + params = ["update", "--init"] if submodule.recursive: params.append("--recursive") if omnicache_path: - params.append('--reference') + params.append("--reference") params.append(omnicache_path) params.append(submodule.path) - logger.debug(f'Updating {submodule.path}') + logger.debug(f"Updating {submodule.path}") repo.git.submodule(*params) with Repo(Path(abs_file_system_path, submodule.path)) as _: - logger.debug(f'{submodule.path} is valid and resolved.') + logger.debug(f"{submodule.path} is valid and resolved.") diff --git a/edk2toolext/environment/reporttypes/base_report.py b/edk2toolext/environment/reporttypes/base_report.py index e644d660..8097df08 100644 --- a/edk2toolext/environment/reporttypes/base_report.py +++ b/edk2toolext/environment/reporttypes/base_report.py @@ -6,6 +6,7 @@ # SPDX-License-Identifier: BSD-2-Clause-Patent ## """An interface to create custom reports with.""" + from argparse import ArgumentParser, Namespace from typing import Tuple @@ -14,6 +15,7 @@ class Report: """The interface to create custom reports.""" + def report_info(self) -> Tuple[str, str]: """Returns the report standard information. diff --git a/edk2toolext/environment/reporttypes/component_report.py b/edk2toolext/environment/reporttypes/component_report.py index 5d6dbc52..153e90c3 100644 --- a/edk2toolext/environment/reporttypes/component_report.py +++ b/edk2toolext/environment/reporttypes/component_report.py @@ -6,6 +6,7 @@ # SPDX-License-Identifier: BSD-2-Clause-Patent ## """A report to print information about a component that could be compiled.""" + import sys from argparse import ArgumentParser, Namespace from pathlib import Path, PurePath @@ -17,6 +18,7 @@ class ComponentDumpReport: """A report to print information about a component that could be compiled.""" + def report_info(self) -> Tuple[str, str]: """Returns the report standard information. @@ -28,23 +30,41 @@ def report_info(self) -> Tuple[str, str]: def add_cli_options(self, parserobj: ArgumentParser) -> None: """Configure command line arguments for this report.""" parserobj.add_argument(dest="component", action="store", help="The component to query.") - parserobj.add_argument("-o", "--out", dest="file", default=sys.stdout, help="The file, to write the report to." 
- " Defaults to stdout.") - parserobj.add_argument("-d", "--depth", dest="depth", type=int, default=999, help="The depth to recurse when " - "printing libraries used.") - parserobj.add_argument("-f", "--flatten", dest="flatten", action="store_true", - help="Flatten the list of libraries used in the component.") - parserobj.add_argument("-s", "--sort", dest="sort", action="store_true", - help="Sort the libraries listed in alphabetical order.") - parserobj.add_argument("-e", "--env", dest="env_id", action="store", - help="The environment id to generate the report for.") + parserobj.add_argument( + "-o", + "--out", + dest="file", + default=sys.stdout, + help="The file, to write the report to." " Defaults to stdout.", + ) + parserobj.add_argument( + "-d", + "--depth", + dest="depth", + type=int, + default=999, + help="The depth to recurse when " "printing libraries used.", + ) + parserobj.add_argument( + "-f", + "--flatten", + dest="flatten", + action="store_true", + help="Flatten the list of libraries used in the component.", + ) + parserobj.add_argument( + "-s", "--sort", dest="sort", action="store_true", help="Sort the libraries listed in alphabetical order." + ) + parserobj.add_argument( + "-e", "--env", dest="env_id", action="store", help="The environment id to generate the report for." + ) def run_report(self, db: Edk2DB, args: Namespace) -> None: """Runs the report.""" if isinstance(args.file, str): if Path(args.file).exists(): Path(args.file).unlink() - self.file = open(args.file, 'w+') + self.file = open(args.file, "w+") else: self.file = args.file @@ -56,11 +76,10 @@ def run_report(self, db: Edk2DB, args: Namespace) -> None: with db.session() as session: self.env_id = args.env_id or session.query(Environment).order_by(desc(Environment.date)).first().id component = ( - session - .query(InstancedInf) - .filter_by(env=self.env_id, cls=None) - .filter(InstancedInf.path.like(f'%{self.component}%')) - .one() + session.query(InstancedInf) + .filter_by(env=self.env_id, cls=None) + .filter(InstancedInf.path.like(f"%{self.component}%")) + .one() ) if args.flatten: @@ -95,17 +114,16 @@ def print_libraries_recursive(self, library: InstancedInf, visited: list, sessio if library in visited: continue visited.append(library) - self.print_libraries_recursive( library, visited.copy(), session, depth=depth+1) + self.print_libraries_recursive(library, visited.copy(), session, depth=depth + 1) return def print_libraries_flat(self, component: str, session: Session) -> None: """Prints the libraries used in a provided component.""" libraries = ( - session - .query(InstancedInf) - .filter_by(env = self.env_id, component = component) - .filter(InstancedInf.cls.isnot(None)) - .all() + session.query(InstancedInf) + .filter_by(env=self.env_id, component=component) + .filter(InstancedInf.cls.isnot(None)) + .all() ) length = max(len(library.cls) for library in libraries) @@ -113,4 +131,4 @@ def print_libraries_flat(self, component: str, session: Session) -> None: libraries = sorted(libraries, key=lambda x: x.cls) for library in libraries: - print(f'- {library.cls:{length}}| {library.path}', file=self.file) + print(f"- {library.cls:{length}}| {library.path}", file=self.file) diff --git a/edk2toolext/environment/reporttypes/coverage_report.py b/edk2toolext/environment/reporttypes/coverage_report.py index 32689c05..04728d6d 100644 --- a/edk2toolext/environment/reporttypes/coverage_report.py +++ b/edk2toolext/environment/reporttypes/coverage_report.py @@ -6,6 +6,7 @@ # SPDX-License-Identifier: BSD-2-Clause-Patent ## 
"""A report that re-organizes a cobertura.xml by INF.""" + import fnmatch import logging import os @@ -26,14 +27,17 @@ class SplitCommaAction(Action): """A Custom action similar to append, but will split the input string on commas first.""" + def __call__( - self, parser: ArgumentParser, - namespace: Namespace, - values: str | Sequence[str], - option_string: Optional[str] = None, - ) -> None: - """Command entry.""" - setattr(namespace, self.dest, getattr(namespace, self.dest, []) + values.split(',')) + self, + parser: ArgumentParser, + namespace: Namespace, + values: str | Sequence[str], + option_string: Optional[str] = None, + ) -> None: + """Command entry.""" + setattr(namespace, self.dest, getattr(namespace, self.dest, []) + values.split(",")) + class CoverageReport(Report): """A report that re-organizes a cobertura.xml by INF. @@ -42,48 +46,99 @@ class CoverageReport(Report): files in the specified edk2 packages. By-platform will only include coverage data for files used to build the specified platform dsc. """ + def report_info(self) -> Tuple[str, str]: """Returns the report standard information. Returns: (str, str): A tuple of (name, description) """ - return ("coverage", "Reorganizes an xml coverage report by INF rather than executable. Filters results based " - "on --by-package or --by-platform flags.") + return ( + "coverage", + "Reorganizes an xml coverage report by INF rather than executable. Filters results based " + "on --by-package or --by-platform flags.", + ) def add_cli_options(self, parserobj: ArgumentParser) -> None: """Configure command line arguments for this report.""" # Group 2 - Calculate coverage only on files used by a specific platform group = parserobj.add_argument_group("Coverage by platform options") - group.add_argument("--by-platform", action="store_true", dest="by_platform", default=False, - help="Filters test coverage to all files used to build the specified platform package.") - group.add_argument("-d", "--dsc", "--DSC", dest="dsc", - help="Edk2 relative path the ACTIVE_PLATFORM DSC file.") + group.add_argument( + "--by-platform", + action="store_true", + dest="by_platform", + default=False, + help="Filters test coverage to all files used to build the specified platform package.", + ) + group.add_argument("-d", "--dsc", "--DSC", dest="dsc", help="Edk2 relative path the ACTIVE_PLATFORM DSC file.") # Group 3 - Run either by-platform or by-package with a FULL report group = parserobj.add_argument_group("Full Report") - group.add_argument("--full", action="store_true", dest="full", default=False, - help="Include all files in the report, not just those with coverage data. Requires pygount.") - group.add_argument("-ws", "--workspace", "--Workspace", "--WORKSPACE", dest="workspace", - help="The Workspace root associated with the xml argument.", default=".") + group.add_argument( + "--full", + action="store_true", + dest="full", + default=False, + help="Include all files in the report, not just those with coverage data. 
Requires pygount.", + ) + group.add_argument( + "-ws", + "--workspace", + "--Workspace", + "--WORKSPACE", + dest="workspace", + help="The Workspace root associated with the xml argument.", + default=".", + ) # Other args parserobj.add_argument(dest="xml", action="store", help="The path to the XML file parse.") - parserobj.add_argument("-o", "--output", "--Output", "--OUTPUT", dest="output", default="Coverage.xml", - help="The path to the output XML file.", action="store") - parserobj.add_argument("-e", "--exclude", "--Exclude", "--EXCLUDE", dest="exclude", - action=SplitCommaAction, default=[], - help="Package path relative paths or file (.txt). Globbing is supported. Can be " - "specified multiple times") - parserobj.add_argument("-p", "--package", "--Package", "--PACKAGE", dest="package_list", - action=SplitCommaAction, default=[], - help="The package to include in the report. Can be specified multiple times.") - parserobj.add_argument("--flatten", action="store_true", dest="flatten", default=False, - help="Flatten the report to only source files. This removes duplicate files that are in " - "multiple INFs.") + parserobj.add_argument( + "-o", + "--output", + "--Output", + "--OUTPUT", + dest="output", + default="Coverage.xml", + help="The path to the output XML file.", + action="store", + ) + parserobj.add_argument( + "-e", + "--exclude", + "--Exclude", + "--EXCLUDE", + dest="exclude", + action=SplitCommaAction, + default=[], + help="Package path relative paths or file (.txt). Globbing is supported. Can be " + "specified multiple times", + ) + parserobj.add_argument( + "-p", + "--package", + "--Package", + "--PACKAGE", + dest="package_list", + action=SplitCommaAction, + default=[], + help="The package to include in the report. Can be specified multiple times.", + ) + parserobj.add_argument( + "--flatten", + action="store_true", + dest="flatten", + default=False, + help="Flatten the report to only source files. 
This removes duplicate files that are in " "multiple INFs.", + ) group = parserobj.add_argument_group("Deprecated Options") - group.add_argument("--by-package", action="store_true", dest="by_package", default=False, - help="Filters test coverage to only files in the specified package(s)") + group.add_argument( + "--by-package", + action="store_true", + dest="by_package", + default=False, + help="Filters test coverage to only files in the specified package(s)", + ) def run_report(self, db: Edk2DB, args: Namespace) -> None: """Generate the Coverage report.""" @@ -125,11 +180,10 @@ def run_by_platform(self, session: Session, package_list: list) -> None: logging.info(f"ACTIVE_PLATFORM requested: {dsc}") result = ( - session - .query(Environment) - .filter(Environment.values.any(key="ACTIVE_PLATFORM", value=dsc)) - .order_by(Environment.date.desc()) - .first() + session.query(Environment) + .filter(Environment.values.any(key="ACTIVE_PLATFORM", value=dsc)) + .order_by(Environment.date.desc()) + .first() ) if result is None: @@ -143,16 +197,15 @@ def run_by_platform(self, session: Session, package_list: list) -> None: # Build inf / source association dictionary inf_alias = aliased(InstancedInf) inf_list = ( - session - .query(inf_alias) - .join(Fv.infs) - .join(inf_alias, InstancedInf.path == inf_alias.component) - .filter(Fv.env == env_id) - .filter(inf_alias.env == env_id) - .filter(InstancedInf.env == env_id) - .group_by(inf_alias.path) - .distinct(inf_alias.path) - .all() + session.query(inf_alias) + .join(Fv.infs) + .join(inf_alias, InstancedInf.path == inf_alias.component) + .filter(Fv.env == env_id) + .filter(inf_alias.env == env_id) + .filter(InstancedInf.env == env_id) + .group_by(inf_alias.path) + .distinct(inf_alias.path) + .all() ) data = [ (inf.path, source.path) for inf in inf_list for source in inf.sources if source.path.lower().endswith(".c") ] @@ -174,20 +227,19 @@ def run_by_package(self, session: Session, package_list: list) -> bool: (bool): True if the report was successful, False otherwise. """ # Get env_id - env_id, = session.query(Environment.id).order_by(Environment.date.desc()).first() + (env_id,) = session.query(Environment.id).order_by(Environment.date.desc()).first() # Build source / coverage association dictionary coverage_files = self.build_source_coverage_dictionary(self.args.xml, package_list) # Build inf / source association dictionary data = ( - session - .query(Inf.path, Source.path) - .join(Inf.sources) - .filter(func.lower(Source.path).endswith('.c')) - .group_by(Inf.path, Source.path) - .distinct(Inf.path, Source.path) - .all() + session.query(Inf.path, Source.path) + .join(Inf.sources) + .filter(func.lower(Source.path).endswith(".c")) + .group_by(Inf.path, Source.path) + .distinct(Inf.path, Source.path) + .all() ) package_files = self.build_inf_source_dictionary(data, package_list) @@ -205,7 +257,7 @@ def build_source_coverage_dictionary(self, xml_path: str, package_list: list) -> dict[str, ET.Element]: A dictionary of source files and their coverage data.
""" tree = ET.parse(xml_path) - regex = re.compile('|'.join(map(re.escape, package_list))) + regex = re.compile("|".join(map(re.escape, package_list))) file_dict = {} for file in tree.iter("class"): # Add the file results if they do not exist @@ -216,7 +268,7 @@ def build_source_coverage_dictionary(self, xml_path: str, package_list: list) -> if not match: continue - path = Path(filename[match.start():]).as_posix() + path = Path(filename[match.start() :]).as_posix() if path not in file_dict: file.attrib["filename"] = path file.attrib["name"] = "\\".join(Path(path).parts) @@ -290,7 +342,7 @@ def build_report(self, session: Session, env_id: int, source_coverage_dict: dict exclude_file = False for pattern in self.args.exclude: if fnmatch.fnmatch(source, pattern): - logging.debug(f'{source} excluded due to {pattern}') + logging.debug(f"{source} excluded due to {pattern}") exclude_file = True break if exclude_file: @@ -310,12 +362,13 @@ def build_report(self, session: Session, env_id: int, source_coverage_dict: dict xml_string = ET.tostring(root, "utf-8") dom = minidom.parseString(xml_string) - dt = minidom.getDOMImplementation('').createDocumentType( - 'coverage', None, "http://cobertura.sourceforge.net/xml/coverage-04.dtd") + dt = minidom.getDOMImplementation("").createDocumentType( + "coverage", None, "http://cobertura.sourceforge.net/xml/coverage-04.dtd" + ) dom.insertBefore(dt, dom.documentElement) p = Path(self.args.output) p.unlink(missing_ok=True) - with open(p, 'wb') as f: + with open(p, "wb") as f: f.write(dom.toprettyxml(encoding="utf-8", indent=" ")) logging.info(f"Coverage xml data written to {p}") @@ -333,6 +386,7 @@ def update_excluded_files(self) -> None: def create_source_xml(self, source_path: str, edk2path: Edk2Path) -> Optional[ET.Element]: """Parses the source file and creates a coverage 'lines' xml element for it.""" from pygount import SourceAnalysis + full_path = edk2path.GetAbsolutePathOnThisSystemFromEdk2RelativePath(source_path, log_errors=False) if full_path is None: logging.warning(f"Could not find {source_path} in the workspace. 
Skipping...") @@ -352,7 +406,7 @@ def flatten_report(self, root: ET.Element, edk2path: Edk2Path) -> ET.Element: class_dict = {} for class_element in root.iter("class"): - filename = class_element.get('filename') + filename = class_element.get("filename") filename = "\\".join(Path(filename).parts) class_element.set("name", filename) class_dict[filename] = class_element @@ -360,10 +414,10 @@ def flatten_report(self, root: ET.Element, edk2path: Edk2Path) -> ET.Element: for class_element in class_dict.values(): class_list.append(class_element) - package_element = ET.Element("package", name = "All Source") + package_element = ET.Element("package", name="All Source") package_element.append(class_list) - packages = root.find('.//packages') + packages = root.find(".//packages") packages.clear() packages.append(package_element) return root diff --git a/edk2toolext/environment/reporttypes/usage_report.py b/edk2toolext/environment/reporttypes/usage_report.py index 6d1008e7..29d0cb50 100644 --- a/edk2toolext/environment/reporttypes/usage_report.py +++ b/edk2toolext/environment/reporttypes/usage_report.py @@ -8,6 +8,7 @@ # SPDX-License-Identifier: BSD-2-Clause-Patent ## """A report that generates an html report about which repositories INFs originate from.""" + import io import logging import pathlib @@ -22,28 +23,57 @@ from edk2toolext.environment.reporttypes import templates from edk2toolext.environment.reporttypes.base_report import Report -COLORS = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b', '#e377c2', '#7f7f7f','#bcbd22', '#17becf', - '#aec7e8', '#ffbb78', '#98df8a', '#ff9896', '#c5b0d5',] +COLORS = [ + "#1f77b4", + "#ff7f0e", + "#2ca02c", + "#d62728", + "#9467bd", + "#8c564b", + "#e377c2", + "#7f7f7f", + "#bcbd22", + "#17becf", + "#aec7e8", + "#ffbb78", + "#98df8a", + "#ff9896", + "#c5b0d5", +] class UsageReport(Report): """A report that generates a INF usage report for a specific build.""" + def report_info(self) -> Tuple[str, str]: """Returns the report standard information. Returns: (str, str): A tuple of (name, description) """ - return ("usage", "Generates a report of INF usage for a specific build. For accurate line counts, run " - "stuart_parse with the -S flag.") + return ( + "usage", + "Generates a report of INF usage for a specific build. For accurate line counts, run " + "stuart_parse with the -S flag.", + ) def add_cli_options(self, parserobj: ArgumentParser) -> None: """Configure command line arguments for this report.""" - parserobj.add_argument("-e", "-env", dest="env_id", action="store", - help = "The environment id to generate the report for. Defaults to the latest " - "environment.") - parserobj.add_argument("-o", "-output", dest="output", action="store", default=None, - help = "The output file to write the report to. Defaults to 'usage_report.html'.") + parserobj.add_argument( + "-e", + "-env", + dest="env_id", + action="store", + help="The environment id to generate the report for. Defaults to the latest " "environment.", + ) + parserobj.add_argument( + "-o", + "-output", + dest="output", + action="store", + default=None, + help="The output file to write the report to. 
Defaults to 'usage_report.html'.", + ) def run_report(self, db: Edk2DB, args: Namespace) -> None: """Generate the Usage report.""" @@ -74,7 +104,7 @@ def run_report(self, db: Edk2DB, args: Namespace) -> None: report_data = { "version": version, "env": env_vars, - "inf_list": inf_list, + "inf_list": inf_list, } # Build the pie charts and save them in report_data @@ -84,7 +114,7 @@ def run_report(self, db: Edk2DB, args: Namespace) -> None: values = [len(set(value)) for value in value.values()] else: values = [value[key] for key in value.keys()] - fig = go.Figure(go.Pie(labels=labels, values=values, hole = .3, title=title, titleposition="top center")) + fig = go.Figure(go.Pie(labels=labels, values=values, hole=0.3, title=title, titleposition="top center")) fig.update_traces(marker=dict(colors=[color_map[key] for key in value.keys()])) # Write the html html = io.StringIO() @@ -101,7 +131,7 @@ def run_report(self, db: Edk2DB, args: Namespace) -> None: path_out += ".html" pathlib.Path(path_out).parent.mkdir(exist_ok=True, parents=True) - with open(path_out, 'w') as f: + with open(path_out, "w") as f: f.write(html_output) logging.info(f"Report written to {path_out}.") @@ -130,14 +160,14 @@ def generate_data(self, env_id: int, session: Session) -> Tuple[dict, set]: inf_alias = aliased(InstancedInf) inf_list = ( session.query(inf_alias) - .join(Fv.infs) - .join(inf_alias, InstancedInf.path == inf_alias.component) - .filter(Fv.env == env_id) - .filter(inf_alias.env == env_id) - .filter(InstancedInf.env == env_id) - .group_by(inf_alias.path) - .distinct(inf_alias.path) - .all() + .join(Fv.infs) + .join(inf_alias, InstancedInf.path == inf_alias.component) + .filter(Fv.env == env_id) + .filter(inf_alias.env == env_id) + .filter(InstancedInf.env == env_id) + .group_by(inf_alias.path) + .distinct(inf_alias.path) + .all() ) final_data = [] @@ -149,14 +179,16 @@ def generate_data(self, env_id: int, session: Session) -> Tuple[dict, set]: else: package_name = inf.package.name - final_data.append(( - inf.repository.name, - package_name, - inf.path, - source.path, - source.code_lines, - inf.path == inf.component, - )) + final_data.append( + ( + inf.repository.name, + package_name, + inf.path, + source.path, + source.code_lines, + inf.path == inf.component, + ) + ) for repo, package, inf, _src, line_count, is_component in final_data: key = (repo, package, inf) diff --git a/edk2toolext/environment/rust.py b/edk2toolext/environment/rust.py index 48ddfc76..94bf5c32 100644 --- a/edk2toolext/environment/rust.py +++ b/edk2toolext/environment/rust.py @@ -124,9 +124,9 @@ def _verify_cmd(tool: RustToolInfo, custom_filters: List[CustomToolFilter]) -> i # Give precedence to custom filters as they may be more specialized for custom_filter in custom_filters: - if ( - (custom_filter.error_only and ret != 0) or not custom_filter.error_only - ) and custom_filter.filter_fn(tool, cmd_output.getvalue()): + if ((custom_filter.error_only and ret != 0) or not custom_filter.error_only) and custom_filter.filter_fn( + tool, cmd_output.getvalue() + ): logging.error(custom_filter.error_msg) return 4 @@ -266,9 +266,7 @@ def verify_workspace_rust_toolchain_is_installed() -> RustToolChainInfo: installed_toolchains = installed_toolchains.getvalue().splitlines() return RustToolChainInfo( - error=not any( - toolchain_version in toolchain for toolchain in installed_toolchains - ), + error=not any(toolchain_version in toolchain for toolchain in installed_toolchains), toolchain=toolchain_version, ) @@ -309,9 +307,7 @@ def run( Returns: int: Then 
number of errors discovered. 0 indicates success. """ - generic_rust_install_instructions = ( - "Visit https://rustup.rs/ to install Rust and cargo." - ) + generic_rust_install_instructions = "Visit https://rustup.rs/ to install Rust and cargo." tool_ver = _get_required_tool_versions() tools = { @@ -384,14 +380,8 @@ def run( } tools.update(custom_tool_checks) - excluded_tools_in_shell = shell_environment.GetEnvironment().get_shell_var( - "RUST_ENV_CHECK_TOOL_EXCLUSIONS" - ) - excluded_tools = ( - [t.strip() for t in excluded_tools_in_shell.split(",")] - if excluded_tools_in_shell - else [] - ) + excluded_tools_in_shell = shell_environment.GetEnvironment().get_shell_var("RUST_ENV_CHECK_TOOL_EXCLUSIONS") + excluded_tools = [t.strip() for t in excluded_tools_in_shell.split(",")] if excluded_tools_in_shell else [] errors = 0 for tool_name, tool_info in tools.items(): diff --git a/edk2toolext/environment/self_describing_environment.py b/edk2toolext/environment/self_describing_environment.py index 8e0a1d3f..9a8f6ac0 100644 --- a/edk2toolext/environment/self_describing_environment.py +++ b/edk2toolext/environment/self_describing_environment.py @@ -11,6 +11,7 @@ Scans the environment for files that describe the source and dependencies and then acts upon those files. """ + import logging import os import time @@ -31,11 +32,9 @@ class self_describing_environment(object): Scans the environment for files that describe the source and dependencies and then acts upon those files. """ + def __init__( - self, - workspace_path: str, - scopes: Optional[tuple]=None, - skipped_dirs: Optional[tuple]=None + self, workspace_path: str, scopes: Optional[tuple] = None, skipped_dirs: Optional[tuple] = None ) -> None: """Inits an empty self describing environment.""" logging.debug("--- self_describing_environment.__init__()") @@ -60,9 +59,11 @@ def __init__( details = repo_resolver.repo_details(self.workspace) if details["Valid"]: for worktree_path in details["Worktrees"]: - if (worktree_path.is_dir() - and Path(self.workspace) != worktree_path - and worktree_path not in skipped_dirs): + if ( + worktree_path.is_dir() + and Path(self.workspace) != worktree_path + and worktree_path not in skipped_dirs + ): self.skipped_dirs += (worktree_path,) # Validate that all scopes are unique. @@ -83,32 +84,27 @@ def _gather_env_files(self, ext_strings: list[str], base_path: str) -> dict: matches = {} for root, dirs, files in os.walk(base_path, topdown=True): # Check to see whether any of these directories should be skipped.. - dirs[:] = [d - for d - in dirs - if Path(root, d) not in self.skipped_dirs - and Path(root, d).name != '.git'] + dirs[:] = [d for d in dirs if Path(root, d) not in self.skipped_dirs and Path(root, d).name != ".git"] # Check for any files that match the extensions we're looking for. 
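# Illustrative matches for the suffix checks below (filenames are hypothetical,
# not from the patch): with ext_strings ("path_env", "ext_dep", "plug_in"),
# both the .json and .yaml spellings of each descriptor type are collected.
#
#     "nasm_ext_dep.json"      -> matches["ext_dep"]
#     "my_tools_path_env.yaml" -> matches["path_env"]
#     "compiler_plug_in.json"  -> matches["plug_in"]
#     "readme.md"              -> ignored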
for file in files: for search_file in search_files: if file.lower().endswith(search_file + ".json") or file.lower().endswith(search_file + ".yaml"): if search_file in matches: - matches[search_file].append( - os.path.join(root, file)) + matches[search_file].append(os.path.join(root, file)) else: matches[search_file] = [os.path.join(root, file)] return matches - def load_workspace(self) ->'self_describing_environment': + def load_workspace(self) -> "self_describing_environment": """Loads the workspace.""" logging.debug("--- self_describing_environment.load_workspace()") logging.debug("Loading workspace: %s" % self.workspace) - logging.debug(" Including scopes: %s" % ', '.join(self.scopes)) + logging.debug(" Including scopes: %s" % ", ".join(self.scopes)) # First, we need to get all of the files that describe our environment. - env_files = self._gather_env_files(('path_env', 'ext_dep', 'plug_in'), self.workspace) + env_files = self._gather_env_files(("path_env", "ext_dep", "plug_in"), self.workspace) # Next, get a list of all our scopes all_scopes_lower = [x.lower() for x in self.scopes] @@ -120,22 +116,22 @@ def load_workspace(self) ->'self_describing_environment': all_descriptors = list() # helper function to get all the descriptors of a type and cast them - def _get_all_descriptors_of_type(key: str, class_type: type) ->tuple: + def _get_all_descriptors_of_type(key: str, class_type: type) -> tuple: if key not in env_files: return tuple() return tuple(class_type(desc_file) for desc_file in env_files[key]) # Collect all the descriptors of each type - all_descriptors.extend(_get_all_descriptors_of_type('path_env', EDF.PathEnvDescriptor)) - all_descriptors.extend(_get_all_descriptors_of_type('ext_dep', EDF.ExternDepDescriptor)) - all_descriptors.extend(_get_all_descriptors_of_type('plug_in', EDF.PluginDescriptor)) + all_descriptors.extend(_get_all_descriptors_of_type("path_env", EDF.PathEnvDescriptor)) + all_descriptors.extend(_get_all_descriptors_of_type("ext_dep", EDF.ExternDepDescriptor)) + all_descriptors.extend(_get_all_descriptors_of_type("plug_in", EDF.PluginDescriptor)) # Get the properly scoped descriptors by checking if the scope is in the list of all the scopes - scoped_desc_gen = [x for x in all_descriptors if x.descriptor_contents['scope'].lower() in all_scopes_lower] + scoped_desc_gen = [x for x in all_descriptors if x.descriptor_contents["scope"].lower() in all_scopes_lower] scoped_descriptors = list(scoped_desc_gen) # Check that each found item has a unique ID, that's an error if it isn't - allids_gen = [x.descriptor_contents['id'].lower() for x in scoped_descriptors if 'id' in x.descriptor_contents] + allids_gen = [x.descriptor_contents["id"].lower() for x in scoped_descriptors if "id" in x.descriptor_contents] all_ids = list(allids_gen) all_unique_ids = set(all_ids) if len(all_ids) != len(all_unique_ids): @@ -149,8 +145,9 @@ def _get_all_descriptors_of_type(key: str, class_type: type) ->tuple: continue # get the descriptors desc_of_id = [ - x for x in scoped_descriptors - if 'id' in x.descriptor_contents and x.descriptor_contents['id'].lower() == desc_id + x + for x in scoped_descriptors + if "id" in x.descriptor_contents and x.descriptor_contents["id"].lower() == desc_id ] paths_of_desc_of_id = [x.file_path for x in desc_of_id] invalid_desc_paths = f"{os.pathsep} ".join(paths_of_desc_of_id) @@ -176,8 +173,8 @@ def _get_all_descriptors_of_type(key: str, class_type: type) ->tuple: final_descriptors = [] for desc in scoped_descriptors: desc_file = desc.file_path - if 'id' in 
desc.descriptor_contents: - desc_id = desc.descriptor_contents['id'].lower() + if "id" in desc.descriptor_contents: + desc_id = desc.descriptor_contents["id"].lower() if desc_id in overriden_ids: override = active_overrides[desc_id] desc_name = f"{desc_file}:{desc_id}" @@ -185,14 +182,15 @@ def _get_all_descriptors_of_type(key: str, class_type: type) ->tuple: logging.debug(f"Skipping descriptor {desc_name} as it is being overridden by {override_name}.") continue # add them to the final list - desc_scope = desc.descriptor_contents['scope'] + desc_scope = desc.descriptor_contents["scope"] logging.debug(f"Adding descriptor {desc_file} to the environment with scope {desc_scope}") final_descriptors.append(desc) # Finally, sort them back in the right categories self.paths = list([x.descriptor_contents for x in final_descriptors if isinstance(x, EDF.PathEnvDescriptor)]) self.extdeps = list( - [x.descriptor_contents for x in final_descriptors if isinstance(x, EDF.ExternDepDescriptor)]) + [x.descriptor_contents for x in final_descriptors if isinstance(x, EDF.ExternDepDescriptor)] + ) self.plugins = list([x.descriptor_contents for x in final_descriptors if isinstance(x, EDF.PluginDescriptor)]) return self @@ -207,10 +205,7 @@ def _get_paths(self) -> EDF.PathEnv: yield EDF.PathEnv(path_descriptor) # This is a generator to reduce code duplication when wrapping the extdep objects. - def _get_extdeps( - self, - env_object: shell_environment.ShellEnvironment - ) -> external_dependency.ExternalDependency: + def _get_extdeps(self, env_object: shell_environment.ShellEnvironment) -> external_dependency.ExternalDependency: if self.extdeps is not None: global_cache_path = env_object.get_shell_var("STUART_EXTDEP_CACHE_PATH") # Apply in reverse order to get the expected hierarchy. @@ -223,23 +218,19 @@ def _get_extdeps( yield extdep def _apply_descriptor_object_to_env( - self, - desc_object: external_dependency.ExtDepFactory, - env_object: shell_environment.ShellEnvironment + self, desc_object: external_dependency.ExtDepFactory, env_object: shell_environment.ShellEnvironment ) -> None: # Walk through each possible environment modification # and apply to the environment as required. 
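# For illustration only (hypothetical values, not part of the patch): a
# descriptor carrying flags ["set_path", "set_build_var"] takes the first and
# third branches below, so its published path lands both on PATH and in the
# build variables.
#
#     desc_object.flags          = ["set_path", "set_build_var"]
#     desc_object.var_name       = "EXAMPLE_TOOL_PATH"
#     desc_object.published_path = "/ws/example-tool_extdep"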
- if 'set_path' in desc_object.flags: + if "set_path" in desc_object.flags: env_object.insert_path(desc_object.published_path) - if 'set_pypath' in desc_object.flags: + if "set_pypath" in desc_object.flags: env_object.insert_pypath(desc_object.published_path) - if 'set_build_var' in desc_object.flags: - env_object.set_build_var( - desc_object.var_name, desc_object.published_path) - if 'set_shell_var' in desc_object.flags: - env_object.set_shell_var( - desc_object.var_name, desc_object.published_path) + if "set_build_var" in desc_object.flags: + env_object.set_build_var(desc_object.var_name, desc_object.published_path) + if "set_shell_var" in desc_object.flags: + env_object.set_shell_var(desc_object.var_name, desc_object.published_path) def update_simple_paths(self, env_object: shell_environment.ShellEnvironment) -> None: """Updates simple paths.""" @@ -268,15 +259,15 @@ def update_extdeps(self, env_object: shell_environment.ShellEnvironment) -> tupl logging.debug("--- self_describing_environment.update_extdeps()") # This function is called by our thread pool - def update_extdep(self: 'self_describing_environment', extdep: external_dependency.ExternalDependency) -> bool: + def update_extdep(self: "self_describing_environment", extdep: external_dependency.ExternalDependency) -> bool: # Check to see whether it's necessary to fetch the files. try: if not extdep.verify(): # Get rid of extdep published path since it could get changed # during the fetch routine. - if 'set_path' in extdep.flags: + if "set_path" in extdep.flags: env_object.remove_path_element(extdep.published_path) - if 'set_pypath' in extdep.flags: + if "set_pypath" in extdep.flags: env_object.remove_pypath_element(extdep.published_path) extdep.clean() extdep.fetch() @@ -293,6 +284,7 @@ def update_extdep(self: 'self_describing_environment', extdep: external_dependen if extdep.error_msg is not None: logging.warning(extdep.error_msg) return False + # prep the worker pool all_extdeps = self._get_extdeps(env_object) self_extdeps = [(self, x) for x in all_extdeps] @@ -313,7 +305,7 @@ def update_extdep(self: 'self_describing_environment', extdep: external_dependen old_count = num_extdeps # wait for the pool_handle (MapResult) to finish while pool_handle._number_left != 0: - while (old_count != pool_handle._number_left and old_count > 0): + while old_count != pool_handle._number_left and old_count > 0: print(".", end="", flush=True) old_count -= 1 time.sleep(0.1) # wait 100 ms @@ -354,7 +346,7 @@ def DestroyEnvironment() -> None: ENV_STATE = None -def BootstrapEnvironment(workspace: str, scopes: Optional[tuple]=None, skipped_dirs: Optional[tuple]=None) -> tuple: +def BootstrapEnvironment(workspace: str, scopes: Optional[tuple] = None, skipped_dirs: Optional[tuple] = None) -> tuple: """Performs a multistage bootstrap of the environment. 1. Locate and load all environment description files @@ -386,8 +378,7 @@ def BootstrapEnvironment(workspace: str, scopes: Optional[tuple]=None, skipped_d # ENVIRONMENT BOOTSTRAP STAGE 1 # Locate and load all environment description files. 
# - build_env = self_describing_environment( - workspace, scopes, skipped_dirs).load_workspace() + build_env = self_describing_environment(workspace, scopes, skipped_dirs).load_workspace() # # ENVIRONMENT BOOTSTRAP STAGE 2 @@ -419,7 +410,7 @@ def BootstrapEnvironment(workspace: str, scopes: Optional[tuple]=None, skipped_d return ENV_STATE -def CleanEnvironment(workspace: str, scopes: Optional[tuple]=None, skipped_dirs: Optional[tuple]=None) -> None: +def CleanEnvironment(workspace: str, scopes: Optional[tuple] = None, skipped_dirs: Optional[tuple] = None) -> None: """Cleans all external dependencies based on environment. Environment is bootstrapped from provided arguments and all dependencies @@ -445,7 +436,7 @@ def CleanEnvironment(workspace: str, scopes: Optional[tuple]=None, skipped_dirs: build_env.clean_extdeps(shell_env) -def UpdateDependencies(workspace: str, scopes: Optional[tuple]=None, skipped_dirs: Optional[tuple]=None) -> tuple: +def UpdateDependencies(workspace: str, scopes: Optional[tuple] = None, skipped_dirs: Optional[tuple] = None) -> tuple: """Updates all external dependencies based on environment. Environment is bootstrapped from provided arguments and all dependencies @@ -474,7 +465,7 @@ def UpdateDependencies(workspace: str, scopes: Optional[tuple]=None, skipped_dir return build_env.update_extdeps(shell_env) -def VerifyEnvironment(workspace: str, scopes: Optional[tuple]=None, skipped_dirs: Optional[tuple]=None) -> bool: +def VerifyEnvironment(workspace: str, scopes: Optional[tuple] = None, skipped_dirs: Optional[tuple] = None) -> bool: """Verifies all external dependencies based on environment. Environment is bootstrapped from provided arguments and all dependencies diff --git a/edk2toolext/environment/shell_environment.py b/edk2toolext/environment/shell_environment.py index ffdc1112..7f9b86f4 100644 --- a/edk2toolext/environment/shell_environment.py +++ b/edk2toolext/environment/shell_environment.py @@ -10,6 +10,7 @@ This management includes PATH, PYTHONPATH and ENV Variables. """ + import copy import logging import os @@ -26,10 +27,10 @@ # Copy the Singleton pattern from... # https://stackoverflow.com/a/6798042 # -class Singleton(type): # noqa +class Singleton(type): # noqa _instances = {} - def __call__(cls, *args, **kwargs): # noqa + def __call__(cls, *args, **kwargs): # noqa if cls not in cls._instances: cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs) return cls._instances[cls] @@ -42,6 +43,7 @@ class ShellEnvironment(metaclass=Singleton): screenshots (checkpoints) that are stored and can be accessed later. """ + # Easy definition for the very first checkpoint # when the environment is first created. INITIAL_CHECKPOINT = 0 @@ -76,7 +78,7 @@ def import_environment(self) -> None: self.active_environ[key] = value # Record the PATH elements of the current environment. - path = self.active_environ.get('PATH', "") + path = self.active_environ.get("PATH", "") # Filter removes empty elements. # List creates an actual list rather than a generator. @@ -132,12 +134,14 @@ def checkpoint(self) -> int: 4. 
active_buildvars """ new_index = len(self.checkpoints) - self.checkpoints.append({ - 'environ': copy.copy(self.active_environ), - 'path': self.active_path, - 'pypath': self.active_pypath, - 'buildvars': copy.copy(self.active_buildvars) - }) + self.checkpoints.append( + { + "environ": copy.copy(self.active_environ), + "path": self.active_path, + "pypath": self.active_pypath, + "buildvars": copy.copy(self.active_buildvars), + } + ) return new_index @@ -145,10 +149,10 @@ def restore_checkpoint(self, index: int) -> None: """Restore a specific checkpoint.""" if index < len(self.checkpoints): check_point = self.checkpoints[index] - self.active_environ = copy.copy(check_point['environ']) - self.active_path = check_point['path'] - self.active_pypath = check_point['pypath'] - self.active_buildvars = copy.copy(check_point['buildvars']) + self.active_environ = copy.copy(check_point["environ"]) + self.active_path = check_point["path"] + self.active_pypath = check_point["pypath"] + self.active_buildvars = copy.copy(check_point["buildvars"]) self.export_environment() @@ -335,9 +339,8 @@ def set_build_var(self, var_name: str, var_data: str) -> None: var_name (str): variable to set the value for var_data (obj): data to set """ - self.logger.debug( - "Updating BUILD VAR element '%s': '%s'." % (var_name, var_data)) - self.active_buildvars.SetValue(var_name, var_data, '', overridable=True) + self.logger.debug("Updating BUILD VAR element '%s': '%s'." % (var_name, var_data)) + self.active_buildvars.SetValue(var_name, var_data, "", overridable=True) def get_shell_var(self, var_name: str) -> str: """Gets the shell variable. @@ -363,13 +366,12 @@ def set_shell_var(self, var_name: str, var_data: str) -> None: # Check for the "special" shell vars. if var_data is None: raise ValueError("Unexpected var_data: None") - if var_name.upper() == 'PATH': + if var_name.upper() == "PATH": self.set_path(var_data) - elif var_name.upper() == 'PYTHONPATH': + elif var_name.upper() == "PYTHONPATH": self.set_pypath(var_data) else: - self.logger.debug( - "Updating SHELL VAR element '%s': '%s'." % (var_name, var_data)) + self.logger.debug("Updating SHELL VAR element '%s': '%s'." % (var_name, var_data)) self.active_environ[var_name] = var_data os.environ[var_name] = var_data @@ -389,6 +391,7 @@ def GetBuildVars() -> var_dict.VarDict: Returns: (VarDict): A special dictionary containing build vars """ + # # Tricky! # Define a wrapper class that always forwards commands to the @@ -400,7 +403,7 @@ class BuildVarsWrapper(object): def __init__(self) -> None: self.internal_shell_env = ShellEnvironment() - def __getattr__(self, attrname: str) -> Any: # noqa: ANN401 + def __getattr__(self, attrname: str) -> Any: # noqa: ANN401 # Instead, invoke on the active BuildVars object. 
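# A sketch of why this forwarding works (not part of the patch): __getattr__
# only fires for names the wrapper itself lacks, so every VarDict method call
# is resolved against whatever active_buildvars object the singleton holds at
# call time, even if a checkpoint restore has since swapped it out.
#
#     vars = GetBuildVars()                        # BuildVarsWrapper instance
#     vars.SetValue("TARGET", "DEBUG", "example")  # forwarded via __getattr__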
return getattr(self.internal_shell_env.active_buildvars, attrname) diff --git a/edk2toolext/environment/uefi_build.py b/edk2toolext/environment/uefi_build.py index 6606257c..2d926fa0 100644 --- a/edk2toolext/environment/uefi_build.py +++ b/edk2toolext/environment/uefi_build.py @@ -82,28 +82,87 @@ def AddPlatformCommandLineOptions(self, parserObj: argparse.ArgumentParser) -> N Args: parserObj (argparser): argparser object """ - parserObj.add_argument("--SKIPBUILD", "--skipbuild", "--SkipBuild", dest="SKIPBUILD", - action='store_true', default=False, help="Skip the build process") - parserObj.add_argument("--SKIPPREBUILD", "--skipprebuild", "--SkipPrebuild", dest="SKIPPREBUILD", - action='store_true', default=False, help="Skip prebuild process") - parserObj.add_argument("--SKIPPOSTBUILD", "--skippostbuild", "--SkipPostBuild", dest="SKIPPOSTBUILD", - action='store_true', default=False, help="Skip postbuild process") - parserObj.add_argument("--FLASHONLY", "--flashonly", "--FlashOnly", dest="FLASHONLY", - action='store_true', default=False, help="Flash rom after build.") - parserObj.add_argument("--FLASHROM", "--flashrom", "--FlashRom", dest="FLASHROM", - action='store_true', default=False, help="Flash rom. Rom must be built previously.") - parserObj.add_argument("--UPDATECONF", "--updateconf", "--UpdateConf", - dest="UPDATECONF", action='store_true', default=False, - help="Update Conf. Builders Conf files will be replaced with latest template files") - parserObj.add_argument("--CLEAN", "--clean", "--CLEAN", dest="CLEAN", - action='store_true', default=False, - help="Clean. Remove all old build artifacts and intermediate files") - parserObj.add_argument("--CLEANONLY", "--cleanonly", "--CleanOnly", dest="CLEANONLY", - action='store_true', default=False, - help="Clean Only. Do clean operation and don't build just exit.") - parserObj.add_argument("--OUTPUTCONFIG", "--outputconfig", "--OutputConfig", - dest='OutputConfig', required=False, type=str, - help='Provide shell variables in a file') + parserObj.add_argument( + "--SKIPBUILD", + "--skipbuild", + "--SkipBuild", + dest="SKIPBUILD", + action="store_true", + default=False, + help="Skip the build process", + ) + parserObj.add_argument( + "--SKIPPREBUILD", + "--skipprebuild", + "--SkipPrebuild", + dest="SKIPPREBUILD", + action="store_true", + default=False, + help="Skip prebuild process", + ) + parserObj.add_argument( + "--SKIPPOSTBUILD", + "--skippostbuild", + "--SkipPostBuild", + dest="SKIPPOSTBUILD", + action="store_true", + default=False, + help="Skip postbuild process", + ) + parserObj.add_argument( + "--FLASHONLY", + "--flashonly", + "--FlashOnly", + dest="FLASHONLY", + action="store_true", + default=False, + help="Flash rom after build.", + ) + parserObj.add_argument( + "--FLASHROM", + "--flashrom", + "--FlashRom", + dest="FLASHROM", + action="store_true", + default=False, + help="Flash rom. Rom must be built previously.", + ) + parserObj.add_argument( + "--UPDATECONF", + "--updateconf", + "--UpdateConf", + dest="UPDATECONF", + action="store_true", + default=False, + help="Update Conf. Builders Conf files will be replaced with latest template files", + ) + parserObj.add_argument( + "--CLEAN", + "--clean", + "--CLEAN", + dest="CLEAN", + action="store_true", + default=False, + help="Clean. Remove all old build artifacts and intermediate files", + ) + parserObj.add_argument( + "--CLEANONLY", + "--cleanonly", + "--CleanOnly", + dest="CLEANONLY", + action="store_true", + default=False, + help="Clean Only. 
Do clean operation and don't build just exit.", + ) + parserObj.add_argument( + "--OUTPUTCONFIG", + "--outputconfig", + "--OutputConfig", + dest="OutputConfig", + required=False, + type=str, + help="Provide shell variables in a file", + ) def RetrievePlatformCommandLineOptions(self, args: argparse.Namespace) -> None: """Retrieve command line options from the argparser. @@ -120,12 +179,12 @@ def RetrievePlatformCommandLineOptions(self, args: argparse.Namespace) -> None: self.FlashImage = args.FLASHROM self.UpdateConf = args.UPDATECONF - if (args.FLASHONLY): + if args.FLASHONLY: self.SkipPostBuild = True self.SkipBuild = True self.SkipPreBuild = True self.FlashImage = True - elif (args.CLEANONLY): + elif args.CLEANONLY: self.Clean = True self.SkipBuild = True self.SkipPreBuild = True @@ -150,67 +209,66 @@ def Go(self, WorkSpace: str, PackagesPath: str, PInHelper: HelperFunctions, PInM self.Helper.DebugLogRegisteredFunctions() ret = self.SetEnv() - if (ret != 0): + if ret != 0: logging.critical("SetEnv failed") return ret # clean - if (self.Clean): + if self.Clean: edk2_logging.log_progress("Cleaning") ret = self.CleanTree() - if (ret != 0): + if ret != 0: logging.critical("Clean failed") return ret # prebuild - if (self.SkipPreBuild): + if self.SkipPreBuild: edk2_logging.log_progress("Skipping Pre Build") else: ret = self.PreBuild() - if (ret != 0): + if ret != 0: logging.critical("Pre Build failed") return ret # Output Build Environment to File - this is mostly for debug of build # issues or adding other build features using existing variables - if (self.OutputConfig is not None): + if self.OutputConfig is not None: edk2_logging.log_progress("Writing Build Env Info out to File") logging.debug("Found an Output Build Env File: " + self.OutputConfig) self.env.PrintAll(self.OutputConfig) if (self.env.GetValue("GATEDBUILD") is not None) and (self.env.GetValue("GATEDBUILD").upper() == "TRUE"): ShouldGatedBuildRun = self.PlatformGatedBuildShouldHappen() - logging.debug("Platform Gated Build Should Run returned: %s" % str( - ShouldGatedBuildRun)) - if (not self.SkipBuild): + logging.debug("Platform Gated Build Should Run returned: %s" % str(ShouldGatedBuildRun)) + if not self.SkipBuild: self.SkipBuild = not ShouldGatedBuildRun - if (not self.SkipPostBuild): + if not self.SkipPostBuild: self.SkipPostBuild = not ShouldGatedBuildRun # build - if (self.SkipBuild): + if self.SkipBuild: edk2_logging.log_progress("Skipping Build") else: ret = self.Build() - if (ret != 0): + if ret != 0: logging.critical("Build failed") return ret # postbuild - if (self.SkipPostBuild): + if self.SkipPostBuild: edk2_logging.log_progress("Skipping Post Build") else: ret = self.PostBuild() - if (ret != 0): + if ret != 0: logging.critical("Post Build failed") return ret # flash - if (self.FlashImage): + if self.FlashImage: edk2_logging.log_progress("Flashing Image") ret = self.FlashRomImage() - if (ret != 0): + if ret != 0: logging.critical("Flash Image failed") return ret @@ -231,12 +289,15 @@ def Go(self, WorkSpace: str, PackagesPath: str, PInHelper: HelperFunctions, PInM finally: end_time = time.perf_counter() elapsed_time_s = int((end_time - start_time)) - edk2_logging.log_progress("End time: {0}\t Total time Elapsed: {1}".format( - datetime.datetime.now(), datetime.timedelta(seconds=elapsed_time_s))) + edk2_logging.log_progress( + "End time: {0}\t Total time Elapsed: {1}".format( + datetime.datetime.now(), datetime.timedelta(seconds=elapsed_time_s) + ) + ) return 0 - def CleanTree(self, RemoveConfTemplateFilesToo: 
bool=False) -> int: + def CleanTree(self, RemoveConfTemplateFilesToo: bool = False) -> int: """Cleans the build directory. Args: @@ -247,7 +308,7 @@ def CleanTree(self, RemoveConfTemplateFilesToo: bool=False) -> int: edk2_logging.log_progress("Cleaning All Output for Build") d = self.env.GetValue("BUILD_OUTPUT_BASE") - if (os.path.isdir(d)): + if os.path.isdir(d): logging.debug("Removing [%s]", d) # if the folder is opened in Explorer do not fail the entire Rebuild try: @@ -261,14 +322,14 @@ def CleanTree(self, RemoveConfTemplateFilesToo: bool=False) -> int: # delete the conf .dbcache # this needs to be removed in case build flags changed d = os.path.join(self.ws, "Conf", ".cache") - if (os.path.isdir(d)): + if os.path.isdir(d): logging.debug("Removing [%s]" % d) RemoveTree(d) - if (RemoveConfTemplateFilesToo): + if RemoveConfTemplateFilesToo: for a in ["target.txt", "build_rule.txt", "tools_def.txt"]: d = os.path.join(self.ws, "Conf", a) - if (os.path.isfile(d)): + if os.path.isfile(d): logging.debug("Removing [%s]" % d) os.remove(d) @@ -293,7 +354,7 @@ def Build(self) -> int: params += " -a " + t # get the report options and setup the build command - if (self.env.GetValue("BUILDREPORTING") == "TRUE"): + if self.env.GetValue("BUILDREPORTING") == "TRUE": params += " -y " + self.env.GetValue("BUILDREPORT_FILE") rt = self.env.GetValue("BUILDREPORT_TYPES").split(" ") for t in rt: @@ -301,7 +362,7 @@ def Build(self) -> int: # add special processing to handle building a single module mod = self.env.GetValue("BUILDMODULE") - if (mod is not None and len(mod.strip()) > 0): + if mod is not None and len(mod.strip()) > 0: params += " -m " + mod edk2_logging.log_progress("Single Module Build: " + mod) self.SkipPostBuild = True @@ -317,7 +378,7 @@ def Build(self) -> int: # WORKAROUND - Pin the PYTHONHASHSEED so that TianoCore build tools # have consistent ordering. Addresses incremental builds. 
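# The surrounding logic follows a checkpoint/restore pattern (sketch only, not
# part of the patch): snapshot the environment, pin the variable for the child
# build process, then restore so the pin does not leak into later steps.
#
#     chk = env.checkpoint()                  # snapshot environ/path/pypath/vars
#     env.set_shell_var("PYTHONHASHSEED", "0")
#     ...                                     # invoke the edk2 build command
#     env.restore_checkpoint(chk)             # drop the pinned variable again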
pre_build_env_chk = env.checkpoint() - env.set_shell_var('PYTHONHASHSEED', '0') + env.set_shell_var("PYTHONHASHSEED", "0") env.log_environment() edk2_build_cmd = self.env.GetValue("EDK_BUILD_CMD") @@ -339,7 +400,7 @@ def Build(self) -> int: for level, problem in problems: logging.log(level, problem) - if (ret != 0): + if ret != 0: return ret return 0 @@ -355,7 +416,7 @@ def PreBuild(self) -> int: # ret = self.PlatformPreBuild() - if (ret != 0): + if ret != 0: logging.critical("PlatformPreBuild failed %d" % ret) return ret # @@ -363,14 +424,12 @@ def PreBuild(self) -> int: # for Descriptor in self.pm.GetPluginsOfClass(IUefiBuildPlugin): rc = Descriptor.Obj.do_pre_build(self) - if (rc != 0): - if (rc is None): - logging.error( - "Plugin Failed: %s returned NoneType" % Descriptor.Name) + if rc != 0: + if rc is None: + logging.error("Plugin Failed: %s returned NoneType" % Descriptor.Name) ret = -1 else: - logging.error("Plugin Failed: %s returned %d" % - (Descriptor.Name, rc)) + logging.error("Plugin Failed: %s returned %d" % (Descriptor.Name, rc)) ret = rc break # fail on plugin error else: @@ -388,7 +447,7 @@ def PostBuild(self) -> int: # ret = self.PlatformPostBuild() - if (ret != 0): + if ret != 0: logging.critical("PlatformPostBuild failed %d" % ret) return ret @@ -397,14 +456,12 @@ def PostBuild(self) -> int: # for Descriptor in self.pm.GetPluginsOfClass(IUefiBuildPlugin): rc = Descriptor.Obj.do_post_build(self) - if (rc != 0): - if (rc is None): - logging.error( - "Plugin Failed: %s returned NoneType" % Descriptor.Name) + if rc != 0: + if rc is None: + logging.error("Plugin Failed: %s returned NoneType" % Descriptor.Name) ret = -1 else: - logging.error("Plugin Failed: %s returned %d" % - (Descriptor.Name, rc)) + logging.error("Plugin Failed: %s returned %d" % (Descriptor.Name, rc)) ret = rc break # fail on plugin error else: @@ -421,14 +478,13 @@ def SetEnv(self) -> int: shell_environment.GetEnvironment().set_shell_var("WORKSPACE", self.ws) shell_environment.GetBuildVars().SetValue("WORKSPACE", self.ws, "Set in SetEnv") - if (self.pp is not None): + if self.pp is not None: shell_environment.GetEnvironment().set_shell_var("PACKAGES_PATH", self.pp) - shell_environment.GetBuildVars().SetValue( - "PACKAGES_PATH", self.pp, "Set in SetEnv") + shell_environment.GetBuildVars().SetValue("PACKAGES_PATH", self.pp, "Set in SetEnv") # process platform parameters defined in platform build file ret = self.SetPlatformEnv() - if (ret != 0): + if ret != 0: logging.critical("Set Platform Env failed") return ret @@ -438,7 +494,7 @@ def SetEnv(self) -> int: # Handle all the template files for workspace/conf/ Allow override TemplateDirList = [self.env.GetValue("EDK_TOOLS_PATH")] # set to edk2 BaseTools PlatTemplatesForConf = self.env.GetValue("CONF_TEMPLATE_DIR") # get platform defined additional path - if (PlatTemplatesForConf is not None): + if PlatTemplatesForConf is not None: PlatTemplatesForConf = self.edk2path.GetAbsolutePathOnThisSystemFromEdk2RelativePath(PlatTemplatesForConf) TemplateDirList.insert(0, PlatTemplatesForConf) logging.debug(f"Platform defined override for Template Conf Files: {PlatTemplatesForConf}") @@ -448,25 +504,25 @@ def SetEnv(self) -> int: # parse target file ret = self.ParseTargetFile() - if (ret != 0): + if ret != 0: logging.critical("ParseTargetFile failed") return ret # parse tools_def file ret = self.ParseToolsDefFile() - if (ret != 0): + if ret != 0: logging.critical("ParseToolsDefFile failed") return ret # parse DSC file ret = self.ParseDscFile() - if (ret != 0): + if ret 
!= 0: logging.critical("ParseDscFile failed") return ret # parse FDF file ret = self.ParseFdfFile() - if (ret != 0): + if ret != 0: logging.critical("ParseFdfFile failed") return ret @@ -476,29 +532,34 @@ def SetEnv(self) -> int: self.env.SetValue("OUTPUT_DIRECTORY", "Build", "default from uefi_build", True) # BUILD_OUT_TEMP is a path so the value should use native directory separators - self.env.SetValue("BUILD_OUT_TEMP", - os.path.normpath(os.path.join(self.ws, self.env.GetValue("OUTPUT_DIRECTORY"))), - "Computed in SetEnv") + self.env.SetValue( + "BUILD_OUT_TEMP", + os.path.normpath(os.path.join(self.ws, self.env.GetValue("OUTPUT_DIRECTORY"))), + "Computed in SetEnv", + ) target = self.env.GetValue("TARGET", None) if target is None: logging.error("Environment variable TARGET must be set to a build target.") - logging.error("Review the 'CLI Env Guide' section provided when using stuart_build "\ - "with the -help flag.") + logging.error("Review the 'CLI Env Guide' section provided when using stuart_build " "with the -help flag.") return -1 - self.env.SetValue("BUILD_OUTPUT_BASE", os.path.join(self.env.GetValue( - "BUILD_OUT_TEMP"), target + "_" + self.env.GetValue("TOOL_CHAIN_TAG")), "Computed in SetEnv") + self.env.SetValue( + "BUILD_OUTPUT_BASE", + os.path.join(self.env.GetValue("BUILD_OUT_TEMP"), target + "_" + self.env.GetValue("TOOL_CHAIN_TAG")), + "Computed in SetEnv", + ) # We have our build target now. Give platform build one more chance for target specific settings. ret = self.SetPlatformEnvAfterTarget() - if (ret != 0): + if ret != 0: logging.critical("SetPlatformEnvAfterTarget failed") return ret # set the build report file - self.env.SetValue("BUILDREPORT_FILE", os.path.join( - self.env.GetValue("BUILD_OUTPUT_BASE"), "BUILD_REPORT.TXT"), True) + self.env.SetValue( + "BUILDREPORT_FILE", os.path.join(self.env.GetValue("BUILD_OUTPUT_BASE"), "BUILD_REPORT.TXT"), True + ) # set environment variables for the build process os.environ["EFI_SOURCE"] = self.ws @@ -518,7 +579,7 @@ def FlashRomImage(self) -> int: # ----------------------------------------------------------------------- @classmethod - def PlatformPreBuild(self: 'UefiBuilder') -> int: + def PlatformPreBuild(self: "UefiBuilder") -> int: """Perform Platform PreBuild Steps. Returns: @@ -527,7 +588,7 @@ def PlatformPreBuild(self: 'UefiBuilder') -> int: return 0 @classmethod - def PlatformPostBuild(self: 'UefiBuilder') -> int: + def PlatformPostBuild(self: "UefiBuilder") -> int: """Perform Platform PostBuild Steps. Returns: @@ -536,7 +597,7 @@ def PlatformPostBuild(self: 'UefiBuilder') -> int: return 0 @classmethod - def SetPlatformEnv(self: 'UefiBuilder') -> int: + def SetPlatformEnv(self: "UefiBuilder") -> int: """Set and read Platform Env variables. This is performed before platform files like the DSC and FDF have been parsed. @@ -552,7 +613,7 @@ def SetPlatformEnv(self: 'UefiBuilder') -> int: return 0 @classmethod - def SetPlatformEnvAfterTarget(self: 'UefiBuilder') -> int: + def SetPlatformEnvAfterTarget(self: "UefiBuilder") -> int: """Set and read Platform Env variables after platform files have been parsed. Returns: @@ -561,7 +622,7 @@ def SetPlatformEnvAfterTarget(self: 'UefiBuilder') -> int: return 0 @classmethod - def SetPlatformDefaultEnv(self: 'UefiBuilder') -> list[namedtuple]: + def SetPlatformDefaultEnv(self: "UefiBuilder") -> list[namedtuple]: """Sets platform default environment variables by returning them as a list. 
Variables returned from this method are printed to the command line when @@ -579,7 +640,7 @@ def SetPlatformDefaultEnv(self: 'UefiBuilder') -> list[namedtuple]: return [] @classmethod - def PlatformBuildRom(self: 'UefiBuilder') -> int: + def PlatformBuildRom(self: "UefiBuilder") -> int: """Build the platform Rom. !!! tip @@ -589,7 +650,7 @@ def PlatformBuildRom(self: 'UefiBuilder') -> int: return 0 @classmethod - def PlatformFlashImage(self: 'UefiBuilder') -> int: + def PlatformFlashImage(self: "UefiBuilder") -> int: """Flashes the image to the system. Returns: @@ -598,7 +659,7 @@ def PlatformFlashImage(self: 'UefiBuilder') -> int: return 0 @classmethod - def PlatformGatedBuildShouldHappen(self: 'UefiBuilder') -> bool: + def PlatformGatedBuildShouldHappen(self: "UefiBuilder") -> bool: """Specifies if a gated build should happen. Returns: @@ -615,8 +676,7 @@ def ParseTargetFile(self) -> int: Sets them so they can be overridden. """ - conf_file_path = self.edk2path.GetAbsolutePathOnThisSystemFromEdk2RelativePath( - "Conf", "target.txt") + conf_file_path = self.edk2path.GetAbsolutePathOnThisSystemFromEdk2RelativePath("Conf", "target.txt") if os.path.isfile(conf_file_path): # parse TargetTxt File logging.debug("Parse Target.txt file") @@ -628,8 +688,9 @@ def ParseTargetFile(self) -> int: # Set the two additional edk2 common macros. These will be resolved by now as # target.txt will set them if they aren't already set. - self.env.SetValue("TOOLCHAIN", self.env.GetValue("TOOL_CHAIN_TAG"), - "DSC Spec macro - set based on TOOL_CHAIN_TAG") + self.env.SetValue( + "TOOLCHAIN", self.env.GetValue("TOOL_CHAIN_TAG"), "DSC Spec macro - set based on TOOL_CHAIN_TAG" + ) # need to check how multiple arch are handled self.env.SetValue("ARCH", self.env.GetValue("TARGET_ARCH"), "DSC Spec macro - set based on TARGET_ARCH") @@ -644,8 +705,7 @@ def ParseToolsDefFile(self) -> int: Sets them so they can be overridden. """ - toolsdef_file_path = self.edk2path.GetAbsolutePathOnThisSystemFromEdk2RelativePath( - "Conf", "tools_def.txt") + toolsdef_file_path = self.edk2path.GetAbsolutePathOnThisSystemFromEdk2RelativePath("Conf", "tools_def.txt") if os.path.isfile(toolsdef_file_path): # parse ToolsdefTxt File logging.debug("Parse tools_def.txt file") @@ -658,8 +718,9 @@ def ParseToolsDefFile(self) -> int: tool_chain = self.env.GetValue("TOOL_CHAIN_TAG", None) if tool_chain is None: logging.error("Environment variable TOOL_CHAIN_TAG must be set to a tool chain.") - logging.error("Review the 'CLI Env Guide' section provided when using stuart_build "\ - "with the -help flag.") + logging.error( + "Review the 'CLI Env Guide' section provided when using stuart_build " "with the -help flag." + ) return -1 tag = "*_" + tool_chain + "_*_*_FAMILY" tool_chain_family = tdp.Dict.get(tag, "UNKNOWN") @@ -681,7 +742,8 @@ def ParseDscFile(self) -> int: logging.error("The DSC file was not set. Please set ACTIVE_PLATFORM") return -1 dsc_file_path = self.edk2path.GetAbsolutePathOnThisSystemFromEdk2RelativePath( - self.env.GetValue("ACTIVE_PLATFORM")) + self.env.GetValue("ACTIVE_PLATFORM") + ) if os.path.isfile(dsc_file_path): # parse DSC File logging.debug("Parse Active Platform DSC file: {0}".format(dsc_file_path)) @@ -708,11 +770,12 @@ def ParseFdfFile(self) -> int: it so we don't have to define things twice the FDF file usually comes from the Active Platform DSC file so it needs to be parsed first.
""" - if (self.env.GetValue("FLASH_DEFINITION") is None): + if self.env.GetValue("FLASH_DEFINITION") is None: logging.debug("No flash definition set") return 0 fdf_file_path = self.edk2path.GetAbsolutePathOnThisSystemFromEdk2RelativePath( - self.env.GetValue("FLASH_DEFINITION")) + self.env.GetValue("FLASH_DEFINITION") + ) if os.path.isfile(fdf_file_path): # parse the FDF file- fdf files have similar syntax to DSC and therefore parser works for both. logging.debug("Parse Active Flash Definition (FDF) file") @@ -735,6 +798,6 @@ def ParseFdfFile(self) -> int: def SetBasicDefaults(self) -> int: """Sets default values for numerous build control flow variables.""" self.env.SetValue("WORKSPACE", self.ws, "DEFAULT") - if (self.pp is not None): + if self.pp is not None: self.env.SetValue("PACKAGES_PATH", self.pp, "DEFAULT") return 0 diff --git a/edk2toolext/environment/var_dict.py b/edk2toolext/environment/var_dict.py index 6dfeef4c..220bc3dd 100644 --- a/edk2toolext/environment/var_dict.py +++ b/edk2toolext/environment/var_dict.py @@ -29,13 +29,13 @@ class EnvEntry(object): overridable (bool): If the value can be overwritten in the future """ - def __init__(self, value: str, comment: str, overridable: str=False) -> None: + def __init__(self, value: str, comment: str, overridable: str = False) -> None: """Inits an entry with the specified values.""" self.Value = value self.Comment = comment self.Overrideable = overridable - def PrintEntry(self, f: Optional[TextIO]=None) -> None: + def PrintEntry(self, f: Optional[TextIO] = None) -> None: """Prints the value. Args: @@ -43,14 +43,15 @@ def PrintEntry(self, f: Optional[TextIO]=None) -> None: """ print("Value: %s" % self.Value, file=f) print("Comment: %s" % self.Comment, file=f) - if (self.Overrideable): + if self.Overrideable: print("Value overridable", file=f) print("**********************", file=f) + # # Function used to override the value if option allows it # - def SetValue(self, value: str, comment: str, overridable: Optional[bool]=False) -> bool: + def SetValue(self, value: str, comment: str, overridable: Optional[bool] = False) -> bool: """Sets the value of the entry if it os overridable. Args: @@ -65,9 +66,8 @@ def SetValue(self, value: str, comment: str, overridable: Optional[bool]=False) if (value == self.Value) and (overridable == self.Overrideable): return True - if (not self.Overrideable): - logging.debug("Can't set value [%s] as it isn't overrideable. Previous comment %s" % ( - value, self.Comment)) + if not self.Overrideable: + logging.debug("Can't set value [%s] as it isn't overrideable. Previous comment %s" % (value, self.Comment)) return False self.Value = value @@ -97,7 +97,7 @@ def GetEntry(self, key: str) -> Optional[EnvEntry]: """Returns an entry in the Dstore Dict.""" return self.Dstore.get(key.upper()) - def __copy__(self) -> 'VarDict': + def __copy__(self) -> "VarDict": """Copies data into a new VarDict.""" new_copy = VarDict() new_copy.Logger = self.Logger @@ -111,7 +111,7 @@ def __copy__(self) -> 'VarDict': new_copy.SetValue(key, value, comment, override) return new_copy - def GetValue(self, k: str, default: Optional[str]=None) -> str: + def GetValue(self, k: str, default: Optional[str] = None) -> str: """Gets a value from the variable dictionary that was set during build. !!! 
note @@ -124,21 +124,20 @@ def GetValue(self, k: str, default: Optional[str]=None) -> str: Returns: (str): The value of the key, if present, else default value """ - if (k is None): - logging.debug( - "GetValue - Invalid Parameter key is None.") + if k is None: + logging.debug("GetValue - Invalid Parameter key is None.") return None key = k.upper() en = self.GetEntry(key) - if (en is not None): + if en is not None: self.Logger.debug("Key %s found. Value %s" % (key, en.GetValue())) return en.GetValue() else: self.Logger.debug("Key %s not found" % key) return default - def SetValue(self, k: str, v: str, comment: str, overridable: bool=False) -> bool: + def SetValue(self, k: str, v: str, comment: str, overridable: bool = False) -> bool: """Sets an environment variable to be used throughout the build. Args: @@ -156,11 +155,11 @@ def SetValue(self, k: str, v: str, comment: str, overridable: bool=False) -> boo key = k.upper() en = self.GetEntry(key) if v is None: - value = ''.join(choice(ascii_letters) for _ in range(20)) + value = "".join(choice(ascii_letters) for _ in range(20)) else: value = str(v) self.Logger.debug("Trying to set key %s to value %s" % (k, v)) - if (en is None): + if en is None: # new entry en = EnvEntry(value, comment, overridable) self.Dstore[key] = en @@ -182,13 +181,13 @@ def AllowOverride(self, k: str) -> bool: """ key = k.upper() en = self.GetEntry(key) - if (en is not None): + if en is not None: self.Logger.warning("Allowing Override for key %s" % k) en.AllowOverride() return True return False - def GetBuildValue(self, key: str, BuildType: Optional[str]=None) -> str: + def GetBuildValue(self, key: str, BuildType: Optional[str] = None) -> str: """Get a build var value for given key and buildtype. !!! tip @@ -211,17 +210,15 @@ def GetBuildValue(self, key: str, BuildType: Optional[str]=None) -> str: """ rv = None - if (BuildType is None): + if BuildType is None: BuildType = self.GetValue("TARGET") - if (BuildType is None): - logging.debug( - "GetBuildValue - Invalid Parameter BuildType is None and Target Not set. Key is: " + key) + if BuildType is None: + logging.debug("GetBuildValue - Invalid Parameter BuildType is None and Target Not set. Key is: " + key) return None - if (key is None): - logging.debug( - "GetBuildValue - Invalid Parameter key is None. BuildType is: " + BuildType) + if key is None: + logging.debug("GetBuildValue - Invalid Parameter key is None. BuildType is: " + BuildType) return None ty = BuildType.upper().strip() @@ -229,7 +226,7 @@ def GetBuildValue(self, key: str, BuildType: Optional[str]=None) -> str: # see if specific k = "BLD_" + ty + "_" + tk rv = self.GetValue(k) - if (rv is None): + if rv is None: # didn't find build type specific so check for generic k = "BLD_*_" + tk rv = self.GetValue(k) @@ -237,7 +234,7 @@ def GetBuildValue(self, key: str, BuildType: Optional[str]=None) -> str: # return value...if not found should return None return rv - def GetAllBuildKeyValues(self, BuildType: Optional[str]=None) -> dict: + def GetAllBuildKeyValues(self, BuildType: Optional[str] = None) -> dict: """Gets a dictionary for all build vars. !!!
tip @@ -259,12 +256,11 @@ def GetAllBuildKeyValues(self, BuildType: Optional[str]=None) -> dict: """ returndict = {} - if (BuildType is None): + if BuildType is None: BuildType = self.GetValue("TARGET") - if (BuildType is None): - logging.debug( - "GetAllBuildKeyValues - Invalid Parameter BuildType is None and Target Not Set.") + if BuildType is None: + logging.debug("GetAllBuildKeyValues - Invalid Parameter BuildType is None and Target Not Set.") return returndict ty = BuildType.upper().strip() @@ -272,7 +268,7 @@ def GetAllBuildKeyValues(self, BuildType: Optional[str]=None) -> dict: # get all the generic build options for key, value in self.Dstore.items(): - if (key.startswith("BLD_*_")): + if key.startswith("BLD_*_"): k = key[6:] returndict[k] = value.GetValue() @@ -280,7 +276,7 @@ def GetAllBuildKeyValues(self, BuildType: Optional[str]=None) -> dict: # figure out offset part of key name to strip ks = len(ty) + 5 for key, value in self.Dstore.items(): - if (key.startswith("BLD_" + ty + "_")): + if key.startswith("BLD_" + ty + "_"): k = key[ks:] returndict[k] = value.GetValue() @@ -295,11 +291,11 @@ def GetAllNonBuildKeyValues(self) -> dict: returndict = {} # get all the generic build options for key, value in self.Dstore.items(): - if (not key.startswith("BLD_")): + if not key.startswith("BLD_"): returndict[key] = value.GetValue() return returndict - def PrintAll(self, fp: Optional[TextIO] =None) -> None: + def PrintAll(self, fp: Optional[TextIO] = None) -> None: """Prints all variables. If fp is not None, writes to a fp also Args: fp (str): file pointer to print to """ f = None - if (fp is not None): - f = open(fp, 'a+') + if fp is not None: + f = open(fp, "a+") for key, value in self.Dstore.items(): print("Key = %s" % key, file=f) value.PrintEntry(f) - if (f): + if f: f.close() diff --git a/edk2toolext/environment/version_aggregator.py b/edk2toolext/environment/version_aggregator.py index ebdec0b8..a42b8454 100644 --- a/edk2toolext/environment/version_aggregator.py +++ b/edk2toolext/environment/version_aggregator.py @@ -11,6 +11,7 @@ Used to facilitate the collection of information regarding the tools, binaries, submodule configuration used in a build. """ + import copy import logging from enum import Enum @@ -25,13 +26,14 @@ class version_aggregator(object): Used to facilitate the collection of information regarding the tools, binaries, submodule configuration used in a build. """ + def __init__(self) -> None: """Inits an empty version aggregator.""" super(version_aggregator, self).__init__() self._Versions = {} self._logger = logging.getLogger("version_aggregator") - def ReportVersion(self, key: str, value: str, versionType:str, path:Optional[str]=None) -> None: + def ReportVersion(self, key: str, value: str, versionType: str, path: Optional[str] = None) -> None: """Report the version of something.
Args: @@ -45,19 +47,15 @@ def ReportVersion(self, key: str, value: str, versionType:str, path:Optional[str if old_version["version"] == value and old_version["path"] == path: self._logger.info(f"version_aggregator: {key} re-registered at {path}") else: - error = "version_aggregator: {0} key registered with a different value\n\t" \ - "Old:{1}@{3}\n\tNew:{2}@{4}\n".format( - key, old_version["version"], value, old_version["path"], path) + error = ( + "version_aggregator: {0} key registered with a different value\n\t" + "Old:{1}@{3}\n\tNew:{2}@{4}\n".format(key, old_version["version"], value, old_version["path"], path) + ) self._logger.error(error) raise ValueError(error) return - self._Versions[key] = { - "name": key, - "version": value, - "type": versionType.name, - "path": path - } + self._Versions[key] = {"name": key, "version": value, "type": versionType.name, "path": path} self._logger.debug("version_aggregator logging version: {0}".format(str(self._Versions[key]))) def Print(self) -> None: @@ -68,7 +66,7 @@ def Print(self) -> None: if len(self._Versions) == 0: print("VERSION AGGREGATOR IS EMPTY") - def GetAggregatedVersionInformation(self) -> 'version_aggregator': + def GetAggregatedVersionInformation(self) -> "version_aggregator": """Returns a copy of the aggregated information.""" return copy.deepcopy(self._Versions) @@ -87,6 +85,7 @@ class VersionTypes(Enum): INFO: miscellaneous information. PIP: a python pip package. """ + TOOL = 1 COMMIT = 2 BINARY = 3 diff --git a/edk2toolext/image_validation.py b/edk2toolext/image_validation.py index 55b143cd..50455e64 100644 --- a/edk2toolext/image_validation.py +++ b/edk2toolext/image_validation.py @@ -28,7 +28,7 @@ def has_characteristic(data: int, mask: int) -> bool: """Checks if data has a specific mask.""" - return ((data & mask) == mask) + return (data & mask) == mask def set_bit(data: int, bit: int) -> int: @@ -55,10 +55,10 @@ def get_nx_compat_flag(pe: PE) -> PE: dllchar = pe.OPTIONAL_HEADER.DllCharacteristics if has_characteristic(dllchar, 256): # 256 (8th bit) is the mask - logging.info('True') + logging.info("True") return 1 else: - logging.info('False') + logging.info("False") return 0 @@ -86,14 +86,16 @@ def fill_missing_requirements(default: dict, target: dict) -> dict: class Result: """Test results.""" - PASS = '[PASS]' - WARN = '[WARNING]' - SKIP = '[SKIP]' - FAIL = '[FAIL]' + + PASS = "[PASS]" + WARN = "[WARNING]" + SKIP = "[SKIP]" + FAIL = "[FAIL]" class TestInterface: """Interface for creating tests to execute on parsed PE/COFF files.""" + def name(self) -> str: """Returns the name of the test. @@ -118,7 +120,8 @@ def execute(self, pe: PE, config_data: dict) -> Result: class TestManager(object): """Manager responsible for executing all tests on all parsed PE/COFF files.""" - def __init__(self, config_data: Optional[dict]=None) -> None: + + def __init__(self, config_data: Optional[dict] = None) -> None: """Inits the TestManager with configuration data. 
Args: @@ -134,115 +137,68 @@ def __init__(self, config_data: Optional[dict]=None) -> None: "X64": "IMAGE_FILE_MACHINE_AMD64", "IA32": "IMAGE_FILE_MACHINE_I386", "AARCH64": "IMAGE_FILE_MACHINE_ARM64", - "ARM": "IMAGE_FILE_MACHINE_ARM" + "ARM": "IMAGE_FILE_MACHINE_ARM", }, "IMAGE_FILE_MACHINE_AMD64": { "DEFAULT": { "DATA_CODE_SEPARATION": True, - "ALLOWED_SUBSYSTEMS": [ - "IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER", - "IMAGE_SUBSYSTEM_EFI_ROM" - ], - "ALIGNMENT": [ - { - "COMPARISON": "==", - "VALUE": 4096 - } - ] - }, - "APP": { - "ALLOWED_SUBSYSTEMS": [ - "IMAGE_SUBSYSTEM_EFI_APPLICATION" - ] + "ALLOWED_SUBSYSTEMS": ["IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER", "IMAGE_SUBSYSTEM_EFI_ROM"], + "ALIGNMENT": [{"COMPARISON": "==", "VALUE": 4096}], }, + "APP": {"ALLOWED_SUBSYSTEMS": ["IMAGE_SUBSYSTEM_EFI_APPLICATION"]}, "DRIVER": { "ALLOWED_SUBSYSTEMS": [ "IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER", - "IMAGE_SUBSYSTEM_EFI_RUNTIME_DRIVER" - "IMAGE_SUBSYSTEM_EFI_ROM" + "IMAGE_SUBSYSTEM_EFI_RUNTIME_DRIVER" "IMAGE_SUBSYSTEM_EFI_ROM", ] }, }, "IMAGE_FILE_MACHINE_ARM": { "DEFAULT": { "DATA_CODE_SEPARATION": True, - "ALLOWED_SUBSYSTEMS": [ - "IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER", - "IMAGE_SUBSYSTEM_EFI_ROM" - ], - "ALIGNMENT": [ - { - "COMPARISON": "==", - "VALUE": 4096 - } - ] + "ALLOWED_SUBSYSTEMS": ["IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER", "IMAGE_SUBSYSTEM_EFI_ROM"], + "ALIGNMENT": [{"COMPARISON": "==", "VALUE": 4096}], }, "APP": { - "ALLOWED_SUBSYSTEMS": [ - "IMAGE_SUBSYSTEM_EFI_APPLICATION" - ], + "ALLOWED_SUBSYSTEMS": ["IMAGE_SUBSYSTEM_EFI_APPLICATION"], }, "DRIVER": { "ALLOWED_SUBSYSTEMS": [ "IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER", "IMAGE_SUBSYSTEM_EFI_RUNTIME_DRIVER", - "IMAGE_SUBSYSTEM_EFI_ROM" - ]} + "IMAGE_SUBSYSTEM_EFI_ROM", + ] + }, }, "IMAGE_FILE_MACHINE_ARM64": { "DEFAULT": { "DATA_CODE_SEPARATION": True, - "ALLOWED_SUBSYSTEMS": [ - "IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER", - "IMAGE_SUBSYSTEM_EFI_ROM" - ], - "ALIGNMENT": [ - { - "COMPARISON": "==", - "VALUE": 4096 - } - ] - }, - "APP": { - "ALLOWED_SUBSYSTEMS": [ - "IMAGE_SUBSYSTEM_EFI_APPLICATION" - ] + "ALLOWED_SUBSYSTEMS": ["IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER", "IMAGE_SUBSYSTEM_EFI_ROM"], + "ALIGNMENT": [{"COMPARISON": "==", "VALUE": 4096}], }, + "APP": {"ALLOWED_SUBSYSTEMS": ["IMAGE_SUBSYSTEM_EFI_APPLICATION"]}, "DRIVER": { "ALLOWED_SUBSYSTEMS": [ "IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER", "IMAGE_SUBSYSTEM_EFI_RUNTIME_DRIVER", - "IMAGE_SUBSYSTEM_EFI_ROM" + "IMAGE_SUBSYSTEM_EFI_ROM", ] - } + }, }, "IMAGE_FILE_MACHINE_I386": { "DEFAULT": { "DATA_CODE_SEPARATION": True, - "ALLOWED_SUBSYSTEMS": [ - "IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER", - "IMAGE_SUBSYSTEM_EFI_ROM" - ], - "ALIGNMENT": [ - { - "COMPARISON": "==", - "VALUE": 4096 - } - ] - }, - "APP": { - "ALLOWED_SUBSYSTEMS": [ - "IMAGE_SUBSYSTEM_EFI_APPLICATION" - ] + "ALLOWED_SUBSYSTEMS": ["IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER", "IMAGE_SUBSYSTEM_EFI_ROM"], + "ALIGNMENT": [{"COMPARISON": "==", "VALUE": 4096}], }, + "APP": {"ALLOWED_SUBSYSTEMS": ["IMAGE_SUBSYSTEM_EFI_APPLICATION"]}, "DRIVER": { "ALLOWED_SUBSYSTEMS": [ "IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER", - "IMAGE_SUBSYSTEM_EFI_RUNTIME_DRIVER" - "IMAGE_SUBSYSTEM_EFI_ROM" + "IMAGE_SUBSYSTEM_EFI_RUNTIME_DRIVER" "IMAGE_SUBSYSTEM_EFI_ROM", ] - } - } + }, + }, } def add_test(self, test: TestInterface) -> None: @@ -265,7 +221,7 @@ def add_tests(self, tests: list[TestInterface]) -> None: """ self.tests.extend(tests) - def run_tests(self, pe: PE, profile: str="DEFAULT") -> Result: + def run_tests(self, pe: PE, profile: str = 
"DEFAULT") -> Result: """Runs all tests that have been added to the test manager. Tests will be executed in the order added @@ -282,7 +238,7 @@ def run_tests(self, pe: PE, profile: str="DEFAULT") -> Result: # Catch any invalid profiles machine_type = MACHINE_TYPE[pe.FILE_HEADER.Machine] if not self.config_data[machine_type].get(profile): - logging.error(f'Profile type {profile} is invalid. Exiting...') + logging.error(f"Profile type {profile} is invalid. Exiting...") return Result.FAIL # Fill any missing configurations for the specific module type with the default @@ -290,27 +246,21 @@ def run_tests(self, pe: PE, profile: str="DEFAULT") -> Result: target = self.config_data[machine_type][profile] target_requirements = fill_missing_requirements(default, target) - target_info = { - "MACHINE_TYPE": machine_type, - "PROFILE": profile - } - test_config_data = { - "TARGET_INFO": target_info, - "TARGET_REQUIREMENTS": target_requirements - } + target_info = {"MACHINE_TYPE": machine_type, "PROFILE": profile} + test_config_data = {"TARGET_INFO": target_info, "TARGET_REQUIREMENTS": target_requirements} - logging.debug(f'Executing tests with settings [{machine_type}][{profile}]') + logging.debug(f"Executing tests with settings [{machine_type}][{profile}]") overall_result = Result.PASS for test in self.tests: - logging.debug(f'Starting test: [{test.name()}]') + logging.debug(f"Starting test: [{test.name()}]") result = test.execute(pe, test_config_data) # Overall Result can only go lower (Pass -> Warn -> Fail) if result == Result.PASS: - logging.debug(f'{result}') + logging.debug(f"{result}") elif result == Result.SKIP: - logging.debug(f'{result}: No Requirements for [{machine_type}][{profile}]') + logging.debug(f"{result}: No Requirements for [{machine_type}][{profile}]") elif overall_result == Result.PASS: overall_result = result elif overall_result == Result.WARN and result == Result.FAIL: @@ -345,7 +295,7 @@ class TestWriteExecuteFlags(TestInterface): def name(self) -> str: """Returns the name of the test.""" - return 'Section data / code separation verification' + return "Section data / code separation verification" def execute(self, pe: PE, config_data: dict) -> Result: """Executes the test on the pefile. @@ -363,11 +313,11 @@ def execute(self, pe: PE, config_data: dict) -> Result: return Result.SKIP for section in pe.sections: - if (has_characteristic(section.Characteristics, SECTION_CHARACTERISTICS["IMAGE_SCN_MEM_EXECUTE"]) - and has_characteristic(section.Characteristics, SECTION_CHARACTERISTICS["IMAGE_SCN_MEM_WRITE"])): - - logging.error(f'[{Result.FAIL}]: Section [{section.Name.decode().strip()}] \ - should not be both Write and Execute') + if has_characteristic( + section.Characteristics, SECTION_CHARACTERISTICS["IMAGE_SCN_MEM_EXECUTE"] + ) and has_characteristic(section.Characteristics, SECTION_CHARACTERISTICS["IMAGE_SCN_MEM_WRITE"]): + logging.error(f"[{Result.FAIL}]: Section [{section.Name.decode().strip()}] \ + should not be both Write and Execute") return Result.FAIL return Result.PASS @@ -395,7 +345,7 @@ class TestSectionAlignment(TestInterface): def name(self) -> str: """Returns the name of the test.""" - return 'Section alignment verification' + return "Section alignment verification" def execute(self, pe: PE, config_data: dict) -> Result: """Executes the test on the pefile. 
@@ -474,7 +424,7 @@ class TestSubsystemValue(TestInterface): def name(self) -> str: """Returns the name of the test.""" - return 'Subsystem type verification' + return "Subsystem type verification" def execute(self, pe: PE, config_data: dict) -> Result: """Executes the test on the pefile. @@ -499,20 +449,22 @@ def execute(self, pe: PE, config_data: dict) -> Result: return Result.WARN if subsystem is None: - logging.warning(f'[{Result.WARN}]: Subsystem type is not present in the optional header.') + logging.warning(f"[{Result.WARN}]: Subsystem type is not present in the optional header.") return Result.WARN actual_subsystem = SUBSYSTEM_TYPE.get(subsystem) if actual_subsystem is None: - logging.error(f'[{Result.WARN}]: Invalid Subsystem present') + logging.error(f"[{Result.WARN}]: Invalid Subsystem present") return Result.FAIL if actual_subsystem in subsystems: return Result.PASS else: - logging.error(f'{Result.FAIL}: Subsystem Type [{actual_subsystem}] not allowed.') + logging.error(f"{Result.FAIL}: Subsystem Type [{actual_subsystem}] not allowed.") return Result.FAIL + + ########################### # TESTS END # ########################### @@ -520,38 +472,29 @@ def execute(self, pe: PE, config_data: dict) -> Result: def get_cli_args(args: Sequence[str]) -> argparse.Namespace: """Adds CLI arguments for using the image validation tool.""" - parser = argparse.ArgumentParser(description='An Image validation tool for memory mitigation') - - parser.add_argument('-i', '--file', - type=str, - required=True, - help='path to the image that needs to be validated.') - parser.add_argument('-d', '--debug', - action='store_true', - default=False) - - parser.add_argument('-p', '--profile', - type=str, - default=None, - help='the profile config to be verified against. \ - Will use the default, if not provided') + parser = argparse.ArgumentParser(description="An Image validation tool for memory mitigation") + + parser.add_argument("-i", "--file", type=str, required=True, help="path to the image that needs to be validated.") + parser.add_argument("-d", "--debug", action="store_true", default=False) + + parser.add_argument( + "-p", + "--profile", + type=str, + default=None, + help="the profile config to be verified against. 
\ + Will use the default, if not provided", + ) group = parser.add_mutually_exclusive_group() - group.add_argument('--set-nx-compat', - action='store_true', - default=False, - help='sets the NX_COMPAT flag') + group.add_argument("--set-nx-compat", action="store_true", default=False, help="sets the NX_COMPAT flag") - group.add_argument('--clear-nx-compat', - action='store_true', - default=False, - help='clears the NX_COMPAT flag') + group.add_argument("--clear-nx-compat", action="store_true", default=False, help="clears the NX_COMPAT flag") - group.add_argument('--get-nx-compat', - action='store_true', - default=False, - help='returns the value of the NX_COMPAT flag') + group.add_argument( + "--get-nx-compat", action="store_true", default=False, help="returns the value of the NX_COMPAT flag" + ) return parser.parse_args(args) @@ -559,7 +502,7 @@ def get_cli_args(args: Sequence[str]) -> argparse.Namespace: def main() -> None: """Main entry point into the image validation tool.""" # setup main console as logger - logger = logging.getLogger('') + logger = logging.getLogger("") logger.setLevel(logging.INFO) console = edk2_logging.setup_console_logging(False) logger.addHandler(console) @@ -601,14 +544,14 @@ def main() -> None: else: result = test_manager.run_tests(pe, args.profile) - logging.info(f'Overall Result: {result}') + logging.info(f"Overall Result: {result}") if result == Result.SKIP: - logging.info('No Test requirements in the config file for this file.') + logging.info("No Test requirements in the config file for this file.") elif result == Result.PASS or result == Result.WARN: sys.exit(0) else: sys.exit(1) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/edk2toolext/invocables/edk2_ci_build.py b/edk2toolext/invocables/edk2_ci_build.py index e4792836..ed31f7eb 100644 --- a/edk2toolext/invocables/edk2_ci_build.py +++ b/edk2toolext/invocables/edk2_ci_build.py @@ -13,6 +13,7 @@ file. This provides platform specific information to Edk2CiBuild invocable while allowing the invocable itself to remain platform agnostic. """ + import argparse import logging import os @@ -84,12 +85,25 @@ def GetPluginSettings(self) -> dict[str, Any]: class Edk2CiBuild(Edk2MultiPkgAwareInvocable): """Invocable supporting an iterative multi-package build and test process leveraging CI build plugins.""" + def AddCommandLineOptions(self, parser: argparse.ArgumentParser) -> None: """Adds command line arguments to Edk2CiBuild.""" - parser.add_argument('-d', '--disable-all', dest="disable", action="store_true", default=False, - help="Disable all plugins. Use =run to re-enable specific plugins") - parser.add_argument('-f', '--fail-fast', dest="fail_fast", action="store_true", default=False, - help="Exit on the first plugin failure.") + parser.add_argument( + "-d", + "--disable-all", + dest="disable", + action="store_true", + default=False, + help="Disable all plugins. Use =run to re-enable specific plugins", + ) + parser.add_argument( + "-f", + "--fail-fast", + dest="fail_fast", + action="store_true", + default=False, + help="Exit on the first plugin failure.", + ) super().AddCommandLineOptions(parser) def RetrieveCommandLineOptions(self, args: argparse.Namespace) -> None: @@ -130,7 +144,8 @@ def Go(self) -> int: # Bring up the common minimum environment. 
logging.log(edk2_logging.SECTION, "Getting Environment") (build_env, shell_env) = self_describing_environment.BootstrapEnvironment( - self.GetWorkspaceRoot(), self.GetActiveScopes(), self.GetSkippedDirectories()) + self.GetWorkspaceRoot(), self.GetActiveScopes(), self.GetSkippedDirectories() + ) env = shell_environment.GetBuildVars() # Bind our current execution environment into the shell vars. @@ -172,16 +187,18 @@ def Go(self) -> int: ts = JunitReport.create_new_testsuite(pkgToRunOn, package_class_name) packagebuildlog_path = os.path.join(log_directory, pkgToRunOn) _, txt_handle = edk2_logging.setup_txt_logger( - packagebuildlog_path, f"BUILDLOG_{pkgToRunOn}", logging_level=logging.DEBUG, isVerbose=True) + packagebuildlog_path, f"BUILDLOG_{pkgToRunOn}", logging_level=logging.DEBUG, isVerbose=True + ) loghandle = [txt_handle] shell_environment.CheckpointBuildVars() env = shell_environment.GetBuildVars() # load the package level .ci.yaml pkg_config_file = edk2path.GetAbsolutePathOnThisSystemFromEdk2RelativePath( - os.path.join(pkgToRunOn, pkgToRunOn + ".ci.yaml")) - if (pkg_config_file): - with open(pkg_config_file, 'r') as f: + os.path.join(pkgToRunOn, pkgToRunOn + ".ci.yaml") + ) + if pkg_config_file: + with open(pkg_config_file, "r") as f: pkg_config = yaml.safe_load(f) else: logging.info(f"No Pkg Config file for {pkgToRunOn}") @@ -197,8 +214,7 @@ def Go(self) -> int: for Descriptor in pluginList: # For each target for target in self.requested_target_list: - - if (target not in Descriptor.Obj.RunsOnTargetList()): + if target not in Descriptor.Obj.RunsOnTargetList(): continue edk2_logging.log_progress(f"--Running {pkgToRunOn}: {Descriptor.Name} {target} --") @@ -208,8 +224,9 @@ def Go(self) -> int: # Skip all plugins not marked as "run" if disable is set if self.disable_plugins and env.GetValue(Descriptor.Module.upper(), "skip") != "run": - edk2_logging.log_progress("--->Test Disabled due to disable-all flag!" - f" {Descriptor.Module} {target}") + edk2_logging.log_progress( + "--->Test Disabled due to disable-all flag!" f" {Descriptor.Module} {target}" + ) edk2_logging.log_progress(f"--->Set {Descriptor.Module}=run on the command line to run anyway.") continue @@ -221,12 +238,16 @@ def Go(self) -> int: plugin_output_stream = edk2_logging.create_output_stream() # merge the repo level and package level for this specific plugin - pkg_plugin_configuration = Edk2CiBuild.merge_config(self.PlatformSettings.GetPluginSettings(), - pkg_config, Descriptor.descriptor) + pkg_plugin_configuration = Edk2CiBuild.merge_config( + self.PlatformSettings.GetPluginSettings(), pkg_config, Descriptor.descriptor + ) # Still need to see if the package decided this should be skipped - if pkg_plugin_configuration is None or\ - "skip" in pkg_plugin_configuration and pkg_plugin_configuration["skip"]: + if ( + pkg_plugin_configuration is None + or "skip" in pkg_plugin_configuration + and pkg_plugin_configuration["skip"] + ): tc.SetSkipped() edk2_logging.log_progress("--->Test Skipped by package! 
%s" % Descriptor.Name) else: @@ -241,25 +262,30 @@ def Go(self) -> int: # - Plugin Helper Obj Instance # - testcase Object used for outputing junit results # - output_stream the StringIO output stream from this plugin - rc = Descriptor.Obj.RunBuildPlugin(pkgToRunOn, edk2path, pkg_plugin_configuration, - env, self.plugin_manager, self.helper, - tc, plugin_output_stream) + rc = Descriptor.Obj.RunBuildPlugin( + pkgToRunOn, + edk2path, + pkg_plugin_configuration, + env, + self.plugin_manager, + self.helper, + tc, + plugin_output_stream, + ) except Exception as exp: _, _, exc_traceback = sys.exc_info() logging.critical("EXCEPTION: {0}".format(exp)) exceptionPrint = traceback.format_exception(type(exp), exp, exc_traceback) logging.critical(" ".join(exceptionPrint)) - tc.SetError("Exception: {0}".format( - exp), "UNEXPECTED EXCEPTION") + tc.SetError("Exception: {0}".format(exp), "UNEXPECTED EXCEPTION") rc = 1 if rc is None or rc > 0: failure_num += 1 - logging.error( - f"--->Test Failed: {Descriptor.Name} {target} returned \"{rc}\"") + logging.error(f'--->Test Failed: {Descriptor.Name} {target} returned "{rc}"') if self.fail_fast: - logging.error('Exiting Early due to --fail-fast flag.') + logging.error("Exiting Early due to --fail-fast flag.") JunitReport.Output(os.path.join(self.GetWorkspaceRoot(), "Build", "TestSuites.xml")) return failure_num elif rc < 0: @@ -281,7 +307,7 @@ def Go(self) -> int: JunitReport.Output(os.path.join(self.GetWorkspaceRoot(), "Build", "TestSuites.xml")) # Print Overall Success - if (failure_num != 0): + if failure_num != 0: logging.error("Overall Build Status: Error") edk2_logging.log_progress(f"There were {failure_num} failures out of {total_num} attempts") else: @@ -290,7 +316,7 @@ def Go(self) -> int: return failure_num @staticmethod - def merge_config(gbl_config: dict, pkg_config: dict, descriptor: Optional[dict]=None) -> dict: + def merge_config(gbl_config: dict, pkg_config: dict, descriptor: Optional[dict] = None) -> dict: """Merge two configurations. One global and one specificto the package to create the proper config diff --git a/edk2toolext/invocables/edk2_ci_setup.py b/edk2toolext/invocables/edk2_ci_setup.py index a25836c8..d363e71e 100644 --- a/edk2toolext/invocables/edk2_ci_setup.py +++ b/edk2toolext/invocables/edk2_ci_setup.py @@ -12,6 +12,7 @@ file. This provides platform specific information to Edk2CiSetup invocable while allowing the invocable itself to remain platform agnostic. """ + import argparse import logging import os @@ -71,16 +72,36 @@ class Edk2CiBuildSetup(Edk2MultiPkgAwareInvocable): Edk2CiBuildSetup sets up the necessary environment for Edk2CiBuild by preparing all necessary submodules. 
""" + def AddCommandLineOptions(self, parser: argparse.ArgumentParser) -> None: """Adds command line arguments to Edk2CiBuild.""" - parser.add_argument('-ignore', '--ignore-git', dest="git_ignore", action="store_true", - help="Whether to ignore errors in the git cloning process", default=False) - parser.add_argument('--omnicache', '--reference', dest='omnicache_path', - default=os.environ.get('OMNICACHE_PATH')) - parser.add_argument('-force', '--force-git', dest="git_force", action="store_true", - help="Whether to force git repos to clone in the git cloning process", default=False) - parser.add_argument('-update-git', '--update-git', dest="git_update", action="store_true", - help="Whether to update git repos as needed in the git cloning process", default=False) + parser.add_argument( + "-ignore", + "--ignore-git", + dest="git_ignore", + action="store_true", + help="Whether to ignore errors in the git cloning process", + default=False, + ) + parser.add_argument( + "--omnicache", "--reference", dest="omnicache_path", default=os.environ.get("OMNICACHE_PATH") + ) + parser.add_argument( + "-force", + "--force-git", + dest="git_force", + action="store_true", + help="Whether to force git repos to clone in the git cloning process", + default=False, + ) + parser.add_argument( + "-update-git", + "--update-git", + dest="git_update", + action="store_true", + help="Whether to update git repos as needed in the git cloning process", + default=False, + ) super().AddCommandLineOptions(parser) def RetrieveCommandLineOptions(self, args: argparse.Namespace) -> None: @@ -115,10 +136,14 @@ def Go(self) -> int: """Executes the core functionality of the Edk2CiSetup invocable.""" setup_dependencies = self.PlatformSettings.GetDependencies() logging.debug(f"Dependencies list {setup_dependencies}") - repos = repo_resolver.resolve_all(self.GetWorkspaceRoot(), - setup_dependencies, - ignore=self.git_ignore, force=self.git_force, - update_ok=self.git_update, omnicache_dir=self.omnicache_path) + repos = repo_resolver.resolve_all( + self.GetWorkspaceRoot(), + setup_dependencies, + ignore=self.git_ignore, + force=self.git_force, + update_ok=self.git_update, + omnicache_dir=self.omnicache_path, + ) logging.info(f"Repo resolver resolved {repos}") diff --git a/edk2toolext/invocables/edk2_multipkg_aware_invocable.py b/edk2toolext/invocables/edk2_multipkg_aware_invocable.py index 947a6581..6678e5fb 100644 --- a/edk2toolext/invocables/edk2_multipkg_aware_invocable.py +++ b/edk2toolext/invocables/edk2_multipkg_aware_invocable.py @@ -20,6 +20,7 @@ platform. Platform specific data is provided via the MultiPkgAwareSettingsInterface """ + import argparse from typing import Iterable @@ -186,16 +187,32 @@ def AddCommandLineOptions(self, parserObj: argparse.ArgumentParser) -> None: arch_options = f' \n[{",".join(self.PlatformSettings.GetArchitecturesSupported())}]' target_options = f' \n[{",".join(self.PlatformSettings.GetTargetsSupported())}]' - parserObj.add_argument('-p', '--pkg', '--pkg-dir', dest='packageList', type=str, - help='CSV of EDKII packages / folder containing packages to operate on. ' - f'{pkg_options}', - action="append", default=[]) - parserObj.add_argument('-a', '--arch', dest="requested_arch", type=str, default=None, - help='CSV of architectures to operate on.' - f'{arch_options}') - parserObj.add_argument('-t', '--target', dest='requested_target', type=str, default=None, - help='CSV of targets to operate on.' 
- f'{target_options}') + parserObj.add_argument( + "-p", + "--pkg", + "--pkg-dir", + dest="packageList", + type=str, + help="CSV of EDKII packages / folder containing packages to operate on. " f"{pkg_options}", + action="append", + default=[], + ) + parserObj.add_argument( + "-a", + "--arch", + dest="requested_arch", + type=str, + default=None, + help="CSV of architectures to operate on." f"{arch_options}", + ) + parserObj.add_argument( + "-t", + "--target", + dest="requested_target", + type=str, + default=None, + help="CSV of targets to operate on." f"{target_options}", + ) def RetrieveCommandLineOptions(self, args: argparse.Namespace) -> None: """Retrieve command line options from the argparser.""" @@ -220,14 +237,14 @@ def RetrieveCommandLineOptions(self, args: argparse.Namespace) -> None: def InputParametersConfiguredCallback(self) -> None: """Initializes the environment once input parameters are collected.""" - if (len(self.requested_package_list) == 0): + if len(self.requested_package_list) == 0: self.requested_package_list = list(self.PlatformSettings.GetPackagesSupported()) self.PlatformSettings.SetPackages(self.requested_package_list) - if (len(self.requested_architecture_list) == 0): + if len(self.requested_architecture_list) == 0: self.requested_architecture_list = list(self.PlatformSettings.GetArchitecturesSupported()) self.PlatformSettings.SetArchitectures(self.requested_architecture_list) - if (len(self.requested_target_list) == 0): + if len(self.requested_target_list) == 0: self.requested_target_list = list(self.PlatformSettings.GetTargetsSupported()) self.PlatformSettings.SetTargets(self.requested_target_list) diff --git a/edk2toolext/invocables/edk2_parse.py b/edk2toolext/invocables/edk2_parse.py index c4955412..765e297b 100644 --- a/edk2toolext/invocables/edk2_parse.py +++ b/edk2toolext/invocables/edk2_parse.py @@ -6,6 +6,7 @@ # SPDX-License-Identifier: BSD-2-Clause-Patent ## """An invocable to run workspace parsers on a workspace and generate a database.""" + import argparse import logging import os @@ -43,31 +44,31 @@ class AppendSplitAction(argparse.Action): example: -p Pkg1,Pkg2 -p Pkg3 => ['Pkg1', 'Pkg2', 'Pkg3'] """ + def __call__( - self, - parser: ArgumentParser, - namespace: Namespace, - values: Sequence[str], - option_string: Optional[str] = None - ) -> None: + self, parser: ArgumentParser, namespace: Namespace, values: Sequence[str], option_string: Optional[str] = None ) -> None: """The command to invoke the action.""" items = getattr(namespace, self.dest, []) - items.extend(values.split(',')) + items.extend(values.split(",")) setattr(namespace, self.dest, items) class ParseSettingsManager(MultiPkgAwareSettingsInterface): """Settings to support ReportSettingsManager functionality.""" + def GetPackagesSupported(self) -> Iterable[str]: """Returns an iterable of edk2 packages supported by this build.""" # Re-define and return an empty list instead of raising an exception as this is only needed when parsing # based off of a CI Settings File. return [] + def GetArchitecturesSupported(self) -> Iterable: """Returns an iterable of edk2 architectures supported by this build.""" # Re-define and return an empty list instead of raising an exception as this is only needed when parsing # based off of a CI Settings File. 
return [] + def GetTargetsSupported(self) -> Iterable: """Returns an iterable of edk2 target tags supported by this build.""" # Re-define and return an empty list instead of raising an exception as this is only needed when parsing @@ -107,20 +108,40 @@ def GetVerifyCheckRequired(self) -> bool: def AddCommandLineOptions(self, parserObj: ArgumentParser) -> None: """Adds the command line options.""" - super().AddCommandLineOptions(parserObj) # Adds the CI Settings File options - parserObj.add_argument('--clear', '--Clear', "--CLEAR", dest='clear', action='store_true', - help="Deletes the database before parsing the environment.") - parserObj.add_argument('--source-stats', '-S', dest='source_stats', action='store_true', default=False, - help="Uses pygount to generate code statistics for each parsed source file, including " - "lines of code, comments, and blank lines. Greatly increases parse time.") - parserObj.add_argument('-l', '--load-table', '--Load-Table', '--LOAD-TABLE', dest='extra_tables', - action=AppendSplitAction, default=[], metavar='', - help="Comma separated path to a python file containing a `TableParser`(s). " - "Includes this table with the default tables. Can be provided multiple times") + super().AddCommandLineOptions(parserObj)  # Adds the CI Settings File options + parserObj.add_argument( + "--clear", + "--Clear", + "--CLEAR", + dest="clear", + action="store_true", + help="Deletes the database before parsing the environment.", + ) + parserObj.add_argument( + "--source-stats", + "-S", + dest="source_stats", + action="store_true", + default=False, + help="Uses pygount to generate code statistics for each parsed source file, including " + "lines of code, comments, and blank lines. Greatly increases parse time.", + ) + parserObj.add_argument( + "-l", + "--load-table", + "--Load-Table", + "--LOAD-TABLE", + dest="extra_tables", + action=AppendSplitAction, + default=[], + metavar="", + help="Comma separated path to a python file containing a `TableParser`(s). " + "Includes this table with the default tables. Can be provided multiple times", + ) def RetrieveCommandLineOptions(self, args: Namespace) -> None: """Retrieves the command line options.""" - super().RetrieveCommandLineOptions(args) # Stores the CI Settings File options + super().RetrieveCommandLineOptions(args)  # Stores the CI Settings File options self.clear = args.clear self.is_uefi_builder = locate_class_in_module(self.PlatformModule, UefiBuilder) is not None self.extra_tables = args.extra_tables @@ -133,8 +154,9 @@ def GetLoggingFileName(self, loggerType: str) -> str: def Go(self) -> int: """Executes the invocable. Runs the subcommand specified by the user.""" - logging.warning("stuart_parse is in active development. Please report any issues to the edk2-pytool-extensions " - "repo.") + logging.warning( + "stuart_parse is in active development. Please report any issues to the edk2-pytool-extensions " "repo." 
+ ) db_path = Path(self.GetWorkspaceRoot()) / self.GetLoggingFolderRelativeToRoot() / DB_NAME pathobj = Edk2Path(self.GetWorkspaceRoot(), self.GetPackagesPath()) env = shell_environment.GetBuildVars() @@ -163,7 +185,7 @@ def Go(self) -> int: else: self.parse_with_ci_settings(db, pathobj, env) - logging.info(f'Database generated at {db_path}.') + logging.info(f"Database generated at {db_path}.") return 0 def parse_with_builder_settings(self, db: Edk2DB, pathobj: Edk2Path, env: VarDict) -> int: @@ -179,8 +201,9 @@ def parse_with_builder_settings(self, db: Edk2DB, pathobj: Edk2Path, env: VarDic build_settings.SkipBuild = True build_settings.SkipPostBuild = True build_settings.FlashImage = False - build_settings.Go(self.GetWorkspaceRoot(), os.pathsep.join(self.GetPackagesPath()), - self.helper, self.plugin_manager) + build_settings.Go( + self.GetWorkspaceRoot(), os.pathsep.join(self.GetPackagesPath()), self.helper, self.plugin_manager + ) build_settings.PlatformPreBuild() except Exception as e: exception_msg = e @@ -190,7 +213,7 @@ def parse_with_builder_settings(self, db: Edk2DB, pathobj: Edk2Path, env: VarDic return -1 env_dict = env.GetAllBuildKeyValues() | env.GetAllNonBuildKeyValues() - # Log the environment for debug purposes + # Log the environment for debug purposes for key, value in env_dict.items(): logging.debug(f" {key} = {value}") logging.info("Running parsers with the following settings:") @@ -238,11 +261,9 @@ def parse_with_ci_settings(self, db: Edk2DB, pathobj: Edk2Path, env: VarDict) -> def _get_package_config(self, pathobj: Edk2Path, pkg: str) -> str: """Gets configuration information for a package from the ci.yaml file.""" - pkg_config_file = pathobj.GetAbsolutePathOnThisSystemFromEdk2RelativePath( - str(Path(pkg, pkg + ".ci.yaml")) - ) + pkg_config_file = pathobj.GetAbsolutePathOnThisSystemFromEdk2RelativePath(str(Path(pkg, pkg + ".ci.yaml"))) if pkg_config_file: - with open(pkg_config_file, 'r') as f: + with open(pkg_config_file, "r") as f: return yaml.safe_load(f) else: logging.debug(f"No package config file for {pkg}") @@ -256,19 +277,20 @@ def load_extra_tables(self, table_paths: list) -> list[TableGenerator]: table_list = [] for path in table_paths: if not Path(path).exists(): - logging.warning(f'[{path}] does not exist; Skipping.') + logging.warning(f"[{path}] does not exist; Skipping.") continue try: module = import_module_by_file_name(path) table_generator = locate_class_in_module(module, TableGenerator)() table_list.append(table_generator) except TypeError: - logging.warning(f'[{path}] does not contain a TableGenerator class; Skipping.') + logging.warning(f"[{path}] does not contain a TableGenerator class; Skipping.") except SyntaxError as e: - logging.warning(f'Failed to register [{path}] due to a SyntaxError; Error:') - logging.warning(f' {e}') + logging.warning(f"Failed to register [{path}] due to a SyntaxError; Error:") + logging.warning(f" {e}") return table_list + def main() -> None: """Entry point to invoke Edk2Parse.""" Edk2Parse().Invoke() diff --git a/edk2toolext/invocables/edk2_platform_build.py b/edk2toolext/invocables/edk2_platform_build.py index 43bd7ad3..9d6984cb 100644 --- a/edk2toolext/invocables/edk2_platform_build.py +++ b/edk2toolext/invocables/edk2_platform_build.py @@ -13,6 +13,7 @@ information to the Edk2PlatformBuild invocable while allowing the invocable itself to remain platform agnostic. 
""" + import argparse import logging import os @@ -73,7 +74,7 @@ def AddCommandLineOptions(self, parserObj: argparse.ArgumentParser) -> None: try: # if it's not, we will try to find it in the module that was originally provided. self.PlatformBuilder = locate_class_in_module(self.PlatformModule, UefiBuilder)() - except (TypeError): + except TypeError: raise RuntimeError(f"UefiBuild not found in module:\n{dir(self.PlatformModule)}") self.PlatformBuilder.AddPlatformCommandLineOptions(parserObj) @@ -100,14 +101,15 @@ def AddParserEpilog(self) -> str: custom_epilog += "CLI Env Variables:" for v in variables: # Setup wrap and print first portion of description - desc = wrap(v.description, max_desc_len, - drop_whitespace=True, break_on_hyphens=True, break_long_words=True) + desc = wrap( + v.description, max_desc_len, drop_whitespace=True, break_on_hyphens=True, break_long_words=True + ) custom_epilog += f"\n {v.name:<{max_name_len}} - {desc[0]:<{max_desc_len}} [{v.default}]" # If the line actually wrapped, we can print the rest of the lines here for d in desc[1:]: custom_epilog += f"\n {'':<{max_name_len}} {d:{max_desc_len}}" - custom_epilog += '\n\n' + custom_epilog += "\n\n" return custom_epilog + epilog @@ -134,7 +136,8 @@ def Go(self) -> int: Edk2PlatformBuild.collect_rust_info() (build_env, shell_env) = self_describing_environment.BootstrapEnvironment( - self.GetWorkspaceRoot(), self.GetActiveScopes(), self.GetSkippedDirectories()) + self.GetWorkspaceRoot(), self.GetActiveScopes(), self.GetSkippedDirectories() + ) # Bind our current execution environment into the shell vars. ph = os.path.dirname(sys.executable) @@ -151,8 +154,7 @@ def Go(self) -> int: # Load plugins logging.log(edk2_logging.SECTION, "Loading Plugins") pm = plugin_manager.PluginManager() - failedPlugins = pm.SetListOfEnvironmentDescriptors( - build_env.plugins) + failedPlugins = pm.SetListOfEnvironmentDescriptors(build_env.plugins) if failedPlugins: logging.critical("One or more plugins failed to load. Halting build.") for a in failedPlugins: @@ -160,7 +162,7 @@ def Go(self) -> int: raise Exception("One or more plugins failed to load.") helper = HelperFunctions() - if (helper.LoadFromPluginManager(pm) > 0): + if helper.LoadFromPluginManager(pm) > 0: raise Exception("One or more helper plugins failed to load.") # Make a pathobj so we can normalize and validate the workspace @@ -171,9 +173,7 @@ def Go(self) -> int: # Now we can actually kick off a build. # logging.log(edk2_logging.SECTION, "Kicking off build") - ret = self.PlatformBuilder.Go(pathobj.WorkspacePath, - os.pathsep.join(pathobj.PackagePathList), - helper, pm) + ret = self.PlatformBuilder.Go(pathobj.WorkspacePath, os.pathsep.join(pathobj.PackagePathList), helper, pm) logging.log(edk2_logging.SECTION, f"Log file is located at: {self.log_filename}") return ret diff --git a/edk2toolext/invocables/edk2_pr_eval.py b/edk2toolext/invocables/edk2_pr_eval.py index 49e0627e..b09758e7 100644 --- a/edk2toolext/invocables/edk2_pr_eval.py +++ b/edk2toolext/invocables/edk2_pr_eval.py @@ -14,6 +14,7 @@ file. This provides platform specific information to Edk2PrEval invocable while allowing the invocable itself to remain platform agnostic. 
""" + import argparse import logging import os @@ -59,11 +60,7 @@ def GetPlatformDscAndConfig(self) -> tuple: ``` """ - def FilterPackagesToTest( - self, - changedFilesList: list, - potentialPackagesList: list - ) -> list: + def FilterPackagesToTest(self, changedFilesList: list, potentialPackagesList: list) -> list: """Filter potential packages to test based on changed files. !!! tip @@ -107,18 +104,34 @@ class Edk2PrEval(Edk2MultiPkgAwareInvocable): def AddCommandLineOptions(self, parserObj: argparse.ArgumentParser) -> None: """Adds command line options to the argparser.""" - parserObj.add_argument("--pr-target", dest='pr_target', type=str, default=None, - help="PR Branch Target. Allows build optimizations for pull request" - " validation based on files changed. If a package doesn't need testing then it will" - " be skipped. Example --pr-target origin/master", required=True) - parserObj.add_argument("--output-csv-format-string", dest='output_csv_format_string', type=str, default=None, - help="Provide format string that will be output to stdout a full csv of packages" - " to be tested. Valid Tokens: {pkgcsv}" - " Example --output-csv-format-string test={pkgcsv}") - parserObj.add_argument("--output-count-format-string", dest='output_count_format_string', type=str, - default=None, help="Provide format string that will be output to stdout the count of" - " packages to be tested. Valid Tokens: {pkgcount}" - " Example --output-count-format-string PackageCount={pkgcount}") + parserObj.add_argument( + "--pr-target", + dest="pr_target", + type=str, + default=None, + help="PR Branch Target. Allows build optimizations for pull request" + " validation based on files changed. If a package doesn't need testing then it will" + " be skipped. Example --pr-target origin/master", + required=True, + ) + parserObj.add_argument( + "--output-csv-format-string", + dest="output_csv_format_string", + type=str, + default=None, + help="Provide format string that will be output to stdout a full csv of packages" + " to be tested. Valid Tokens: {pkgcsv}" + " Example --output-csv-format-string test={pkgcsv}", + ) + parserObj.add_argument( + "--output-count-format-string", + dest="output_count_format_string", + type=str, + default=None, + help="Provide format string that will be output to stdout the count of" + " packages to be tested. Valid Tokens: {pkgcount}" + " Example --output-count-format-string PackageCount={pkgcount}", + ) super().AddCommandLineOptions(parserObj) def RetrieveCommandLineOptions(self, args: argparse.Namespace) -> None: @@ -152,7 +165,8 @@ def Go(self) -> int: # A packages path is ok to drop for this because if it isn't populated it is assumed outside # the repository and thus will not trigger the build. self.edk2_path_obj = path_utilities.Edk2Path( - self.GetWorkspaceRoot(), self.GetPackagesPath(), error_on_invalid_pp=False) + self.GetWorkspaceRoot(), self.GetPackagesPath(), error_on_invalid_pp=False + ) self.logger = logging.getLogger("edk2_pr_eval") actualPackagesDict = self.get_packages_to_build(self.requested_package_list) @@ -203,10 +217,9 @@ def get_packages_to_build(self, possible_packages: list) -> dict: # for f in files: if not self.edk2_path_obj.GetContainingPackage(os.path.abspath(f)): - return dict.fromkeys(possible_packages, - "Policy 0 - Build all packages if " - "a file is modified outside a " - "package.") + return dict.fromkeys( + possible_packages, "Policy 0 - Build all packages if " "a file is modified outside a " "package." 
+ ) remaining_packages = possible_packages.copy() # start with all possible packages and remove each # package once it is determined to be built. This optimization @@ -240,7 +253,7 @@ def get_packages_to_build(self, possible_packages: list) -> dict: # Ignore a file in which we fail to get the package continue - if (pkg not in packages_to_build.keys() and pkg in remaining_packages): + if pkg not in packages_to_build.keys() and pkg in remaining_packages: packages_to_build[pkg] = "Policy 2 - Build any package that has changed" remaining_packages.remove(pkg) @@ -276,7 +289,7 @@ def get_packages_to_build(self, possible_packages: list) -> dict: # NOTE: future enhancement could be to check actual file dependencies for a in public_package_changes: for p in remaining_packages[:]: # slice so we can delete as we go - if (self._does_pkg_depend_on_package(p, a)): + if self._does_pkg_depend_on_package(p, a): packages_to_build[p] = f"Policy 3 - Package depends on {a}" remaining_packages.remove(p) # remove from remaining packages @@ -325,8 +338,10 @@ def get_packages_to_build(self, possible_packages: list) -> dict: for p in remaining_packages[:]: dsc, defines = self._get_package_ci_information(p) if not dsc: - logging.debug(f"Policy 5 - Package {p} skipped due to missing ci.dsc file or missing DscPath" - " section of the PrEval settings.") + logging.debug( + f"Policy 5 - Package {p} skipped due to missing ci.dsc file or missing DscPath" + " section of the PrEval settings." + ) continue dsc_parser = DscParser() @@ -395,13 +410,13 @@ def _get_files_that_changed_in_this_pr(self, base_branch: str) -> tuple: cmd_params = f"diff --name-only HEAD..{base_branch}" rc = RunCmd("git", cmd_params, outstream=output) - if (rc == 0): + if rc == 0: self.logger.debug("git diff command returned successfully!") else: self.logger.critical("git diff returned error return value: %s" % str(rc)) return (rc, []) - if (output.getvalue() is None): + if output.getvalue() is None: self.logger.info("No files listed in diff") return (0, []) @@ -455,7 +470,7 @@ def _is_public_file(self, filepath: str) -> bool: return False dec = None - if (pkg in self.parsed_dec_cache): + if pkg in self.parsed_dec_cache: dec = self.parsed_dec_cache[pkg] else: abs_pkg_path = self.edk2_path_obj.GetAbsolutePathOnThisSystemFromEdk2RelativePath(pkg) @@ -471,12 +486,7 @@ def _is_public_file(self, filepath: str) -> bool: return False - def _walk_dir_for_filetypes( - self, - extensionlist: list, - directory: str, - ignorelist: Optional[list]=None - ) -> list: + def _walk_dir_for_filetypes(self, extensionlist: list, directory: str, ignorelist: Optional[list] = None) -> list: """Walks a directory for all items ending in certain extension.""" if not isinstance(extensionlist, list): raise ValueError("Expected list but got " + str(type(extensionlist))) @@ -505,9 +515,9 @@ def _walk_dir_for_filetypes( for Extension in extensionlist_lower: if File.lower().endswith(Extension): ignoreIt = False - if (ignorelist is not None): + if ignorelist is not None: for c in ignorelist_lower: - if (File.lower().startswith(c)): + if File.lower().startswith(c): ignoreIt = True break if not ignoreIt: @@ -518,14 +528,14 @@ def _walk_dir_for_filetypes( def _get_package_ci_information(self, pkg_name: str) -> str: pkg_path = Path(self.edk2_path_obj.GetAbsolutePathOnThisSystemFromEdk2RelativePath(pkg_name)) - ci_file = pkg_path.joinpath(f'{pkg_name}.ci.yaml') + ci_file = pkg_path.joinpath(f"{pkg_name}.ci.yaml") dsc = None defines = None if not ci_file.exists(): return (None, None) - with 
open(ci_file, 'r') as f: + with open(ci_file, "r") as f: data = yaml.safe_load(f) dsc = data.get("PrEval", {"DscPath": None})["DscPath"] dsc = str(pkg_path / dsc) if dsc else None diff --git a/edk2toolext/invocables/edk2_setup.py b/edk2toolext/invocables/edk2_setup.py index a5e04db8..67c6081b 100644 --- a/edk2toolext/invocables/edk2_setup.py +++ b/edk2toolext/invocables/edk2_setup.py @@ -11,6 +11,7 @@ file. This provides platform specific information to Edk2PlatformSetup invocable while allowing the invocable itself to remain platform agnostic. """ + import argparse import logging import os @@ -34,8 +35,9 @@ ) -class RequiredSubmodule(): +class RequiredSubmodule: """A class containing information about a git submodule.""" + def __init__(self, path: str, recursive: bool = True) -> None: """Object to hold necessary information for resolving submodules. @@ -83,9 +85,10 @@ class Edk2PlatformSetup(Edk2MultiPkgAwareInvocable): def AddCommandLineOptions(self, parserObj: argparse.ArgumentParser) -> None: """Adds command line options to the argparser.""" - parserObj.add_argument('--force', '--FORCE', '--Force', dest="force", action='store_true', default=False) - parserObj.add_argument('--omnicache', '--OMNICACHE', '--Omnicache', dest='omnicache_path', - default=os.environ.get('OMNICACHE_PATH')) + parserObj.add_argument("--force", "--FORCE", "--Force", dest="force", action="store_true", default=False) + parserObj.add_argument( + "--omnicache", "--OMNICACHE", "--Omnicache", dest="omnicache_path", default=os.environ.get("OMNICACHE_PATH") + ) super().AddCommandLineOptions(parserObj) @@ -132,17 +135,17 @@ def Go(self) -> int: workspace_path = self.GetWorkspaceRoot() details = repo_details(workspace_path) - git_version = details['GitVersion'] + git_version = details["GitVersion"] - version_aggregator.GetVersionAggregator().ReportVersion("Git", - git_version, - version_aggregator.VersionTypes.TOOL) + version_aggregator.GetVersionAggregator().ReportVersion( + "Git", git_version, version_aggregator.VersionTypes.TOOL + ) # Pre-setup cleaning if "--force" is specified. if self.force_it: try: # Clean the workspace - edk2_logging.log_progress('## Cleaning the root repo') + edk2_logging.log_progress("## Cleaning the root repo") clean(workspace_path, ignore_files=[f'Build/{self.GetLoggingFileName("txt")}.txt']) edk2_logging.log_progress("Done.\n") except InvalidGitRepositoryError: @@ -154,9 +157,9 @@ def Go(self) -> int: for required_submodule in required_submodules: try: submodule_path = os.path.join(workspace_path, required_submodule.path) - edk2_logging.log_progress(f'## Cleaning Git Submodule: {required_submodule.path}') + edk2_logging.log_progress(f"## Cleaning Git Submodule: {required_submodule.path}") submodule_clean(workspace_path, required_submodule) - edk2_logging.log_progress('## Done.\n') + edk2_logging.log_progress("## Done.\n") except InvalidGitRepositoryError: logging.error(f"Error when trying to clean {submodule_path}") logging.error(f"Invalid Git Repository at {submodule_path}") @@ -168,19 +171,19 @@ def Go(self) -> int: # Resolve all of the submodules to the specified branch and commit. i.e. sync, then update for submodule in required_submodules: - edk2_logging.log_progress(f'## Resolving Git Submodule: {submodule.path}') + edk2_logging.log_progress(f"## Resolving Git Submodule: {submodule.path}") submodule_details = repo_details(os.path.join(workspace_path, submodule.path)) # Don't update a dirty submodule unless we are forcing. 
- if submodule_details['Dirty'] and not self.force_it: - logging.info('-- NOTE: Submodule currently exists and appears to have local changes!') - logging.info('-- Skipping fetch!') + if submodule_details["Dirty"] and not self.force_it: + logging.info("-- NOTE: Submodule currently exists and appears to have local changes!") + logging.info("-- Skipping fetch!") logging.log(edk2_logging.get_progress_level(), "Done.\n") continue try: # Sync, then Update & Init the submodule submodule_resolve(workspace_path, submodule, omnicache_path=self.omnicache_path) - edk2_logging.log_progress('## Done.\n') + edk2_logging.log_progress("## Done.\n") except InvalidGitRepositoryError: logging.error(f"Error when trying to resolve {submodule.path}") logging.error(f"Invalid Git Repository at {submodule.path}") diff --git a/edk2toolext/invocables/edk2_update.py b/edk2toolext/invocables/edk2_update.py index f0399d51..8736a3e7 100644 --- a/edk2toolext/invocables/edk2_update.py +++ b/edk2toolext/invocables/edk2_update.py @@ -12,6 +12,7 @@ file. This provides platform specific information to Edk2Update invocable while allowing the invocable itself to remain platform agnostic. """ + import argparse import logging @@ -35,13 +36,15 @@ class UpdateSettingsManager(MultiPkgAwareSettingsInterface): def build_env_changed( - build_env: self_describing_environment.self_describing_environment, - build_env_2: self_describing_environment.self_describing_environment - ) -> bool: + build_env: self_describing_environment.self_describing_environment, + build_env_2: self_describing_environment.self_describing_environment, +) -> bool: """Return True if build_env has changed.""" - return (build_env.paths != build_env_2.paths) or \ - (build_env.extdeps != build_env_2.extdeps) or \ - (build_env.plugins != build_env_2.plugins) + return ( + (build_env.paths != build_env_2.paths) + or (build_env.extdeps != build_env_2.extdeps) + or (build_env.plugins != build_env_2.plugins) + ) class Edk2Update(Edk2MultiPkgAwareInvocable): @@ -104,8 +107,10 @@ def Go(self) -> int: break # if the environment has changed, increment the retry count and notify user RetryCount += 1 - logging.log(edk2_logging.SECTION, - f"Something in the environment changed. Updating environment again. Pass {RetryCount}") + logging.log( + edk2_logging.SECTION, + f"Something in the environment changed. Updating environment again. Pass {RetryCount}", + ) build_env_old = build_env self_describing_environment.DestroyEnvironment() diff --git a/edk2toolext/nuget_publishing.py b/edk2toolext/nuget_publishing.py index 2bd9ca74..8efce45b 100644 --- a/edk2toolext/nuget_publishing.py +++ b/edk2toolext/nuget_publishing.py @@ -7,6 +7,7 @@ # SPDX-License-Identifier: BSD-2-Clause-Patent ## """Provides configuration, packing, and publishing nuget packages to a release feed.""" + import argparse import datetime import logging @@ -36,9 +37,10 @@ class NugetSupport(object): """Support object for Nuget Publishing tool to configure NuPkg information, pack and send.""" + # NOTE: This *should* have a namespace (http://schemas.microsoft.com/packaging/2010/07/nuspec.xsd) # but ElementTree is incredibly stupid with namespaces. - NUSPEC_TEMPLATE_XML = r''' + NUSPEC_TEMPLATE_XML = r""" @@ -59,11 +61,11 @@ class NugetSupport(object): -''' +""" RELEASE_NOTE_SHORT_STRING_MAX_LENGTH = 500 - def __init__(self, Name: Optional[str]=None, ConfigFile: Optional[str]=None) -> None: + def __init__(self, Name: Optional[str] = None, ConfigFile: Optional[str] = None) -> None: """Inits a new NugetSupport object. 
for new instances without existing config provide the Name parameter. @@ -74,11 +76,11 @@ def __init__(self, Name: Optional[str]=None, ConfigFile: Optional[str]=None) -> self.NewVersion = None self.ConfigChanged = False - if (ConfigFile is not None): + if ConfigFile is not None: self.FromConfigfile(ConfigFile) self.Name = self.ConfigData["name"] else: - if (Name is None): + if Name is None: raise ValueError("Cannot construct object with both Name and ConfigFile as None") self.ConfigData = {"name": Name} self.Config = None @@ -89,20 +91,20 @@ def CleanUp(self) -> None: for a in self.TempFileToDelete: os.remove(a) - def ToConfigFile(self, filepath: Optional[str]=None) -> int: + def ToConfigFile(self, filepath: Optional[str] = None) -> int: """Save config to a yaml file.""" - if (not self.ConfigChanged): + if not self.ConfigChanged: logging.debug("No Config Changes. Skip Writing config file") return 0 - if (filepath is None and self.Config is None): + if filepath is None and self.Config is None: logging.error("No Config File to save to.") return -1 - if (filepath is not None): + if filepath is not None: self.Config = filepath - if (filepath is None): + if filepath is None: logging.error("No filepath for Config File") with open(filepath, "w") as c: @@ -118,18 +120,18 @@ def FromConfigfile(self, filepath: str) -> None: self.ConfigData = yaml.safe_load(c) def SetBasicData( - self, - authors: str, - license: str, - project: str, - description: str, - server: str, - copyright: str, - repositoryType: Optional[str]=None, - repositoryUrl: Optional[str]=None, - repositoryBranch: Optional[str]=None, - repositoryCommit: Optional[str]=None - ) -> None: + self, + authors: str, + license: str, + project: str, + description: str, + server: str, + copyright: str, + repositoryType: Optional[str] = None, + repositoryUrl: Optional[str] = None, + repositoryBranch: Optional[str] = None, + repositoryCommit: Optional[str] = None, + ) -> None: """Set basic data in the config data.""" self.ConfigData["author_string"] = authors if license: @@ -184,7 +186,7 @@ def UpdateCopyright(self, copyright: str) -> None: self.ConfigData["copyright_string"] = copyright self.ConfigChanged = True - def UpdateTags(self, tags: list[str]=None) -> None: + def UpdateTags(self, tags: list[str] = None) -> None: """Update tags in the config data.""" if tags is None: tags = [] @@ -192,12 +194,12 @@ def UpdateTags(self, tags: list[str]=None) -> None: self.ConfigChanged = True def UpdateRepositoryInfo( - self, - r_type: Optional[str]=None, - url: Optional[str]=None, - branch:Optional[str]=None, - commit:Optional[str]=None - ) -> None: + self, + r_type: Optional[str] = None, + url: Optional[str] = None, + branch: Optional[str] = None, + commit: Optional[str] = None, + ) -> None: """Update repository information.""" if r_type: self.ConfigData["repository_type"] = r_type @@ -216,7 +218,7 @@ def Print(self) -> str: """Print info about the Nuget Object.""" print("=======================================") print(" Name: " + self.Name) - if (self.Config): + if self.Config: print(" ConfigFile: " + self.Config) else: print(" ConfigFile: NOT SET") @@ -235,7 +237,7 @@ def LogObject(self) -> None: """Logs info about Nuget Object to the logger.""" logging.debug("=======================================") logging.debug(" Name: " + self.Name) - if (self.Config): + if self.Config: logging.debug(" ConfigFile: " + self.Config) else: logging.debug(" ConfigFile: NOT SET") @@ -255,7 +257,7 @@ def LogObject(self) -> None: # create a nuspec file for packing # - def 
_MakeNuspecXml(self, ContentDir: str, ReleaseNotesText: Optional[str]=None) ->str: + def _MakeNuspecXml(self, ContentDir: str, ReleaseNotesText: Optional[str] = None) -> str: package = etree.fromstring(NugetSupport.NUSPEC_TEMPLATE_XML) package.attrib["xmlns"] = "http://schemas.microsoft.com/packaging/2010/07/nuspec.xsd" meta = package.find("./metadata") @@ -297,16 +299,16 @@ def _MakeNuspecXml(self, ContentDir: str, ReleaseNotesText: Optional[str]=None) meta.find("license").text = self.ConfigData["license"] meta.find("license").attrib["type"] = "expression" - if (ReleaseNotesText is not None): + if ReleaseNotesText is not None: logging.debug("Make Nuspec Xml - ReleaseNotesText is not none.") # # Make sure it doesn't exceed reasonable length of string # - if (len(ReleaseNotesText) > NugetSupport.RELEASE_NOTE_SHORT_STRING_MAX_LENGTH): + if len(ReleaseNotesText) > NugetSupport.RELEASE_NOTE_SHORT_STRING_MAX_LENGTH: logging.info("Make Nuspec Xml - ReleaseNotesText too long. Length is (%d)" % len(ReleaseNotesText)) logging.debug("Original ReleaseNotesText is: %s" % ReleaseNotesText) # cut it off at max length - ReleaseNotesText = ReleaseNotesText[:NugetSupport.RELEASE_NOTE_SHORT_STRING_MAX_LENGTH] + ReleaseNotesText = ReleaseNotesText[: NugetSupport.RELEASE_NOTE_SHORT_STRING_MAX_LENGTH] # walk back to trim at last end of sentence ReleaseNotesText = ReleaseNotesText.rpartition(".")[0].strip() logging.debug("New ReleaseNotesText is: %s" % ReleaseNotesText) @@ -337,7 +339,7 @@ def _GetNuPkgFileName(self, version: str) -> str: s += ".nupkg" return s - def Pack(self, version: str, OutputDir: str, ContentDir:str, RelNotes: Optional[str]=None) -> int: + def Pack(self, version: str, OutputDir: str, ContentDir: str, RelNotes: Optional[str] = None) -> int: """Pack the current contents into Nupkg.""" self.NewVersion = version @@ -361,7 +363,7 @@ def Pack(self, version: str, OutputDir: str, ContentDir:str, RelNotes: Optional[ # cmd += ["-NonInteractive"] ret = RunCmd(cmd[0], " ".join(cmd[1:])) - if (ret != 0): + if ret != 0: logging.error("Failed on nuget command. RC = 0x%x" % ret) return ret @@ -375,7 +377,7 @@ def Push(self, nuPackage: str, apikey: str) -> int: Raises: (Exception): file path is invalid """ - if (not os.path.isfile(nuPackage)): + if not os.path.isfile(nuPackage): raise Exception("Invalid file path for NuPkg file") logging.debug("Pushing %s file to server %s" % (nuPackage, self.ConfigData["server_url"])) @@ -388,7 +390,7 @@ def Push(self, nuPackage: str, apikey: str) -> int: output_buffer = StringIO() ret = RunCmd(cmd[0], " ".join(cmd[1:]), outstream=output_buffer) - if (ret != 0): + if ret != 0: # Rewind the buffer and capture the contents. 
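The seek(0)/read() pair below is the standard rewind-and-read pattern when RunCmd is handed a StringIO as its outstream: the cursor sits at the end of the buffer once the command finishes writing, so it must be rewound before the contents can be captured. A standalone sketch of the same idea using only the standard library (the command shown is a harmless stand-in for the nuget push invocation):

import io
import subprocess

def run_and_capture(cmd: list) -> tuple:
    """Sketch: run a command, buffer its output, rewind, then read it back."""
    buffer = io.StringIO()
    proc = subprocess.run(cmd, capture_output=True, text=True)
    buffer.write(proc.stdout)
    buffer.seek(0)  # rewind before reading, exactly as the code below does
    return (proc.returncode, buffer.read())

rc, text = run_and_capture(["git", "--version"])  # illustrative command only
if rc != 0:
    print("command failed:\n" + text)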
output_buffer.seek(0) output_contents = output_buffer.read() @@ -406,89 +408,142 @@ def Push(self, nuPackage: str, apikey: str) -> int: def GatherArguments() -> argparse.Namespace: """Adds CLI arguments for controlling the nuget_publishing tool.""" tempparser = argparse.ArgumentParser( - description='Nuget Helper Script for creating, packing, and pushing packages', add_help=False) - tempparser.add_argument('--Operation', dest="op", choices=["New", "Pack", "Push", "PackAndPush"], required=True) + description="Nuget Helper Script for creating, packing, and pushing packages", add_help=False + ) + tempparser.add_argument("--Operation", dest="op", choices=["New", "Pack", "Push", "PackAndPush"], required=True) # Get the operation the user wants to do (args, rest) = tempparser.parse_known_args() # now build up the real parser with required parameters - parser = argparse.ArgumentParser(description='Nuget Helper Script for creating, packing, and pushing packages') + parser = argparse.ArgumentParser(description="Nuget Helper Script for creating, packing, and pushing packages") parser.add_argument("--Dirty", dest="Dirty", action="store_true", help="Keep all temp files", default=False) - parser.add_argument('--Operation', dest="Operation", choices=["New", "Pack", "Push", "PackAndPush"], required=True) + parser.add_argument("--Operation", dest="Operation", choices=["New", "Pack", "Push", "PackAndPush"], required=True) parser.add_argument("--OutputLog", dest="OutputLog", help="Create an output log file") - if (args.op.lower() == "new"): - parser.add_argument("--ConfigFileFolderPath", dest="ConfigFileFolderPath", - help="Path to folder to save new config file to", required=True) - parser.add_argument('--Name', - dest='Name', - help=' The unique id/name of the package. This is a string naming the package', - required=True) - parser.add_argument('--Author', dest="Author", help=" Author string for publishing", required=True) + if args.op.lower() == "new": + parser.add_argument( + "--ConfigFileFolderPath", + dest="ConfigFileFolderPath", + help="Path to folder to save new config file to", + required=True, + ) + parser.add_argument( + "--Name", + dest="Name", + help=" The unique id/name of the package. 
This is a string naming the package", + required=True, + ) + parser.add_argument("--Author", dest="Author", help=" Author string for publishing", required=True) parser.add_argument("--ProjectUrl", dest="Project", help=" Project Url", required=True) - repo_group = parser.add_argument_group(title="Repository Parameters", - description="Optional Repository Parameters") - repo_group.add_argument("--RepositoryType", dest="RepositoryType", help=" Repository Type", - required=False) - repo_group.add_argument("--RepositoryUrl", dest="RepositoryUrl", help=" Repository Url", - required=False) - repo_group.add_argument("--RepositoryBranch", dest="RepositoryBranch", help=" Repository Branch", - required=False) - repo_group.add_argument("--RepositoryCommit", dest="RepositoryCommit", help=" Repository Commit", - required=False) - parser.add_argument('--LicenseIdentifier', dest="LicenseIdentifier", default=None, - choices=LICENSE_IDENTIFIER_SUPPORTED.keys(), help="Standard Licenses") - parser.add_argument('--Description', dest="Description", - help=" Description of package.", required=True) - parser.add_argument("--FeedUrl", dest="FeedUrl", - help="Feed Url of the nuget server feed", required=True) - parser.add_argument('--Copyright', dest="Copyright", help="Copyright string", required=False) - - elif (args.op.lower() == "pack" or args.op.lower() == "packandpush"): - parser.add_argument("--ConfigFilePath", dest="ConfigFilePath", - help="Path to config file", required=True) - parser.add_argument('--Version', dest="Version", help=" Version to publish", required=True) - parser.add_argument('--ReleaseNotesText', dest="ReleaseNotes", - help="Release Notes String", required=False) - parser.add_argument('--InputFolderPath', dest="InputFolderPath", - help="Relative/Absolute Path to folder containing content to pack.", - required=True) - parser.add_argument('--Copyright', dest="Copyright", help="Change the Copyright string") - parser.add_argument('--t', "-tag", dest="Tags", type=str, - help="Add tags to the nuspec. Multiple are --t Tag1,Tag2 or --t Tag1 --t Tag2", - action="append", default=[]) - parser.add_argument('--ApiKey', dest="ApiKey", - help="Api key to use. Default is 'VSTS' which will invoke interactive login", - default="VSTS") - parser.add_argument('--CustomLicensePath', dest="CustomLicensePath", default=None, - help=" If CustomLicense set in `new` phase, provide absolute path of License \ - File to pack. Does not override existing valid license.") - repo_group = parser.add_argument_group(title="Repository Parameters", - description="Optional Repository Parameters") - repo_group.add_argument("--RepositoryType", dest="RepositoryType", help=" Repository Type", - required=False) - repo_group.add_argument("--RepositoryUrl", dest="RepositoryUrl", help=" Change the repository Url", - required=False) - repo_group.add_argument("--RepositoryBranch", dest="RepositoryBranch", - help=" Change the repository branch", required=False) - repo_group.add_argument("--RepositoryCommit", dest="RepositoryCommit", - help=" Change the repository commit", required=False) - - elif (args.op.lower() == "push"): - parser.add_argument("--ConfigFilePath", dest="ConfigFilePath", - help="Path to config file", - required=True) - parser.add_argument('--PackageFile', dest="PackageFile", help="Path To Package File", required=True) - parser.add_argument('--ApiKey', dest="ApiKey", - help="Api key to use. 
Default is 'VSTS' which will invoke interactive login", - default="VSTS") - - if (args.op.lower() == "pack"): - parser.add_argument('--OutputFolderPath', - dest="OutputFolderPath", - help="Output folder where nupkg will be saved. Default is cwd", - default=os.getcwd()) + repo_group = parser.add_argument_group( + title="Repository Parameters", description="Optional Repository Parameters" + ) + repo_group.add_argument( + "--RepositoryType", dest="RepositoryType", help=" Repository Type", required=False + ) + repo_group.add_argument( + "--RepositoryUrl", dest="RepositoryUrl", help=" Repository Url", required=False + ) + repo_group.add_argument( + "--RepositoryBranch", dest="RepositoryBranch", help=" Repository Branch", required=False + ) + repo_group.add_argument( + "--RepositoryCommit", dest="RepositoryCommit", help=" Repository Commit", required=False + ) + parser.add_argument( + "--LicenseIdentifier", + dest="LicenseIdentifier", + default=None, + choices=LICENSE_IDENTIFIER_SUPPORTED.keys(), + help="Standard Licenses", + ) + parser.add_argument( + "--Description", dest="Description", help=" Description of package.", required=True + ) + parser.add_argument( + "--FeedUrl", dest="FeedUrl", help="Feed Url of the nuget server feed", required=True + ) + parser.add_argument("--Copyright", dest="Copyright", help="Copyright string", required=False) + + elif args.op.lower() == "pack" or args.op.lower() == "packandpush": + parser.add_argument( + "--ConfigFilePath", dest="ConfigFilePath", help="Path to config file", required=True + ) + parser.add_argument("--Version", dest="Version", help=" Version to publish", required=True) + parser.add_argument( + "--ReleaseNotesText", dest="ReleaseNotes", help="Release Notes String", required=False + ) + parser.add_argument( + "--InputFolderPath", + dest="InputFolderPath", + help="Relative/Absolute Path to folder containing content to pack.", + required=True, + ) + parser.add_argument("--Copyright", dest="Copyright", help="Change the Copyright string") + parser.add_argument( + "--t", + "-tag", + dest="Tags", + type=str, + help="Add tags to the nuspec. Multiple are --t Tag1,Tag2 or --t Tag1 --t Tag2", + action="append", + default=[], + ) + parser.add_argument( + "--ApiKey", + dest="ApiKey", + help="Api key to use. Default is 'VSTS' which will invoke interactive login", + default="VSTS", + ) + parser.add_argument( + "--CustomLicensePath", + dest="CustomLicensePath", + default=None, + help=" If CustomLicense set in `new` phase, provide absolute path of License \ + File to pack. 
Does not override existing valid license.", + ) + repo_group = parser.add_argument_group( + title="Repository Parameters", description="Optional Repository Parameters" + ) + repo_group.add_argument( + "--RepositoryType", dest="RepositoryType", help=" Repository Type", required=False + ) + repo_group.add_argument( + "--RepositoryUrl", dest="RepositoryUrl", help=" Change the repository Url", required=False + ) + repo_group.add_argument( + "--RepositoryBranch", + dest="RepositoryBranch", + help=" Change the repository branch", + required=False, + ) + repo_group.add_argument( + "--RepositoryCommit", + dest="RepositoryCommit", + help=" Change the repository commit", + required=False, + ) + + elif args.op.lower() == "push": + parser.add_argument( + "--ConfigFilePath", dest="ConfigFilePath", help="Path to config file", required=True + ) + parser.add_argument("--PackageFile", dest="PackageFile", help="Path To Package File", required=True) + parser.add_argument( + "--ApiKey", + dest="ApiKey", + help="Api key to use. Default is 'VSTS' which will invoke interactive login", + default="VSTS", + ) + + if args.op.lower() == "pack": + parser.add_argument( + "--OutputFolderPath", + dest="OutputFolderPath", + help="Output folder where nupkg will be saved. Default is cwd", + default=os.getcwd(), + ) return parser.parse_args() @@ -499,31 +554,31 @@ def main() -> int: ret = 0 # setup file based logging if outputReport specified - if (args.OutputLog): - if (len(args.OutputLog) < 2): + if args.OutputLog: + if len(args.OutputLog) < 2: logging.critical("the output log file parameter is invalid") return -2 # setup file based logging - filelogger = logging.FileHandler(filename=args.OutputLog, mode='w') + filelogger = logging.FileHandler(filename=args.OutputLog, mode="w") filelogger.setLevel(logging.DEBUG) - logging.getLogger('').addHandler(filelogger) + logging.getLogger("").addHandler(filelogger) logging.info("Log Started: " + datetime.datetime.strftime(datetime.datetime.now(), "%A, %B %d, %Y %I:%M%p")) TempOutDir = None NuPkgFilePath = None - if (args.Operation.lower() == "new"): + if args.Operation.lower() == "new": logging.critical("Generating new nuget configuration...") logging.debug("Checking input parameters for new") ConfigFilePath = os.path.join(args.ConfigFileFolderPath, args.Name.strip() + ".config.yaml") - if (not os.path.isdir(args.ConfigFileFolderPath)): + if not os.path.isdir(args.ConfigFileFolderPath): logging.critical("Config File Folder Path doesn't exist. %s" % args.ConfigFileFolderPath) raise Exception("Invalid Config File Folder. Doesn't exist") - if (os.path.isfile(ConfigFilePath)): + if os.path.isfile(ConfigFilePath): logging.critical("Config File already exists at that path. %s" % ConfigFilePath) raise Exception("Can't Create New Config file when file already exists") @@ -546,24 +601,25 @@ def main() -> int: args.RepositoryType, args.RepositoryUrl, args.RepositoryBranch, - args.RepositoryCommit) + args.RepositoryCommit, + ) nu.LogObject() ret = nu.ToConfigFile(ConfigFilePath) return ret - elif (args.Operation.lower() == "pack" or args.Operation.lower() == "packandpush"): + elif args.Operation.lower() == "pack" or args.Operation.lower() == "packandpush": logging.critical("Creating nuget package") logging.debug("Checking input parameters for packing") # check args - if (not os.path.isfile(args.ConfigFilePath)): + if not os.path.isfile(args.ConfigFilePath): logging.critical("Invalid Config File (%s). File doesn't exist" % args.ConfigFilePath) raise Exception("Invalid Config File. 
File doesn't exist") - if (not os.path.isdir(args.InputFolderPath)): + if not os.path.isdir(args.InputFolderPath): logging.critical("Invalid Input folder (%s). Folder doesn't exist" % args.InputFolderPath) raise Exception("Invalid Input folder. folder doesn't exist") contents = os.listdir(args.InputFolderPath) logging.debug("Input Folder contains %d files" % len(contents)) - if (len(contents) == 0): + if len(contents) == 0: logging.critical("No binary contents to pack in %s" % args.InputFolderPath) raise Exception("No binary contents to package") @@ -587,13 +643,12 @@ def main() -> int: logging.critical(" Verify custom license file path is in absolute format and valid") raise Exception("Invalid License.") - if (args.Copyright is not None): + if args.Copyright is not None: nu.UpdateCopyright(args.Copyright) - nu.UpdateRepositoryInfo(args.RepositoryType, args.RepositoryUrl, - args.RepositoryBranch, args.RepositoryCommit) + nu.UpdateRepositoryInfo(args.RepositoryType, args.RepositoryUrl, args.RepositoryBranch, args.RepositoryCommit) - if (len(args.Tags) > 0): + if len(args.Tags) > 0: tagListSet = set() for item in args.Tags: # Parse out the individual packages item_list = item.split(",") @@ -603,53 +658,53 @@ def main() -> int: tagListSet.add(individual_item.strip()) tagList = list(tagListSet) nu.UpdateTags(tagList) - ''' + """ ret = nu.ToConfigFile() if (ret != 0): logging.error("Failed to save config file. Return Code 0x%x" % ret) return ret - ''' + """ ret = nu.Pack(args.Version, TempOutDir, args.InputFolderPath, args.ReleaseNotes) - if (ret != 0): + if ret != 0: logging.error("Failed to pack. Return Code 0x%x" % ret) return ret NuPkgFilePath = nu.NuPackageFile - if (args.Operation.lower() == "pack"): - if (not os.path.isdir(args.OutputFolderPath)): + if args.Operation.lower() == "pack": + if not os.path.isdir(args.OutputFolderPath): logging.critical("Invalid Pack Output Folder (%s). Folder doesn't exist" % args.OutputFolderPath) raise Exception("Invalid Output folder. folder doesn't exist") # since it is pack only lets copy nupkg file to output shutil.copyfile(NuPkgFilePath, os.path.join(args.OutputFolderPath, os.path.basename(NuPkgFilePath))) NuPkgFilePath = os.path.join(args.OutputFolderPath, os.path.basename(NuPkgFilePath)) - if (args.Operation.lower() == "push"): + if args.Operation.lower() == "push": # set the parameters for push logging.debug("Checking input parameters for push") # check args - if (not os.path.isfile(args.ConfigFilePath)): + if not os.path.isfile(args.ConfigFilePath): logging.critical("Invalid Config File (%s). File doesn't exist" % args.ConfigFilePath) raise Exception("Invalid Config File. File doesn't exist") NuPkgFilePath = args.PackageFile nu = NugetSupport(ConfigFile=args.ConfigFilePath) - if (args.Operation.lower() == "push" or args.Operation.lower() == "packandpush"): + if args.Operation.lower() == "push" or args.Operation.lower() == "packandpush": # do the pushing logging.critical("Pushing the package") logging.debug("NuPkgFilePath is %s" % NuPkgFilePath) # check args - if (not os.path.isfile(NuPkgFilePath)): + if not os.path.isfile(NuPkgFilePath): logging.critical("NuPkgFilePath is not valid file. %s" % NuPkgFilePath) raise Exception("Invalid Pkg File. 
File doesn't exist") ret = nu.Push(NuPkgFilePath, args.ApiKey) nu.LogObject() nu.ToConfigFile(args.ConfigFilePath) # save any changes - if (not args.Dirty): + if not args.Dirty: nu.CleanUp() - if (TempOutDir is not None): + if TempOutDir is not None: os.removedirs(TempOutDir) return ret @@ -657,7 +712,7 @@ def main() -> int: def go() -> None: """Main entry into the nuget publishing tool.""" # setup main console as logger - logger = logging.getLogger('') + logger = logging.getLogger("") logger.setLevel(logging.DEBUG) formatter = logging.Formatter("%(levelname)s - %(message)s") console = logging.StreamHandler() @@ -677,5 +732,5 @@ def go() -> None: sys.exit(retcode) -if __name__ == '__main__': +if __name__ == "__main__": go() diff --git a/edk2toolext/omnicache.py b/edk2toolext/omnicache.py index a8709033..01c3dc26 100644 --- a/edk2toolext/omnicache.py +++ b/edk2toolext/omnicache.py @@ -5,6 +5,7 @@ ## # spell-checker:ignore rtags """Omnicache tool for lessening network usage and time of cloning repositories.""" + import argparse import datetime import logging @@ -39,9 +40,10 @@ PRE_0_11_OMNICACHE_FILENAME = "omnicache.yaml" -class Omnicache(): +class Omnicache: """Class for managing an omnicache instance.""" - def __init__(self, cachepath: str, create: bool=False, convert: bool=True) -> None: + + def __init__(self, cachepath: str, create: bool = False, convert: bool = True) -> None: """Initializes an omnicache. Args: @@ -54,10 +56,10 @@ def __init__(self, cachepath: str, create: bool=False, convert: bool=True) -> No self._InvalidateUrlLookupCache() (valid, isConversionCandidate) = self._ValidateOmnicache() - if (not valid): + if not valid: if create and not isConversionCandidate: self._InitOmnicache() - elif (convert and isConversionCandidate): + elif convert and isConversionCandidate: self._ConvertOmnicache() else: logging.critical("Omnicache at {0} not valid, and cannot create/convert.".format(cachepath)) @@ -73,24 +75,24 @@ def _ValidateOmnicache(self) -> tuple: NOTE: "convertible" is True if an older Omnicache that can be converted exists at self.path. """ logging.info("Checking if {0} is valid omnicache.".format(self.path)) - if (not os.path.isdir(self.path)): + if not os.path.isdir(self.path): logging.debug("{0} does not exist - not valid (not convertible).".format(self.path)) return (False, False) out = StringIO() ret = RunCmd("git", "rev-parse --is-bare-repository", workingdir=self.path, outstream=out) - if (ret != 0): + if ret != 0: logging.debug("{0} error getting repo state - not valid (not convertible).".format(self.path)) return (False, False) - if (out.getvalue().strip().lower() == "true"): - if (os.path.exists(os.path.join(self.path, PRE_0_11_OMNICACHE_FILENAME))): + if out.getvalue().strip().lower() == "true": + if os.path.exists(os.path.join(self.path, PRE_0_11_OMNICACHE_FILENAME)): logging.debug("{0} - old config file present. not valid (is convertible).".format(self.path)) return (False, True) out = StringIO() ret = RunCmd("git", "config --local omnicache.metadata.version", workingdir=self.path, outstream=out) - if (ret != 0): + if ret != 0: logging.debug("{0} - error retrieving omnicache version. not valid (is convertible).".format(self.path)) return (False, True) - if (out.getvalue().strip() == OMNICACHE_VERSION): + if out.getvalue().strip() == OMNICACHE_VERSION: logging.debug("{0} - matching omnicache version. 
valid (convertible don't care).".format(self.path)) return (True, True) else: @@ -104,50 +106,54 @@ def _InitOmnicache(self) -> int: logging.critical("Initializing Omnicache in {0}".format(self.path)) os.makedirs(self.path, exist_ok=True) ret = RunCmd("git", "init --bare", workingdir=self.path) - if (ret != 0): + if ret != 0: return ret # by default, git fetch is single-threaded. This configuration allows git to use a "reasonable" default # (presently equal to the number of cpus) to execute the fetch in parallel. ret = RunCmd("git", "config --local fetch.parallel 0", workingdir=self.path) - if (ret != 0): + if ret != 0: return ret - return RunCmd("git", - "config --local omnicache.metadata.version {0}".format(OMNICACHE_VERSION), - workingdir=self.path) + return RunCmd( + "git", "config --local omnicache.metadata.version {0}".format(OMNICACHE_VERSION), workingdir=self.path + ) def _ConvertOmnicache(self) -> int: """Converts an existing bare git repo from a previous omnicache version to the current omnicache version.""" logging.info("Converting Omnicache in {0} to latest format.".format(self.path)) - if (os.path.exists(os.path.join(self.path, PRE_0_11_OMNICACHE_FILENAME))): + if os.path.exists(os.path.join(self.path, PRE_0_11_OMNICACHE_FILENAME)): os.remove(os.path.join(self.path, PRE_0_11_OMNICACHE_FILENAME)) remotes = Omnicache.GetRemotes(self.path) logging.info("Renaming non-UUID remotes with UUID.") - for (name, _) in remotes.items(): - if (not Omnicache._IsValidUuid(name)): + for name, _ in remotes.items(): + if not Omnicache._IsValidUuid(name): logging.info("Converting remote {0} to UUID".format(name)) # Rename the remote with a valid UUID newName = str(uuid.uuid4()) ret = RunCmd("git", "remote rename {0} {1}".format(name, newName), workingdir=self.path) - if (ret != 0): + if ret != 0: # rename failed; try removal. logging.warn("Failed to rename {0}. Attempting to remove it.".format(name)) ret = RunCmd("git", "remote remove {0}".format(name), workingdir=self.path) - if (ret != 0): + if ret != 0: logging.critical("Failed to rename or remove {0} - skipping.".format(name)) continue # Remove previous fetch config entries and regenerate them. Proceed to create new ones even if it fails. 
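A condensed sketch of the refspec surgery performed below, with plain subprocess standing in for edk2toollib's RunCmd (the helper name is illustrative). The two --add entries route branch heads into refs/remotes/<name>/* and tags into a private refs/rtags/<name>/* namespace, which is what keeps tags from different remotes from colliding:

import subprocess

def add_isolated_fetch_refspecs(cache_path: str, remote_name: str) -> None:
    """Sketch: give one remote per-remote head and tag namespaces."""
    def git(args: list, check: bool) -> None:
        subprocess.run(["git", "-C", cache_path] + args, check=check)

    # Old entries may not exist; mirror the code below and proceed even if the unset fails.
    git(["config", "--local", "--unset-all", f"remote.{remote_name}.fetch"], check=False)
    git(["config", "--local", "--add", f"remote.{remote_name}.fetch",
         f"+refs/heads/*:refs/remotes/{remote_name}/*"], check=True)
    git(["config", "--local", "--add", f"remote.{remote_name}.fetch",
         f"refs/tags/*:refs/rtags/{remote_name}/*"], check=True)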
RunCmd("git", "config --local --unset-all remote.{0}.fetch".format(newName), workingdir=self.path) - RunCmd("git", - "config --local --add remote.{0}.fetch +refs/heads/*:refs/remotes/{0}/*".format(newName), - workingdir=self.path) - RunCmd("git", - "config --local --add remote.{0}.fetch refs/tags/*:refs/rtags/{0}/*".format(newName), - workingdir=self.path) + RunCmd( + "git", + "config --local --add remote.{0}.fetch +refs/heads/*:refs/remotes/{0}/*".format(newName), + workingdir=self.path, + ) + RunCmd( + "git", + "config --local --add remote.{0}.fetch refs/tags/*:refs/rtags/{0}/*".format(newName), + workingdir=self.path, + ) # Add the original name as a display name - RunCmd("git", - "config --local omnicache.{0}.displayname {1}".format(newName, name), - workingdir=self.path) + RunCmd( + "git", "config --local omnicache.{0}.displayname {1}".format(newName, name), workingdir=self.path + ) logging.info("Remote {0} converted to {1}.".format(name, newName)) # delete any tags in the global name space (older omnicaches fetched all tags into the global tag namespace) logging.info("Removing global tags") @@ -159,8 +165,8 @@ def _ConvertOmnicache(self) -> int: logging.info("Removing remotes with duplicate URLs") knownUrls = [] remotes = Omnicache.GetRemotes(self.path) - for (name, url) in remotes.items(): - if (url not in knownUrls): + for name, url in remotes.items(): + if url not in knownUrls: logging.info("Retaining remote {0} with unique URL {1}".format(name, url)) knownUrls.append(url) else: @@ -170,21 +176,21 @@ def _ConvertOmnicache(self) -> int: # by default, git fetch is single-threaded. This configuration allows git to use a "reasonable" default # (presently equal to the number of cpus) to execute the fetch in parallel. ret = RunCmd("git", "config --local fetch.parallel 0", workingdir=self.path) - if (ret != 0): + if ret != 0: return ret # write current omnicache version into cache logging.info("Writing Omnicache version") - return RunCmd("git", - "config --local omnicache.metadata.version {0}".format(OMNICACHE_VERSION), - workingdir=self.path) + return RunCmd( + "git", "config --local omnicache.metadata.version {0}".format(OMNICACHE_VERSION), workingdir=self.path + ) def _RefreshUrlLookupCache(self) -> None: """Refreshes the URL lookup cache.""" - if (len(self.urlLookupCache) == 0): + if len(self.urlLookupCache) == 0: logging.info("Regenerating URL lookup cache.") out = StringIO() ret = RunCmd("git", r"config --local --get-regexp remote\..*?\.url", workingdir=self.path, outstream=out) - if (ret != 0): + if ret != 0: return None # output is in the form: remote..url for remote in out.getvalue().splitlines(): @@ -198,11 +204,11 @@ def _InvalidateUrlLookupCache(self) -> None: def _LookupRemoteForUrl(self, url: str) -> Optional[str]: """Returns the git remote name for the specified URL, or None if it doesn't exist.""" self._RefreshUrlLookupCache() - if (url in self.urlLookupCache): + if url in self.urlLookupCache: return self.urlLookupCache[url] return None - def AddRemote(self, url: str, name:Optional[str]=None) -> int: + def AddRemote(self, url: str, name: Optional[str] = None) -> int: """Adds a remote for the specified URL to the omnicache. Args: @@ -210,31 +216,33 @@ def AddRemote(self, url: str, name:Optional[str]=None) -> int: name(str, optional): provides a "display name" to be associated with this remote. """ # if we already have this remote (i.e. a remote with this URL exists), then just update. 
- if (self._LookupRemoteForUrl(url) is not None): + if self._LookupRemoteForUrl(url) is not None: return self.UpdateRemote(url, newName=name) # otherwise create it. logging.info("Adding new remote for url {0}".format(url)) newName = str(uuid.uuid4()) ret = RunCmd("git", "remote add {0} {1}".format(newName, url), workingdir=self.path) - if (ret != 0): + if ret != 0: return ret self._InvalidateUrlLookupCache() # add display name, if specified - if (name is not None): - ret = RunCmd("git", - "config --local omnicache.{0}.displayname {1}".format(newName, name), - workingdir=self.path) - if (ret != 0): + if name is not None: + ret = RunCmd( + "git", "config --local omnicache.{0}.displayname {1}".format(newName, name), workingdir=self.path + ) + if ret != 0: return ret # add a special fetch refspec to fetch remote tags into a per-remote local namespace. - return RunCmd("git", - "config --local --add remote.{0}.fetch refs/tags/*:refs/rtags/{0}/*".format(newName), - workingdir=self.path) + return RunCmd( + "git", + "config --local --add remote.{0}.fetch refs/tags/*:refs/rtags/{0}/*".format(newName), + workingdir=self.path, + ) def RemoveRemote(self, url: str) -> int: """Removes the remote for the specified url from the cache.""" name = self._LookupRemoteForUrl(url) - if (name is None): + if name is None: logging.critical("Failed to remove node for url {0}: such a remote does not exist.".format(url)) return 1 logging.info("Removing remote for url {0}".format(url)) @@ -242,7 +250,7 @@ def RemoveRemote(self, url: str) -> int: self._InvalidateUrlLookupCache() return ret - def UpdateRemote(self, oldUrl: str, newUrl: Optional[str]=None, newName: Optional[str]=None) -> int: + def UpdateRemote(self, oldUrl: str, newUrl: Optional[str] = None, newName: Optional[str] = None) -> int: """Updates the remote. Args: @@ -251,32 +259,32 @@ def UpdateRemote(self, oldUrl: str, newUrl: Optional[str]=None, newName: Optiona newName (str, optional): updates the "displayname" for the remote. """ remote = self._LookupRemoteForUrl(oldUrl) - if (remote is None): + if remote is None: logging.critical("Failed to update node for url {0}: such a remote does not exist.".format(oldUrl)) return 1 - if (newName is not None): + if newName is not None: logging.info("Updating display name for url {0} to {1}".format(oldUrl, newName)) - ret = RunCmd("git", - "config --local omnicache.{0}.displayname {1}".format(remote, newName), - workingdir=self.path) - if (ret != 0): + ret = RunCmd( + "git", "config --local omnicache.{0}.displayname {1}".format(remote, newName), workingdir=self.path + ) + if ret != 0: return ret - if (newUrl is not None): + if newUrl is not None: logging.info("Updating url {0} to {1}".format(oldUrl, newUrl)) ret = RunCmd("git", "remote set-url {0} {1}".format(remote, newUrl), workingdir=self.path) self._InvalidateUrlLookupCache() - if (ret != 0): + if ret != 0: return ret return 0 - def Fetch(self, jobs: int=0) -> int: + def Fetch(self, jobs: int = 0) -> int: """Fetches all remotes.""" logging.info("Fetching all remotes.") self._RefreshUrlLookupCache() # Tricky: we pass no-tags here, since we set up custom fetch refs for tags on a per-remote basis. This prevents # git from fetching the first set of tags into the global namespace. 
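A minimal sketch of the fetch that follows: --no-tags suppresses git's default tag fetch (the per-remote refspecs already route tags into refs/rtags/<name>/*), and -j <N> lets git fetch remotes in parallel when a job count is given. subprocess stands in for RunCmd:

import subprocess

def fetch_all(cache_path: str, jobs: int = 0) -> int:
    """Sketch: fetch every remote, optionally in parallel, without global tags."""
    cmd = ["git", "-C", cache_path, "fetch", "--all", "--no-tags"]
    if jobs != 0:
        cmd += ["-j", str(jobs)]
    return subprocess.run(cmd).returncode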
- if (jobs != 0): + if jobs != 0: return RunCmd("git", "fetch --all -j {0} --no-tags".format(jobs), workingdir=self.path) else: return RunCmd("git", "fetch --all --no-tags", workingdir=self.path) @@ -296,16 +304,15 @@ def GetRemoteData(self) -> dict: remoteData[self.urlLookupCache[url]] = {"url": url} out = StringIO() - ret = RunCmd("git", - r"config --local --get-regexp omnicache\..*?\.displayname", - workingdir=self.path, - outstream=out) - if (ret != 0): + ret = RunCmd( + "git", r"config --local --get-regexp omnicache\..*?\.displayname", workingdir=self.path, outstream=out + ) + if ret != 0: return remoteData for displayName in out.getvalue().splitlines(): remoteName = displayName.split()[0].split(".")[1] - if (remoteName in remoteData.keys()): + if remoteName in remoteData.keys(): remoteData[remoteName].update({"displayname": displayName.split()[1]}) return remoteData @@ -313,9 +320,9 @@ def List(self) -> None: """Prints the current set of remotes.""" print("List OMNICACHE content:\n") remoteData = self.GetRemoteData() - if (len(remoteData) == 0): + if len(remoteData) == 0: print("No remotes.") - for (name, data) in remoteData.items(): + for name, data in remoteData.items(): print("Id {0}: {1}".format(name, str(data))) @staticmethod @@ -330,7 +337,7 @@ def GetRemotes(path: str) -> dict: out = StringIO() ret = RunCmd("git", "remote -v", workingdir=path, outstream=out) - if (ret != 0): + if ret != 0: return remotes # Note: this loop assumes fetch and push URLs will be identical. If not, the last URL output will be the result. @@ -368,20 +375,20 @@ def ProcessInputConfig(omnicache: Omnicache, input_config: str) -> int: def ScanDirectory(omnicache: Omnicache, scanpath: str) -> int: """Recursively scans a directory for git repositories and adds remotes and submodule remotes to omnicache.""" logging.info("Scanning {0} for remotes to add.".format(scanpath)) - if (not os.path.isdir(scanpath)): + if not os.path.isdir(scanpath): logging.critical("specified scan path is invalid.") return -1 - for (dirpath, dirnames, filenames) in os.walk(scanpath): - if (".git" in dirnames): + for dirpath, dirnames, filenames in os.walk(scanpath): + if ".git" in dirnames: newRemotes = Omnicache.GetRemotes(dirpath) - for (name, url) in newRemotes.items(): + for name, url in newRemotes.items(): omnicache.AddRemote(url, name) - if (".gitmodules" in filenames): + if ".gitmodules" in filenames: out = StringIO() ret = RunCmd("git", "config --file .gitmodules --get-regexp url", workingdir=dirpath, outstream=out) - if (ret == 0): + if ret == 0: for submodule in out.getvalue().splitlines(): url = submodule.split()[1] name = submodule.split()[0].split(".")[1] @@ -394,9 +401,9 @@ def Export(omnicache: Omnicache, exportPath: str) -> int: """Exports omnicache configuration to YAML.""" logging.info("Exporting omnicache config for {0} to {1}".format(omnicache.path, exportPath)) content = [] - for (name, data) in omnicache.GetRemoteData().items(): + for name, data in omnicache.GetRemoteData().items(): remoteToWrite = {"url": data["url"]} - if ("displayname" in data): + if "displayname" in data: remoteToWrite["name"] = data["displayname"] else: remoteToWrite["name"] = name @@ -410,35 +417,89 @@ def Export(omnicache: Omnicache, exportPath: str) -> int: def get_cli_options() -> argparse.Namespace: """Add CLI arguments to argparse for controlling the omnicache.""" - parser = argparse.ArgumentParser(description='Tool to provide easy method create and manage the OMNICACHE', ) + parser = argparse.ArgumentParser( + description="Tool to 
provide easy method create and manage the OMNICACHE", + ) parser.add_argument(dest="cache_dir", help="path to an existing or desired OMNICACHE directory") - parser.add_argument("--scan", dest="scan", default=None, - help="Scans the path provided for top-level folders with repos to add to the OMNICACHE") - parser.add_argument("--new", dest="new", help="Initialize the OMNICACHE. MUST NOT EXIST", - action="store_true", default=False) - parser.add_argument("--init", dest="init", help="Initialize the OMNICACHE if it doesn't already exist", - action="store_true", default=False) - parser.add_argument("-l", "--list", dest="list", default=False, action="store_true", - help="List config of OMNICACHE") - parser.add_argument("-a", "--add", dest="add", nargs=2, action="append", - help="Add config entry to OMNICACHE ", - default=[]) - parser.add_argument("-c", "--configfile", dest="input_config_file", default=None, - help="Add new entries from config file to OMNICACHE") - parser.add_argument("-e", "--exportConfig", dest="output_config_file", default=None, - help="Export current omnicache config as a yaml file") + parser.add_argument( + "--scan", + dest="scan", + default=None, + help="Scans the path provided for top-level folders with repos to add to the OMNICACHE", + ) + parser.add_argument( + "--new", dest="new", help="Initialize the OMNICACHE. MUST NOT EXIST", action="store_true", default=False + ) + parser.add_argument( + "--init", + dest="init", + help="Initialize the OMNICACHE if it doesn't already exist", + action="store_true", + default=False, + ) + parser.add_argument( + "-l", "--list", dest="list", default=False, action="store_true", help="List config of OMNICACHE" + ) + parser.add_argument( + "-a", + "--add", + dest="add", + nargs=2, + action="append", + help="Add config entry to OMNICACHE ", + default=[], + ) + parser.add_argument( + "-c", + "--configfile", + dest="input_config_file", + default=None, + help="Add new entries from config file to OMNICACHE", + ) + parser.add_argument( + "-e", + "--exportConfig", + dest="output_config_file", + default=None, + help="Export current omnicache config as a yaml file", + ) group = parser.add_mutually_exclusive_group() - group.add_argument("-u", "--update", "--fetch", dest="fetch", action="store_true", - help="Update the Omnicache. All cache changes also cause a fetch", default=False) - group.add_argument("--no-fetch", dest="no_fetch", action="store_true", - help="Prevent auto-fetch if implied by other arguments.", default=False) - group.add_argument("--fetch-jobs", dest="fetch_jobs", type=int, - help="Specify the number of parallel threads (jobs) for fetch operation.", default=0) - parser.add_argument("-r", "--remove", dest="remove", nargs=1, action="append", - help="remove config entry from OMNICACHE ", default=[]) - parser.add_argument('--version', action='version', version='%(prog)s ' + OMNICACHE_VERSION) - parser.add_argument("--debug", dest="debug", help="Output all debug messages to console", - action="store_true", default=False) + group.add_argument( + "-u", + "--update", + "--fetch", + dest="fetch", + action="store_true", + help="Update the Omnicache. 
All cache changes also cause a fetch", + default=False, + ) + group.add_argument( + "--no-fetch", + dest="no_fetch", + action="store_true", + help="Prevent auto-fetch if implied by other arguments.", + default=False, + ) + group.add_argument( + "--fetch-jobs", + dest="fetch_jobs", + type=int, + help="Specify the number of parallel threads (jobs) for fetch operation.", + default=0, + ) + parser.add_argument( + "-r", + "--remove", + dest="remove", + nargs=1, + action="append", + help="remove config entry from OMNICACHE ", + default=[], + ) + parser.add_argument("--version", action="version", version="%(prog)s " + OMNICACHE_VERSION) + parser.add_argument( + "--debug", dest="debug", help="Output all debug messages to console", action="store_true", default=False + ) args = parser.parse_args() return args @@ -446,7 +507,7 @@ def get_cli_options() -> argparse.Namespace: def main() -> int: """Main entry point to managing the omnicache.""" # setup main console as logger - logger = logging.getLogger('') + logger = logging.getLogger("") logger.setLevel(logging.NOTSET) console = edk2_logging.setup_console_logging(False) logger.addHandler(console) @@ -456,8 +517,7 @@ def main() -> int: if args.debug: console.setLevel(logging.DEBUG) - logging.info("Log Started: " + datetime.datetime.strftime( - datetime.datetime.now(), "%A, %B %d, %Y %I:%M%p")) + logging.info("Log Started: " + datetime.datetime.strftime(datetime.datetime.now(), "%A, %B %d, %Y %I:%M%p")) args.cache_dir = os.path.realpath(os.path.abspath(args.cache_dir)) logging.debug("OMNICACHE dir: {0}".format(args.cache_dir)) @@ -490,54 +550,54 @@ def main() -> int: omnicache = Omnicache(args.cache_dir) # add: add new source(s) to omnicache from command line arg. - if (len(args.add) > 0): - for (name, url) in args.add: + if len(args.add) > 0: + for name, url in args.add: ret = omnicache.AddRemote(url, name) - if (ret != 0): + if ret != 0: return -3 auto_fetch = True # config: add or update source(s) from config file. - if (args.input_config_file is not None): + if args.input_config_file is not None: ret = ProcessInputConfig(omnicache, args.input_config_file) - if (ret != 0): + if ret != 0: return -4 auto_fetch = True # remove: remove source(s) from omnicache as specified by command line arg. - if (len(args.remove) > 0): + if len(args.remove) > 0: for url in args.remove: ret = omnicache.RemoveRemote(url) - if (ret != 0): + if ret != 0: return -4 # scan: recursively scan the given directory and add all repos and submodules - if (args.scan is not None): + if args.scan is not None: ret = ScanDirectory(omnicache, args.scan) - if (ret != 0): + if ret != 0: return -5 auto_fetch = True # fetch: update the omnicache with objects from its remotes. # Note: errors are ignored here, since transient network failures may occur that prevent cache update. Those just # mean the omnicache may be a little stale which should not be fatal to users of the cache. - if (args.fetch or (auto_fetch and not args.no_fetch)): + if args.fetch or (auto_fetch and not args.no_fetch): omnicache.Fetch(args.fetch_jobs) # list: print out the omnicache contents.
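Before the list and export steps below, it helps to see the config file shape involved: Export() assembles a plain list of name/url entries, and the same shape is what --configfile feeds back in through ProcessInputConfig(). A minimal round-trip sketch (the filename and remote entries are invented for illustration, and the exact on-disk layout is inferred from Export() above rather than documented):

import yaml

remotes = [
    {"name": "openssl", "url": "https://github.com/openssl/openssl.git"},
    {"name": "mu_basecore", "url": "https://github.com/microsoft/mu_basecore.git"},
]
with open("omnicache_config.yml", "w") as f:
    yaml.safe_dump(remotes, f)  # the list-of-dicts shape Export() assembles, one entry per remote

with open("omnicache_config.yml") as f:
    for entry in yaml.safe_load(f):  # the shape --configfile would hand to ProcessInputConfig()
        print(entry["name"], "->", entry["url"])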
- if (args.list): + if args.list: omnicache.List() # export: - if (args.output_config_file): + if args.output_config_file: ret = Export(omnicache, args.output_config_file) - if (ret != 0): + if ret != 0: return -6 return 0 -if __name__ == '__main__': +if __name__ == "__main__": retcode = main() logging.shutdown() sys.exit(retcode) diff --git a/edk2toolext/uefi/sig_db_tool.py b/edk2toolext/uefi/sig_db_tool.py index 879a1c33..295d109c 100644 --- a/edk2toolext/uefi/sig_db_tool.py +++ b/edk2toolext/uefi/sig_db_tool.py @@ -27,10 +27,10 @@ def main() -> None: Parses command-line parameters using ArgumentParser delegating to helper functions to fulfill the requests. """ - filenameHelp = 'Filename containing a UEFI Signature Database, \ - a concatenation of EFI_SIGNATURE_LISTs as read from GetVariable([PK, KEK, db, dbx])' + filenameHelp = "Filename containing a UEFI Signature Database, \ + a concatenation of EFI_SIGNATURE_LISTs as read from GetVariable([PK, KEK, db, dbx])" - sig_db_examples = ''' + sig_db_examples = """ examples: sig_db dump dbx_before.bin @@ -40,30 +40,44 @@ def main() -> None: sig_db --compact get_dupes dbx_with_dupes.bin sig_db --compact get_canonical mixed_up_dbx.bin -''' +""" - parser = argparse.ArgumentParser(description='UEFI Signature database inspection tool', - formatter_class=argparse.RawDescriptionHelpFormatter, - epilog=sig_db_examples) + parser = argparse.ArgumentParser( + description="UEFI Signature database inspection tool", + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog=sig_db_examples, + ) - parser.add_argument('--compact', action='store_true', - help='Compact, 1 line per data element output for easier diff-ing') + parser.add_argument( + "--compact", action="store_true", help="Compact, 1 line per data element output for easier diff-ing" + ) - subparsers = parser.add_subparsers(required=False, dest='action') + subparsers = parser.add_subparsers(required=False, dest="action") - parser_dump = subparsers.add_parser('dump', help='Print a UEFI Signature Database as-is in human-readable form') - parser_dump.add_argument('file', type=str, help=filenameHelp) + parser_dump = subparsers.add_parser("dump", help="Print a UEFI Signature Database as-is in human-readable form") + parser_dump.add_argument("file", type=str, help=filenameHelp) - parser_get_dupes = subparsers.add_parser('get_dupes', help='Find duplicate signature entries in a UEFI Signature \ + parser_get_dupes = subparsers.add_parser( + "get_dupes", + help="Find duplicate signature entries in a UEFI Signature \ Database. The test for duplication ignores SignatureOwner, testing only the SignatureData field. 
\ - Print them in UEFI Signature Database format, ordering is NOT maintained, output is NOT itself deduplicated') - parser_get_dupes.add_argument('file', type=str, help='Filename of a UEFI Signature Database \ - (concatenation of EFI_SIGNATURE_LISTs as read from GetVariable() )') - - parser_get_canonical = subparsers.add_parser('get_canonical', help='Reduce a UEFI Signature Database to a \ - canonical (de-duplicated, sorted) form and print it') - parser_get_canonical.add_argument('file', type=str, - help='The name of the UEFI Signature Database file to get_canonical') + Print them in UEFI Signature Database format, ordering is NOT maintained, output is NOT itself deduplicated", + ) + parser_get_dupes.add_argument( + "file", + type=str, + help="Filename of a UEFI Signature Database \ + (concatenation of EFI_SIGNATURE_LISTs as read from GetVariable() )", + ) + + parser_get_canonical = subparsers.add_parser( + "get_canonical", + help="Reduce a UEFI Signature Database to a \ + canonical (de-duplicated, sorted) form and print it", + ) + parser_get_canonical.add_argument( + "file", type=str, help="The name of the UEFI Signature Database file to get_canonical" + ) options = parser.parse_args() @@ -72,11 +86,11 @@ def main() -> None: return try: - with open(options.file, 'rb') as f: + with open(options.file, "rb") as f: esd = EfiSignatureDatabase(f) - if (options.action == 'get_dupes'): + if options.action == "get_dupes": esd = esd.GetDuplicates() - elif (options.action == 'get_canonical'): + elif options.action == "get_canonical": esd = esd.GetCanonical() esd.Print(compact=options.compact) @@ -85,5 +99,5 @@ def main() -> None: print('ERROR: File not found: "{0}"'.format(options.file)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/edk2toolext/versioninfo/versioninfo_helper.py b/edk2toolext/versioninfo/versioninfo_helper.py index 6ee015e1..a15658c0 100644 --- a/edk2toolext/versioninfo/versioninfo_helper.py +++ b/edk2toolext/versioninfo/versioninfo_helper.py @@ -16,6 +16,7 @@ from json files, along with the functions to output version information from PE/PE+ files. 
""" + import json import logging from datetime import datetime @@ -45,7 +46,7 @@ class PEStrings(object): 0x00010004: "VOS_DOS_WINDOWS32", 0x00040004: "VOS_NT_WINDOWS32", 0x00020002: "VOS_OS216_PM16", - 0x00030003: "VOS_OS232_PM32" + 0x00030003: "VOS_OS232_PM32", } FILE_TYPE_STRINGS = { @@ -55,7 +56,7 @@ class PEStrings(object): 0x00000004: "VFT_FONT", 0x00000007: "VFT_STATIC_LIB", 0x00000000: "VFT_UNKNOWN", - 0x00000005: "VFT_VXD" + 0x00000005: "VFT_VXD", } FILE_SUBTYPE_NOFONT_STRINGS = { @@ -70,17 +71,17 @@ class PEStrings(object): 0x00000009: "VFT2_DRV_SOUND", 0x00000007: "VFT2_DRV_SYSTEM", 0x0000000C: "VFT2_DRV_VERSIONED_PRINTER", - 0x00000000: "VFT2_UNKNOWN" + 0x00000000: "VFT2_UNKNOWN", } FILE_SUBTYPE_FONT_STRINGS = { 0x00000001: "VFT2_FONT_RASTER", 0x00000003: "VFT2_FONT_TRUETYPE", 0x00000002: "VFT2_FONT_VECTOR", - 0x00000000: "VFT2_UNKNOWN" + 0x00000000: "VFT2_UNKNOWN", } - VALID_SIGNATURE = 0xfeef04bd + VALID_SIGNATURE = 0xFEEF04BD DEFAULT_TRANSLATION = "0x0409,0x04b0" DEFAULT_BLOCK_HEADER = "040904b0" @@ -128,11 +129,7 @@ class PEStrings(object): MINIMAL_MODE_STR = "MINIMAL" # Validation requirements - VERSIONFILE_REQUIRED_FIELDS = { - FILE_VERSION_STR, - STRING_FILE_INFO_STR, - VAR_FILE_INFO_STR - } + VERSIONFILE_REQUIRED_FIELDS = {FILE_VERSION_STR, STRING_FILE_INFO_STR, VAR_FILE_INFO_STR} VERSIONFILE_ALLOWED_FIELDS = { FILE_VERSION_STR, @@ -148,7 +145,7 @@ class PEStrings(object): STRUC_VERSION_STR, FILE_DATE_STR, FILE_DATE_MS_STR, - FILE_DATE_LS_STR + FILE_DATE_LS_STR, } COMPANY_NAME_STR = "CompanyName" @@ -173,7 +170,7 @@ class PEStrings(object): "PrivateBuild", "ProductName", "ProductVersion", - "SpecialBuild" + "SpecialBuild", } VALID_FILE_OS_VALUES = { @@ -184,18 +181,10 @@ class PEStrings(object): "VOS__WINDOWS32", "VOS_DOS_WINDOWS16", "VOS_DOS_WINDOWS32", - "VOS_NT_WINDOWS32" + "VOS_NT_WINDOWS32", } - VALID_FILE_TYPE_VALUES = { - "VFT_APP", - "VFT_DLL", - "VFT_DRV", - "VFT_FONT", - "VFT_STATIC_LIB", - "VFT_UNKNOWN", - "VFT_VXD" - } + VALID_FILE_TYPE_VALUES = {"VFT_APP", "VFT_DLL", "VFT_DRV", "VFT_FONT", "VFT_STATIC_LIB", "VFT_UNKNOWN", "VFT_VXD"} VALID_SUBTYPE_VFT_DRV = { "VFT2_UNKNOWN", @@ -209,50 +198,60 @@ class PEStrings(object): "VFT2_DRV_SYSTEM", "VFT2_DRV_INSTALLABLE", "VFT2_DRV_SOUND", - "VFT2_DRV_VERSIONED_PRINTER" + "VFT2_DRV_VERSIONED_PRINTER", } - VALID_SUBTYPE_VFT_FONT = { - "VFT2_UNKNOWN", - "VFT2_FONT_RASTER", - "VFT2_FONT_VECTOR", - "VFT2_FONT_TRUETYPE" - } + VALID_SUBTYPE_VFT_FONT = {"VFT2_UNKNOWN", "VFT2_FONT_RASTER", "VFT2_FONT_VECTOR", "VFT2_FONT_TRUETYPE"} VALID_LANG_ID = { - 0x0401, 0x0402, - 0x0403, 0x0404, - 0x0405, 0x0406, - 0x0407, 0x0408, - 0x0409, 0x040A, - 0x040B, 0x040C, - 0x040D, 0x040E, - 0x040F, 0x0410, - 0x0411, 0x0412, - 0x0413, 0x0414, - 0x0415, 0x0416, - 0x0417, 0x0418, - 0x0419, 0x041A, - 0x041B, 0x041C, - 0x041D, 0x041E, - 0x041F, 0x0420, - 0x0421, 0x0804, - 0x0807, 0x0809, - 0x080A, 0x080C, - 0x0C0C, 0x100C, - 0x0816, 0x081A, - 0x0810, 0x0813, - 0x0814 + 0x0401, + 0x0402, + 0x0403, + 0x0404, + 0x0405, + 0x0406, + 0x0407, + 0x0408, + 0x0409, + 0x040A, + 0x040B, + 0x040C, + 0x040D, + 0x040E, + 0x040F, + 0x0410, + 0x0411, + 0x0412, + 0x0413, + 0x0414, + 0x0415, + 0x0416, + 0x0417, + 0x0418, + 0x0419, + 0x041A, + 0x041B, + 0x041C, + 0x041D, + 0x041E, + 0x041F, + 0x0420, + 0x0421, + 0x0804, + 0x0807, + 0x0809, + 0x080A, + 0x080C, + 0x0C0C, + 0x100C, + 0x0816, + 0x081A, + 0x0810, + 0x0813, + 0x0814, } - VALID_CHARSET_ID = { - 0x0000, 0x03A4, - 0x03B5, 0x03B6, - 0x04B0, 0x04E2, - 0x04E3, 0x04E4, - 0x04E5, 0x04E6, - 0x04E7, 0x04E8 - } + 
VALID_CHARSET_ID = {0x0000, 0x03A4, 0x03B5, 0x03B6, 0x04B0, 0x04E2, 0x04E3, 0x04E4, 0x04E5, 0x04E6, 0x04E7, 0x04E8} def validate_version_number(version_str: str) -> bool: @@ -264,13 +263,17 @@ def validate_version_number(version_str: str) -> bool: Returns: (bool): if the version string is valid or not """ - if version_str.count('.') != 3 and version_str.count(',') != 3: - logging.error("Invalid version string: " + version_str + ". Version must be in form " - + "\"INTEGER.INTEGER.INTEGER.INTEGER\".") + if version_str.count(".") != 3 and version_str.count(",") != 3: + logging.error( + "Invalid version string: " + + version_str + + ". Version must be in form " + + '"INTEGER.INTEGER.INTEGER.INTEGER".' + ) return False split = None - if version_str.count('.') == 3: + if version_str.count(".") == 3: split = version_str.split(".") else: split = version_str.split(",") @@ -281,8 +284,12 @@ def validate_version_number(version_str: str) -> bool: logging.error("Integer overflow in version string: " + version_str + ".") return False except ValueError: - logging.error("Invalid version string: " + version_str + ". Version must be in form \"" - + " INTEGER.INTEGER.INTEGER.INTEGER\".") + logging.error( + "Invalid version string: " + + version_str + + '. Version must be in form "' + + ' INTEGER.INTEGER.INTEGER.INTEGER".' + ) return False return True @@ -300,7 +307,7 @@ def version_str_to_int(version_str: str) -> Tuple[int, int]: (Tuple[int, int]): (32 MS bits, 32 LS bits) """ split = None - if version_str.count('.') == 3: + if version_str.count(".") == 3: split = version_str.split(".") else: split = version_str.split(",") @@ -319,7 +326,7 @@ def hex_to_version_str(val: int) -> str: Returns: (str): string representation """ - return str(((val & ~0) >> 16) & 0xffff) + "." + str(val & 0xffff) + return str(((val & ~0) >> 16) & 0xFFFF) + "." + str(val & 0xFFFF) class PEObject(object): @@ -327,6 +334,7 @@ class PEObject(object): Gives functionality for reading VS_VERSIONINFO metadata and .rsrc section.
""" + _pe: pefile.PE = None def __init__(self, filepath: str) -> None: @@ -396,36 +404,55 @@ def get_version_dict(self) -> dict: vs_fixedfileinfo_dict = self._pe.VS_FIXEDFILEINFO[0].dump_dict() for key in vs_fixedfileinfo_dict.keys(): # Skip sections that have dependencies - if key == PEStrings.PE_STRUCT_STR or \ - key == PEStrings.FILE_SUBTYPE_PEFILE or \ - key == PEStrings.FILE_VERSION_LS_PEFILE or \ - key == PEStrings.PRODUCT_VERSION_LS_PEFILE or \ - key == PEStrings.FILE_DATE_LS_PEFILE: + if ( + key == PEStrings.PE_STRUCT_STR + or key == PEStrings.FILE_SUBTYPE_PEFILE + or key == PEStrings.FILE_VERSION_LS_PEFILE + or key == PEStrings.PRODUCT_VERSION_LS_PEFILE + or key == PEStrings.FILE_DATE_LS_PEFILE + ): continue self._populate_entry(key, vs_fixedfileinfo_dict[key][PEStrings.PE_VALUE_STR], result) # Resolve dependent fields - if PEStrings.FILE_VERSION_MS_PEFILE in vs_fixedfileinfo_dict.keys() and \ - PEStrings.FILE_VERSION_LS_PEFILE in vs_fixedfileinfo_dict.keys(): - self._populate_entry(PEStrings.FILE_VERSION_LS_PEFILE, - vs_fixedfileinfo_dict[PEStrings.FILE_VERSION_LS_PEFILE][PEStrings.PE_VALUE_STR], result) # noqa - - if PEStrings.PRODUCT_VERSION_MS_PEFILE in vs_fixedfileinfo_dict.keys() and \ - PEStrings.PRODUCT_VERSION_LS_PEFILE in vs_fixedfileinfo_dict.keys(): - self._populate_entry(PEStrings.PRODUCT_VERSION_LS_PEFILE, - vs_fixedfileinfo_dict[PEStrings.PRODUCT_VERSION_LS_PEFILE][PEStrings.PE_VALUE_STR], result) # noqa - - if PEStrings.FILE_DATE_MS_PEFILE in vs_fixedfileinfo_dict.keys() and \ - PEStrings.FILE_DATE_LS_PEFILE in vs_fixedfileinfo_dict.keys(): - self._populate_entry(PEStrings.FILE_DATE_LS_PEFILE, - vs_fixedfileinfo_dict[PEStrings.FILE_DATE_LS_PEFILE][PEStrings.PE_VALUE_STR], result) # noqa + if ( + PEStrings.FILE_VERSION_MS_PEFILE in vs_fixedfileinfo_dict.keys() + and PEStrings.FILE_VERSION_LS_PEFILE in vs_fixedfileinfo_dict.keys() + ): + self._populate_entry( + PEStrings.FILE_VERSION_LS_PEFILE, + vs_fixedfileinfo_dict[PEStrings.FILE_VERSION_LS_PEFILE][PEStrings.PE_VALUE_STR], + result, + ) # noqa + + if ( + PEStrings.PRODUCT_VERSION_MS_PEFILE in vs_fixedfileinfo_dict.keys() + and PEStrings.PRODUCT_VERSION_LS_PEFILE in vs_fixedfileinfo_dict.keys() + ): + self._populate_entry( + PEStrings.PRODUCT_VERSION_LS_PEFILE, + vs_fixedfileinfo_dict[PEStrings.PRODUCT_VERSION_LS_PEFILE][PEStrings.PE_VALUE_STR], + result, + ) # noqa + + if ( + PEStrings.FILE_DATE_MS_PEFILE in vs_fixedfileinfo_dict.keys() + and PEStrings.FILE_DATE_LS_PEFILE in vs_fixedfileinfo_dict.keys() + ): + self._populate_entry( + PEStrings.FILE_DATE_LS_PEFILE, + vs_fixedfileinfo_dict[PEStrings.FILE_DATE_LS_PEFILE][PEStrings.PE_VALUE_STR], + result, + ) # noqa if PEStrings.FILE_SUBTYPE_PEFILE in vs_fixedfileinfo_dict.keys(): file_subtype = vs_fixedfileinfo_dict[PEStrings.FILE_SUBTYPE_PEFILE][PEStrings.PE_VALUE_STR] if PEStrings.FILE_TYPE_STR in result and result[PEStrings.FILE_TYPE_STR] == PEStrings.VFT_FONT_STR: if file_subtype in PEStrings.FILE_SUBTYPE_FONT_STRINGS: - result[PEStrings.FILE_SUBTYPE_PEFILE] = PEStrings.FILE_SUBTYPE_FONT_STRINGS[PEStrings.file_subtype] # noqa + result[PEStrings.FILE_SUBTYPE_PEFILE] = PEStrings.FILE_SUBTYPE_FONT_STRINGS[ + PEStrings.file_subtype + ] # noqa else: result[PEStrings.FILE_SUBTYPE_PEFILE] = file_subtype else: @@ -444,7 +471,9 @@ def get_version_dict(self) -> dict: stringfileinfo_dict = {} for strTable in entry.StringTable: for item in strTable.entries.items(): - stringfileinfo_dict[item[0].decode(PEStrings.PE_ENCODING)] = item[1].decode(PEStrings.PE_ENCODING) # noqa + 
stringfileinfo_dict[item[0].decode(PEStrings.PE_ENCODING)] = item[1].decode( + PEStrings.PE_ENCODING + ) # noqa result[PEStrings.STRING_FILE_INFO_STR] = stringfileinfo_dict elif entry.Key.decode(PEStrings.PE_ENCODING).replace("\x00", "") == PEStrings.VAR_FILE_INFO_STR: varfileinfo_dict = {} @@ -503,10 +532,11 @@ class VERSIONINFOGenerator(object): "OriginalFilename": "ExampleApp.efi" } """ + _minimal_required_fields = { PEStrings.FILE_VERSION_STR.upper(), PEStrings.COMPANY_NAME_STR.upper(), - PEStrings.ORIGINAL_FILENAME_STR.upper() + PEStrings.ORIGINAL_FILENAME_STR.upper(), } _version_dict = None @@ -593,18 +623,29 @@ def validate(self) -> bool: if PEStrings.FILE_SUBTYPE_STR in self._version_dict: if self._version_dict[PEStrings.FILE_TYPE_STR] == "VFT_DRV": if self._version_dict[PEStrings.FILE_SUBTYPE_STR] not in PEStrings.VALID_SUBTYPE_VFT_DRV: - logging.error("Invalid FILESUBTYPE value for FILETYPE VFT_DRV: " - + self._version_dict[PEStrings.FILE_SUBTYPE_STR] + ".") + logging.error( + "Invalid FILESUBTYPE value for FILETYPE VFT_DRV: " + + self._version_dict[PEStrings.FILE_SUBTYPE_STR] + + "." + ) valid = False elif self._version_dict[PEStrings.FILE_TYPE_STR] == "VFT_FONT": if self._version_dict[PEStrings.FILE_SUBTYPE_STR] not in PEStrings.VALID_SUBTYPE_VFT_FONT: - logging.error("Invalid FILESUBTYPE value for FILETYPE VFT_FONT: " - + self._version_dict[PEStrings.FILE_SUBTYPE_STR] + ".") + logging.error( + "Invalid FILESUBTYPE value for FILETYPE VFT_FONT: " + + self._version_dict[PEStrings.FILE_SUBTYPE_STR] + + "." + ) valid = False - elif (self._version_dict[PEStrings.FILE_TYPE_STR] != "VFT_VXD" - and self._version_dict[PEStrings.FILE_SUBTYPE_STR] != 0): - logging.error("Invalid FILESUBTYPE value for FILETYPE " - + self._version_dict[PEStrings.FILE_TYPE_STR] + ", value must be 0.") + elif ( + self._version_dict[PEStrings.FILE_TYPE_STR] != "VFT_VXD" + and self._version_dict[PEStrings.FILE_SUBTYPE_STR] != 0 + ): + logging.error( + "Invalid FILESUBTYPE value for FILETYPE " + + self._version_dict[PEStrings.FILE_TYPE_STR] + + ", value must be 0." + ) valid = False elif PEStrings.FILE_SUBTYPE_STR in self._version_dict: logging.error("Missing parameter: must have FileType if FileSubtype defined.") @@ -616,14 +657,22 @@ def validate(self) -> bool: if len(langid_set) != 2: logging.error("Translation field must contain 2 space delimited hexidecimal bytes.") valid = False - elif (int(langid_set[0].replace('"', ''), 0) not in PEStrings.VALID_LANG_ID - or int(langid_set[1].replace('"', ''), 0) not in PEStrings.VALID_CHARSET_ID): - logging.error("Invalid language code: " - + self._version_dict[PEStrings.VAR_FILE_INFO_STR.upper()][PEStrings.TRANSLATION_STR] + ".") # noqa + elif ( + int(langid_set[0].replace('"', ""), 0) not in PEStrings.VALID_LANG_ID + or int(langid_set[1].replace('"', ""), 0) not in PEStrings.VALID_CHARSET_ID + ): + logging.error( + "Invalid language code: " + + self._version_dict[PEStrings.VAR_FILE_INFO_STR.upper()][PEStrings.TRANSLATION_STR] + + "." + ) # noqa valid = False except ValueError: - logging.error("Invalid language code: " - + self._version_dict[PEStrings.VAR_FILE_INFO_STR.upper()][PEStrings.TRANSLATION_STR] + ".") # noqa + logging.error( + "Invalid language code: " + + self._version_dict[PEStrings.VAR_FILE_INFO_STR.upper()][PEStrings.TRANSLATION_STR] + + "." 
+ ) # noqa valid = False else: logging.error("Missing required parameter in VarFileInfo: Translation.") @@ -684,25 +733,69 @@ def write_minimal(self, path: str, version: str) -> bool: version = self._version_dict[PEStrings.FILE_VERSION_STR.upper()].split(".") if len(version) != 4: version = self._version_dict[PEStrings.FILE_VERSION_STR.upper()].split(",") - out_str += version[0] + ',' + version[1] + ',' + version[2] + ',' + version[3] + "\n" + out_str += version[0] + "," + version[1] + "," + version[2] + "," + version[3] + "\n" # StringFileInfo - out_str += "\n" + PEStrings.BEGIN_STR + "\n\t" \ - + PEStrings.BLOCK_STR + " \"" + PEStrings.STRING_FILE_INFO_STR \ - + "\"\n\t" + PEStrings.BEGIN_STR + "\n" + "\t\t" \ - + PEStrings.BLOCK_STR + " \"" + PEStrings.DEFAULT_BLOCK_HEADER \ - + "\"\n\t\t" + PEStrings.BEGIN_STR + "\n" + "\t\t" + PEStrings.VALUE_STR \ - + ' "' + PEStrings.COMPANY_NAME_STR + '",\t"' \ - + self._version_dict[PEStrings.COMPANY_NAME_STR.upper()] + "\"\n" \ - + "\t\t" + PEStrings.VALUE_STR + ' "' + PEStrings.ORIGINAL_FILENAME_STR + '",\t"' \ - + self._version_dict[PEStrings.ORIGINAL_FILENAME_STR.upper()] + "\"\n" \ - + "\t\t" + PEStrings.END_STR + "\n\t" + PEStrings.END_STR + "\n\n" + out_str += ( + "\n" + + PEStrings.BEGIN_STR + + "\n\t" + + PEStrings.BLOCK_STR + + ' "' + + PEStrings.STRING_FILE_INFO_STR + + '"\n\t' + + PEStrings.BEGIN_STR + + "\n" + + "\t\t" + + PEStrings.BLOCK_STR + + ' "' + + PEStrings.DEFAULT_BLOCK_HEADER + + '"\n\t\t' + + PEStrings.BEGIN_STR + + "\n" + + "\t\t" + + PEStrings.VALUE_STR + + ' "' + + PEStrings.COMPANY_NAME_STR + + '",\t"' + + self._version_dict[PEStrings.COMPANY_NAME_STR.upper()] + + '"\n' + + "\t\t" + + PEStrings.VALUE_STR + + ' "' + + PEStrings.ORIGINAL_FILENAME_STR + + '",\t"' + + self._version_dict[PEStrings.ORIGINAL_FILENAME_STR.upper()] + + '"\n' + + "\t\t" + + PEStrings.END_STR + + "\n\t" + + PEStrings.END_STR + + "\n\n" + ) # VarFileInfo - out_str += "\t" + PEStrings.BLOCK_STR + " \"" + PEStrings.VAR_FILE_INFO_STR + '"\n\t' \ - + PEStrings.BEGIN_STR + "\n" + "\t\t" + PEStrings.VALUE_STR + ' "' \ - + PEStrings.TRANSLATION_STR + '",\t' + PEStrings.DEFAULT_TRANSLATION + "\n" \ - + "\t" + PEStrings.END_STR + "\n" + PEStrings.END_STR + "\n#endif" + out_str += ( + "\t" + + PEStrings.BLOCK_STR + + ' "' + + PEStrings.VAR_FILE_INFO_STR + + '"\n\t' + + PEStrings.BEGIN_STR + + "\n" + + "\t\t" + + PEStrings.VALUE_STR + + ' "' + + PEStrings.TRANSLATION_STR + + '",\t' + + PEStrings.DEFAULT_TRANSLATION + + "\n" + + "\t" + + PEStrings.END_STR + + "\n" + + PEStrings.END_STR + + "\n#endif" + ) with open(path, "w") as out: out.write(out_str) @@ -758,38 +851,53 @@ def write(self, path: str, version: str) -> bool: # Header fields out_str += "VS_VERSION_INFO\tVERSIONINFO\n" for param in self._version_dict.keys(): - if (param == PEStrings.STRING_FILE_INFO_STR.upper() - or param == PEStrings.VAR_FILE_INFO_STR.upper()): + if param == PEStrings.STRING_FILE_INFO_STR.upper() or param == PEStrings.VAR_FILE_INFO_STR.upper(): continue if param == PEStrings.PRODUCT_VERSION_STR or param == PEStrings.FILE_VERSION_STR: out_str += param.upper() + "\t" version = self._version_dict[param].split(".") - out_str += version[0] + ',' + version[1] + ',' + version[2] + ',' + version[3] + "\n" + out_str += version[0] + "," + version[1] + "," + version[2] + "," + version[3] + "\n" else: out_str += param.upper() + "\t" + str(self._version_dict[param]) + "\n" # StringFileInfo out_str += "\n" + PEStrings.BEGIN_STR + "\n\t" - out_str += PEStrings.BLOCK_STR + " \"" + 
PEStrings.STRING_FILE_INFO_STR + "\"\n\t" + PEStrings.BEGIN_STR + "\n" + out_str += PEStrings.BLOCK_STR + ' "' + PEStrings.STRING_FILE_INFO_STR + '"\n\t' + PEStrings.BEGIN_STR + "\n" language_code = "" for code in self._version_dict[PEStrings.VAR_FILE_INFO_STR.upper()][PEStrings.TRANSLATION_STR].split(" "): language_code += code.split("0x", 1)[1] - out_str += "\t\t" + PEStrings.BLOCK_STR + " \"" + language_code + "\"\n\t\t" + PEStrings.BEGIN_STR + "\n" + out_str += "\t\t" + PEStrings.BLOCK_STR + ' "' + language_code + '"\n\t\t' + PEStrings.BEGIN_STR + "\n" for field in self._version_dict[PEStrings.STRING_FILE_INFO_STR.upper()].keys(): - out_str += "\t\t" + PEStrings.VALUE_STR + " \"" + field + "\",\t\"" \ - + self._version_dict[PEStrings.STRING_FILE_INFO_STR.upper()][field] + "\"\n" + out_str += ( + "\t\t" + + PEStrings.VALUE_STR + + ' "' + + field + + '",\t"' + + self._version_dict[PEStrings.STRING_FILE_INFO_STR.upper()][field] + + '"\n' + ) out_str += "\t\t" + PEStrings.END_STR + "\n\t" + PEStrings.END_STR + "\n\n" # VarFileInfo out_str += "\t" + PEStrings.BLOCK_STR - out_str += " \"" + PEStrings.VAR_FILE_INFO_STR + "\"\n\t" + PEStrings.BEGIN_STR + "\n" + out_str += ' "' + PEStrings.VAR_FILE_INFO_STR + '"\n\t' + PEStrings.BEGIN_STR + "\n" language_tokens = self._version_dict[PEStrings.VAR_FILE_INFO_STR.upper()][PEStrings.TRANSLATION_STR].split(" ") for field in self._version_dict[PEStrings.VAR_FILE_INFO_STR.upper()].keys(): - out_str += "\t\t" + PEStrings.VALUE_STR + " \"" + field + "\",\t" + language_tokens[0] + "," \ - + language_tokens[1] + "\n" + out_str += ( + "\t\t" + + PEStrings.VALUE_STR + + ' "' + + field + + '",\t' + + language_tokens[0] + + "," + + language_tokens[1] + + "\n" + ) out_str += "\t" + PEStrings.END_STR + "\n" + PEStrings.END_STR + "\n#endif" diff --git a/edk2toolext/versioninfo/versioninfo_tool.py b/edk2toolext/versioninfo/versioninfo_tool.py index 53678091..7cc36a64 100644 --- a/edk2toolext/versioninfo/versioninfo_tool.py +++ b/edk2toolext/versioninfo/versioninfo_tool.py @@ -46,24 +46,38 @@ """ % (TOOL_VERSION, os.path.basename(sys.argv[0]), os.path.basename(sys.argv[0])) -def get_cli_options(args:typing.Sequence[str]=None) -> argparse.Namespace: +def get_cli_options(args: typing.Sequence[str] = None) -> argparse.Namespace: """Parse options from the command line. Will parse the primary options from the command line. If provided, will take the options as an array in the first parameter """ parser = argparse.ArgumentParser(description=TOOL_DESCRIPTION, formatter_class=argparse.RawDescriptionHelpFormatter) - parser.add_argument('input_file', type=str, - help='a filesystem path to a json/PE file to load') - parser.add_argument('output_file', type=str, - help='a filesystem path to the output file. if file does not exist, entire directory path will be created. if file does exist, contents will be overwritten') # noqa + parser.add_argument("input_file", type=str, help="a filesystem path to a json/PE file to load") + parser.add_argument( + "output_file", + type=str, + help="a filesystem path to the output file. if file does not exist, entire directory path will be created. 
if file does exist, contents will be overwritten", + ) # noqa command_group = parser.add_mutually_exclusive_group() - command_group.add_argument('-e', '--encode', action='store_const', const='e', dest='mode', - help='(default) outputs VERSIONINFO.rc of given json file') - command_group.add_argument('-d', '--dump', action='store_const', dest='mode', const='d', - help='outputs json file of VERSIONINFO given PE file') - parser.set_defaults(mode='e') + command_group.add_argument( + "-e", + "--encode", + action="store_const", + const="e", + dest="mode", + help="(default) outputs VERSIONINFO.rc of given json file", + ) + command_group.add_argument( + "-d", + "--dump", + action="store_const", + dest="mode", + const="d", + help="outputs json file of VERSIONINFO given PE file", + ) + parser.set_defaults(mode="e") return parser.parse_args(args=args) @@ -136,11 +150,11 @@ def main() -> None: logging.error("Could not find " + args.input_file) sys.exit(1) - if args.mode == 'd': + if args.mode == "d": # we need to dump if not decode_version_info_dump_json(args.input_file, args.output_file): sys.exit(1) - elif args.mode == 'e': + elif args.mode == "e": # we need to encode if not encode_version_info_dump_rc(args.input_file, args.output_file): sys.exit(1) @@ -150,5 +164,5 @@ def main() -> None: sys.exit(1) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/edk2toolext/windows/policy/firmware_policy_tool.py b/edk2toolext/windows/policy/firmware_policy_tool.py index 5bfdb92e..c01e4aea 100644 --- a/edk2toolext/windows/policy/firmware_policy_tool.py +++ b/edk2toolext/windows/policy/firmware_policy_tool.py @@ -16,7 +16,7 @@ def PrintPolicy(filename: str) -> None: """Attempts to parse filename as a Windows Firmware Policy and print it.""" try: - with open(filename, 'rb') as f: + with open(filename, "rb") as f: policy = FirmwarePolicy(fs=f) policy.Print() @@ -24,20 +24,23 @@ def PrintPolicy(filename: str) -> None: print('ERROR: File not found: "{0}"'.format(filename)) -def CreatePolicyFromParameters(filename: str, manufacturer: str, product: str, - sn: str, nonce: int, oem1: str, oem2: str, devicePolicy: int) -> None: +def CreatePolicyFromParameters( + filename: str, manufacturer: str, product: str, sn: str, nonce: int, oem1: str, oem2: str, devicePolicy: int +) -> None: """Populates a Windows FirmwarePolicy object with the provided parameters and serializes it to filename. WARNING: Filename must be a new file, will not overwrite existing files. 
""" - with open(filename, 'xb') as f: + with open(filename, "xb") as f: policy = FirmwarePolicy() - TargetInfo = {'Manufacturer': manufacturer, - 'Product': product, - 'SerialNumber': sn, - 'OEM_01': oem1, - 'OEM_02': oem2, - 'Nonce': nonce} + TargetInfo = { + "Manufacturer": manufacturer, + "Product": product, + "SerialNumber": sn, + "OEM_01": oem1, + "OEM_02": oem2, + "Nonce": nonce, + } policy.SetDeviceTarget(TargetInfo) policy.SetDevicePolicy(devicePolicy) policy.SerializeToStream(stream=f) @@ -46,43 +49,63 @@ def CreatePolicyFromParameters(filename: str, manufacturer: str, product: str, def main() -> None: """Parses command-line parameters using ArgumentParser, passing them to helper functions to perform the requests.""" - parser = argparse.ArgumentParser(description='Firmware Policy Tool') - subparsers = parser.add_subparsers(required=True, dest='action') + parser = argparse.ArgumentParser(description="Firmware Policy Tool") + subparsers = parser.add_subparsers(required=True, dest="action") - parser_create = subparsers.add_parser('create', help='Create a firmware policy') - parser_create.add_argument('PolicyFilename', type=str, help='The name of the new binary policy file to create ' - '- will not overwrite existing files') + parser_create = subparsers.add_parser("create", help="Create a firmware policy") parser_create.add_argument( - 'Manufacturer', type=str, help='Manufacturer Name, for example, "Contoso Computers, LLC". ' - 'Should match the EV Certificate Subject CN="Manufacturer"') - parser_create.add_argument('Product', type=str, help='Product Name, for example, "Laptop Foo"') + "PolicyFilename", + type=str, + help="The name of the new binary policy file to create " "- will not overwrite existing files", + ) parser_create.add_argument( - 'SerialNumber', type=str, help='Serial Number, for example "F0013-000243546-X02". Should match ' - 'SmbiosSystemSerialNumber, SMBIOS System Information (Type 1 Table) -> Serial Number') - parser_create.add_argument('NonceHex', type=str, help='The nonce in hexadecimal, for example "0x0123456789abcdef"') - parser_create.add_argument('--OEM1', type=str, default='', - help='Optional OEM Field 1, an arbitrary length string, for example "ODM foo"') - parser_create.add_argument('--OEM2', type=str, default='', help='Optional OEM Field 2, an arbitrary length string') - parser_create.add_argument('DevicePolicyHex', type=str, help='The device policy in hexadecimal,' - ' for example to clear the TPM and delete Secure Boot keys: 0x3') - - parser_print = subparsers.add_parser('parse', help='Parse a firmware policy and print in human readable form') - parser_print.add_argument('filename', help='Filename to parse and print') + "Manufacturer", + type=str, + help='Manufacturer Name, for example, "Contoso Computers, LLC". ' + 'Should match the EV Certificate Subject CN="Manufacturer"', + ) + parser_create.add_argument("Product", type=str, help='Product Name, for example, "Laptop Foo"') + parser_create.add_argument( + "SerialNumber", + type=str, + help='Serial Number, for example "F0013-000243546-X02". 
Should match ' + "SmbiosSystemSerialNumber, SMBIOS System Information (Type 1 Table) -> Serial Number", + ) + parser_create.add_argument("NonceHex", type=str, help='The nonce in hexadecimal, for example "0x0123456789abcdef"') + parser_create.add_argument( + "--OEM1", type=str, default="", help='Optional OEM Field 1, an arbitrary length string, for example "ODM foo"' + ) + parser_create.add_argument("--OEM2", type=str, default="", help="Optional OEM Field 2, an arbitrary length string") + parser_create.add_argument( + "DevicePolicyHex", + type=str, + help="The device policy in hexadecimal," " for example to clear the TPM and delete Secure Boot keys: 0x3", + ) + + parser_print = subparsers.add_parser("parse", help="Parse a firmware policy and print in human readable form") + parser_print.add_argument("filename", help="Filename to parse and print") options = parser.parse_args() - print('Options: ', options) + print("Options: ", options) - if options.action == 'create': + if options.action == "create": nonceInt = int(options.NonceHex, 16) devicePolicy = int(options.DevicePolicyHex, 16) - CreatePolicyFromParameters(options.PolicyFilename, options.Manufacturer, - options.Product, options.SerialNumber, nonceInt, - options.OEM1, options.OEM2, devicePolicy) - - elif options.action == 'parse': + CreatePolicyFromParameters( + options.PolicyFilename, + options.Manufacturer, + options.Product, + options.SerialNumber, + nonceInt, + options.OEM1, + options.OEM2, + devicePolicy, + ) + + elif options.action == "parse": PrintPolicy(options.filename) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/edk2toolext/windows/secureboot/secureboot_audit.py b/edk2toolext/windows/secureboot/secureboot_audit.py index 6bb1acf6..47622a12 100644 --- a/edk2toolext/windows/secureboot/secureboot_audit.py +++ b/edk2toolext/windows/secureboot/secureboot_audit.py @@ -44,11 +44,11 @@ # The supported files to retrieve from UEFI and their guids, additional files can be added here SECUREBOOT_FILES = { - "dbx": "{d719b2cb-3d3a-4596-a3bc-dad00e67656f}", # EFI_IMAGE_SECURITY_DATABASE_GUID - "db": "{d719b2cb-3d3a-4596-a3bc-dad00e67656f}", # EFI_IMAGE_SECURITY_DATABASE_GUID - "dbt": "{d719b2cb-3d3a-4596-a3bc-dad00e67656f}", # EFI_IMAGE_SECURITY_DATABASE_GUID - "KEK": "{8BE4DF61-93CA-11d2-AA0D-00E098032B8C}", # EFI_GLOBAL_VARIABLE - "PK": "{8BE4DF61-93CA-11d2-AA0D-00E098032B8C}" # EFI_GLOBAL_VARIABLE + "dbx": "{d719b2cb-3d3a-4596-a3bc-dad00e67656f}", # EFI_IMAGE_SECURITY_DATABASE_GUID + "db": "{d719b2cb-3d3a-4596-a3bc-dad00e67656f}", # EFI_IMAGE_SECURITY_DATABASE_GUID + "dbt": "{d719b2cb-3d3a-4596-a3bc-dad00e67656f}", # EFI_IMAGE_SECURITY_DATABASE_GUID + "KEK": "{8BE4DF61-93CA-11d2-AA0D-00E098032B8C}", # EFI_GLOBAL_VARIABLE + "PK": "{8BE4DF61-93CA-11d2-AA0D-00E098032B8C}", # EFI_GLOBAL_VARIABLE } KNOWN_CERTIFICATES = [ @@ -150,6 +150,7 @@ def write_xlsx_file(report: dict, output_file: str) -> None: logger.info("Wrote report to %s", output_file) + def convert_row_to_metadata(row: list) -> dict: """Converts a row from the csv to a metadata dictionary. 
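
The hunks above and below follow one mechanical rule set: `ruff format` rewrites string literals to double quotes, normalizes inline comments to two leading spaces, and, once a collection spans multiple lines, keeps one element per line with a trailing comma (the "magic trailing comma"). A minimal sketch of the rule on a hypothetical dict (`guids` is illustrative, not code from this patch):

    # Before formatting: single quotes and no trailing comma on the last entry.
    guids = {
        'dbx': 'guid-1',  # inline comments are preserved
        'db': 'guid-2'
    }

    # After `ruff format`: double quotes, a trailing comma, and two spaces
    # before inline comments, so adding an entry later is a one-line diff.
    guids = {
        "dbx": "guid-1",  # inline comments are preserved
        "db": "guid-2",
    }
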
@@ -172,7 +173,7 @@ def map_cve(cve: str) -> str: str: Mapped CVE """ # Intentionally naive mapping of CVE's to the correct format - if 'CVE' in cve.upper(): + if "CVE" in cve.upper(): # this condition could have multiple CVE's in it return cve elif "black lotus" in cve.lower(): @@ -187,9 +188,7 @@ def map_cve(cve: str) -> str: "component": row[2] if row[2] != "" else None, "arch": convert_arch.get(row[3], row[3]), "partner": row[4], - "type": "certificate" - if authenticode_hash in KNOWN_CERTIFICATES - else "authenticode", + "type": "certificate" if authenticode_hash in KNOWN_CERTIFICATES else "authenticode", "cves": map_cve(row[5]), "date": row[6].replace("\n", ""), "authority": None, @@ -302,9 +301,7 @@ def generate_dbx_report(dbx_fs: BinaryIO, revocations: dict) -> dict: # Is this in our revocation list? if formatted_signature in revocations: - report["identified"]["dict"][formatted_signature] = revocations[ - formatted_signature - ] + report["identified"]["dict"][formatted_signature] = revocations[formatted_signature] report["identified"]["dict"][formatted_signature]["count"] = 1 # if we have identified the revocation, remove it from the list @@ -323,9 +320,7 @@ def generate_dbx_report(dbx_fs: BinaryIO, revocations: dict) -> dict: # Add totals report["identified"]["total"] = len(report["identified"]["dict"]) report["not_found"]["total"] = len(report["not_found"]["list"]) - report["missing_protections"]["total"] = len( - report["missing_protections"]["dict"] - ) + report["missing_protections"]["total"] = len(report["missing_protections"]["dict"]) # Check for flat hashes for supposed_authenticode_hash in report["not_found"]["list"]: @@ -334,14 +329,12 @@ def generate_dbx_report(dbx_fs: BinaryIO, revocations: dict) -> dict: break if supposed_authenticode_hash == revocations[supposed_flat_hash]["flat_hash_sha256"]: - logger.warning( - "This hash appears to be a flat sha256 hash: %s", supposed_authenticode_hash - ) + logger.warning("This hash appears to be a flat sha256 hash: %s", supposed_authenticode_hash) return report -def filter_revocation_list_by_arch(revocations: dict, filter_by_arch: str=None) -> dict: +def filter_revocation_list_by_arch(revocations: dict, filter_by_arch: str = None) -> dict: """Filters the revocation list by architecture. 
Args: @@ -362,6 +355,7 @@ def filter_revocation_list_by_arch(revocations: dict, filter_by_arch: str=None) return revocations + ################################################################################################### # Classes ################################################################################################### @@ -373,25 +367,19 @@ class FirmwareVariables(object): def __init__(self) -> None: """Constructor.""" # enable required SeSystemEnvironmentPrivilege privilege - privilege = win32security.LookupPrivilegeValue( - None, "SeSystemEnvironmentPrivilege" - ) + privilege = win32security.LookupPrivilegeValue(None, "SeSystemEnvironmentPrivilege") token = win32security.OpenProcessToken( win32process.GetCurrentProcess(), win32security.TOKEN_READ | win32security.TOKEN_ADJUST_PRIVILEGES, ) - win32security.AdjustTokenPrivileges( - token, False, [(privilege, win32security.SE_PRIVILEGE_ENABLED)] - ) + win32security.AdjustTokenPrivileges(token, False, [(privilege, win32security.SE_PRIVILEGE_ENABLED)]) win32api.CloseHandle(token) try: - self._GetFirmwareEnvironmentVariable = ( - KERNEL32.GetFirmwareEnvironmentVariableW - ) + self._GetFirmwareEnvironmentVariable = KERNEL32.GetFirmwareEnvironmentVariableW self._GetFirmwareEnvironmentVariable.restype = ctypes.c_int self._GetFirmwareEnvironmentVariable.argtypes = [ ctypes.c_wchar_p, @@ -414,9 +402,7 @@ def get_variable(self, name: str, guid: str) -> bytes: The value of the variable """ if self._GetFirmwareEnvironmentVariable is None: - raise NotImplementedError( - "GetFirmwareEnvironmentVariable is not implemented" - ) + raise NotImplementedError("GetFirmwareEnvironmentVariable is not implemented") buffer = ctypes.create_string_buffer(EFI_VAR_MAX_BUFFER_SIZE) buffer_size = ctypes.c_int(EFI_VAR_MAX_BUFFER_SIZE) @@ -455,9 +441,7 @@ def get_secureboot_files(args: argparse.Namespace) -> int: # To make this cross platform, we need to make FirmwareVariables support other platforms fw_vars = FirmwareVariables() - var = fw_vars.get_variable( - args.secureboot_file, SECUREBOOT_FILES[args.secureboot_file] - ) + var = fw_vars.get_variable(args.secureboot_file, SECUREBOOT_FILES[args.secureboot_file]) output_file = os.path.join(args.output, f"{args.secureboot_file}.bin") @@ -486,7 +470,6 @@ def parse_dbx(args: argparse.Namespace) -> int: revocations = json.loads(rev_fs.read()) with open(args.dbx_file, "rb") as dbx_fs: - revocations = filter_revocation_list_by_arch(revocations, args.filter_by_arch) report = generate_dbx_report(dbx_fs, revocations) @@ -502,7 +485,8 @@ def parse_dbx(args: argparse.Namespace) -> int: # Command Line Parsing Functions ################################################################################################### -def valid_file(param: str, valid_extensions: tuple=(".csv", ".xlsx")) -> str: + +def valid_file(param: str, valid_extensions: tuple = (".csv", ".xlsx")) -> str: """Checks if a file is valid. 
Args: @@ -514,7 +498,7 @@ def valid_file(param: str, valid_extensions: tuple=(".csv", ".xlsx")) -> str: """ base, ext = os.path.splitext(param) if ext.lower() not in valid_extensions: - raise argparse.ArgumentTypeError('File must be one of the following types: {}'.format(valid_extensions)) + raise argparse.ArgumentTypeError("File must be one of the following types: {}".format(valid_extensions)) return param @@ -546,8 +530,7 @@ def setup_parse_dbx(subparsers: argparse._SubParsersAction) -> argparse._SubPars parser.add_argument( "--output", - help="Output file to write the dbx contents to" - + " (note: extension will be based on the format)", + help="Output file to write the dbx contents to" + " (note: extension will be based on the format)", default=os.path.join(DEFAULT_OUTPUT_FOLDER, "dbx_report"), ) @@ -560,6 +543,7 @@ def setup_parse_dbx(subparsers: argparse._SubParsersAction) -> argparse._SubPars return subparsers + def setup_parse_uefi_org_files(subparsers: argparse._SubParsersAction) -> argparse._SubParsersAction: """Setup the parse_uefi_org_files subparser. diff --git a/tests.unit/capsule/test_capsule_helper.py b/tests.unit/capsule/test_capsule_helper.py index c6057a63..819d7289 100644 --- a/tests.unit/capsule/test_capsule_helper.py +++ b/tests.unit/capsule/test_capsule_helper.py @@ -18,23 +18,20 @@ from edk2toolext.capsule import capsule_helper DUMMY_OPTIONS = { - 'capsule': { - 'fw_version': '0xDEADBEEF', - 'lsv_version': '0xFEEDF00D', - 'esrt_guid': '00112233-4455-6677-8899-aabbccddeeff', - 'fw_name': 'TEST_FW', - 'fw_version_string': '1.2.3', # deliberately use 3-part version to exercise version normalization. - 'provider_name': 'TESTER', - 'fw_description': 'TEST FW', - 'fw_integrity_file': "IntegrityFile.bin" + "capsule": { + "fw_version": "0xDEADBEEF", + "lsv_version": "0xFEEDF00D", + "esrt_guid": "00112233-4455-6677-8899-aabbccddeeff", + "fw_name": "TEST_FW", + "fw_version_string": "1.2.3", # deliberately use 3-part version to exercise version normalization. 
+ "provider_name": "TESTER", + "fw_description": "TEST FW", + "fw_integrity_file": "IntegrityFile.bin", }, - 'signer': { - 'option2': 'value2', - 'option_not': 'orig_value' - } + "signer": {"option2": "value2", "option_not": "orig_value"}, } -DUMMY_OPTIONS_FILE_NAME = 'dummy_options_file' -DUMMY_PAYLOAD_FILE_NAME = 'dummy_payload' +DUMMY_OPTIONS_FILE_NAME = "dummy_options_file" +DUMMY_PAYLOAD_FILE_NAME = "dummy_payload" class CapsuleSignerTest(unittest.TestCase): @@ -45,26 +42,26 @@ def setUpClass(cls): cls.temp_dir = tempfile.mkdtemp() cls.dummy_payload = os.path.join(cls.temp_dir, DUMMY_PAYLOAD_FILE_NAME + ".bin") - with open(cls.dummy_payload, 'wb') as dummy_file: - dummy_file.write(b'DEADBEEF') + with open(cls.dummy_payload, "wb") as dummy_file: + dummy_file.write(b"DEADBEEF") def test_should_pass_wrapped_blob_to_signing_module(self): - dummy_payload = b'This_Is_My_Sample_Payload,ThereAreManyLikeIt;This One Is Mine' + dummy_payload = b"This_Is_My_Sample_Payload,ThereAreManyLikeIt;This One Is Mine" class DummySigner(object): @classmethod def sign(cls, data, signature_options, signer_options): self.assertTrue(dummy_payload in data) - capsule_helper.build_capsule(dummy_payload, DUMMY_OPTIONS['capsule'], DummySigner, DUMMY_OPTIONS['signer']) + capsule_helper.build_capsule(dummy_payload, DUMMY_OPTIONS["capsule"], DummySigner, DUMMY_OPTIONS["signer"]) def test_should_pass_signer_options_to_signing_module(self): class DummySigner(object): @classmethod def sign(cls, data, signature_options, signer_options): - self.assertEqual(signer_options, DUMMY_OPTIONS['signer']) + self.assertEqual(signer_options, DUMMY_OPTIONS["signer"]) - capsule_helper.build_capsule(b'030303', DUMMY_OPTIONS['capsule'], DummySigner, DUMMY_OPTIONS['signer']) + capsule_helper.build_capsule(b"030303", DUMMY_OPTIONS["capsule"], DummySigner, DUMMY_OPTIONS["signer"]) # def test_should_be_able_to_generate_a_production_equivalent_capsule(self): # with open(BUILD_CAPSULE_BINARY_PATH, 'rb') as data_file: @@ -129,12 +126,12 @@ def setUpClass(cls): cls.temp_dir = tempfile.mkdtemp() cls.dummy_payload = os.path.join(cls.temp_dir, DUMMY_PAYLOAD_FILE_NAME + ".bin") - with open(cls.dummy_payload, 'wb') as dummy_file: - dummy_file.write(b'DEADBEEF') + with open(cls.dummy_payload, "wb") as dummy_file: + dummy_file.write(b"DEADBEEF") def test_should_be_able_to_save_a_capsule(self): fmp_capsule_image_header = FmpCapsuleImageHeaderClass() - fmp_capsule_image_header.UpdateImageTypeId = uuid.UUID(DUMMY_OPTIONS['capsule']['esrt_guid']) + fmp_capsule_image_header.UpdateImageTypeId = uuid.UUID(DUMMY_OPTIONS["capsule"]["esrt_guid"]) fmp_capsule_image_header.UpdateImageIndex = 1 fmp_capsule_header = FmpCapsuleHeaderClass() @@ -145,26 +142,25 @@ def test_should_be_able_to_save_a_capsule(self): uefi_capsule_header.PersistAcrossReset = True uefi_capsule_header.InitiateReset = True - capsule_file_path = capsule_helper.save_capsule(uefi_capsule_header, DUMMY_OPTIONS['capsule'], self.temp_dir) + capsule_file_path = capsule_helper.save_capsule(uefi_capsule_header, DUMMY_OPTIONS["capsule"], self.temp_dir) # Now read the data and check for the GUID. 
- with open(capsule_file_path, 'rb') as capsule_file: + with open(capsule_file_path, "rb") as capsule_file: capsule_bytes = capsule_file.read() - self.assertTrue(uuid.UUID(DUMMY_OPTIONS['capsule']['esrt_guid']).bytes_le in capsule_bytes) + self.assertTrue(uuid.UUID(DUMMY_OPTIONS["capsule"]["esrt_guid"]).bytes_le in capsule_bytes) def test_should_be_able_to_generate_windows_files(self): - inf_file_path = capsule_helper.create_inf_file(DUMMY_OPTIONS['capsule'], self.temp_dir) + inf_file_path = capsule_helper.create_inf_file(DUMMY_OPTIONS["capsule"], self.temp_dir) self.assertTrue(os.path.isfile(inf_file_path)) @unittest.skip("test fails in unittest environment. need to debug") def test_should_be_able_to_generate_cat(self): - cat_file_path = capsule_helper.create_cat_file(DUMMY_OPTIONS['capsule'], self.temp_dir) + cat_file_path = capsule_helper.create_cat_file(DUMMY_OPTIONS["capsule"], self.temp_dir) self.assertTrue(os.path.isfile(cat_file_path)) class MultiNodeFileGenerationTest(unittest.TestCase): - @staticmethod def buildPayload(esrt): fmp_capsule_image_header = FmpCapsuleImageHeaderClass() @@ -198,7 +194,7 @@ def setUpClass(cls): "test1.bin", uuid.UUID("ea5c13fe-cac9-4fd7-ac30-37709bd668f2"), 0xDEADBEEF, - "TEST FW" + "TEST FW", ) ) @@ -208,24 +204,22 @@ def setUpClass(cls): "test2.bin", uuid.UUID("43e67b4e-b2f1-4891-9ff2-a6acd9c74cbd"), 0xDEADBEEF, - "TEST FW" + "TEST FW", ) ) def test_should_be_able_to_save_a_multi_node_capsule(self): - capsule_file_path = capsule_helper.save_multinode_capsule(self.capsule, self.temp_output_dir) # make sure all the files we expect got created for payload in self.capsule.payloads: payload_file = os.path.join(capsule_file_path, payload.payload_filename) self.assertTrue(os.path.isfile(payload_file)) - with open(payload_file, 'rb') as fh: + with open(payload_file, "rb") as fh: capsule_bytes = fh.read() self.assertIn(payload.esrt_guid.bytes_le, capsule_bytes) def test_should_be_able_to_save_a_multi_node_capsule_with_integrity(self): - self.capsule.payloads[0].integrity_data = uuid.UUID("ea5c13fe-cac9-4fd7-ac30-37709bd668f2").bytes self.capsule.payloads[0].integrity_filename = "integrity1.bin" @@ -237,13 +231,13 @@ def test_should_be_able_to_save_a_multi_node_capsule_with_integrity(self): for payload in self.capsule.payloads: payload_file = os.path.join(capsule_file_path, payload.payload_filename) self.assertTrue(os.path.isfile(payload_file)) - with open(payload_file, 'rb') as fh: + with open(payload_file, "rb") as fh: capsule_bytes = fh.read() self.assertIn(payload.esrt_guid.bytes_le, capsule_bytes) integrityFile = os.path.join(capsule_file_path, payload.integrity_filename) self.assertTrue(os.path.isfile(integrityFile)) - with open(integrityFile, 'rb') as fh: + with open(integrityFile, "rb") as fh: integrity_bytes = fh.read() self.assertIn(payload.integrity_data, integrity_bytes) @@ -254,10 +248,9 @@ def test_should_be_able_to_save_a_multi_node_capsule_with_integrity(self): self.capsule.payloads[1].integrity_filename = None def test_should_be_able_to_generate_multi_node_inf_file(self): - inf_file_path = capsule_helper.create_multinode_inf_file(self.capsule, self.temp_output_dir) self.assertTrue(os.path.isfile(inf_file_path)) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests.unit/capsule/test_capsule_tool.py b/tests.unit/capsule/test_capsule_tool.py index ac28bee0..635d1499 100644 --- a/tests.unit/capsule/test_capsule_tool.py +++ b/tests.unit/capsule/test_capsule_tool.py @@ -16,17 +16,9 @@ from edk2toolext.capsule 
import capsule_tool -DUMMY_OPTIONS = { - 'capsule': { - 'option1': 'value1' - }, - 'signer': { - 'option2': 'value2', - 'option_not': 'orig_value' - } -} -DUMMY_OPTIONS_FILE_NAME = 'dummy_options_file' -DUMMY_PAYLOAD_FILE_NAME = 'dummy_payload' +DUMMY_OPTIONS = {"capsule": {"option1": "value1"}, "signer": {"option2": "value2", "option_not": "orig_value"}} +DUMMY_OPTIONS_FILE_NAME = "dummy_options_file" +DUMMY_PAYLOAD_FILE_NAME = "dummy_payload" class ParameterParsingTest(unittest.TestCase): @@ -39,96 +31,94 @@ def setUpClass(cls): cls.dummy_yaml_options = os.path.join(cls.temp_dir, DUMMY_OPTIONS_FILE_NAME + ".yaml") cls.dummy_payload = os.path.join(cls.temp_dir, DUMMY_PAYLOAD_FILE_NAME + ".bin") - with open(cls.dummy_json_options, 'w') as dummy_file: + with open(cls.dummy_json_options, "w") as dummy_file: json.dump(DUMMY_OPTIONS, dummy_file) - with open(cls.dummy_yaml_options, 'w') as dummy_file: + with open(cls.dummy_yaml_options, "w") as dummy_file: yaml.dump(DUMMY_OPTIONS, dummy_file) - with open(cls.dummy_payload, 'wb') as dummy_file: - dummy_file.write(b'DEADBEEF') + with open(cls.dummy_payload, "wb") as dummy_file: + dummy_file.write(b"DEADBEEF") @unittest.skip("test is incomplete") def test_should_require_a_signer_option(self): pass def test_capsule_options_should_be_passable(self): - cli_params = ['--builtin_signer', 'pyopenssl'] + cli_params = ["--builtin_signer", "pyopenssl"] parsed_args = capsule_tool.get_cli_options(cli_params + [self.dummy_payload, self.temp_dir]) self.assertEqual(len(parsed_args.capsule_options), 0) - cli_params += ['-dc', 'option1=value1'] - cli_params += ['-dc', 'option2=value2'] + cli_params += ["-dc", "option1=value1"] + cli_params += ["-dc", "option2=value2"] cli_params += [self.dummy_payload, self.temp_dir] parsed_args = capsule_tool.get_cli_options(cli_params) self.assertEqual(len(parsed_args.capsule_options), 2) def test_signer_options_should_be_passable(self): - cli_params = ['--builtin_signer', 'pyopenssl'] + cli_params = ["--builtin_signer", "pyopenssl"] parsed_args = capsule_tool.get_cli_options(cli_params + [self.dummy_payload, self.temp_dir]) self.assertEqual(len(parsed_args.signer_options), 0) - cli_params += ['-ds', 'option1=value1'] - cli_params += ['-ds', 'option2=value2'] + cli_params += ["-ds", "option1=value1"] + cli_params += ["-ds", "option2=value2"] cli_params += [self.dummy_payload, self.temp_dir] parsed_args = capsule_tool.get_cli_options(cli_params) self.assertEqual(len(parsed_args.signer_options), 2) def test_should_not_accept_an_invalid_path(self): - cli_params = ['--builtin_signer', 'pyopenssl'] - cli_params += ['-o', 'not_a_path.bin'] + cli_params = ["--builtin_signer", "pyopenssl"] + cli_params += ["-o", "not_a_path.bin"] cli_params += [self.dummy_payload] with self.assertRaises(SystemExit): capsule_tool.get_cli_options(cli_params) def test_should_not_load_an_invalid_path(self): - bad_path = 'not_a_path.bin' + bad_path = "not_a_path.bin" loaded_options = capsule_tool.load_options_file(bad_path) self.assertEqual(loaded_options, None) def test_options_file_should_load_json(self): - with open(self.dummy_json_options, 'r') as options_file: + with open(self.dummy_json_options, "r") as options_file: loaded_options = capsule_tool.load_options_file(options_file) - self.assertEqual(loaded_options['capsule']['option1'], 'value1') - self.assertEqual(loaded_options['signer']['option2'], 'value2') + self.assertEqual(loaded_options["capsule"]["option1"], "value1") + self.assertEqual(loaded_options["signer"]["option2"], "value2") def 
test_options_file_should_load_yaml(self): - with open(self.dummy_yaml_options, 'r') as options_file: + with open(self.dummy_yaml_options, "r") as options_file: loaded_options = capsule_tool.load_options_file(options_file) - self.assertEqual(loaded_options['capsule']['option1'], 'value1') - self.assertEqual(loaded_options['signer']['option2'], 'value2') + self.assertEqual(loaded_options["capsule"]["option1"], "value1") + self.assertEqual(loaded_options["signer"]["option2"], "value2") # @pytest.mark.skip(reason="test is incomplete") def test_cli_options_should_override_file_options(self): - capsule_cli_options = ['option1=value2', 'new_option=value3'] - signer_cli_options = ['option2=value7', 'option2=value8'] + capsule_cli_options = ["option1=value2", "new_option=value3"] + signer_cli_options = ["option2=value7", "option2=value8"] final_options = capsule_tool.update_options(DUMMY_OPTIONS, capsule_cli_options, signer_cli_options) - self.assertEqual(final_options['capsule']['option1'], 'value2') - self.assertEqual(final_options['capsule']['new_option'], 'value3') - self.assertEqual(final_options['signer']['option2'], 'value8') - self.assertEqual(final_options['signer']['option_not'], 'orig_value') + self.assertEqual(final_options["capsule"]["option1"], "value2") + self.assertEqual(final_options["capsule"]["new_option"], "value3") + self.assertEqual(final_options["signer"]["option2"], "value8") + self.assertEqual(final_options["signer"]["option_not"], "orig_value") def test_full_options_path_should_work(self): # Parse the command parameters. - cli_params = ['--builtin_signer', 'pyopenssl'] - cli_params += ['-o', self.dummy_json_options] - cli_params += ['-dc', 'option1=value2'] - cli_params += ['-dc', 'new_option=value3'] - cli_params += ['-ds', 'option2=value7'] - cli_params += ['-ds', 'option2=value8'] + cli_params = ["--builtin_signer", "pyopenssl"] + cli_params += ["-o", self.dummy_json_options] + cli_params += ["-dc", "option1=value2"] + cli_params += ["-dc", "new_option=value3"] + cli_params += ["-ds", "option2=value7"] + cli_params += ["-ds", "option2=value8"] cli_params += [self.dummy_payload, self.temp_dir] parsed_args = capsule_tool.get_cli_options(cli_params) loaded_options = capsule_tool.load_options_file(parsed_args.options_file) final_options = capsule_tool.update_options( - loaded_options, - parsed_args.capsule_options, - parsed_args.signer_options + loaded_options, parsed_args.capsule_options, parsed_args.signer_options ) - self.assertEqual(final_options['capsule']['option1'], 'value2') - self.assertEqual(final_options['capsule']['new_option'], 'value3') - self.assertEqual(final_options['signer']['option2'], 'value8') - self.assertEqual(final_options['signer']['option_not'], 'orig_value') + self.assertEqual(final_options["capsule"]["option1"], "value2") + self.assertEqual(final_options["capsule"]["new_option"], "value3") + self.assertEqual(final_options["signer"]["option2"], "value8") + self.assertEqual(final_options["signer"]["option_not"], "orig_value") diff --git a/tests.unit/capsule/test_signing_helper.py b/tests.unit/capsule/test_signing_helper.py index 808faeb0..e5289d91 100644 --- a/tests.unit/capsule/test_signing_helper.py +++ b/tests.unit/capsule/test_signing_helper.py @@ -18,23 +18,23 @@ class SignerLocationTests(unittest.TestCase): def test_should_be_able_to_fetch_a_builtin_signer_module(self): py_signer = signing_helper.get_signer(signing_helper.PYOPENSSL_SIGNER) - self.assertTrue(hasattr(py_signer, 'sign')) + self.assertTrue(hasattr(py_signer, "sign")) 
signtoolsigner = signing_helper.get_signer(signing_helper.SIGNTOOL_SIGNER) - self.assertTrue(hasattr(signtoolsigner, 'sign')) + self.assertTrue(hasattr(signtoolsigner, "sign")) def test_should_be_able_to_pass_a_signing_module(self): py_signer = signing_helper.get_signer( - signing_helper.PYPATH_MODULE_SIGNER, - 'edk2toolext.capsule.pyopenssl_signer' + signing_helper.PYPATH_MODULE_SIGNER, "edk2toolext.capsule.pyopenssl_signer" ) - self.assertTrue(hasattr(py_signer, 'sign')) + self.assertTrue(hasattr(py_signer, "sign")) def test_should_be_able_to_fetch_a_user_provided_signer_module(self): py_signer_path = pyopenssl_signer.__file__ self.assertTrue(os.path.isfile(py_signer_path)) py_signer = signing_helper.get_signer(signing_helper.LOCAL_MODULE_SIGNER, py_signer_path) - self.assertTrue(hasattr(py_signer, 'sign')) + self.assertTrue(hasattr(py_signer, "sign")) + # NOTE: These tests may not run on non-Windows or without the WDK installed. # class SigntoolSignerModuleTest(unittest.TestCase): diff --git a/tests.unit/test_az_cli_universal_dependency.py b/tests.unit/test_az_cli_universal_dependency.py index a5fa27a4..10818334 100644 --- a/tests.unit/test_az_cli_universal_dependency.py +++ b/tests.unit/test_az_cli_universal_dependency.py @@ -25,7 +25,7 @@ test_dir = None -single_file_json_template = ''' +single_file_json_template = """ { "scope": "global", "type": "az-universal", @@ -36,9 +36,9 @@ "feed": "ext_dep_unit_test_feed", "pat_var": "PAT_FOR_UNIVERSAL_ORG_TIANOCORE" } -''' +""" -folders_json_template = ''' +folders_json_template = """ { "scope": "global", "type": "az-universal", @@ -49,9 +49,9 @@ "feed": "ext_dep_unit_test_feed", "pat_var": "PAT_FOR_UNIVERSAL_ORG_TIANOCORE" } -''' +""" -file_filter_json_template = ''' +file_filter_json_template = """ { "scope": "global", "type": "az-universal", @@ -63,9 +63,9 @@ "file-filter": "folder2/*.txt", "pat_var": "PAT_FOR_UNIVERSAL_ORG_TIANOCORE" } -''' +""" -zip_json_template = ''' +zip_json_template = """ { "scope": "global", "type": "az-universal", @@ -78,9 +78,9 @@ "internal_path": "hello-world-zip", "pat_var": "PAT_FOR_UNIVERSAL_ORG_TIANOCORE" } -''' +""" -zip_json_template2 = ''' +zip_json_template2 = """ { "scope": "global", "type": "az-universal", @@ -93,7 +93,7 @@ "internal_path": "/", "pat_var": "PAT_FOR_UNIVERSAL_ORG_TIANOCORE" } -''' +""" def prep_workspace(): @@ -123,7 +123,7 @@ def setUp(self): @classmethod def setUpClass(cls): - logger = logging.getLogger('') + logger = logging.getLogger("") logger.addHandler(logging.NullHandler()) unittest.installHandler() @@ -136,8 +136,10 @@ def tearDown(self): version_aggregator.GetVersionAggregator().Reset() # good case - @unittest.skipIf("PAT_FOR_UNIVERSAL_ORG_TIANOCORE" not in os.environ.keys(), - "PAT not defined therefore universal packages tests will fail") + @unittest.skipIf( + "PAT_FOR_UNIVERSAL_ORG_TIANOCORE" not in os.environ.keys(), + "PAT not defined therefore universal packages tests will fail", + ) def test_download_good_universal_dependency_single_file(self): version = "0.0.1" ext_dep_file_path = os.path.join(test_dir, "unit_test_ext_dep.json") @@ -153,8 +155,10 @@ def test_download_good_universal_dependency_single_file(self): ext_dep.clean() # good case - @unittest.skipIf("PAT_FOR_UNIVERSAL_ORG_TIANOCORE" not in os.environ.keys(), - "PAT not defined therefore universal packages tests will fail") + @unittest.skipIf( + "PAT_FOR_UNIVERSAL_ORG_TIANOCORE" not in os.environ.keys(), + "PAT not defined therefore universal packages tests will fail", + ) def 
test_download_good_universal_dependency_folders_pinned_old_version(self): version = "0.2.0" ext_dep_file_path = os.path.join(test_dir, "unit_test_ext_dep.json") @@ -170,8 +174,10 @@ def test_download_good_universal_dependency_folders_pinned_old_version(self): ext_dep.clean() # good case - @unittest.skipIf("PAT_FOR_UNIVERSAL_ORG_TIANOCORE" not in os.environ.keys(), - "PAT not defined therefore universal packages tests will fail") + @unittest.skipIf( + "PAT_FOR_UNIVERSAL_ORG_TIANOCORE" not in os.environ.keys(), + "PAT not defined therefore universal packages tests will fail", + ) def test_download_good_universal_dependency_folders_newer_version(self): version = "0.2.1" ext_dep_file_path = os.path.join(test_dir, "unit_test_ext_dep.json") @@ -187,8 +193,10 @@ def test_download_good_universal_dependency_folders_newer_version(self): ext_dep.clean() # good case - @unittest.skipIf("PAT_FOR_UNIVERSAL_ORG_TIANOCORE" not in os.environ.keys(), - "PAT not defined therefore universal packages tests will fail") + @unittest.skipIf( + "PAT_FOR_UNIVERSAL_ORG_TIANOCORE" not in os.environ.keys(), + "PAT not defined therefore universal packages tests will fail", + ) def test_download_good_universal_dependency_folders_file_filter(self): version = "0.2.1" ext_dep_file_path = os.path.join(test_dir, "unit_test_ext_dep.json") @@ -206,7 +214,7 @@ def test_download_good_universal_dependency_folders_file_filter(self): files = 0 folders = 0 - for (dirpath, dirs, file_names) in os.walk(ext_dep.contents_dir): + for dirpath, dirs, file_names in os.walk(ext_dep.contents_dir): files += len(file_names) folders += len(dirs) @@ -217,8 +225,10 @@ def test_download_good_universal_dependency_folders_file_filter(self): ext_dep.clean() # bad case - @unittest.skipIf("PAT_FOR_UNIVERSAL_ORG_TIANOCORE" not in os.environ.keys(), - "PAT not defined therefore universal packages tests will fail") + @unittest.skipIf( + "PAT_FOR_UNIVERSAL_ORG_TIANOCORE" not in os.environ.keys(), + "PAT not defined therefore universal packages tests will fail", + ) def test_download_bad_universal_dependency(self): non_existing_version = "0.1.0" ext_dep_file_path = os.path.join(test_dir, "unit_test_ext_dep.json") @@ -231,8 +241,10 @@ def test_download_bad_universal_dependency(self): ext_dep.fetch() self.assertFalse(ext_dep.verify()) - @unittest.skipIf("PAT_FOR_UNIVERSAL_ORG_TIANOCORE" not in os.environ.keys(), - "PAT not defined therefore universal packages tests will fail") + @unittest.skipIf( + "PAT_FOR_UNIVERSAL_ORG_TIANOCORE" not in os.environ.keys(), + "PAT not defined therefore universal packages tests will fail", + ) def test_download_and_unzip(self): version = "0.0.1" ext_dep_file_path = os.path.join(test_dir, "unit_test_ext_dep.json") @@ -247,20 +259,22 @@ def test_download_and_unzip(self): files = 0 folders = 0 - for (_, dirs, file_names) in os.walk(ext_dep.contents_dir): + for _, dirs, file_names in os.walk(ext_dep.contents_dir): for file in file_names: - assert file in ['extdep_state.yaml', 'helloworld.txt'] + assert file in ["extdep_state.yaml", "helloworld.txt"] files += len(file_names) folders += len(dirs) - self.assertEqual(files, 2) # yaml file and moved files. + self.assertEqual(files, 2) # yaml file and moved files. 
self.assertEqual(folders, 0) ext_dep.clean() - @unittest.skipIf("PAT_FOR_UNIVERSAL_ORG_TIANOCORE" not in os.environ.keys(), - "PAT not defined therefore universal packages tests will fail") + @unittest.skipIf( + "PAT_FOR_UNIVERSAL_ORG_TIANOCORE" not in os.environ.keys(), + "PAT not defined therefore universal packages tests will fail", + ) def test_download_and_unzip2(self): version = "0.0.1" ext_dep_file_path = os.path.join(test_dir, "unit_test_ext_dep.json") @@ -275,14 +289,14 @@ def test_download_and_unzip2(self): files = 0 folders = 0 - for (_, dirs, file_names) in os.walk(ext_dep.contents_dir): + for _, dirs, file_names in os.walk(ext_dep.contents_dir): for file in file_names: - assert file in ['extdep_state.yaml', 'helloworld.txt'] + assert file in ["extdep_state.yaml", "helloworld.txt"] files += len(file_names) folders += len(dirs) - self.assertEqual(files, 2) # yaml file and moved files. - self.assertEqual(folders, 1) # helloworld.txt is in a folder, because the internal path is "/" + self.assertEqual(files, 2) # yaml file and moved files. + self.assertEqual(folders, 1) # helloworld.txt is in a folder, because the internal path is "/" ext_dep.clean() @@ -290,5 +304,5 @@ def test_az_tool_environment(self): AzureCliUniversalDependency.VerifyToolDependencies() -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests.unit/test_ci_build_plugin.py b/tests.unit/test_ci_build_plugin.py index 11c9c448..e793c135 100644 --- a/tests.unit/test_ci_build_plugin.py +++ b/tests.unit/test_ci_build_plugin.py @@ -14,7 +14,6 @@ class TestICiBuildPlugin(unittest.TestCase): - def __init__(self, *args, **kwargs): self.test_dir = None super().__init__(*args, **kwargs) diff --git a/tests.unit/test_codeql.py b/tests.unit/test_codeql.py index 5ad0b6ea..b06b7238 100644 --- a/tests.unit/test_codeql.py +++ b/tests.unit/test_codeql.py @@ -28,28 +28,26 @@ def test_codeql_option(self): self.assertFalse(args.codeql) # Test that the option sets the value to True - args_with_option = parser.parse_args(['--codeql']) + args_with_option = parser.parse_args(["--codeql"]) self.assertTrue(args_with_option.codeql) - @patch('edk2toolext.codeql.GetHostInfo') + @patch("edk2toolext.codeql.GetHostInfo") def test_codeql_enabled_linux(self, mock_host_info): """Tests that the proper scope is returned on a Linux host.""" mock_host_info.return_value.os = "Linux" result = codeql.get_scopes(codeql_enabled=True) - expected_result = ("codeql-linux-ext-dep", "codeql-build", - "codeql-analyze") + expected_result = ("codeql-linux-ext-dep", "codeql-build", "codeql-analyze") self.assertEqual(result, expected_result) - @patch('edk2toolext.codeql.GetHostInfo') + @patch("edk2toolext.codeql.GetHostInfo") def test_codeql_enabled_windows(self, mock_host_info): """Tests that the proper scope is returned on a Windows host.""" mock_host_info.return_value.os = "Windows" result = codeql.get_scopes(codeql_enabled=True) - expected_result = ("codeql-windows-ext-dep", "codeql-build", - "codeql-analyze") + expected_result = ("codeql-windows-ext-dep", "codeql-build", "codeql-analyze") self.assertEqual(result, expected_result) - @patch('edk2toolext.codeql.GetHostInfo') + @patch("edk2toolext.codeql.GetHostInfo") def test_codeql_disabled(self, mock_host_info): """Tests that the proper scopes are returned if CodeQL is disabled.""" result = codeql.get_scopes(codeql_enabled=False) @@ -72,7 +70,4 @@ def test_set_audit_only_mode(self): """Tests that CodeQL audit mode is enabled as expected.""" mock_uefi_builder = Mock() 
        codeql.set_audit_only_mode(mock_uefi_builder)
-        mock_uefi_builder.env.SetValue.assert_called_once_with(
-            "STUART_CODEQL_AUDIT_ONLY",
-            "true",
-            "Platform Defined")
+        mock_uefi_builder.env.SetValue.assert_called_once_with("STUART_CODEQL_AUDIT_ONLY", "true", "Platform Defined")
diff --git a/tests.unit/test_conf_mgmt.py b/tests.unit/test_conf_mgmt.py
index 2b50e7fc..ff089a55 100644
--- a/tests.unit/test_conf_mgmt.py
+++ b/tests.unit/test_conf_mgmt.py
@@ -13,7 +13,7 @@
 import logging
 from edk2toolext.environment import conf_mgmt
 
-test_file_text = '''#
+test_file_text = """#
 # Copyright (c) 2006 - 2018, Intel Corporation. All rights reserved.
# Portions copyright (c) 2008 - 2009, Apple Inc. All rights reserved.
# Portions copyright (c) 2011 - 2014, ARM Ltd. All rights reserved.
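
Only the string delimiter changes in the `test_file_text` hunks: the formatter prefers `"""` over `'''` and leaves the enclosed template text intact. A small hypothetical sketch of the same rewrite (`template` is an illustrative name):

    # Before: triple single quotes.
    template = '''#
    # body of a multi-line template
    '''

    # After `ruff format`: triple double quotes; the enclosed text is unchanged.
    template = """#
    # body of a multi-line template
    """
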
@@ -39,7 +39,7 @@ # common path macros DEFINE VS2008_BIN = ENV(VS2008_PREFIX) -''' +""" class TestConfMgmt(unittest.TestCase): @@ -114,8 +114,7 @@ def test_no_version_tag(self): self.assertEqual(c, "0.0") def test_invalid_version(self): - invalid_versions = ["1.2.3.4", "1.2.3", "Hello.1", "1.jk", "", - "Wow", "Unknown"] + invalid_versions = ["1.2.3.4", "1.2.3", "Hello.1", "1.jk", "", "Wow", "Unknown"] p = os.path.join(self.test_dir, "test.txt") for a in invalid_versions: @@ -262,5 +261,5 @@ def test_no_templates(self): conf_mgmt.ConfMgmt().populate_conf_dir(conf, False, [temp]) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests.unit/test_edk2_ci_build.py b/tests.unit/test_edk2_ci_build.py index a6b8f47e..6f85884d 100644 --- a/tests.unit/test_edk2_ci_build.py +++ b/tests.unit/test_edk2_ci_build.py @@ -19,7 +19,6 @@ class TestEdk2CiBuild(unittest.TestCase): - minimalTree = None def setUp(self): @@ -49,11 +48,11 @@ def tearDownClass(cls): @classmethod def restart_logging(cls): - ''' + """ We restart logging as logging is closed at the end of edk2 invocables. We also initialize it at the start. Reloading is the easiest way to get fresh state - ''' + """ logging.shutdown() reload(logging) @@ -73,33 +72,28 @@ def test_ci_build(self): self.assertTrue(os.path.exists(os.path.join(self.minimalTree, "Build"))) def test_merge_config(self): - descriptor = {'descriptor_file': 'C:\\MyRepo\\.pytool\\Plugin\\MyPlugin1\\MyPlugin1_plug_in.yaml', - 'module': 'MyPlugin1', - 'name': 'My Plugin 1', - 'scope': 'cibuild'} + descriptor = { + "descriptor_file": "C:\\MyRepo\\.pytool\\Plugin\\MyPlugin1\\MyPlugin1_plug_in.yaml", + "module": "MyPlugin1", + "name": "My Plugin 1", + "scope": "cibuild", + } global_config = { "MyPlugin1": { - "MySetting1": 'global value 1', - "MySetting2": 'global value 2', + "MySetting1": "global value 1", + "MySetting2": "global value 2", }, - "MyPlugin2": { - "MySetting2": 'global value 2' - } + "MyPlugin2": {"MySetting2": "global value 2"}, } package_config = { - "MyPlugin1": { - "MySetting1": 'package value 1', - "MySetting3": 'package value 3' - }, - "MyPlugin3": { - "MySetting3": 'package value 3' - } + "MyPlugin1": {"MySetting1": "package value 1", "MySetting3": "package value 3"}, + "MyPlugin3": {"MySetting3": "package value 3"}, } merged_config = { - "MySetting1": 'package value 1', - "MySetting2": 'global value 2', - "MySetting3": 'package value 3' + "MySetting1": "package value 1", + "MySetting2": "global value 2", + "MySetting3": "package value 3", } self.assertDictEqual(Edk2CiBuild.merge_config(global_config, {}, descriptor), global_config["MyPlugin1"]) diff --git a/tests.unit/test_edk2_ci_setup.py b/tests.unit/test_edk2_ci_setup.py index 33c49e07..13e8b920 100644 --- a/tests.unit/test_edk2_ci_setup.py +++ b/tests.unit/test_edk2_ci_setup.py @@ -19,7 +19,6 @@ class TestEdk2CiSetup(unittest.TestCase): - minimalTree = None def setUp(self): @@ -40,11 +39,11 @@ def tearDown(self): @classmethod def restart_logging(cls): - ''' + """ We restart logging as logging is closed at the end of edk2 invocables. We also initialize it at the start. 
Reloading is the easiest way to get fresh state - ''' + """ logging.shutdown() reload(logging) diff --git a/tests.unit/test_edk2_invocable.py b/tests.unit/test_edk2_invocable.py index b6f42903..19b0e13c 100644 --- a/tests.unit/test_edk2_invocable.py +++ b/tests.unit/test_edk2_invocable.py @@ -18,8 +18,7 @@ class TestEdk2Invocable(unittest.TestCase): """Tests for the Edk2Invocable module.""" @classmethod - def _mock_rust_tool_run_cmd_valid(cls, tool_name: str, tool_params: str, - **kwargs: dict[str, any]): + def _mock_rust_tool_run_cmd_valid(cls, tool_name: str, tool_params: str, **kwargs: dict[str, any]): """Returns a set of expected Rust tool versions. Args: @@ -30,20 +29,19 @@ def _mock_rust_tool_run_cmd_valid(cls, tool_name: str, tool_params: str, Returns: int: 0 for successful tool invocation. Non-zero for unsuccessful. """ - if tool_name == 'cargo' and tool_params == '--version': - kwargs['outstream'].write("cargo 1.10.0") + if tool_name == "cargo" and tool_params == "--version": + kwargs["outstream"].write("cargo 1.10.0") return 0 - elif tool_name == 'cargo' and tool_params == 'make --version': - kwargs['outstream'].write("cargo make 0.30.0 (abc1234)") + elif tool_name == "cargo" and tool_params == "make --version": + kwargs["outstream"].write("cargo make 0.30.0 (abc1234)") return 0 - elif tool_name == 'rustc': - kwargs['outstream'].write("rustc 1.10.1") + elif tool_name == "rustc": + kwargs["outstream"].write("rustc 1.10.1") return 0 return 1 @classmethod - def _mock_rust_tool_run_cmd_invalid(cls, tool_name: str, tool_params: str, - **kwargs: dict[str, any]): + def _mock_rust_tool_run_cmd_invalid(cls, tool_name: str, tool_params: str, **kwargs: dict[str, any]): """Returns an unexpected tool version. Args: @@ -54,12 +52,11 @@ def _mock_rust_tool_run_cmd_invalid(cls, tool_name: str, tool_params: str, Returns: int: 0 for successful tool invocation. """ - kwargs['outstream'].write("unknown version format") + kwargs["outstream"].write("unknown version format") return 0 @classmethod - def _mock_rust_tool_run_cmd_missing(cls, tool_name: str, tool_params: str, - **kwargs: dict[str, any]): + def _mock_rust_tool_run_cmd_missing(cls, tool_name: str, tool_params: str, **kwargs: dict[str, any]): """Returns an unexpected tool version. Args: @@ -70,10 +67,10 @@ def _mock_rust_tool_run_cmd_missing(cls, tool_name: str, tool_params: str, Returns: int: 1 indicating an error. """ - kwargs['outstream'].write(" is not a recognized command.") + kwargs["outstream"].write(" is not a recognized command.") return 1 - @patch('edk2toolext.edk2_invocable.RunCmd') + @patch("edk2toolext.edk2_invocable.RunCmd") def test_collect_rust_info_unknown_ver(self, mock_run_cmd: MagicMock): """Verifies a Rust tool with an unknown format raises an exception. 
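# A minimal sketch of how the three _mock_rust_tool_run_cmd_* helpers above
# are wired into the tests that follow ("test_example" is a hypothetical
# name; everything else comes from this file):
#
#     @patch("edk2toolext.edk2_invocable.RunCmd")
#     def test_example(self, mock_run_cmd: MagicMock):
#         mock_run_cmd.side_effect = self._mock_rust_tool_run_cmd_valid
#         Edk2Invocable.collect_rust_info()
#
# RunCmd is patched where edk2_invocable imports it, and the side_effect
# writes a canned version string to the provided outstream, so
# collect_rust_info() parses it exactly as if the real tool had run.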
@@ -88,14 +85,11 @@ def test_collect_rust_info_unknown_ver(self, mock_run_cmd: MagicMock): with self.assertRaises(Exception) as context: Edk2Invocable.collect_rust_info() - self.assertTrue("version format is unexpected and cannot be parsed" - in str(context.exception)) + self.assertTrue("version format is unexpected and cannot be parsed" in str(context.exception)) - @patch('edk2toolext.edk2_invocable.RunCmd') - @patch('edk2toolext.edk2_invocable.version_aggregator.GetVersionAggregator') - def test_collect_rust_info_missing_tool(self, - mock_get_version_aggregator: MagicMock, - mock_run_cmd: MagicMock): + @patch("edk2toolext.edk2_invocable.RunCmd") + @patch("edk2toolext.edk2_invocable.version_aggregator.GetVersionAggregator") + def test_collect_rust_info_missing_tool(self, mock_get_version_aggregator: MagicMock, mock_run_cmd: MagicMock): """Verifies a missing Rust tool returns N/A. Some repos may not use the Rust and the users of those repos do not @@ -122,11 +116,9 @@ def test_collect_rust_info_missing_tool(self, mock_version_aggregator.ReportVersion.assert_has_calls(calls, any_order=True) - @patch('edk2toolext.edk2_invocable.RunCmd') - @patch('edk2toolext.edk2_invocable.version_aggregator.GetVersionAggregator') - def test_collect_rust_info_known_ver(self, - mock_get_version_aggregator: MagicMock, - mock_run_cmd: MagicMock): + @patch("edk2toolext.edk2_invocable.RunCmd") + @patch("edk2toolext.edk2_invocable.version_aggregator.GetVersionAggregator") + def test_collect_rust_info_known_ver(self, mock_get_version_aggregator: MagicMock, mock_run_cmd: MagicMock): """Verifies Rust tools with an expected format are successful. Verifies the tool information is passed to the version aggregator as @@ -149,7 +141,7 @@ def test_collect_rust_info_known_ver(self, calls = [ (("cargo", "1.10.0", version_aggregator.VersionTypes.TOOL),), (("cargo make", "0.30.0", version_aggregator.VersionTypes.TOOL),), - (("rustc", "1.10.1", version_aggregator.VersionTypes.TOOL),) + (("rustc", "1.10.1", version_aggregator.VersionTypes.TOOL),), ] mock_version_aggregator.ReportVersion.assert_has_calls(calls, any_order=True) diff --git a/tests.unit/test_edk2_logging.py b/tests.unit/test_edk2_logging.py index fcef7b73..8e6a29b9 100644 --- a/tests.unit/test_edk2_logging.py +++ b/tests.unit/test_edk2_logging.py @@ -15,7 +15,6 @@ class Test_edk2_logging(unittest.TestCase): - def test_can_create_console_logger(self): console_logger = edk2_logging.setup_console_logging(False, False) self.assertIsNot(console_logger, None, "We created a console logger") @@ -50,16 +49,20 @@ def test_can_close_logger(self): def test_scan_compiler_output_generic(self): # Input with compiler errors and warnings - output_stream = io.StringIO("error A1: error 1 details\n" - "warning B2: warning 2 details\n" - "error C3: error 3 details\n" - "warning D4: warning 4 details\n" - "fatal error: details\n") - expected_output = [(logging.ERROR, "Compiler #1 from error 1 details"), - (logging.WARNING, "Compiler #2 from warning 2 details"), - (logging.ERROR, "Compiler #3 from error 3 details"), - (logging.WARNING, "Compiler #4 from warning 4 details"), - (logging.ERROR, "fatal error: details")] + output_stream = io.StringIO( + "error A1: error 1 details\n" + "warning B2: warning 2 details\n" + "error C3: error 3 details\n" + "warning D4: warning 4 details\n" + "fatal error: details\n" + ) + expected_output = [ + (logging.ERROR, "Compiler #1 from error 1 details"), + (logging.WARNING, "Compiler #2 from warning 2 details"), + (logging.ERROR, "Compiler #3 from error 3 
details"), + (logging.WARNING, "Compiler #4 from warning 4 details"), + (logging.ERROR, "fatal error: details"), + ] self.assertEqual(edk2_logging.scan_compiler_output(output_stream), expected_output) # Input with no issue (empty string) @@ -78,40 +81,53 @@ def test_scan_compiler_output_generic(self): self.assertEqual(edk2_logging.scan_compiler_output(output_stream), expected_output) # Input with only compiler warnings - output_stream = io.StringIO("warning C8: warning details...\n" - "warning D10: info about the issue\n") - expected_output = [(logging.WARNING, "Compiler #8 from warning details..."), - (logging.WARNING, "Compiler #10 from info about the issue")] + output_stream = io.StringIO( + "warning C8: warning details...\n" "warning D10: info about the issue\n" + ) + expected_output = [ + (logging.WARNING, "Compiler #8 from warning details..."), + (logging.WARNING, "Compiler #10 from info about the issue"), + ] self.assertEqual(edk2_logging.scan_compiler_output(output_stream), expected_output) # Input with only compiler errors - output_stream = io.StringIO("dir/file.c error T4: uninitialized variable c...\n" - "dir1/dir2/file1.c error B2: duplicate symbol xyz.\n" - "dir1/file_2.h error 5: header file problem") - expected_output = [(logging.ERROR, "Compiler #4 from dir/file.c uninitialized variable c..."), - (logging.ERROR, "Compiler #2 from dir1/dir2/file1.c duplicate symbol xyz."), - (logging.ERROR, "Compiler #5 from dir1/file_2.h header file problem")] + output_stream = io.StringIO( + "dir/file.c error T4: uninitialized variable c...\n" + "dir1/dir2/file1.c error B2: duplicate symbol xyz.\n" + "dir1/file_2.h error 5: header file problem" + ) + expected_output = [ + (logging.ERROR, "Compiler #4 from dir/file.c uninitialized variable c..."), + (logging.ERROR, "Compiler #2 from dir1/dir2/file1.c duplicate symbol xyz."), + (logging.ERROR, "Compiler #5 from dir1/file_2.h header file problem"), + ] self.assertEqual(edk2_logging.scan_compiler_output(output_stream), expected_output) # Input with near matches that should not match - output_stream = io.StringIO("source.c error A1 error 1 details.\n" - "source warning D6 warning 6 details\n" - "source.obj LNK4: linker 4 details\n" - "script.py 5E: build 5 details\n") + output_stream = io.StringIO( + "source.c error A1 error 1 details.\n" + "source warning D6 warning 6 details\n" + "source.obj LNK4: linker 4 details\n" + "script.py 5E: build 5 details\n" + ) expected_output = [] self.assertEqual(edk2_logging.scan_compiler_output(output_stream), expected_output) # Test input with different error types - output_stream = io.StringIO("source.c error A1: error 1 details\n" - "source.c warning B2: warning 2 details\n" - "source.dsc error F3: error 3 details\n" - "source.obj error LNK4: linker 4 details\n" - "script.py error 5E: build 5 details\n") - expected_output = [(logging.ERROR, "Compiler #1 from source.c error 1 details"), - (logging.WARNING, "Compiler #2 from source.c warning 2 details"), - (logging.ERROR, "EDK2 #3 from source.dsc error 3 details"), - (logging.ERROR, "Linker #4 from source.obj linker 4 details"), - (logging.ERROR, "Build.py #5 from script.py build 5 details")] + output_stream = io.StringIO( + "source.c error A1: error 1 details\n" + "source.c warning B2: warning 2 details\n" + "source.dsc error F3: error 3 details\n" + "source.obj error LNK4: linker 4 details\n" + "script.py error 5E: build 5 details\n" + ) + expected_output = [ + (logging.ERROR, "Compiler #1 from source.c error 1 details"), + (logging.WARNING, "Compiler #2 
from source.c warning 2 details"), + (logging.ERROR, "EDK2 #3 from source.dsc error 3 details"), + (logging.ERROR, "Linker #4 from source.obj linker 4 details"), + (logging.ERROR, "Build.py #5 from script.py build 5 details"), + ] self.assertEqual(edk2_logging.scan_compiler_output(output_stream), expected_output) def test_scan_compiler_output_vs_actual(self): @@ -135,14 +151,28 @@ def test_scan_compiler_output_vs_actual(self): build.py... : error F002: Failed to build module d:\\a\\1\\s\\SetupDataPkg\\ConfApp\\ConfApp.inf [X64, VS2022, DEBUG] - """) # noqa: E501 - - expected_output = [(logging.ERROR, "Linker #2001 from UefiApplicationEntryPoint.lib(ApplicationEntryPoint.obj) : unresolved external symbol __security_check_cookie"), # noqa: E501 - (logging.ERROR, "Linker #2001 from ConfApp.lib(SetupConf.obj) : unresolved external symbol __report_rangecheckfailure"), # noqa: E501 - (logging.ERROR, "Linker #1120 from d:\\a\\1\\s\\Build\\SetupDataPkg\\DEBUG_VS2022\\X64\\SetupDataPkg\\ConfApp\\ConfApp\\DEBUG\\ConfApp.dll : fatal 2 unresolved externals"), # noqa: E501 - (logging.ERROR, "Compiler #1077 from NMAKE : fatal \'\"C:\\Program Files\\Microsoft Visual Studio\\2022\\Enterprise\\VC\\Tools\\MSVC\\14.34.31933\\bin\\Hostx86\\x64\\link.exe\"\' : return code \'0x460\'"), # noqa: E501 - (logging.ERROR, "Compiler #7000 from : Failed to execute command"), # noqa: E501 - (logging.ERROR, "EDK2 #002 from : Failed to build module")] + """) # noqa: E501 + + expected_output = [ + ( + logging.ERROR, + "Linker #2001 from UefiApplicationEntryPoint.lib(ApplicationEntryPoint.obj) : unresolved external symbol __security_check_cookie", + ), # noqa: E501 + ( + logging.ERROR, + "Linker #2001 from ConfApp.lib(SetupConf.obj) : unresolved external symbol __report_rangecheckfailure", + ), # noqa: E501 + ( + logging.ERROR, + "Linker #1120 from d:\\a\\1\\s\\Build\\SetupDataPkg\\DEBUG_VS2022\\X64\\SetupDataPkg\\ConfApp\\ConfApp\\DEBUG\\ConfApp.dll : fatal 2 unresolved externals", + ), # noqa: E501 + ( + logging.ERROR, + "Compiler #1077 from NMAKE : fatal '\"C:\\Program Files\\Microsoft Visual Studio\\2022\\Enterprise\\VC\\Tools\\MSVC\\14.34.31933\\bin\\Hostx86\\x64\\link.exe\"' : return code '0x460'", + ), # noqa: E501 + (logging.ERROR, "Compiler #7000 from : Failed to execute command"), # noqa: E501 + (logging.ERROR, "EDK2 #002 from : Failed to build module"), + ] self.assertEqual(edk2_logging.scan_compiler_output(output_stream), expected_output) def test_scan_compiler_output_vs_linker_actual(self): @@ -154,11 +184,22 @@ def test_scan_compiler_output_vs_linker_actual(self): 1 file(s) copied. 
copy /y d:/a/1/s/Build/MdeModule/RELEASE_VS2022/IA32/MdeModulePkg/Universal/LegacyRegion2Dxe/LegacyRegion2Dxe/DEBUG/*.pdb d:/a/1/s/Build/MdeModule/RELEASE_VS2022/IA32/MdeModulePkg/Universal/LegacyRegion2Dxe/LegacyRegion2Dxe/OUTPUT NMAKE : fatal error U1077: '"C:/Program Files/Microsoft Visual Studio/2022/Enterprise/VC/Tools/MSVC/14.34.31933/bin/Hostx86/x86/link.exe"' : return code '0x460' - """) # noqa: E501 - - expected_output = [(logging.ERROR, "Linker #2001 from SdMmcPciHcPei.lib(SdMmcPciHcPei.obj) : unresolved external symbol _SafeUint8Add"), # noqa: E501 - (logging.ERROR, "Linker #1120 from d:/a/1/s/Build/MdeModule/RELEASE_VS2022/IA32/MdeModulePkg/Bus/Pci/SdMmcPciHcPei/SdMmcPciHcPei/DEBUG/SdMmcPciHcPei.dll : fatal 1 unresolved externals"), # noqa: E501 - (logging.ERROR, "Compiler #1077 from NMAKE : fatal \'\"C:/Program Files/Microsoft Visual Studio/2022/Enterprise/VC/Tools/MSVC/14.34.31933/bin/Hostx86/x86/link.exe\"\' : return code \'0x460\'")] # noqa: E501 + """) # noqa: E501 + + expected_output = [ + ( + logging.ERROR, + "Linker #2001 from SdMmcPciHcPei.lib(SdMmcPciHcPei.obj) : unresolved external symbol _SafeUint8Add", + ), # noqa: E501 + ( + logging.ERROR, + "Linker #1120 from d:/a/1/s/Build/MdeModule/RELEASE_VS2022/IA32/MdeModulePkg/Bus/Pci/SdMmcPciHcPei/SdMmcPciHcPei/DEBUG/SdMmcPciHcPei.dll : fatal 1 unresolved externals", + ), # noqa: E501 + ( + logging.ERROR, + "Compiler #1077 from NMAKE : fatal '\"C:/Program Files/Microsoft Visual Studio/2022/Enterprise/VC/Tools/MSVC/14.34.31933/bin/Hostx86/x86/link.exe\"' : return code '0x460'", + ), + ] # noqa: E501 self.assertEqual(edk2_logging.scan_compiler_output(output_stream), expected_output) def test_scan_compiler_output_gcc_mixed_actual(self): @@ -185,13 +226,19 @@ def test_scan_compiler_output_gcc_mixed_actual(self): build.py... 
: error F002: Failed to build module /__w/1/s/SetupDataPkg/Library/ConfigVariableListLib/ConfigVariableListLib.inf [AARCH64, GCC5, DEBUG] - """) # noqa: E501 - - expected_output = [(logging.ERROR, "Compiler #error from /__w/1/s/SetupDataPkg/Library/ConfigVariableListLib/ConfigVariableListLib.c conflicting types for `ConvertVariableListToVariableEntry`; have `EFI_STATUS(const void *, UINTN *, CONFIG_VAR_LIST_ENTRY *)` {aka `long long unsigned int(const void *, long long unsigned int *, CONFIG_VAR_LIST_ENTRY *)`}"), # noqa: E501 - (logging.ERROR, "Compiler #7000 from : Failed to execute command"), - (logging.ERROR, "EDK2 #002 from : Failed to build module")] + """) # noqa: E501 + + expected_output = [ + ( + logging.ERROR, + "Compiler #error from /__w/1/s/SetupDataPkg/Library/ConfigVariableListLib/ConfigVariableListLib.c conflicting types for `ConvertVariableListToVariableEntry`; have `EFI_STATUS(const void *, UINTN *, CONFIG_VAR_LIST_ENTRY *)` {aka `long long unsigned int(const void *, long long unsigned int *, CONFIG_VAR_LIST_ENTRY *)`}", + ), # noqa: E501 + (logging.ERROR, "Compiler #7000 from : Failed to execute command"), + (logging.ERROR, "EDK2 #002 from : Failed to build module"), + ] self.assertEqual(edk2_logging.scan_compiler_output(output_stream), expected_output) + def test_NO_secret_filter(caplog): end_list = [" ", ",", ";", ":", " "] start_list = [" ", " ", " ", " ", ":"] @@ -202,10 +249,11 @@ def test_NO_secret_filter(caplog): for start, end in zip(start_list, end_list): logging.debug(f"This is a secret{start}{fake_secret}{end}to be caught") - for (record, start, end) in zip(caplog.records, start_list, end_list): + for record, start, end in zip(caplog.records, start_list, end_list): assert record.msg == f"This is a secret{start}{fake_secret}{end}to be caught" caplog.clear() + def test_CI_secret_filter(caplog): caplog.set_level(logging.DEBUG) end_list = [" ", ",", ";", ":", " "] @@ -220,10 +268,11 @@ def test_CI_secret_filter(caplog): for start, end in zip(start_list, end_list): logging.debug(f"This is a secret{start}{fake_secret}{end}to be caught") - for (record, start, end) in zip(caplog.records, start_list, end_list): + for record, start, end in zip(caplog.records, start_list, end_list): assert record.msg == f"This is a secret{start}*******{end}to be caught" caplog.clear() + def test_TF_BUILD_secret_filter(caplog): caplog.set_level(logging.DEBUG) end_list = [" ", ",", ";", ":", " "] @@ -238,10 +287,11 @@ def test_TF_BUILD_secret_filter(caplog): for start, end in zip(start_list, end_list): logging.debug(f"This is a secret{start}{fake_secret}{end}to be caught") - for (record, start, end) in zip(caplog.records, start_list, end_list): + for record, start, end in zip(caplog.records, start_list, end_list): assert record.msg == f"This is a secret{start}*******{end}to be caught" caplog.clear() + # caplog is a pytest fixture that captures log messages def test_catch_secrets_filter(caplog): caplog.set_level(logging.DEBUG) @@ -257,7 +307,7 @@ def test_catch_secrets_filter(caplog): for start, end in zip(start_list, end_list): logging.debug(f"This is a secret{start}{fake_secret}{end}to be caught") - for (record, start, end) in zip(caplog.records, start_list, end_list): + for record, start, end in zip(caplog.records, start_list, end_list): assert record.msg == f"This is a secret{start}*******{end}to be caught" caplog.clear() @@ -266,7 +316,7 @@ def test_catch_secrets_filter(caplog): for start, end in zip(start_list, end_list): logging.debug(f"This is a secret{start}{fake_secret}{end}to be 
caught") - for (record, start, end) in zip(caplog.records, start_list, end_list): + for record, start, end in zip(caplog.records, start_list, end_list): assert record.msg == f"This is a secret{start}{fake_secret}{end}to be caught" caplog.clear() @@ -275,7 +325,7 @@ def test_catch_secrets_filter(caplog): for start, end in zip(start_list, end_list): logging.debug(f"This is a secret{start}{fake_secret}{end}to be caught") - for (record, start, end) in zip(caplog.records, start_list, end_list): + for record, start, end in zip(caplog.records, start_list, end_list): assert record.msg == f"This is a secret{start}*******{end}to be caught" caplog.clear() @@ -284,10 +334,11 @@ def test_catch_secrets_filter(caplog): for start, end in zip(start_list, end_list): logging.debug(f"This is a secret{start}{fake_secret}{end}to be caught") - for (record, start, end) in zip(caplog.records, start_list, end_list): + for record, start, end in zip(caplog.records, start_list, end_list): assert record.msg == f"This is a secret{start}{fake_secret}{end}to be caught" caplog.clear() + def test_scan_compiler_output_rust_scenarios(): output_stream = io.StringIO(r""" error: This should be caught @@ -300,12 +351,13 @@ def test_scan_compiler_output_rust_scenarios(): catch this error --> This should not be caught """) expected_output = [ - (logging.ERROR, 'error: This should be caught'), - (logging.ERROR, 'error[E0605]: This should be caught'), - (logging.ERROR, '--> This should be caught'), + (logging.ERROR, "error: This should be caught"), + (logging.ERROR, "error[E0605]: This should be caught"), + (logging.ERROR, "--> This should be caught"), ] assert edk2_logging.scan_compiler_output(output_stream) == expected_output + def test_scan_compiler_output_rust_actual(): output_stream = io.StringIO(r""" [cargo-make] INFO - cargo make 0.37.1 @@ -335,7 +387,9 @@ def test_scan_compiler_output_rust_actual(): [cargo-make] ERROR - Error while executing command, exit code: 101 [cargo-make] WARN - Build Failed. """) - expected_output = [(logging.ERROR, 'error[E0605]: non-primitive cast: `MemorySpaceDescriptor` as `*mut MemorySpaceDescriptor`'), - (logging.ERROR, r'--> RustCrate\\src/main.rs:248:66'), - (logging.ERROR, 'error: could not compile `RustCrate` (bin "RustCrate") due to previous error')] + expected_output = [ + (logging.ERROR, "error[E0605]: non-primitive cast: `MemorySpaceDescriptor` as `*mut MemorySpaceDescriptor`"), + (logging.ERROR, r"--> RustCrate\\src/main.rs:248:66"), + (logging.ERROR, 'error: could not compile `RustCrate` (bin "RustCrate") due to previous error'), + ] assert edk2_logging.scan_compiler_output(output_stream) == expected_output diff --git a/tests.unit/test_edk2_plat_build.py b/tests.unit/test_edk2_plat_build.py index f12c1a09..ee621d5f 100644 --- a/tests.unit/test_edk2_plat_build.py +++ b/tests.unit/test_edk2_plat_build.py @@ -19,7 +19,6 @@ class TestEdk2PlatBuild(unittest.TestCase): - minimalTree = None def setUp(self): @@ -40,11 +39,11 @@ def tearDown(self): @classmethod def restart_logging(cls): - ''' + """ We restart logging as logging is closed at the end of edk2 invocables. We also initialize it at the start. 
Reloading is the easiest way to get fresh state - ''' + """ logging.shutdown() reload(logging) diff --git a/tests.unit/test_edk2_setup.py b/tests.unit/test_edk2_setup.py index 56b0c1ba..d7f89fe9 100644 --- a/tests.unit/test_edk2_setup.py +++ b/tests.unit/test_edk2_setup.py @@ -145,7 +145,7 @@ def tree(tmpdir): def write_build_file(tree, file): """Writes the requested build file to the base of the tree.""" build_file = tree / "BuildFile.py" - with open(build_file, 'x') as f: + with open(build_file, "x") as f: f.write(file) return build_file @@ -185,7 +185,7 @@ def test_setup_simple_repo(tree: pathlib.Path): # Dirty a submodule file and verify we skip without --FORCE # ############################################################# min_build_file = write_build_file(tree, MIN_BUILD_FILE) - with open(mu_submodule / "License.txt", 'a') as f: + with open(mu_submodule / "License.txt", "a") as f: f.write("TEST") with git.Repo(mu_submodule) as repo: @@ -266,13 +266,15 @@ def test_parse_command_line_options(tree: pathlib.Path): # Test valid command line options empty_build_file = write_build_file(tree, EMPTY_BUILD_FILE) sys.argv = [ - "stuart_setup", "-c", str(empty_build_file), + "stuart_setup", + "-c", + str(empty_build_file), "BLD_*_VAR", "VAR", "BLD_DEBUG_VAR2", "BLD_RELEASE_VAR2", "TEST_VAR=TEST", - "BLD_*_TEST_VAR2=TEST" + "BLD_*_TEST_VAR2=TEST", ] try: edk2_setup.main() @@ -289,11 +291,7 @@ def test_parse_command_line_options(tree: pathlib.Path): # Test invalid command line options for arg in ["BLD_*_VAR=5=10", "BLD_DEBUG_VAR2=5=5", "BLD_RELEASE_VAR3=5=5", "VAR=10=10", "--UnexpectdArg"]: - sys.argv = [ - "stuart_setup", - "-c", str(empty_build_file), - arg - ] + sys.argv = ["stuart_setup", "-c", str(empty_build_file), arg] try: edk2_setup.main() except SystemExit as e: @@ -303,16 +301,18 @@ def test_parse_command_line_options(tree: pathlib.Path): def test_conf_file(tree: pathlib.Path): """Tests that the config file parser works correctly.""" empty_build_file = write_build_file(tree, EMPTY_BUILD_FILE) - build_conf = tree / 'BuildConfig.conf' - with open(build_conf, 'x') as f: - f.writelines([ - "BLD_*_VAR", - "\nVAR", - "\nBLD_DEBUG_VAR2", - "\nBLD_RELEASE_VAR2", - "\nTEST_VAR=TEST", - "\nBLD_*_TEST_VAR2=TEST" - ]) + build_conf = tree / "BuildConfig.conf" + with open(build_conf, "x") as f: + f.writelines( + [ + "BLD_*_VAR", + "\nVAR", + "\nBLD_DEBUG_VAR2", + "\nBLD_RELEASE_VAR2", + "\nTEST_VAR=TEST", + "\nBLD_*_TEST_VAR2=TEST", + ] + ) sys.argv = ["stuart_setup", "-c", str(empty_build_file)] try: @@ -331,14 +331,10 @@ def test_conf_file(tree: pathlib.Path): # Test invalid build config for arg in ["BLD_*_VAR=5=10", "BLD_DEBUG_VAR2=5=5", "BLD_RELEASE_VAR3=5=5", "VAR=10=10"]: build_conf.unlink() - with open(build_conf, 'x') as f: + with open(build_conf, "x") as f: f.writelines([arg]) - sys.argv = [ - "stuart_setup", - "-c", str(empty_build_file), - arg - ] + sys.argv = ["stuart_setup", "-c", str(empty_build_file), arg] try: edk2_setup.main() except SystemExit as e: @@ -352,7 +348,10 @@ def test_backslash_linux(tree: pathlib.Path, caplog): build_file = write_build_file(tree, MIN_BUILD_FILE_BACKSLASH) sys.argv = [ - "stuart_setup", "-c", str(build_file), "--FORCE", + "stuart_setup", + "-c", + str(build_file), + "--FORCE", ] try: @@ -371,7 +370,10 @@ def test_backslash_linux(tree: pathlib.Path, caplog): def test_backslash_windows(tree: pathlib.Path): build_file = write_build_file(tree, MIN_BUILD_FILE_BACKSLASH) sys.argv = [ - "stuart_setup", "-c", str(build_file), "--FORCE", + "stuart_setup", + "-c", 
+ str(build_file), + "--FORCE", ] mu_submodule = tree / "Common" / "MU" diff --git a/tests.unit/test_edk2_update.py b/tests.unit/test_edk2_update.py index e4a97f8e..3eb4c0cc 100644 --- a/tests.unit/test_edk2_update.py +++ b/tests.unit/test_edk2_update.py @@ -20,7 +20,6 @@ class TestEdk2Update(unittest.TestCase): - temp_folders = [] def tearDown(self): @@ -76,7 +75,7 @@ def test_init(self): self.assertIsNotNone(builder) def test_one_level_recursive(self): - ''' makes sure we can do a recursive update ''' + """makes sure we can do a recursive update""" WORKSPACE = self.get_temp_folder() tree = uefi_tree(WORKSPACE) logging.getLogger().setLevel(logging.WARNING) @@ -84,8 +83,11 @@ def test_one_level_recursive(self): # Do the update updater = self.invoke_update(tree.get_settings_provider_path()) # make sure it worked - self.assertTrue(os.path.exists(os.path.join(WORKSPACE, "Edk2TestUpdate_extdep", - "NuGet.CommandLine_extdep", "extdep_state.yaml"))) + self.assertTrue( + os.path.exists( + os.path.join(WORKSPACE, "Edk2TestUpdate_extdep", "NuGet.CommandLine_extdep", "extdep_state.yaml") + ) + ) build_env, shell_env, failure = updater.PerformUpdate() # we should have no failures self.assertEqual(failure, 0) @@ -93,7 +95,7 @@ def test_one_level_recursive(self): self.assertEqual(len(build_env.extdeps), 2) def test_multiple_extdeps(self): - ''' makes sure we can do multiple ext_deps at the same time ''' + """makes sure we can do multiple ext_deps at the same time""" WORKSPACE = self.get_temp_folder() tree = uefi_tree(WORKSPACE) num_of_ext_deps = 5 @@ -112,55 +114,48 @@ def test_multiple_extdeps(self): self.assertEqual(len(build_env.extdeps), num_of_ext_deps) def test_duplicate_ext_deps(self): - ''' verifies redundant ext_deps fail ''' + """verifies redundant ext_deps fail""" WORKSPACE = self.get_temp_folder() tree = uefi_tree(WORKSPACE) logging.getLogger().setLevel(logging.WARNING) - tree.create_ext_dep(dep_type="nuget", - name="NuGet.CommandLine", - version="5.2.0", - dir_path="1", - extra_data={"id:": "CmdLine1"}) - tree.create_ext_dep(dep_type="nuget", - name="NuGet.CommandLine", - version="5.2.0", - dir_path="2", - extra_data={"id:": "CmdLine1"}) + tree.create_ext_dep( + dep_type="nuget", name="NuGet.CommandLine", version="5.2.0", dir_path="1", extra_data={"id:": "CmdLine1"} + ) + tree.create_ext_dep( + dep_type="nuget", name="NuGet.CommandLine", version="5.2.0", dir_path="2", extra_data={"id:": "CmdLine1"} + ) # Do the update. Expect a ValueError from the version aggregator. 
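# (Both descriptors above deliberately share the same name and id, so the
# update reports the same dependency twice; the ValueError asserted below is
# assumed to come from that duplicate report, per the comment above. See
# edk2toolext/environment/version_aggregator.py for the exact rule.)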
with self.assertRaises(ValueError): self.invoke_update(tree.get_settings_provider_path(), failure_expected=True) def test_duplicate_ext_deps_skip_dir(self): - ''' verifies redundant ext_deps pass if one is skipped ''' + """verifies redundant ext_deps pass if one is skipped""" WORKSPACE = self.get_temp_folder() tree = uefi_tree(WORKSPACE) num_of_ext_deps = 1 logging.getLogger().setLevel(logging.WARNING) - tree.create_ext_dep(dep_type="nuget", - name="NuGet.CommandLine", - version="5.2.0", - dir_path="1", - extra_data={"id:": "CmdLine1"}) - tree.create_ext_dep(dep_type="nuget", - name="NuGet.CommandLine", - version="5.2.0", - dir_path="2", - extra_data={"id:": "CmdLine1"}) + tree.create_ext_dep( + dep_type="nuget", name="NuGet.CommandLine", version="5.2.0", dir_path="1", extra_data={"id:": "CmdLine1"} + ) + tree.create_ext_dep( + dep_type="nuget", name="NuGet.CommandLine", version="5.2.0", dir_path="2", extra_data={"id:": "CmdLine1"} + ) # Update GetSkippedDirectories() implementation - with open(tree.get_settings_provider_path(), 'r') as s: + with open(tree.get_settings_provider_path(), "r") as s: settings_text = s.read() settings_text = settings_text.replace( - 'def GetSkippedDirectories(self):\n return ()', - 'def GetSkippedDirectories(self):\n return (\"2\",)') + "def GetSkippedDirectories(self):\n return ()", + 'def GetSkippedDirectories(self):\n return ("2",)', + ) - with open(tree.get_settings_provider_path(), 'w') as s: + with open(tree.get_settings_provider_path(), "w") as s: s.write(settings_text) # Do the update @@ -172,38 +167,45 @@ def test_duplicate_ext_deps_skip_dir(self): self.assertEqual(failure, 0) def test_multiple_duplicate_ext_deps_skip_dir(self): - ''' verifies multiple ext_deps in sub dirs are skipped''' + """verifies multiple ext_deps in sub dirs are skipped""" WORKSPACE = self.get_temp_folder() tree = uefi_tree(WORKSPACE) num_of_ext_deps = 1 logging.getLogger().setLevel(logging.WARNING) - tree.create_ext_dep(dep_type="nuget", - name="NuGet.CommandLine", - version="5.2.0", - dir_path="first/second", - extra_data={"id:": "CmdLine1"}) - tree.create_ext_dep(dep_type="nuget", - name="NuGet.CommandLine", - version="5.2.0", - dir_path="third/fourth/fifth", - extra_data={"id:": "CmdLine1"}) - tree.create_ext_dep(dep_type="nuget", - name="NuGet.CommandLine", - version="5.2.0", - dir_path="sixth/seventh/eighth", - extra_data={"id:": "CmdLine1"}) + tree.create_ext_dep( + dep_type="nuget", + name="NuGet.CommandLine", + version="5.2.0", + dir_path="first/second", + extra_data={"id:": "CmdLine1"}, + ) + tree.create_ext_dep( + dep_type="nuget", + name="NuGet.CommandLine", + version="5.2.0", + dir_path="third/fourth/fifth", + extra_data={"id:": "CmdLine1"}, + ) + tree.create_ext_dep( + dep_type="nuget", + name="NuGet.CommandLine", + version="5.2.0", + dir_path="sixth/seventh/eighth", + extra_data={"id:": "CmdLine1"}, + ) # Update GetSkippedDirectories() implementation - with open(tree.get_settings_provider_path(), 'r') as s: + with open(tree.get_settings_provider_path(), "r") as s: settings_text = s.read() settings_text = settings_text.replace( - 'def GetSkippedDirectories(self):\n return ()', - 'def GetSkippedDirectories(self):\n return (\"third\",\"sixth\")') + "def GetSkippedDirectories(self):\n return ()", + 'def GetSkippedDirectories(self):\n return ("third","sixth")', + ) - with open(tree.get_settings_provider_path(), 'w') as s: + with open(tree.get_settings_provider_path(), "w") as s: s.write(settings_text) # Do the update @@ -215,7 +217,7 @@ def 
test_multiple_duplicate_ext_deps_skip_dir(self): self.assertEqual(failure, 0) def test_bad_ext_dep(self): - ''' makes sure we can do an update that will fail ''' + """makes sure we can do an update that will fail""" WORKSPACE = self.get_temp_folder() tree = uefi_tree(WORKSPACE) logging.getLogger().setLevel(logging.WARNING) @@ -226,18 +228,19 @@ def test_bad_ext_dep(self): build_env, shell_env, failure = updater.PerformUpdate() # we should have no failures self.assertEqual(failure, 1) - + + def test_log_error_on_missing_host_specific_folder(caplog, tmpdir): - ''' make sure we can update host_specific extdeps ''' + """make sure we can update host_specific extdeps""" caplog.set_level(logging.ERROR) tree = uefi_tree(tmpdir) tree.create_ext_dep( - dep_type = 'nuget', - name = 'mu_nasm', - version = '20016.1.1', - source = "https://pkgs.dev.azure.com/projectmu/mu/_packaging/Basetools-Binary/nuget/v3/index.json", - dir_path = "first/", - extra_data={"flags": ['host_specific']} + dep_type="nuget", + name="mu_nasm", + version="20016.1.1", + source="https://pkgs.dev.azure.com/projectmu/mu/_packaging/Basetools-Binary/nuget/v3/index.json", + dir_path="first/", + extra_data={"flags": ["host_specific"]}, ) # Should download everything fine. sys.argv = ["stuart_update", "-c", tree.get_settings_provider_path()] @@ -246,12 +249,12 @@ def test_log_error_on_missing_host_specific_folder(caplog, tmpdir): builder.Invoke() except SystemExit as e: assert e.code == 0 - + extdep_base = Path(tmpdir, "first", "mu_nasm_extdep") assert 6 == len(list(extdep_base.iterdir())) # Delete one of the supported hosts - if os.name == 'nt': + if os.name == "nt": host = extdep_base / "Windows-x86-64" else: host = extdep_base / "Linux-x86-64" diff --git a/tests.unit/test_external_dependency.py b/tests.unit/test_external_dependency.py index f60b5050..faa1e336 100644 --- a/tests.unit/test_external_dependency.py +++ b/tests.unit/test_external_dependency.py @@ -61,7 +61,7 @@ def setUp(self): @classmethod def setUpClass(cls): - logger = logging.getLogger('') + logger = logging.getLogger("") logger.addHandler(logging.NullHandler()) unittest.installHandler() @@ -71,11 +71,11 @@ def tearDownClass(cls): def test_determine_cache_path(self): nuget_desc = copy.copy(NUGET_TEMPLATE) - nuget_desc['version'] = GOOD_VERSION - nuget_desc['descriptor_file'] = os.path.join(TEST_DIR, 'non_file.yaml') + nuget_desc["version"] = GOOD_VERSION + nuget_desc["descriptor_file"] = os.path.join(TEST_DIR, "non_file.yaml") ext_dep = ExtDep(nuget_desc) - cache_path = os.path.join(TEST_DIR, 'stuart_cache') + cache_path = os.path.join(TEST_DIR, "stuart_cache") self.assertIsNone(ext_dep.determine_cache_path()) @@ -83,27 +83,27 @@ def test_determine_cache_path(self): self.assertIsNone(ext_dep.determine_cache_path()) os.makedirs(cache_path) - self.assertTrue(ext_dep.determine_cache_path().startswith(os.path.join(cache_path, 'nuget'))) + self.assertTrue(ext_dep.determine_cache_path().startswith(os.path.join(cache_path, "nuget"))) web_desc = copy.copy(WEB_TEMPLATE) - web_desc['version'] = GOOD_VERSION - web_desc['descriptor_file'] = os.path.join(TEST_DIR, 'non_file.yaml') + web_desc["version"] = GOOD_VERSION + web_desc["descriptor_file"] = os.path.join(TEST_DIR, "non_file.yaml") ext_dep = ExtDep(web_desc) ext_dep.set_global_cache_path(cache_path) - self.assertTrue(ext_dep.determine_cache_path().startswith(os.path.join(cache_path, 'web'))) + self.assertTrue(ext_dep.determine_cache_path().startswith(os.path.join(cache_path, "web"))) def 
test_different_versions_have_different_caches(self): - cache_path = os.path.join(TEST_DIR, 'stuart_cache') + cache_path = os.path.join(TEST_DIR, "stuart_cache") os.makedirs(cache_path) nuget_desc1 = copy.copy(NUGET_TEMPLATE) - nuget_desc1['version'] = GOOD_VERSION - nuget_desc1['descriptor_file'] = os.path.join(TEST_DIR, 'non_file.yaml') + nuget_desc1["version"] = GOOD_VERSION + nuget_desc1["descriptor_file"] = os.path.join(TEST_DIR, "non_file.yaml") ext_dep1 = ExtDep(nuget_desc1) nuget_desc2 = copy.copy(NUGET_TEMPLATE) - nuget_desc2['version'] = "7.0.0" - nuget_desc2['descriptor_file'] = os.path.join(TEST_DIR, 'non_file.yaml') + nuget_desc2["version"] = "7.0.0" + nuget_desc2["descriptor_file"] = os.path.join(TEST_DIR, "non_file.yaml") ext_dep2 = ExtDep(nuget_desc2) ext_dep1.set_global_cache_path(cache_path) @@ -112,18 +112,18 @@ def test_different_versions_have_different_caches(self): self.assertNotEqual(ext_dep1.determine_cache_path(), ext_dep2.determine_cache_path()) def test_different_sources_have_different_caches(self): - cache_path = os.path.join(TEST_DIR, 'stuart_cache') + cache_path = os.path.join(TEST_DIR, "stuart_cache") os.makedirs(cache_path) nuget_desc1 = copy.copy(NUGET_TEMPLATE) - nuget_desc1['version'] = GOOD_VERSION - nuget_desc1['descriptor_file'] = os.path.join(TEST_DIR, 'non_file.yaml') + nuget_desc1["version"] = GOOD_VERSION + nuget_desc1["descriptor_file"] = os.path.join(TEST_DIR, "non_file.yaml") ext_dep1 = ExtDep(nuget_desc1) nuget_desc2 = copy.copy(NUGET_TEMPLATE) - nuget_desc2['version'] = GOOD_VERSION - nuget_desc2['descriptor_file'] = os.path.join(TEST_DIR, 'non_file.yaml') - nuget_desc2['source'] = "https://api.nuget.org/v3/different_index.json" + nuget_desc2["version"] = GOOD_VERSION + nuget_desc2["descriptor_file"] = os.path.join(TEST_DIR, "non_file.yaml") + nuget_desc2["source"] = "https://api.nuget.org/v3/different_index.json" ext_dep2 = ExtDep(nuget_desc2) ext_dep1.set_global_cache_path(cache_path) @@ -132,20 +132,20 @@ def test_different_sources_have_different_caches(self): self.assertNotEqual(ext_dep1.determine_cache_path(), ext_dep2.determine_cache_path()) def test_can_copy_to_cache(self): - cache_path = os.path.join(TEST_DIR, 'stuart_cache') + cache_path = os.path.join(TEST_DIR, "stuart_cache") os.makedirs(cache_path) nuget_desc = copy.copy(NUGET_TEMPLATE) - nuget_desc['version'] = GOOD_VERSION - nuget_desc['descriptor_file'] = os.path.join(TEST_DIR, 'non_file.yaml') + nuget_desc["version"] = GOOD_VERSION + nuget_desc["descriptor_file"] = os.path.join(TEST_DIR, "non_file.yaml") ext_dep = ExtDep(nuget_desc) ext_dep.set_global_cache_path(cache_path) self.assertFalse(os.path.exists(ext_dep.determine_cache_path())) # Create a new directory with a dummy file. 
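# The cache round-trip that this test and test_can_copy_from_cache below
# exercise, in miniature (method names are taken from this file; the
# comments describe behavior inferred from the assertions):
#
#     ext_dep.set_global_cache_path(cache_path)  # opt in to the shared cache
#     ext_dep.copy_to_global_cache(src_dir)      # seed the cache from a dir
#     ext_dep.copy_from_global_cache(dst_dir)    # restore contents elsewhere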
- test_path = os.path.join(TEST_DIR, 'test_path') - test_file = os.path.join(test_path, 'test_file.txt') + test_path = os.path.join(TEST_DIR, "test_path") + test_file = os.path.join(test_path, "test_file.txt") os.makedirs(test_path) with open(test_file, "w") as fp: fp.write("DEADBEEF\n") @@ -154,7 +154,7 @@ def test_can_copy_to_cache(self): ext_dep.copy_to_global_cache(test_path) self.assertTrue(os.path.exists(ext_dep.determine_cache_path())) - copied_file = os.path.join(ext_dep.determine_cache_path(), 'test_file.txt') + copied_file = os.path.join(ext_dep.determine_cache_path(), "test_file.txt") self.assertTrue(os.path.exists(copied_file)) file_contents = None with open(copied_file, "r") as fp: @@ -162,31 +162,31 @@ def test_can_copy_to_cache(self): self.assertTrue("DEADBEEF" in file_contents) def test_can_copy_from_cache(self): - cache_path = os.path.join(TEST_DIR, 'stuart_cache') + cache_path = os.path.join(TEST_DIR, "stuart_cache") os.makedirs(cache_path) nuget_desc = copy.copy(NUGET_TEMPLATE) - nuget_desc['version'] = GOOD_VERSION - nuget_desc['descriptor_file'] = os.path.join(TEST_DIR, 'non_file.yaml') + nuget_desc["version"] = GOOD_VERSION + nuget_desc["descriptor_file"] = os.path.join(TEST_DIR, "non_file.yaml") ext_dep = ExtDep(nuget_desc) ext_dep.set_global_cache_path(cache_path) self.assertFalse(os.path.exists(ext_dep.determine_cache_path())) - test_path = os.path.join(TEST_DIR, 'test_path') - test_file = os.path.join(test_path, 'test_file.txt') + test_path = os.path.join(TEST_DIR, "test_path") + test_file = os.path.join(test_path, "test_file.txt") os.makedirs(test_path) with open(test_file, "w") as fp: fp.write("DEADBEEF\n") ext_dep.copy_to_global_cache(test_path) - test_path2 = os.path.join(TEST_DIR, 'test_path2') + test_path2 = os.path.join(TEST_DIR, "test_path2") self.assertFalse(os.path.exists(test_path2)) ext_dep.copy_from_global_cache(test_path2) self.assertTrue(os.path.exists(test_path2)) - copied_file = os.path.join(test_path2, 'test_file.txt') + copied_file = os.path.join(test_path2, "test_file.txt") self.assertTrue(os.path.exists(copied_file)) file_contents = None with open(copied_file, "r") as fp: @@ -194,5 +194,5 @@ def test_can_copy_from_cache(self): self.assertTrue("DEADBEEF" in file_contents) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests.unit/test_git_dependency.py b/tests.unit/test_git_dependency.py index 9d32c927..5f22dbf6 100644 --- a/tests.unit/test_git_dependency.py +++ b/tests.unit/test_git_dependency.py @@ -23,7 +23,7 @@ short_version = "7fd1a60" short_upper_version = "7FD1A60" -hw_json_template = ''' +hw_json_template = """ { "scope": "global", "type": "git", @@ -32,7 +32,7 @@ "version": "%s", "flags": [] } -''' +""" def prep_workspace(): @@ -62,7 +62,7 @@ def setUp(self): @classmethod def setUpClass(cls): - logger = logging.getLogger('') + logger = logging.getLogger("") logger.addHandler(logging.NullHandler()) unittest.installHandler() @@ -151,7 +151,7 @@ def test_verify_invalid_git_repo(self): ext_dep_descriptor = EDF.ExternDepDescriptor(ext_dep_file_path).descriptor_contents ext_dep = GitDependency(ext_dep_descriptor) os.makedirs(ext_dep._local_repo_root_path, exist_ok=True) - with open(os.path.join(ext_dep._local_repo_root_path, "testfile.txt"), 'a') as my_file: + with open(os.path.join(ext_dep._local_repo_root_path, "testfile.txt"), "a") as my_file: my_file.write("Test code\n") self.assertFalse(ext_dep.verify()) @@ -164,7 +164,7 @@ def test_verify_dirty_git_repo(self): ext_dep = 
GitDependency(ext_dep_descriptor) ext_dep.fetch() # now write a new file - with open(os.path.join(ext_dep._local_repo_root_path, "testfile.txt"), 'a') as my_file: + with open(os.path.join(ext_dep._local_repo_root_path, "testfile.txt"), "a") as my_file: my_file.write("Test code to make repo dirty\n") self.assertFalse(ext_dep.verify()) @@ -218,7 +218,7 @@ def test_clean_dir_but_not_git_repo(self): ext_dep_descriptor = EDF.ExternDepDescriptor(ext_dep_file_path).descriptor_contents ext_dep = GitDependency(ext_dep_descriptor) os.makedirs(ext_dep._local_repo_root_path, exist_ok=True) - with open(os.path.join(ext_dep._local_repo_root_path, "testfile.txt"), 'a') as my_file: + with open(os.path.join(ext_dep._local_repo_root_path, "testfile.txt"), "a") as my_file: my_file.write("Test code\n") ext_dep.clean() self.assertFalse(os.path.isdir(ext_dep.contents_dir)) @@ -233,7 +233,7 @@ def test_clean_dirty_git_repo(self): ext_dep.fetch() self.assertTrue(ext_dep.verify(), "Confirm repo is valid") # now write a new file - with open(os.path.join(ext_dep._local_repo_root_path, "testfile.txt"), 'a') as my_file: + with open(os.path.join(ext_dep._local_repo_root_path, "testfile.txt"), "a") as my_file: my_file.write("Test code to make repo dirty\n") self.assertFalse(ext_dep.verify(), "Confirm repo is dirty") ext_dep.clean() @@ -260,7 +260,7 @@ class TestGitDependencyUrlPatching(unittest.TestCase): "name": "HelloWorld", "source": "https://github.com/octocat/Hello-World.git", "version": "7fd1a60b01f91b314f59955a4e4d4e80d8edf11d", - "flags": [] + "flags": [], } def tearDown(self): @@ -278,35 +278,35 @@ def setUpClass(cls): def test_url_should_not_be_modified_without_env(self): my_test_descriptor = copy.copy(TestGitDependencyUrlPatching.TEST_DESCRIPTOR) # Add the indicator for patching. - my_test_descriptor['url_creds_var'] = 'test_creds_var' + my_test_descriptor["url_creds_var"] = "test_creds_var" # Initialize the GitDependency object. git_dep = GitDependency(my_test_descriptor) # Assert that the URL is identical. - self.assertEqual(git_dep.source, my_test_descriptor['source']) + self.assertEqual(git_dep.source, my_test_descriptor["source"]) def test_url_should_not_be_modified_without_descriptor_field(self): my_test_descriptor = copy.copy(TestGitDependencyUrlPatching.TEST_DESCRIPTOR) env = shell_environment.GetEnvironment() # Add the var to the environment. - env.set_shell_var('test_creds_var', 'my_stuff') + env.set_shell_var("test_creds_var", "my_stuff") # Initialize the GitDependency object. git_dep = GitDependency(my_test_descriptor) # Assert that the URL is identical. - self.assertEqual(git_dep.source, my_test_descriptor['source']) + self.assertEqual(git_dep.source, my_test_descriptor["source"]) def test_url_should_be_modified_if_creds_are_indicated_and_supplied(self): my_test_descriptor = copy.copy(TestGitDependencyUrlPatching.TEST_DESCRIPTOR) # Add the indicator for patching. - my_test_descriptor['url_creds_var'] = 'test_creds_var' + my_test_descriptor["url_creds_var"] = "test_creds_var" env = shell_environment.GetEnvironment() # Add the var to the environment. - env.set_shell_var('test_creds_var', 'my_stuff') + env.set_shell_var("test_creds_var", "my_stuff") # Initialize the GitDependency object. 
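# The rule these three URL-patching tests pin down: the source URL is only
# rewritten when the descriptor names a creds variable AND the shell
# environment defines it. Roughly (a sketch, not the implementation;
# get_shell_var is assumed here as the counterpart of the set_shell_var call
# used above):
#
#     creds = env.get_shell_var(descriptor.get("url_creds_var"))
#     if creds:
#         source = source.replace("https://", f"https://{creds}@", 1)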
git_dep = GitDependency(my_test_descriptor) @@ -315,5 +315,5 @@ def test_url_should_be_modified_if_creds_are_indicated_and_supplied(self): self.assertEqual(git_dep.source, "https://my_stuff@github.com/octocat/Hello-World.git") -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests.unit/test_image_validation.py b/tests.unit/test_image_validation.py index c44221ce..d9aa45af 100644 --- a/tests.unit/test_image_validation.py +++ b/tests.unit/test_image_validation.py @@ -26,6 +26,7 @@ class FileHeader: def __init__(self): self.Machine = 0x8664 + # A Dummy class to represent a PE. @@ -40,9 +41,7 @@ def merge_modified_section_data(self): class TestImageValidationInterface(unittest.TestCase): - def test_add_test(self): - test_manager = IV.TestManager() self.assertEqual(len(test_manager.tests), 0) test_manager.add_test(IV.TestSectionAlignment()) @@ -56,14 +55,8 @@ def test_add_tests(self): def test_test_manager(self): config_data = { - "TARGET_ARCH": { - "X64": "IMAGE_FILE_MACHINE_AMD64" - }, - "IMAGE_FILE_MACHINE_AMD64": { - "DEFAULT": { - "DATA_CODE_SEPARATION": False - } - } + "TARGET_ARCH": {"X64": "IMAGE_FILE_MACHINE_AMD64"}, + "IMAGE_FILE_MACHINE_AMD64": {"DEFAULT": {"DATA_CODE_SEPARATION": False}}, } test_manager = IV.TestManager(config_data=config_data) self.assertEqual(test_manager.config_data, config_data) @@ -82,37 +75,45 @@ def test_write_execute_flags_test(self): 1. If test requirement is not specified, or equal to false, return Result.SKIP 3. Return Result.PASS / Result.FAIL returned based on Characteristic value """ - test_pe0 = PE(sections=[ - Section("S1.1".encode("utf-8"), characteristics=0x80000000), - Section("S1.2".encode("utf-8"), characteristics=0x20000000), - Section("S1.3".encode("utf-8"), characteristics=0x00000000)]) - - test_pe1 = PE(sections=[ - Section("S2.1".encode("utf-8"), characteristics=0xA0000000), - Section("S2.2".encode("utf-8"), characteristics=0x20000000), - Section("S2.3".encode("utf-8"), characteristics=0x00000000)]) - - test_pe2 = PE(sections=[ - Section("S3.1".encode("utf-8"), characteristics=0x20000000), - Section("S3.2".encode("utf-8"), characteristics=0x80000000), - Section("S3.3".encode("utf-8"), characteristics=0xC0000000)]) - - test_pe3 = PE(sections=[ - Section("S4.1".encode("utf-8"), characteristics=0xE0000000)]) - - config_data1 = { - "TARGET_REQUIREMENTS": {"DATA_CODE_SEPARATION": False} - } + test_pe0 = PE( + sections=[ + Section("S1.1".encode("utf-8"), characteristics=0x80000000), + Section("S1.2".encode("utf-8"), characteristics=0x20000000), + Section("S1.3".encode("utf-8"), characteristics=0x00000000), + ] + ) + + test_pe1 = PE( + sections=[ + Section("S2.1".encode("utf-8"), characteristics=0xA0000000), + Section("S2.2".encode("utf-8"), characteristics=0x20000000), + Section("S2.3".encode("utf-8"), characteristics=0x00000000), + ] + ) + + test_pe2 = PE( + sections=[ + Section("S3.1".encode("utf-8"), characteristics=0x20000000), + Section("S3.2".encode("utf-8"), characteristics=0x80000000), + Section("S3.3".encode("utf-8"), characteristics=0xC0000000), + ] + ) + + test_pe3 = PE(sections=[Section("S4.1".encode("utf-8"), characteristics=0xE0000000)]) + + config_data1 = {"TARGET_REQUIREMENTS": {"DATA_CODE_SEPARATION": False}} config_data2 = {"TARGET_REQUIREMENTS": {}} test_write_execute_flags = IV.TestWriteExecuteFlags() - tests = [(test_pe0, IV.Result.PASS), (test_pe1, IV.Result.FAIL), - (test_pe2, IV.Result.PASS), (test_pe3, IV.Result.FAIL)] + tests = [ + (test_pe0, IV.Result.PASS), + (test_pe1, 
IV.Result.FAIL), + (test_pe2, IV.Result.PASS), + (test_pe3, IV.Result.FAIL), + ] - config_data0 = { - "TARGET_REQUIREMENTS": {"DATA_CODE_SEPARATION": True} - } + config_data0 = {"TARGET_REQUIREMENTS": {"DATA_CODE_SEPARATION": True}} # Test set 1 for i in range(len(tests)): @@ -140,40 +141,40 @@ def test_section_alignment_test(self): 3. Return Result.PASS / Result.FAIL returned based on alignment requirements """ - config_data0 = { - "TARGET_REQUIREMENTS": {}, - "TARGET_INFO": {"MACHINE_TYPE": "", "PROFILE": {}} - } - config_data1 = { - "TARGET_REQUIREMENTS": {"ALIGNMENT": []}, - "TARGET_INFO": {"MACHINE_TYPE": "", "PROFILE": {}} - } + config_data0 = {"TARGET_REQUIREMENTS": {}, "TARGET_INFO": {"MACHINE_TYPE": "", "PROFILE": {}}} + config_data1 = {"TARGET_REQUIREMENTS": {"ALIGNMENT": []}, "TARGET_INFO": {"MACHINE_TYPE": "", "PROFILE": {}}} config_data2 = { "TARGET_REQUIREMENTS": {"ALIGNMENT": [{"COMPARISON": ">=", "VALUE": 0}]}, - "TARGET_INFO": {"MACHINE_TYPE": "", "PROFILE": {}} + "TARGET_INFO": {"MACHINE_TYPE": "", "PROFILE": {}}, } config_data3 = { "TARGET_REQUIREMENTS": {"ALIGNMENT": [{"COMPARISON": "==", "VALUE": 1}]}, - "TARGET_INFO": {"MACHINE_TYPE": "", "PROFILE": {}} + "TARGET_INFO": {"MACHINE_TYPE": "", "PROFILE": {}}, } config_data4 = { - "TARGET_REQUIREMENTS": {"ALIGNMENT": [ - {"COMPARISON": ">=", "VALUE": 0}, - {"COMPARISON": ">=", "VALUE": 64}, - {"COMPARISON": "<=", "VALUE": 8192}], - "ALIGNMENT_LOGIC_SEP": "AND"}, - "TARGET_INFO": {} + "TARGET_REQUIREMENTS": { + "ALIGNMENT": [ + {"COMPARISON": ">=", "VALUE": 0}, + {"COMPARISON": ">=", "VALUE": 64}, + {"COMPARISON": "<=", "VALUE": 8192}, + ], + "ALIGNMENT_LOGIC_SEP": "AND", + }, + "TARGET_INFO": {}, } config_data5 = { - "TARGET_REQUIREMENTS": {"ALIGNMENT": [ - {"COMPARISON": ">=", "VALUE": 0}, - {"COMPARISON": ">=", "VALUE": 64}, - {"COMPARISON": "!=", "VALUE": 4096}], - "ALIGNMENT_LOGIC_SEP": "AND"}, - "TARGET_INFO": {"MACHINE_TYPE": "", "PROFILE": {}} + "TARGET_REQUIREMENTS": { + "ALIGNMENT": [ + {"COMPARISON": ">=", "VALUE": 0}, + {"COMPARISON": ">=", "VALUE": 64}, + {"COMPARISON": "!=", "VALUE": 4096}, + ], + "ALIGNMENT_LOGIC_SEP": "AND", + }, + "TARGET_INFO": {"MACHINE_TYPE": "", "PROFILE": {}}, } test_pe0 = PE(optional_header=OptionalHeader(SectionAlignment=4096)) @@ -181,9 +182,14 @@ def test_section_alignment_test(self): test_pe2 = PE(optional_header=None) test_section_alignment_test = IV.TestSectionAlignment() - tests0 = [(config_data0, IV.Result.SKIP), (config_data1, IV.Result.SKIP), - (config_data2, IV.Result.PASS), (config_data3, IV.Result.FAIL), - (config_data4, IV.Result.PASS), (config_data5, IV.Result.FAIL)] + tests0 = [ + (config_data0, IV.Result.SKIP), + (config_data1, IV.Result.SKIP), + (config_data2, IV.Result.PASS), + (config_data3, IV.Result.FAIL), + (config_data4, IV.Result.PASS), + (config_data5, IV.Result.FAIL), + ] # Test set 1 for i in range(len(tests0)): @@ -191,9 +197,14 @@ def test_section_alignment_test(self): config, result = tests0[i] self.assertEqual(test_section_alignment_test.execute(test_pe0, config), result) - tests1 = [(config_data0, IV.Result.SKIP), (config_data1, IV.Result.SKIP), - (config_data2, IV.Result.WARN), (config_data3, IV.Result.WARN), - (config_data4, IV.Result.WARN), (config_data5, IV.Result.WARN)] + tests1 = [ + (config_data0, IV.Result.SKIP), + (config_data1, IV.Result.SKIP), + (config_data2, IV.Result.WARN), + (config_data3, IV.Result.WARN), + (config_data4, IV.Result.WARN), + (config_data5, IV.Result.WARN), + ] # Test set 2 for i in range(len(tests1)): @@ -208,64 +219,63 
@@ def test_section_alignment_test(self): self.assertEqual(test_section_alignment_test.execute(test_pe2, config), result) def test_section_alignment_test2(self): - target_config0 = { "TARGET_REQUIREMENTS": { - "ALIGNMENT": [ - {"COMPARISON": ">=", "VALUE": 0}, - {"COMPARISON": "<=", "VALUE": 8192}], - "ALIGNMENT_LOGIC_SEP": "AND"}, - "TARGET_INFO": {"MACHINE_TYPE": "", "PROFILE": {}} + "ALIGNMENT": [{"COMPARISON": ">=", "VALUE": 0}, {"COMPARISON": "<=", "VALUE": 8192}], + "ALIGNMENT_LOGIC_SEP": "AND", + }, + "TARGET_INFO": {"MACHINE_TYPE": "", "PROFILE": {}}, } target_config1 = { "TARGET_REQUIREMENTS": { - "ALIGNMENT": [ - {"COMPARISON": "==", "VALUE": 0}, - {"COMPARISON": "<=", "VALUE": 8192}], - "ALIGNMENT_LOGIC_SEP": "AND"}, - "TARGET_INFO": {"MACHINE_TYPE": "", "PROFILE": {}} + "ALIGNMENT": [{"COMPARISON": "==", "VALUE": 0}, {"COMPARISON": "<=", "VALUE": 8192}], + "ALIGNMENT_LOGIC_SEP": "AND", + }, + "TARGET_INFO": {"MACHINE_TYPE": "", "PROFILE": {}}, } target_config2 = { "TARGET_REQUIREMENTS": { - "ALIGNMENT": [ - {"COMPARISON": "==", "VALUE": 32}, - {"COMPARISON": "==", "VALUE": 4096}], - "ALIGNMENT_LOGIC_SEP": "OR"}, - "TARGET_INFO": {"MACHINE_TYPE": "", "PROFILE": {}} + "ALIGNMENT": [{"COMPARISON": "==", "VALUE": 32}, {"COMPARISON": "==", "VALUE": 4096}], + "ALIGNMENT_LOGIC_SEP": "OR", + }, + "TARGET_INFO": {"MACHINE_TYPE": "", "PROFILE": {}}, } target_config3 = { "TARGET_REQUIREMENTS": { - "ALIGNMENT": [ - {"COMPARISON": "==", "VALUE": 31}, - {"COMPARISON": "==", "VALUE": 61}], - "ALIGNMENT_LOGIC_SEP": "OR"}, - "TARGET_INFO": {"MACHINE_TYPE": "", "PROFILE": {}} + "ALIGNMENT": [{"COMPARISON": "==", "VALUE": 31}, {"COMPARISON": "==", "VALUE": 61}], + "ALIGNMENT_LOGIC_SEP": "OR", + }, + "TARGET_INFO": {"MACHINE_TYPE": "", "PROFILE": {}}, } target_config4 = { "TARGET_REQUIREMENTS": { - "ALIGNMENT": [ - {"COMPARISON": "==", "VALUE": 32}, - {"COMPARISON": "==", "VALUE": 64}]}, - "TARGET_INFO": {"MACHINE_TYPE": "", "PROFILE": {}} + "ALIGNMENT": [{"COMPARISON": "==", "VALUE": 32}, {"COMPARISON": "==", "VALUE": 64}] + }, + "TARGET_INFO": {"MACHINE_TYPE": "", "PROFILE": {}}, } target_config5 = { "TARGET_REQUIREMENTS": { - "ALIGNMENT": [ - {"COMPARISON": "==", "VALUE": 32}, - {"COMPARISON": "==", "VALUE": 64}], - "ALIGNMENT_LOGIC_SEP": "AR"}, - "TARGET_INFO": {"MACHINE_TYPE": "", "PROFILE": {}} + "ALIGNMENT": [{"COMPARISON": "==", "VALUE": 32}, {"COMPARISON": "==", "VALUE": 64}], + "ALIGNMENT_LOGIC_SEP": "AR", + }, + "TARGET_INFO": {"MACHINE_TYPE": "", "PROFILE": {}}, } test_section_alignment_test = IV.TestSectionAlignment() pe = PE(optional_header=OptionalHeader(SectionAlignment=4096)) - tests = [(target_config0, IV.Result.PASS), (target_config1, IV.Result.FAIL), (target_config2, IV.Result.PASS), - (target_config3, IV.Result.FAIL), (target_config4, IV.Result.FAIL), (target_config5, IV.Result.FAIL)] + tests = [ + (target_config0, IV.Result.PASS), + (target_config1, IV.Result.FAIL), + (target_config2, IV.Result.PASS), + (target_config3, IV.Result.FAIL), + (target_config4, IV.Result.FAIL), + (target_config5, IV.Result.FAIL), + ] for i in range(len(tests)): with self.subTest("test_section_alignment_and_or_logic", i=i): @@ -280,26 +290,24 @@ def test_subsystem_value_test(self): 3. If subsystem type is invalid, return Result.FAIL 3. 
return Result.PASS / Result.FAIL returned based on subsystem value """ - config_data0 = { - "TARGET_REQUIREMENTS": {} - } - config_data1 = { - "TARGET_REQUIREMENTS": {"ALLOWED_SUBSYSTEMS": []} - } - config_data2 = { - "TARGET_REQUIREMENTS": {"ALLOWED_SUBSYSTEMS": ["IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER"]} - } + config_data0 = {"TARGET_REQUIREMENTS": {}} + config_data1 = {"TARGET_REQUIREMENTS": {"ALLOWED_SUBSYSTEMS": []}} + config_data2 = {"TARGET_REQUIREMENTS": {"ALLOWED_SUBSYSTEMS": ["IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER"]}} config_data3 = { - "TARGET_REQUIREMENTS": {"ALLOWED_SUBSYSTEMS": - ["IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER", - "IMAGE_SUBSYSTEM_EFI_APPLICATION"]} + "TARGET_REQUIREMENTS": { + "ALLOWED_SUBSYSTEMS": ["IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER", "IMAGE_SUBSYSTEM_EFI_APPLICATION"] + } } test_subsystem_value_test = IV.TestSubsystemValue() TEST_PE0 = PE(optional_header=OptionalHeader(Subsystem=10)) - tests0 = [(config_data0, IV.Result.SKIP), (config_data1, IV.Result.SKIP), - (config_data2, IV.Result.FAIL), (config_data3, IV.Result.PASS)] + tests0 = [ + (config_data0, IV.Result.SKIP), + (config_data1, IV.Result.SKIP), + (config_data2, IV.Result.FAIL), + (config_data3, IV.Result.PASS), + ] for i in range(len(tests0)): with self.subTest("test_subsystem_value0", i=i): @@ -307,8 +315,12 @@ def test_subsystem_value_test(self): self.assertEqual(test_subsystem_value_test.execute(TEST_PE0, config), result) TEST_PE1 = PE(optional_header=OptionalHeader(Subsystem="UEFI_")) - tests1 = [(config_data0, IV.Result.SKIP), (config_data1, IV.Result.SKIP), - (config_data2, IV.Result.FAIL), (config_data3, IV.Result.FAIL)] + tests1 = [ + (config_data0, IV.Result.SKIP), + (config_data1, IV.Result.SKIP), + (config_data2, IV.Result.FAIL), + (config_data3, IV.Result.FAIL), + ] for i in range(len(tests1)): with self.subTest("test_subsystem_value1", i=i): @@ -316,8 +328,12 @@ def test_subsystem_value_test(self): self.assertEqual(test_subsystem_value_test.execute(TEST_PE1, config), result) TEST_PE2 = PE(optional_header=OptionalHeader(Subsystem=None)) - tests2 = [(config_data0, IV.Result.SKIP), (config_data1, IV.Result.SKIP), - (config_data2, IV.Result.WARN), (config_data3, IV.Result.WARN)] + tests2 = [ + (config_data0, IV.Result.SKIP), + (config_data1, IV.Result.SKIP), + (config_data2, IV.Result.WARN), + (config_data3, IV.Result.WARN), + ] for i in range(len(tests2)): with self.subTest("test_subsystem_value2", i=i): @@ -325,7 +341,6 @@ def test_subsystem_value_test(self): self.assertEqual(test_subsystem_value_test.execute(TEST_PE2, config), result) def test_helper_functions(self): - data = 0b00000000 results = [1, 2, 4, 8, 16, 32, 64] @@ -366,14 +381,8 @@ def test_helper_functions(self): "T2": "T", "T3": "T", } - target_config = { - "T2": "F" - } - final_config = { - "T1": "T", - "T2": "F", - "T3": "T" - } + target_config = {"T2": "F"} + final_config = {"T1": "T", "T2": "F", "T3": "T"} self.assertEqual(IV.fill_missing_requirements(default_config, target_config), final_config) @@ -386,7 +395,6 @@ def test_test_interface(self): c.execute(1, 2) def test_get_cli_args(self): - test1 = ["-i", "file.efi"] test2 = ["-i", "file.efi", "-d"] test3 = ["-i", "file.efi", "-p", "APP"] diff --git a/tests.unit/test_nuget.py b/tests.unit/test_nuget.py index a5e8cc53..bf1edb16 100644 --- a/tests.unit/test_nuget.py +++ b/tests.unit/test_nuget.py @@ -12,7 +12,6 @@ class Test_nuget(unittest.TestCase): - def test_can_download_nuget(self): test_dir = tempfile.mkdtemp() nuget_path = os.path.join(test_dir, "NuGet.exe") 
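# The line joins throughout this patch (for example, the single-line
# assert_called_once_with at the top of this section) indicate that ruff
# format is running with the project's configured line length, evidently
# longer than the 88-column default: calls that fit within the limit are
# collapsed onto one line, and calls that do not fit are exploded one
# argument per line with a trailing comma. A synthetic example, assuming
# such a configuration:
#
#     # before
#     obj.method("ARG_ONE",
#                "ARG_TWO")
#     # after (fits on one line)
#     obj.method("ARG_ONE", "ARG_TWO")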
diff --git a/tests.unit/test_nuget_dependency.py b/tests.unit/test_nuget_dependency.py index f57f34e7..27de13af 100644 --- a/tests.unit/test_nuget_dependency.py +++ b/tests.unit/test_nuget_dependency.py @@ -24,7 +24,7 @@ missing_version = "5.200.13" hw_package_name = "NuGet.CommandLine" -hw_json_template = ''' +hw_json_template = """ { "scope": "global", "type": "nuget", @@ -32,7 +32,7 @@ "source": "https://api.nuget.org/v3/index.json", "version": "%s" } -''' +""" def prep_workspace(): @@ -63,7 +63,7 @@ def setUp(self): @classmethod def setUpClass(cls): - logger = logging.getLogger('') + logger = logging.getLogger("") logger.addHandler(logging.NullHandler()) unittest.installHandler() @@ -89,11 +89,10 @@ def tearDown(self): def test_can_get_nuget_path(self): nuget_cmd = NugetDependency.GetNugetCmd() nuget_cmd += ["locals", "global-packages", "-list"] - ret = RunCmd(nuget_cmd[0], ' '.join(nuget_cmd[1:]), outstream=sys.stdout) + ret = RunCmd(nuget_cmd[0], " ".join(nuget_cmd[1:]), outstream=sys.stdout) self.assertEqual(ret, 0) # make sure we have a zero return code def test_missing_nuget(self): - if NugetDependency.NUGET_ENV_VAR_NAME in os.environ: del os.environ[NugetDependency.NUGET_ENV_VAR_NAME] @@ -253,7 +252,7 @@ def test_bad_cached_package(self): # Create a cache with a bad cached package. # # First, create the cache. - cache_dir = os.path.join(test_dir, 'nuget_test_bad_cache') + cache_dir = os.path.join(test_dir, "nuget_test_bad_cache") os.mkdir(cache_dir) ext_dep.nuget_cache_path = cache_dir # Then create the directories inside the cache that should hold the contents. @@ -279,7 +278,7 @@ def test_good_cached_package(self): # Create a cache with a good cached package. # # First, create the cache. - cache_dir = os.path.join(test_dir, 'nuget_test_good_cache') + cache_dir = os.path.join(test_dir, "nuget_test_good_cache") os.mkdir(cache_dir) ext_dep.nuget_cache_path = cache_dir # Then create the directories inside the cache that should hold the contents. @@ -298,5 +297,5 @@ def test_good_cached_package(self): self.assertTrue(ext_dep.verify()) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests.unit/test_nuget_publish.py b/tests.unit/test_nuget_publish.py index d713e47a..fa895fa6 100644 --- a/tests.unit/test_nuget_publish.py +++ b/tests.unit/test_nuget_publish.py @@ -105,7 +105,12 @@ def test_push(self): nuget.SetBasicData("EDK2", "BSD-2-Clause", "https://project_url", "descr", "https://server", "copyright") tempfolder_out = tempfile.mkdtemp() spec = os.path.join(tempfolder_out, "test.nuspec") - test_nuget_publish.write_to_file(spec, ["This is a legit nuget file lol", ]) + test_nuget_publish.write_to_file( + spec, + [ + "This is a legit nuget file lol", + ], + ) ret = nuget.Push(spec, "") self.assertEqual(ret, 1) @@ -121,7 +126,12 @@ def test_pack_license_espression_invalid(self): while len(release_notes) <= nuget_publishing.NugetSupport.RELEASE_NOTE_SHORT_STRING_MAX_LENGTH: release_notes += f"This is now {len(release_notes)} characters long. 
" # write a file that can be packaged by nuget - test_nuget_publish.write_to_file(outfile, [release_notes, ]) + test_nuget_publish.write_to_file( + outfile, + [ + release_notes, + ], + ) ret = nuget.Pack(version, tempfolder_out, tempfolder_in, release_notes) self.assertEqual(ret, 0) spec = os.path.join(tempfolder_out, "test.nuspec") @@ -133,36 +143,40 @@ def test_pack_license_espression_invalid(self): def test_main_new_and_pack_LicenseIdentifier(self): args = sys.argv tempfolder = tempfile.mkdtemp() - sys.argv = ["", - "--Operation", - "New", - "--Name", - "Test", - "--Author", - "test", - "--ProjectUrl", - "https://github.com", - "--Description", - "test", - "--FeedUrl", - " https://github.com", - "--ConfigFileFolderPath", - tempfolder, - "--LicenseIdentifier", - "BSD2"] + sys.argv = [ + "", + "--Operation", + "New", + "--Name", + "Test", + "--Author", + "test", + "--ProjectUrl", + "https://github.com", + "--Description", + "test", + "--FeedUrl", + " https://github.com", + "--ConfigFileFolderPath", + tempfolder, + "--LicenseIdentifier", + "BSD2", + ] ret = nuget_publishing.main() self.assertEqual(ret, 0) - sys.argv = ["", - "--Operation", - "Pack", - "--ConfigFilePath", - os.path.join(tempfolder, "Test.config.yaml"), - "--Version", - "1.0.0", - "--InputFolderPath", - tempfolder] + sys.argv = [ + "", + "--Operation", + "Pack", + "--ConfigFilePath", + os.path.join(tempfolder, "Test.config.yaml"), + "--Version", + "1.0.0", + "--InputFolderPath", + tempfolder, + ] ret = nuget_publishing.main() self.assertEqual(ret, 0) @@ -171,38 +185,42 @@ def test_main_new_and_pack_LicenseIdentifier(self): def test_main_new_and_pack_CustomLicense_valid(self): args = sys.argv tempfolder = tempfile.mkdtemp() - open(os.path.join(tempfolder, 'license.txt'), 'w') - sys.argv = ["", - "--Operation", - "New", - "--Name", - "Test", - "--Author", - "test", - "--ProjectUrl", - "https://github.com", - "--Description", - "test", - "--FeedUrl", - " https://github.com", - "--ConfigFileFolderPath", - tempfolder] + open(os.path.join(tempfolder, "license.txt"), "w") + sys.argv = [ + "", + "--Operation", + "New", + "--Name", + "Test", + "--Author", + "test", + "--ProjectUrl", + "https://github.com", + "--Description", + "test", + "--FeedUrl", + " https://github.com", + "--ConfigFileFolderPath", + tempfolder, + ] ret = nuget_publishing.main() self.assertEqual(ret, 0) - sys.argv = ["", - "--Operation", - "Pack", - "--ConfigFilePath", - os.path.join(tempfolder, "Test.config.yaml"), - "--Version", - "1.0.0", - "--Copyright", - "2023", - "--InputFolderPath", - tempfolder, - "--CustomLicensePath", - os.path.join(tempfolder, 'license.txt')] + sys.argv = [ + "", + "--Operation", + "Pack", + "--ConfigFilePath", + os.path.join(tempfolder, "Test.config.yaml"), + "--Version", + "1.0.0", + "--Copyright", + "2023", + "--InputFolderPath", + tempfolder, + "--CustomLicensePath", + os.path.join(tempfolder, "license.txt"), + ] ret = nuget_publishing.main() self.assertEqual(ret, 0) @@ -211,32 +229,36 @@ def test_main_new_and_pack_CustomLicense_valid(self): def test_main_new_and_pack_no_CustomLicense(self): args = sys.argv tempfolder = tempfile.mkdtemp() - sys.argv = ["", - "--Operation", - "New", - "--Name", - "Test", - "--Author", - "test", - "--ProjectUrl", - "https://github.com", - "--Description", - "test", - "--FeedUrl", - " https://github.com", - "--ConfigFileFolderPath", - tempfolder] + sys.argv = [ + "", + "--Operation", + "New", + "--Name", + "Test", + "--Author", + "test", + "--ProjectUrl", + "https://github.com", + "--Description", + 
"test", + "--FeedUrl", + " https://github.com", + "--ConfigFileFolderPath", + tempfolder, + ] ret = nuget_publishing.main() self.assertEqual(ret, 0) - sys.argv = ["", - "--Operation", - "Pack", - "--ConfigFilePath", - os.path.join(tempfolder, "Test.config.yaml"), - "--Version", - "1.0.0", - "--InputFolderPath", - tempfolder] + sys.argv = [ + "", + "--Operation", + "Pack", + "--ConfigFilePath", + os.path.join(tempfolder, "Test.config.yaml"), + "--Version", + "1.0.0", + "--InputFolderPath", + tempfolder, + ] self.assertRaises(Exception, nuget_publishing.main) sys.argv = args @@ -244,34 +266,38 @@ def test_main_new_and_pack_no_CustomLicense(self): def test_main_new_and_pack_CustomLicense_invalid_path(self): args = sys.argv tempfolder = tempfile.mkdtemp() - sys.argv = ["", - "--Operation", - "New", - "--Name", - "Test", - "--Author", - "test", - "--ProjectUrl", - "https://github.com", - "--Description", - "test", - "--FeedUrl", - " https://github.com", - "--ConfigFileFolderPath", - tempfolder] + sys.argv = [ + "", + "--Operation", + "New", + "--Name", + "Test", + "--Author", + "test", + "--ProjectUrl", + "https://github.com", + "--Description", + "test", + "--FeedUrl", + " https://github.com", + "--ConfigFileFolderPath", + tempfolder, + ] ret = nuget_publishing.main() self.assertEqual(ret, 0) - sys.argv = ["", - "--Operation", - "Pack", - "--ConfigFilePath", - os.path.join(tempfolder, "Test.config.yaml"), - "--Version", - "1.0.0", - "--InputFolderPath", - tempfolder, - "--CustomLicensePath", - "/bad/path/license.txt"] + sys.argv = [ + "", + "--Operation", + "Pack", + "--ConfigFilePath", + os.path.join(tempfolder, "Test.config.yaml"), + "--Version", + "1.0.0", + "--InputFolderPath", + tempfolder, + "--CustomLicensePath", + "/bad/path/license.txt", + ] self.assertRaises(Exception, nuget_publishing.main) sys.argv = args @@ -279,35 +305,39 @@ def test_main_new_and_pack_CustomLicense_invalid_path(self): def test_main_new_and_pack_CustomLicense_invalid_license_name(self): args = sys.argv tempfolder = tempfile.mkdtemp() - open(os.path.join(tempfolder, 'license2.txt'), 'w') - sys.argv = ["", - "--Operation", - "New", - "--Name", - "Test", - "--Author", - "test", - "--ProjectUrl", - "https://github.com", - "--Description", - "test", - "--FeedUrl", - " https://github.com", - "--ConfigFileFolderPath", - tempfolder] + open(os.path.join(tempfolder, "license2.txt"), "w") + sys.argv = [ + "", + "--Operation", + "New", + "--Name", + "Test", + "--Author", + "test", + "--ProjectUrl", + "https://github.com", + "--Description", + "test", + "--FeedUrl", + " https://github.com", + "--ConfigFileFolderPath", + tempfolder, + ] ret = nuget_publishing.main() self.assertEqual(ret, 0) - sys.argv = ["", - "--Operation", - "Pack", - "--ConfigFilePath", - os.path.join(tempfolder, "Test.config.yaml"), - "--Version", - "1.0.0", - "--InputFolderPath", - tempfolder, - "--CustomLicensePath", - os.path.join(tempfolder, 'license2.txt')] + sys.argv = [ + "", + "--Operation", + "Pack", + "--ConfigFilePath", + os.path.join(tempfolder, "Test.config.yaml"), + "--Version", + "1.0.0", + "--InputFolderPath", + tempfolder, + "--CustomLicensePath", + os.path.join(tempfolder, "license2.txt"), + ] self.assertRaises(Exception, nuget_publishing.main) sys.argv = args @@ -315,38 +345,42 @@ def test_main_new_and_pack_CustomLicense_invalid_license_name(self): def test_main_new_RepositoryType_and_pack(self): args = sys.argv tempfolder = tempfile.mkdtemp() - sys.argv = ["", - "--Operation", - "New", - "--Name", - "Test", - "--Author", - "test", - 
"--ProjectUrl", - "https://github.com", - "--Description", - "test", - "--FeedUrl", - " https://github.com", - "--ConfigFileFolderPath", - tempfolder, - "--LicenseIdentifier", - "BSD2", - "--RepositoryType", - "git"] + sys.argv = [ + "", + "--Operation", + "New", + "--Name", + "Test", + "--Author", + "test", + "--ProjectUrl", + "https://github.com", + "--Description", + "test", + "--FeedUrl", + " https://github.com", + "--ConfigFileFolderPath", + tempfolder, + "--LicenseIdentifier", + "BSD2", + "--RepositoryType", + "git", + ] ret = nuget_publishing.main() self.assertEqual(ret, 0) - sys.argv = ["", - "--Operation", - "Pack", - "--ConfigFilePath", - os.path.join(tempfolder, "Test.config.yaml"), - "--Version", - "1.0.0", - "--InputFolderPath", - tempfolder] + sys.argv = [ + "", + "--Operation", + "Pack", + "--ConfigFilePath", + os.path.join(tempfolder, "Test.config.yaml"), + "--Version", + "1.0.0", + "--InputFolderPath", + tempfolder, + ] ret = nuget_publishing.main() self.assertEqual(ret, 0) @@ -355,38 +389,42 @@ def test_main_new_RepositoryType_and_pack(self): def test_main_new_RepositoryUrl_and_pack(self): args = sys.argv tempfolder = tempfile.mkdtemp() - sys.argv = ["", - "--Operation", - "New", - "--Name", - "Test", - "--Author", - "test", - "--ProjectUrl", - "https://github.com", - "--Description", - "test", - "--FeedUrl", - " https://github.com", - "--ConfigFileFolderPath", - tempfolder, - "--LicenseIdentifier", - "BSD2", - "--RepositoryUrl", - "https://github.com/microsoft/mu_basecore"] + sys.argv = [ + "", + "--Operation", + "New", + "--Name", + "Test", + "--Author", + "test", + "--ProjectUrl", + "https://github.com", + "--Description", + "test", + "--FeedUrl", + " https://github.com", + "--ConfigFileFolderPath", + tempfolder, + "--LicenseIdentifier", + "BSD2", + "--RepositoryUrl", + "https://github.com/microsoft/mu_basecore", + ] ret = nuget_publishing.main() self.assertEqual(ret, 0) - sys.argv = ["", - "--Operation", - "Pack", - "--ConfigFilePath", - os.path.join(tempfolder, "Test.config.yaml"), - "--Version", - "1.0.0", - "--InputFolderPath", - tempfolder] + sys.argv = [ + "", + "--Operation", + "Pack", + "--ConfigFilePath", + os.path.join(tempfolder, "Test.config.yaml"), + "--Version", + "1.0.0", + "--InputFolderPath", + tempfolder, + ] ret = nuget_publishing.main() self.assertEqual(ret, 0) @@ -395,38 +433,42 @@ def test_main_new_RepositoryUrl_and_pack(self): def test_main_new_RepositoryBranch_and_pack(self): args = sys.argv tempfolder = tempfile.mkdtemp() - sys.argv = ["", - "--Operation", - "New", - "--Name", - "Test", - "--Author", - "test", - "--ProjectUrl", - "https://github.com", - "--Description", - "test", - "--FeedUrl", - " https://github.com", - "--ConfigFileFolderPath", - tempfolder, - "--LicenseIdentifier", - "BSD2", - "--RepositoryBranch", - "main"] + sys.argv = [ + "", + "--Operation", + "New", + "--Name", + "Test", + "--Author", + "test", + "--ProjectUrl", + "https://github.com", + "--Description", + "test", + "--FeedUrl", + " https://github.com", + "--ConfigFileFolderPath", + tempfolder, + "--LicenseIdentifier", + "BSD2", + "--RepositoryBranch", + "main", + ] ret = nuget_publishing.main() self.assertEqual(ret, 0) - sys.argv = ["", - "--Operation", - "Pack", - "--ConfigFilePath", - os.path.join(tempfolder, "Test.config.yaml"), - "--Version", - "1.0.0", - "--InputFolderPath", - tempfolder] + sys.argv = [ + "", + "--Operation", + "Pack", + "--ConfigFilePath", + os.path.join(tempfolder, "Test.config.yaml"), + "--Version", + "1.0.0", + "--InputFolderPath", + 
tempfolder, + ] ret = nuget_publishing.main() self.assertEqual(ret, 0) @@ -435,38 +477,42 @@ def test_main_new_RepositoryBranch_and_pack(self): def test_main_new_RepositoryCommit_and_pack(self): args = sys.argv tempfolder = tempfile.mkdtemp() - sys.argv = ["", - "--Operation", - "New", - "--Name", - "Test", - "--Author", - "test", - "--ProjectUrl", - "https://github.com", - "--Description", - "test", - "--FeedUrl", - " https://github.com", - "--ConfigFileFolderPath", - tempfolder, - "--LicenseIdentifier", - "BSD2", - "--RepositoryCommit", - "cd845afd5c3c838a9f7af7dad238452ae9a17146"] + sys.argv = [ + "", + "--Operation", + "New", + "--Name", + "Test", + "--Author", + "test", + "--ProjectUrl", + "https://github.com", + "--Description", + "test", + "--FeedUrl", + " https://github.com", + "--ConfigFileFolderPath", + tempfolder, + "--LicenseIdentifier", + "BSD2", + "--RepositoryCommit", + "cd845afd5c3c838a9f7af7dad238452ae9a17146", + ] ret = nuget_publishing.main() self.assertEqual(ret, 0) - sys.argv = ["", - "--Operation", - "Pack", - "--ConfigFilePath", - os.path.join(tempfolder, "Test.config.yaml"), - "--Version", - "1.0.0", - "--InputFolderPath", - tempfolder] + sys.argv = [ + "", + "--Operation", + "Pack", + "--ConfigFilePath", + os.path.join(tempfolder, "Test.config.yaml"), + "--Version", + "1.0.0", + "--InputFolderPath", + tempfolder, + ] ret = nuget_publishing.main() self.assertEqual(ret, 0) @@ -475,44 +521,48 @@ def test_main_new_RepositoryCommit_and_pack(self): def test_main_new_RepositoryAll_and_pack(self): args = sys.argv tempfolder = tempfile.mkdtemp() - sys.argv = ["", - "--Operation", - "New", - "--Name", - "Test", - "--Author", - "test", - "--ProjectUrl", - "https://github.com", - "--Description", - "test", - "--FeedUrl", - " https://github.com", - "--ConfigFileFolderPath", - tempfolder, - "--LicenseIdentifier", - "BSD2", - "--RepositoryType", - "git", - "--RepositoryUrl", - "https://github.com/microsoft/mu_plus", - "--RepositoryBranch", - "master", - "--RepositoryCommit", - "06df12360d561b2007e03503491510c36426d860"] + sys.argv = [ + "", + "--Operation", + "New", + "--Name", + "Test", + "--Author", + "test", + "--ProjectUrl", + "https://github.com", + "--Description", + "test", + "--FeedUrl", + " https://github.com", + "--ConfigFileFolderPath", + tempfolder, + "--LicenseIdentifier", + "BSD2", + "--RepositoryType", + "git", + "--RepositoryUrl", + "https://github.com/microsoft/mu_plus", + "--RepositoryBranch", + "master", + "--RepositoryCommit", + "06df12360d561b2007e03503491510c36426d860", + ] ret = nuget_publishing.main() self.assertEqual(ret, 0) - sys.argv = ["", - "--Operation", - "Pack", - "--ConfigFilePath", - os.path.join(tempfolder, "Test.config.yaml"), - "--Version", - "1.0.0", - "--InputFolderPath", - tempfolder] + sys.argv = [ + "", + "--Operation", + "Pack", + "--ConfigFilePath", + os.path.join(tempfolder, "Test.config.yaml"), + "--Version", + "1.0.0", + "--InputFolderPath", + tempfolder, + ] ret = nuget_publishing.main() self.assertEqual(ret, 0) @@ -521,38 +571,42 @@ def test_main_new_RepositoryAll_and_pack(self): def test_main_new_and_pack_RepositoryType(self): args = sys.argv tempfolder = tempfile.mkdtemp() - sys.argv = ["", - "--Operation", - "New", - "--Name", - "Test", - "--Author", - "test", - "--ProjectUrl", - "https://github.com", - "--Description", - "test", - "--FeedUrl", - " https://github.com", - "--ConfigFileFolderPath", - tempfolder, - "--LicenseIdentifier", - "BSD2"] + sys.argv = [ + "", + "--Operation", + "New", + "--Name", + "Test", + "--Author", + 
"test", + "--ProjectUrl", + "https://github.com", + "--Description", + "test", + "--FeedUrl", + " https://github.com", + "--ConfigFileFolderPath", + tempfolder, + "--LicenseIdentifier", + "BSD2", + ] ret = nuget_publishing.main() self.assertEqual(ret, 0) - sys.argv = ["", - "--Operation", - "Pack", - "--ConfigFilePath", - os.path.join(tempfolder, "Test.config.yaml"), - "--Version", - "1.0.0", - "--InputFolderPath", - tempfolder, - "--RepositoryType", - "git"] + sys.argv = [ + "", + "--Operation", + "Pack", + "--ConfigFilePath", + os.path.join(tempfolder, "Test.config.yaml"), + "--Version", + "1.0.0", + "--InputFolderPath", + tempfolder, + "--RepositoryType", + "git", + ] ret = nuget_publishing.main() self.assertEqual(ret, 0) @@ -561,38 +615,42 @@ def test_main_new_and_pack_RepositoryType(self): def test_main_new_and_pack_RepositoryUrl(self): args = sys.argv tempfolder = tempfile.mkdtemp() - sys.argv = ["", - "--Operation", - "New", - "--Name", - "Test", - "--Author", - "test", - "--ProjectUrl", - "https://github.com", - "--Description", - "test", - "--FeedUrl", - " https://github.com", - "--ConfigFileFolderPath", - tempfolder, - "--LicenseIdentifier", - "BSD2"] + sys.argv = [ + "", + "--Operation", + "New", + "--Name", + "Test", + "--Author", + "test", + "--ProjectUrl", + "https://github.com", + "--Description", + "test", + "--FeedUrl", + " https://github.com", + "--ConfigFileFolderPath", + tempfolder, + "--LicenseIdentifier", + "BSD2", + ] ret = nuget_publishing.main() self.assertEqual(ret, 0) - sys.argv = ["", - "--Operation", - "Pack", - "--ConfigFilePath", - os.path.join(tempfolder, "Test.config.yaml"), - "--Version", - "1.0.0", - "--InputFolderPath", - tempfolder, - "--RepositoryUrl", - "https://github.com/microsoft/mu_basecore"] + sys.argv = [ + "", + "--Operation", + "Pack", + "--ConfigFilePath", + os.path.join(tempfolder, "Test.config.yaml"), + "--Version", + "1.0.0", + "--InputFolderPath", + tempfolder, + "--RepositoryUrl", + "https://github.com/microsoft/mu_basecore", + ] ret = nuget_publishing.main() self.assertEqual(ret, 0) @@ -601,38 +659,42 @@ def test_main_new_and_pack_RepositoryUrl(self): def test_main_new_and_pack_RepositoryBranch(self): args = sys.argv tempfolder = tempfile.mkdtemp() - sys.argv = ["", - "--Operation", - "New", - "--Name", - "Test", - "--Author", - "test", - "--ProjectUrl", - "https://github.com", - "--Description", - "test", - "--FeedUrl", - " https://github.com", - "--ConfigFileFolderPath", - tempfolder, - "--LicenseIdentifier", - "BSD2"] + sys.argv = [ + "", + "--Operation", + "New", + "--Name", + "Test", + "--Author", + "test", + "--ProjectUrl", + "https://github.com", + "--Description", + "test", + "--FeedUrl", + " https://github.com", + "--ConfigFileFolderPath", + tempfolder, + "--LicenseIdentifier", + "BSD2", + ] ret = nuget_publishing.main() self.assertEqual(ret, 0) - sys.argv = ["", - "--Operation", - "Pack", - "--ConfigFilePath", - os.path.join(tempfolder, "Test.config.yaml"), - "--Version", - "1.0.0", - "--InputFolderPath", - tempfolder, - "--RepositoryBranch", - "main"] + sys.argv = [ + "", + "--Operation", + "Pack", + "--ConfigFilePath", + os.path.join(tempfolder, "Test.config.yaml"), + "--Version", + "1.0.0", + "--InputFolderPath", + tempfolder, + "--RepositoryBranch", + "main", + ] ret = nuget_publishing.main() self.assertEqual(ret, 0) @@ -641,38 +703,42 @@ def test_main_new_and_pack_RepositoryBranch(self): def test_main_new_and_pack_RepositoryCommit(self): args = sys.argv tempfolder = tempfile.mkdtemp() - sys.argv = ["", - "--Operation", - 
"New", - "--Name", - "Test", - "--Author", - "test", - "--ProjectUrl", - "https://github.com", - "--Description", - "test", - "--FeedUrl", - " https://github.com", - "--ConfigFileFolderPath", - tempfolder, - "--LicenseIdentifier", - "BSD2"] + sys.argv = [ + "", + "--Operation", + "New", + "--Name", + "Test", + "--Author", + "test", + "--ProjectUrl", + "https://github.com", + "--Description", + "test", + "--FeedUrl", + " https://github.com", + "--ConfigFileFolderPath", + tempfolder, + "--LicenseIdentifier", + "BSD2", + ] ret = nuget_publishing.main() self.assertEqual(ret, 0) - sys.argv = ["", - "--Operation", - "Pack", - "--ConfigFilePath", - os.path.join(tempfolder, "Test.config.yaml"), - "--Version", - "1.0.0", - "--InputFolderPath", - tempfolder, - "--RepositoryCommit", - "cd845afd5c3c838a9f7af7dad238452ae9a17146"] + sys.argv = [ + "", + "--Operation", + "Pack", + "--ConfigFilePath", + os.path.join(tempfolder, "Test.config.yaml"), + "--Version", + "1.0.0", + "--InputFolderPath", + tempfolder, + "--RepositoryCommit", + "cd845afd5c3c838a9f7af7dad238452ae9a17146", + ] ret = nuget_publishing.main() self.assertEqual(ret, 0) @@ -681,44 +747,48 @@ def test_main_new_and_pack_RepositoryCommit(self): def test_main_new_and_pack_RepositoryAll(self): args = sys.argv tempfolder = tempfile.mkdtemp() - sys.argv = ["", - "--Operation", - "New", - "--Name", - "Test", - "--Author", - "test", - "--ProjectUrl", - "https://github.com", - "--Description", - "test", - "--FeedUrl", - " https://github.com", - "--ConfigFileFolderPath", - tempfolder, - "--LicenseIdentifier", - "BSD2"] + sys.argv = [ + "", + "--Operation", + "New", + "--Name", + "Test", + "--Author", + "test", + "--ProjectUrl", + "https://github.com", + "--Description", + "test", + "--FeedUrl", + " https://github.com", + "--ConfigFileFolderPath", + tempfolder, + "--LicenseIdentifier", + "BSD2", + ] ret = nuget_publishing.main() self.assertEqual(ret, 0) - sys.argv = ["", - "--Operation", - "Pack", - "--ConfigFilePath", - os.path.join(tempfolder, "Test.config.yaml"), - "--Version", - "1.0.0", - "--InputFolderPath", - tempfolder, - "--RepositoryType", - "git", - "--RepositoryUrl", - "https://github.com/microsoft/mu_plus", - "--RepositoryBranch", - "master", - "--RepositoryCommit", - "06df12360d561b2007e03503491510c36426d860"] + sys.argv = [ + "", + "--Operation", + "Pack", + "--ConfigFilePath", + os.path.join(tempfolder, "Test.config.yaml"), + "--Version", + "1.0.0", + "--InputFolderPath", + tempfolder, + "--RepositoryType", + "git", + "--RepositoryUrl", + "https://github.com/microsoft/mu_plus", + "--RepositoryBranch", + "master", + "--RepositoryCommit", + "06df12360d561b2007e03503491510c36426d860", + ] ret = nuget_publishing.main() self.assertEqual(ret, 0) @@ -727,5 +797,5 @@ def test_main_new_and_pack_RepositoryAll(self): # TODO: finish unit test -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests.unit/test_omnicache.py b/tests.unit/test_omnicache.py index 584a0aad..2a5edc09 100644 --- a/tests.unit/test_omnicache.py +++ b/tests.unit/test_omnicache.py @@ -63,7 +63,7 @@ def setUp(self): @classmethod def setUpClass(cls): - logger = logging.getLogger('') + logger = logging.getLogger("") logger.addHandler(logging.NullHandler()) unittest.installHandler() @@ -80,32 +80,31 @@ def test_omnicache_new(self): # check that the new cache was created as a bare repo out = StringIO() gitret = utility_functions.RunCmd("git", "rev-parse --is-bare-repository", workingdir=testcache, outstream=out) - assert (gitret == 0) - assert 
(out.getvalue().strip().lower() == "true") + assert gitret == 0 + assert out.getvalue().strip().lower() == "true" # check that it has the right metadata out = StringIO() - gitret = utility_functions.RunCmd("git", - "config --local omnicache.metadata.version", - workingdir=testcache, - outstream=out) - assert (gitret == 0) - assert (out.getvalue().strip().lower() == omnicache.OMNICACHE_VERSION) + gitret = utility_functions.RunCmd( + "git", "config --local omnicache.metadata.version", workingdir=testcache, outstream=out + ) + assert gitret == 0 + assert out.getvalue().strip().lower() == omnicache.OMNICACHE_VERSION # check that omnicache thinks it's valid (valid, _) = oc._ValidateOmnicache() - assert (valid) + assert valid # remove the metadata to simulate an older omnicache that is candidate for conversion - gitret = utility_functions.RunCmd("git", - "config --local --unset omnicache.metadata.version", - workingdir=testcache) - assert (gitret == 0) + gitret = utility_functions.RunCmd( + "git", "config --local --unset omnicache.metadata.version", workingdir=testcache + ) + assert gitret == 0 # check that omnicache thinks it's not valid but convertible. (valid, convertible) = oc._ValidateOmnicache() - assert (not valid) - assert (convertible) + assert not valid + assert convertible # attempt to create a new cache - test repo is valid git repo, but doesn't have the meta. with self.assertRaises(RuntimeError): @@ -115,13 +114,14 @@ def test_omnicache_new(self): gitret = utility_functions.RunCmd( "git", "config --local omnicache.metadata.version {0}".format(omnicache.OMNICACHE_VERSION + "x"), - workingdir=testcache) - assert (gitret == 0) + workingdir=testcache, + ) + assert gitret == 0 # check that omnicache thinks it's not valid but convertible. (valid, convertible) = oc._ValidateOmnicache() - assert (not valid) - assert (convertible) + assert not valid + assert convertible # attempt to create a new cache - test repo is valid git repo, but doesn't have the expected meta. with self.assertRaises(RuntimeError): @@ -129,12 +129,12 @@ def test_omnicache_new(self): # now make it a non-bare (but technically valid) git repo gitret = utility_functions.RunCmd("git", "config --local core.bare false", workingdir=testcache) - assert (gitret == 0) + assert gitret == 0 # check that omnicache thinks it's not valid and not convertible. (valid, convertible) = oc._ValidateOmnicache() - assert (not valid) - assert (not convertible) + assert not valid + assert not convertible # attempt to create a new cache - test repo is valid non-bare git repo with self.assertRaises(RuntimeError): @@ -145,8 +145,8 @@ def test_omnicache_new(self): # check that omnicache thinks it's not valid and not convertible. 
(valid, convertible) = oc._ValidateOmnicache() - assert (not valid) - assert (not convertible) + assert not valid + assert not convertible # attempt to create a new cache - test repo is not valid git repo with self.assertRaises(RuntimeError): @@ -159,13 +159,13 @@ def test_omnicache_convert(self): oc = omnicache.Omnicache(testcache, create=True, convert=False) (valid, _) = oc._ValidateOmnicache() - assert (valid) + assert valid # remove the metadata to simulate an older omnicache that is candidate for conversion - gitret = utility_functions.RunCmd("git", - "config --local --unset omnicache.metadata.version", - workingdir=testcache) - assert (gitret == 0) + gitret = utility_functions.RunCmd( + "git", "config --local --unset omnicache.metadata.version", workingdir=testcache + ) + assert gitret == 0 # add an empty file to simulate the old config yaml with open(os.path.join(testcache, omnicache.PRE_0_11_OMNICACHE_FILENAME), "w") as yf: @@ -173,70 +173,71 @@ def test_omnicache_convert(self): # confirm that _ValidateOmnicache correctly identifies cache state (valid, convertible) = oc._ValidateOmnicache() - assert (not valid) - assert (convertible) + assert not valid + assert convertible # add a traditionally-named remote to the cache to simulate an older omnicache gitret = utility_functions.RunCmd( "git", "remote add pytools-ext https://github.com/tianocore/edk2-pytool-extensions.git", - workingdir=testcache) - assert (gitret == 0) + workingdir=testcache, + ) + assert gitret == 0 # add a second copy of the remote remote with a different name to the cache to simulate a duplicate gitret = utility_functions.RunCmd( "git", "remote add pytools-ext2 https://github.com/tianocore/edk2-pytool-extensions.git", - workingdir=testcache) - assert (gitret == 0) + workingdir=testcache, + ) + assert gitret == 0 # fetch the remote and its tags to populate the cache for conversion. gitret = utility_functions.RunCmd("git", "fetch pytools-ext --tags", workingdir=testcache) - assert (gitret == 0) + assert gitret == 0 # confirm that _ValidateOmnicache still correctly identifies cache state (valid, convertible) = oc._ValidateOmnicache() - assert (not valid) - assert (convertible) + assert not valid + assert convertible # re-create the omnicache object to trigger conversion oc = omnicache.Omnicache(testcache, create=False, convert=True) # validate converted cache (valid, _) = oc._ValidateOmnicache() - assert (valid) + assert valid # verify that old config file was deleted. 
- assert (not os.path.exists(os.path.join(testcache, omnicache.PRE_0_11_OMNICACHE_FILENAME))) + assert not os.path.exists(os.path.join(testcache, omnicache.PRE_0_11_OMNICACHE_FILENAME)) # verify that the traditionally-named remote is no longer in the cache (it should have been renamed with a UUID) remotes = omnicache.Omnicache.GetRemotes(testcache) - assert ("pytools-ext" not in remotes.keys()) + assert "pytools-ext" not in remotes.keys() # verify that the URL still is in the cache - assert ("https://github.com/tianocore/edk2-pytool-extensions.git" in remotes.values()) + assert "https://github.com/tianocore/edk2-pytool-extensions.git" in remotes.values() # verify that there is exactly one entry in the cache for this URL (duplicates should be removed) - assert (sum(url == "https://github.com/tianocore/edk2-pytool-extensions.git" for url in remotes.values()) == 1) + assert sum(url == "https://github.com/tianocore/edk2-pytool-extensions.git" for url in remotes.values()) == 1 # verify that the former remote name is now the "omnicache display name" - for (name, url) in remotes.items(): - if (url == "https://github.com/tianocore/edk2-pytool-extensions.git"): + for name, url in remotes.items(): + if url == "https://github.com/tianocore/edk2-pytool-extensions.git": out = StringIO() - gitret = utility_functions.RunCmd("git", - f"config --local omnicache.{name}.displayname", - workingdir=testcache, - outstream=out) - assert (gitret == 0) + gitret = utility_functions.RunCmd( + "git", f"config --local omnicache.{name}.displayname", workingdir=testcache, outstream=out + ) + assert gitret == 0 displayname = out.getvalue().strip() # if there are duplicates, which displayname is chosen is arbitrary, so allow either. - assert (displayname == "pytools-ext" or displayname == "pytools-ext2") + assert displayname == "pytools-ext" or displayname == "pytools-ext2" # verify that there are no global tags in the root. out = StringIO() gitret = utility_functions.RunCmd("git", "tag -l", workingdir=testcache, outstream=out) - assert (gitret == 0) - assert (out.getvalue().strip() == "") + assert gitret == 0 + assert out.getvalue().strip() == "" # add an empty file to simulate the old config yaml, but leave metadata in place. 
# this simulates the case where an old version of omnicache ran and updated the remotes @@ -247,8 +248,8 @@ def test_omnicache_convert(self): # confirm that _ValidateOmnicache correctly identifies cache state (valid, convertible) = oc._ValidateOmnicache() - assert (not valid) - assert (convertible) + assert not valid + assert convertible def test_omnicache_add_remove(self): testcache = os.path.join(os.path.abspath(os.getcwd()), test_dir, "testcache") @@ -257,72 +258,80 @@ def test_omnicache_add_remove(self): oc = omnicache.Omnicache(testcache, create=True, convert=False) (valid, _) = oc._ValidateOmnicache() - assert (valid) + assert valid # add a remote with display name ret = oc.AddRemote("https://github.com/tianocore/edk2-pytool-extensions.git", name="pytools-ext") - assert (ret == 0) + assert ret == 0 - assert (len(omnicache.Omnicache.GetRemotes(testcache).keys()) == 1) - assert ("https://github.com/tianocore/edk2-pytool-extensions.git" - in omnicache.Omnicache.GetRemotes(testcache).values()) + assert len(omnicache.Omnicache.GetRemotes(testcache).keys()) == 1 + assert ( + "https://github.com/tianocore/edk2-pytool-extensions.git" + in omnicache.Omnicache.GetRemotes(testcache).values() + ) # get the remote UUID remoteName = oc._LookupRemoteForUrl("https://github.com/tianocore/edk2-pytool-extensions.git") - assert (remoteName is not None) + assert remoteName is not None # confirm that remote data is as expected remoteData = oc.GetRemoteData() - assert (len(remoteData.keys()) == 1) - assert (remoteData[remoteName]["url"] == "https://github.com/tianocore/edk2-pytool-extensions.git") - assert (remoteData[remoteName]["displayname"] == "pytools-ext") + assert len(remoteData.keys()) == 1 + assert remoteData[remoteName]["url"] == "https://github.com/tianocore/edk2-pytool-extensions.git" + assert remoteData[remoteName]["displayname"] == "pytools-ext" # remove the remote and make sure it is gone ret = oc.RemoveRemote("https://github.com/tianocore/edk2-pytool-extensions.git") - assert (ret == 0) - assert (len(omnicache.Omnicache.GetRemotes(testcache).keys()) == 0) - assert ("https://github.com/tianocore/edk2-pytool-extensions.git" - not in omnicache.Omnicache.GetRemotes(testcache).values()) + assert ret == 0 + assert len(omnicache.Omnicache.GetRemotes(testcache).keys()) == 0 + assert ( + "https://github.com/tianocore/edk2-pytool-extensions.git" + not in omnicache.Omnicache.GetRemotes(testcache).values() + ) # add a remote without display name ret = oc.AddRemote("https://github.com/tianocore/edk2-pytool-extensions.git") - assert (ret == 0) + assert ret == 0 - assert (len(omnicache.Omnicache.GetRemotes(testcache).keys()) == 1) - assert ("https://github.com/tianocore/edk2-pytool-extensions.git" - in omnicache.Omnicache.GetRemotes(testcache).values()) + assert len(omnicache.Omnicache.GetRemotes(testcache).keys()) == 1 + assert ( + "https://github.com/tianocore/edk2-pytool-extensions.git" + in omnicache.Omnicache.GetRemotes(testcache).values() + ) # get the remote UUID remoteName = oc._LookupRemoteForUrl("https://github.com/tianocore/edk2-pytool-extensions.git") - assert (remoteName is not None) + assert remoteName is not None # confirm that remote data is as expected remoteData = oc.GetRemoteData() - assert (len(remoteData.keys()) == 1) - assert (remoteData[remoteName]["url"] == "https://github.com/tianocore/edk2-pytool-extensions.git") - assert ("displayname" not in remoteData[remoteName]) + assert len(remoteData.keys()) == 1 + assert remoteData[remoteName]["url"] == 
"https://github.com/tianocore/edk2-pytool-extensions.git" + assert "displayname" not in remoteData[remoteName] # add a remote that already exists (with new display name) and make sure it is treated as an update. ret = oc.AddRemote("https://github.com/tianocore/edk2-pytool-extensions.git", name="pytools-ext2") - assert (ret == 0) + assert ret == 0 - assert (len(omnicache.Omnicache.GetRemotes(testcache).keys()) == 1) - assert ("https://github.com/tianocore/edk2-pytool-extensions.git" - in omnicache.Omnicache.GetRemotes(testcache).values()) + assert len(omnicache.Omnicache.GetRemotes(testcache).keys()) == 1 + assert ( + "https://github.com/tianocore/edk2-pytool-extensions.git" + in omnicache.Omnicache.GetRemotes(testcache).values() + ) # get the remote UUID remoteName = oc._LookupRemoteForUrl("https://github.com/tianocore/edk2-pytool-extensions.git") - assert (remoteName is not None) + assert remoteName is not None # confirm that remote data is as expected remoteData = oc.GetRemoteData() - assert (len(remoteData.keys()) == 1) - assert (remoteData[remoteName]["url"] == "https://github.com/tianocore/edk2-pytool-extensions.git") - assert (remoteData[remoteName]["displayname"] == "pytools-ext2") + assert len(remoteData.keys()) == 1 + assert remoteData[remoteName]["url"] == "https://github.com/tianocore/edk2-pytool-extensions.git" + assert remoteData[remoteName]["displayname"] == "pytools-ext2" # attempt to remove a non-existent remote ret = oc.RemoveRemote("http://thisisnot.com/good.git") - assert (ret != 0) + assert ret != 0 def test_omnicache_update(self): testcache = os.path.join(os.path.abspath(os.getcwd()), test_dir, "testcache") @@ -331,43 +340,48 @@ def test_omnicache_update(self): oc = omnicache.Omnicache(testcache, create=True, convert=False) (valid, _) = oc._ValidateOmnicache() - assert (valid) + assert valid # add a remote with display name ret = oc.AddRemote("https://github.com/tianocore/edk2-pytool-extensions.git", name="pytools-ext") - assert (ret == 0) + assert ret == 0 - assert (len(omnicache.Omnicache.GetRemotes(testcache).keys()) == 1) - assert ("https://github.com/tianocore/edk2-pytool-extensions.git" - in omnicache.Omnicache.GetRemotes(testcache).values()) + assert len(omnicache.Omnicache.GetRemotes(testcache).keys()) == 1 + assert ( + "https://github.com/tianocore/edk2-pytool-extensions.git" + in omnicache.Omnicache.GetRemotes(testcache).values() + ) # get the remote UUID remoteName = oc._LookupRemoteForUrl("https://github.com/tianocore/edk2-pytool-extensions.git") - assert (remoteName is not None) + assert remoteName is not None # update the URL and displayname of the remote ret = oc.UpdateRemote( "https://github.com/tianocore/edk2-pytool-extensions.git", newUrl="https://github.com/tianocore/edk2-pytool-extensions2.git", - newName="pytools-ext2") - assert (ret == 0) + newName="pytools-ext2", + ) + assert ret == 0 - assert (len(omnicache.Omnicache.GetRemotes(testcache).keys()) == 1) - assert ("https://github.com/tianocore/edk2-pytool-extensions2.git" - in omnicache.Omnicache.GetRemotes(testcache).values()) + assert len(omnicache.Omnicache.GetRemotes(testcache).keys()) == 1 + assert ( + "https://github.com/tianocore/edk2-pytool-extensions2.git" + in omnicache.Omnicache.GetRemotes(testcache).values() + ) # make sure UUID didn't change - assert (remoteName == oc._LookupRemoteForUrl("https://github.com/tianocore/edk2-pytool-extensions2.git")) + assert remoteName == oc._LookupRemoteForUrl("https://github.com/tianocore/edk2-pytool-extensions2.git") # confirm that remote data is as 
expected remoteData = oc.GetRemoteData() - assert (len(remoteData.keys()) == 1) - assert (remoteData[remoteName]["url"] == "https://github.com/tianocore/edk2-pytool-extensions2.git") - assert (remoteData[remoteName]["displayname"] == "pytools-ext2") + assert len(remoteData.keys()) == 1 + assert remoteData[remoteName]["url"] == "https://github.com/tianocore/edk2-pytool-extensions2.git" + assert remoteData[remoteName]["displayname"] == "pytools-ext2" # update a non-existent URL in the cache and confirm error is returned ret = oc.UpdateRemote("https://not.a.real.url.com/git") - assert (ret != 0) + assert ret != 0 def test_omnicache_fetch(self): testcache = os.path.join(os.path.abspath(os.getcwd()), test_dir, "testcache") @@ -376,29 +390,29 @@ def test_omnicache_fetch(self): oc = omnicache.Omnicache(testcache, create=True, convert=False) (valid, _) = oc._ValidateOmnicache() - assert (valid) + assert valid # add a remote with display name ret = oc.AddRemote("https://github.com/tianocore/edk2-pytool-extensions.git", name="pytools-ext") - assert (ret == 0) + assert ret == 0 # fetch the remote ret = oc.Fetch() - assert (ret == 0) + assert ret == 0 # fetch the remote with 4 jobs ret = oc.Fetch(jobs=4) - assert (ret == 0) + assert ret == 0 # get the remote UUID remoteName = oc._LookupRemoteForUrl("https://github.com/tianocore/edk2-pytool-extensions.git") - assert (remoteName is not None) + assert remoteName is not None # verify that branches were fetched into the omnicache - assert (len(os.listdir(os.path.join(testcache, "refs", "remotes", remoteName))) != 0) + assert len(os.listdir(os.path.join(testcache, "refs", "remotes", remoteName))) != 0 # verify that tags were fetched into the omnicache - assert (len(os.listdir(os.path.join(testcache, "refs", "rtags", remoteName))) != 0) + assert len(os.listdir(os.path.join(testcache, "refs", "rtags", remoteName))) != 0 def test_omnicache_list(self): testcache = os.path.join(os.path.abspath(os.getcwd()), test_dir, "testcache") @@ -407,13 +421,13 @@ def test_omnicache_list(self): oc = omnicache.Omnicache(testcache, create=True, convert=False) (valid, _) = oc._ValidateOmnicache() - assert (valid) + assert valid oc.List() # add a remote with display name ret = oc.AddRemote("https://github.com/tianocore/edk2-pytool-extensions.git", name="pytools-ext") - assert (ret == 0) + assert ret == 0 oc.List() @@ -425,36 +439,36 @@ def test_config_files(self): oc = omnicache.Omnicache(testcache, create=True, convert=False) (valid, _) = oc._ValidateOmnicache() - assert (valid) + assert valid # add a remote with display name ret = oc.AddRemote("https://github.com/tianocore/edk2-pytool-extensions.git", name="pytools-ext") - assert (ret == 0) + assert ret == 0 # add a remote with no display name ret = oc.AddRemote("https://github.com/tianocore/edk2-pytool-extensions2.git") - assert (ret == 0) + assert ret == 0 # export yaml cfg ret = omnicache.Export(oc, testyaml) - assert (ret == 0) + assert ret == 0 # inspect the yaml for correctness with open(testyaml) as yf: content = yaml.safe_load(yf) - assert ("remotes" in content) - assert (len(content["remotes"]) == 2) + assert "remotes" in content + assert len(content["remotes"]) == 2 for remote in content["remotes"]: - if (remote["url"] == "https://github.com/tianocore/edk2-pytool-extensions.git"): - assert (remote["name"] == "pytools-ext") - elif (remote["url"] == "https://github.com/tianocore/edk2-pytool-extensions2.git"): - assert (omnicache.Omnicache._IsValidUuid(remote["name"])) + if remote["url"] == 
"https://github.com/tianocore/edk2-pytool-extensions.git": + assert remote["name"] == "pytools-ext" + elif remote["url"] == "https://github.com/tianocore/edk2-pytool-extensions2.git": + assert omnicache.Omnicache._IsValidUuid(remote["name"]) # remove the "display name" for input test below del remote["name"] else: # not one of the URLs we populated above = bad. - assert (remote["url"] not in remote.values()) + assert remote["url"] not in remote.values() # save the yaml file (since we removed one of the displaynames) with open(testyaml, "w") as yf: @@ -462,26 +476,26 @@ def test_config_files(self): # remove the remotes ret = oc.RemoveRemote("https://github.com/tianocore/edk2-pytool-extensions.git") - assert (ret == 0) + assert ret == 0 ret = oc.RemoveRemote("https://github.com/tianocore/edk2-pytool-extensions2.git") - assert (ret == 0) + assert ret == 0 # confirm we have no remotes - assert (len(oc.GetRemoteData()) == 0) + assert len(oc.GetRemoteData()) == 0 # import yaml cfg ret = omnicache.ProcessInputConfig(oc, testyaml) - assert (ret == 0) + assert ret == 0 # check resulting omnicache config for remote in oc.GetRemoteData().values(): - if (remote["url"] == "https://github.com/tianocore/edk2-pytool-extensions.git"): - assert (remote["displayname"] == "pytools-ext") - elif (remote["url"] == "https://github.com/tianocore/edk2-pytool-extensions2.git"): - assert ("displayname" not in remote) + if remote["url"] == "https://github.com/tianocore/edk2-pytool-extensions.git": + assert remote["displayname"] == "pytools-ext" + elif remote["url"] == "https://github.com/tianocore/edk2-pytool-extensions2.git": + assert "displayname" not in remote else: # not one of the URLs we populated above = bad. - assert (remote["url"] not in remote.values()) + assert remote["url"] not in remote.values() def test_omnicache_main(self): testcache = os.path.join(os.path.abspath(os.getcwd()), test_dir, "testcache") @@ -489,15 +503,15 @@ def test_omnicache_main(self): oldargs = sys.argv sys.argv = ["omnicache", "--init", testcache] ret = omnicache.main() - assert (ret == 0) + assert ret == 0 sys.argv = ["omnicache", "--new", testcache] ret = omnicache.main() - assert (ret != 0) + assert ret != 0 sys.argv = ["omnicache", "--scan", testcache, testcache] ret = omnicache.main() - assert (ret == 0) + assert ret == 0 sys.argv = oldargs -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests.unit/test_pyopenssl_signer.py b/tests.unit/test_pyopenssl_signer.py index 51dfc6d8..74d16758 100644 --- a/tests.unit/test_pyopenssl_signer.py +++ b/tests.unit/test_pyopenssl_signer.py @@ -50,7 +50,7 @@ # spell-checker:disable # TESTCERT1 has no password -TESTCERT1 = b'MIIJQQIBAzCCCQcGCSqGSIb3DQEHAaCCCPgEggj0MIII8D\ +TESTCERT1 = b"MIIJQQIBAzCCCQcGCSqGSIb3DQEHAaCCCPgEggj0MIII8D\ CCA6cGCSqGSIb3DQEHBqCCA5gwggOUAgEAMIIDjQYJKoZIhvcNAQcBMBwGCi\ qGSIb3DQEMAQYwDgQIlimkkLBfTQ8CAggAgIIDYAGQRMWf8SbXd6nZPL2o11\ LF1JrXLCCc+RwJCDnAJD8cNFUwq4JrOc4qJOYr6QZwD//3LfHeNLwfsi+3RC\ @@ -102,10 +102,10 @@ 2CfsFEIMjtuid/ggz/pUYCb88PN3BPoQ7GO/jv6Vi6F4oCQz+Y0srbmIStNj\ qKXmc5OHIJoRkbrbFi4+2BeevncDADGrhl3heGWSmRlbOPxrryNrDPw7cbMS\ UwIwYJKoZIhvcNAQkVMRYEFILT9pNixD3s66GdK3I48b/dr23DMDEwITAJBg\ -UrDgMCGgUABBTdiDC3a0y1gAbO1eZqveI3Zd0BrwQIFO8dSTjGLhMCAggA' +UrDgMCGgUABBTdiDC3a0y1gAbO1eZqveI3Zd0BrwQIFO8dSTjGLhMCAggA" # TESTCERT2 has a password -TESTCERT2 = b'MIIKaQIBAzCCCiUGCSqGSIb3DQEHAaCCChYEggoSMIIKDj\ +TESTCERT2 = b"MIIKaQIBAzCCCiUGCSqGSIb3DQEHAaCCChYEggoSMIIKDj\ CCBg8GCSqGSIb3DQEHAaCCBgAEggX8MIIF+DCCBfQGCyqGSIb3DQEMCgECoI\ 
IE/jCCBPowHAYKKoZIhvcNAQwBAzAOBAh8sw/XNGIGZwICB9AEggTYeUQ8gJ\ GII6HPDeBx4bGH2zkS1ybw4hToh8c9UMd3gwDviCobjkSCidTGZPwkvcfPT8\ @@ -164,41 +164,31 @@ xUOq2KAMBksmMAg0Tyc44jDOHiinR7sARTUcNncqP5qJ3d7v7DsCCQsiFD1x\ NEyQpgrD5WAhSaKiZxDvvkj/GNNVXpdiFr8RGa/IlD0+JdiZ3ujt45OduOhs\ ySr03eOImlzjA7MB8wBwYFKw4DAhoEFKn9FVak9/MKt1kvn+GIpIdTy/+pBB\ -R7oStX6AVc66qjoj9/dgAPJTqLBwICB9A=' +R7oStX6AVc66qjoj9/dgAPJTqLBwICB9A=" # spell-checker:enable class Test_pyopenssl_signer(unittest.TestCase): - def test_empty(self): with self.assertRaises((KeyError, ValueError)): pyopenssl_signer.sign(None, {}, {}) def test_proper_options_good_key_no_pass(self): - signer = { - 'key_file_format': 'pkcs12', - 'key_data': b64decode(TESTCERT1) - } + signer = {"key_file_format": "pkcs12", "key_data": b64decode(TESTCERT1)} signature = { - 'type': 'bare', - 'encoding': 'binary', - 'hash_alg': 'sha256', - + "type": "bare", + "encoding": "binary", + "hash_alg": "sha256", } data = "Data for testing signer".encode() pyopenssl_signer.sign(data, signature, signer) def test_proper_options_good_key_pass(self): - signer = { - 'key_file_format': 'pkcs12', - 'key_data': b64decode(TESTCERT2), - 'key_file_password': 'password' - } + signer = {"key_file_format": "pkcs12", "key_data": b64decode(TESTCERT2), "key_file_password": "password"} signature = { - 'type': 'bare', - 'encoding': 'binary', - 'hash_alg': 'sha256', - + "type": "bare", + "encoding": "binary", + "hash_alg": "sha256", } data = "Data for testing signer".encode() pyopenssl_signer.sign(data, signature, signer) @@ -206,15 +196,11 @@ def test_proper_options_good_key_pass(self): def test_proper_options_bad_key(self): # we're going to assume that we're with self.assertRaises(ValueError): - signer = { - 'key_file_format': 'pkcs12', - 'key_data': "hello there" - } + signer = {"key_file_format": "pkcs12", "key_data": "hello there"} signature = { - 'type': 'bare', - 'encoding': 'binary', - 'hash_alg': 'sha256', - + "type": "bare", + "encoding": "binary", + "hash_alg": "sha256", } pyopenssl_signer.sign(None, signature, signer) @@ -222,15 +208,12 @@ def test_invalid_type(self): # we're going to assume that we're with self.assertRaises(ValueError): signature = { - 'type': 'bad_type', + "type": "bad_type", } pyopenssl_signer.sign(None, signature, {}) def test_invalid_type_options(self): # we're going to assume that we're with self.assertRaises(ValueError): - signature = { - 'type': 'bare', - 'type_options': 'not allowed' - } + signature = {"type": "bare", "type_options": "not allowed"} pyopenssl_signer.sign(None, signature, {}) diff --git a/tests.unit/test_repo_resolver.py b/tests.unit/test_repo_resolver.py index 9488c51e..3a9b1c02 100644 --- a/tests.unit/test_repo_resolver.py +++ b/tests.unit/test_repo_resolver.py @@ -16,45 +16,33 @@ from edk2toolext.environment import repo_resolver from edk2toollib.utility_functions import RemoveTree -branch_dependency = { - "Url": "https://github.com/microsoft/mu", - "Path": "test_repo", - "Branch": "master" -} +branch_dependency = {"Url": "https://github.com/microsoft/mu", "Path": "test_repo", "Branch": "master"} -sub_branch_dependency = { - "Url": "https://github.com/microsoft/mu", - "Path": "test_repo", - "Branch": "gh-pages" -} +sub_branch_dependency = {"Url": "https://github.com/microsoft/mu", "Path": "test_repo", "Branch": "gh-pages"} commit_dependency = { "Url": "https://github.com/microsoft/mu", "Path": "test_repo", - "Commit": "b1e35a5d2bf05fb7f58f5b641a702c70d6b32a98" + "Commit": "b1e35a5d2bf05fb7f58f5b641a702c70d6b32a98", } -short_commit_dependency = 
{ - "Url": "https://github.com/microsoft/mu", - "Path": "test_repo", - "Commit": "b1e35a5" -} +short_commit_dependency = {"Url": "https://github.com/microsoft/mu", "Path": "test_repo", "Commit": "b1e35a5"} commit_later_dependency = { "Url": "https://github.com/microsoft/mu", "Path": "test_repo", - "Commit": "e28910950c52256eb620e35d111945cdf5d002d1" + "Commit": "e28910950c52256eb620e35d111945cdf5d002d1", } microsoft_commit_dependency = { "Url": "https://github.com/Microsoft/microsoft.github.io", "Path": "test_repo", - "Commit": "e9153e69c82068b45609359f86554a93569d76f1" + "Commit": "e9153e69c82068b45609359f86554a93569d76f1", } microsoft_branch_dependency = { "Url": "https://github.com/Microsoft/microsoft.github.io", "Path": "test_repo", - "Commit": "e9153e69c82068b45609359f86554a93569d76f1" + "Commit": "e9153e69c82068b45609359f86554a93569d76f1", } test_dir = None @@ -96,7 +84,7 @@ def setUp(self): @classmethod def setUpClass(cls): - logger = logging.getLogger('') + logger = logging.getLogger("") logger.setLevel(logging.DEBUG) logger.addHandler(logging.NullHandler()) unittest.installHandler() @@ -111,8 +99,8 @@ def test_clone_branch_repo(self): repo_resolver.resolve(test_dir, branch_dependency) folder_path = os.path.join(test_dir, branch_dependency["Path"]) details = repo_resolver.repo_details(folder_path) - self.assertEqual(details["Url"], branch_dependency['Url']) - self.assertEqual(details["Branch"], branch_dependency['Branch']) + self.assertEqual(details["Url"], branch_dependency["Url"]) + self.assertEqual(details["Branch"], branch_dependency["Branch"]) def test_clone_branch_existing_folder(self): # Resolve when folder exists but is empty @@ -120,16 +108,15 @@ def test_clone_branch_existing_folder(self): os.makedirs(folder_path) repo_resolver.resolve(test_dir, branch_dependency) details = repo_resolver.repo_details(folder_path) - self.assertEqual(details["Url"], branch_dependency['Url']) - self.assertEqual(details["Branch"], branch_dependency['Branch']) + self.assertEqual(details["Url"], branch_dependency["Url"]) + self.assertEqual(details["Branch"], branch_dependency["Branch"]) # don't create a git repo, create the folder, add a file, try to clone in the folder, it should throw an exception def test_wont_delete_files(self): folder_path = os.path.join(test_dir, commit_dependency["Path"]) os.makedirs(folder_path) file_path = os.path.join(folder_path, "test.txt") - file_path = os.path.join( - test_dir, branch_dependency["Path"], "test.txt") + file_path = os.path.join(test_dir, branch_dependency["Path"], "test.txt") out_file = open(file_path, "w+") out_file.write("Make sure we don't delete this") out_file.close() @@ -150,10 +137,10 @@ def test_will_delete_files(self): self.assertTrue(os.path.exists(file_path)) try: repo_resolver.resolve(test_dir, commit_dependency, force=True) - except: + except Exception: self.fail("We shouldn't fail when we are forcing") details = repo_resolver.repo_details(folder_path) - self.assertEqual(details["Url"], commit_dependency['Url']) + self.assertEqual(details["Url"], commit_dependency["Url"]) # don't create a git repo, create the folder, add a file, try to clone in the folder, will ignore it. 
def test_will_ignore_files(self): @@ -197,7 +184,7 @@ def test_will_delete_dirty_repo(self): try: repo_resolver.resolve(test_dir, commit_later_dependency, force=True) - except: + except Exception: self.fail("We shouldn't fail when we are forcing") # check to make sure we can clone a commit correctly @@ -207,8 +194,8 @@ def test_clone_commit_repo(self): folder_path = os.path.join(test_dir, commit_dependency["Path"]) details = repo_resolver.repo_details(folder_path) - self.assertEqual(details["Url"], commit_dependency['Url']) - self.assertEqual(details["Head"]["HexSha"], commit_dependency['Commit']) + self.assertEqual(details["Url"], commit_dependency["Url"]) + self.assertEqual(details["Head"]["HexSha"], commit_dependency["Commit"]) # check to make sure we support short commits def test_clone_short_commit_repo(self): @@ -216,8 +203,8 @@ def test_clone_short_commit_repo(self): folder_path = os.path.join(test_dir, short_commit_dependency["Path"]) details = repo_resolver.repo_details(folder_path) - self.assertEqual(details["Url"], short_commit_dependency['Url']) - self.assertEqual(details["Head"]["HexShaShort"], short_commit_dependency['Commit']) + self.assertEqual(details["Url"], short_commit_dependency["Url"]) + self.assertEqual(details["Head"]["HexShaShort"], short_commit_dependency["Commit"]) # Resolve again, making sure we don't fail if repo already exists. repo_resolver.resolve(test_dir, short_commit_dependency) @@ -228,15 +215,15 @@ def test_fail_update(self): folder_path = os.path.join(test_dir, commit_dependency["Path"]) details = repo_resolver.repo_details(folder_path) - self.assertEqual(details["Url"], commit_dependency['Url']) - self.assertEqual(details["Head"]["HexSha"], commit_dependency['Commit']) + self.assertEqual(details["Url"], commit_dependency["Url"]) + self.assertEqual(details["Head"]["HexSha"], commit_dependency["Commit"]) # first we checkout with self.assertRaises(Exception): repo_resolver.resolve(test_dir, commit_later_dependency) details = repo_resolver.repo_details(folder_path) - self.assertEqual(details["Url"], commit_dependency['Url']) - self.assertEqual(details["Head"]["HexSha"], commit_dependency['Commit']) + self.assertEqual(details["Url"], commit_dependency["Url"]) + self.assertEqual(details["Head"]["HexSha"], commit_dependency["Commit"]) def test_does_update(self): # create an empty directory- and set that as the workspace @@ -246,19 +233,18 @@ def test_does_update(self): logging.info(f"Getting details at {folder_path}") details = repo_resolver.repo_details(folder_path) - self.assertEqual(details["Url"], commit_dependency['Url']) - self.assertEqual(details["Head"]["HexSha"], commit_dependency['Commit']) + self.assertEqual(details["Url"], commit_dependency["Url"]) + self.assertEqual(details["Head"]["HexSha"], commit_dependency["Commit"]) # next we checkout and go to the later commit try: - repo_resolver.resolve( - test_dir, commit_later_dependency, update_ok=True) - except: + repo_resolver.resolve(test_dir, commit_later_dependency, update_ok=True) + except Exception: self.fail("We are not supposed to throw an exception") details = repo_resolver.repo_details(folder_path) logging.info(f"Checking {folder_path} for current git commit") - self.assertEqual(details["Url"], commit_later_dependency['Url']) - self.assertEqual(details["Head"]["HexSha"], commit_later_dependency['Commit']) + self.assertEqual(details["Url"], commit_later_dependency["Url"]) + self.assertEqual(details["Head"]["HexSha"], commit_later_dependency["Commit"]) def test_cant_switch_urls(self): # 
create an empty directory- and set that as the workspace @@ -267,13 +253,13 @@ def test_cant_switch_urls(self): details = repo_resolver.repo_details(folder_path) - self.assertEqual(details["Url"], branch_dependency['Url']) + self.assertEqual(details["Url"], branch_dependency["Url"]) # first we checkout with self.assertRaises(Exception): repo_resolver.resolve(test_dir, microsoft_branch_dependency) details = repo_resolver.repo_details(folder_path) - self.assertEqual(details["Url"], branch_dependency['Url']) + self.assertEqual(details["Url"], branch_dependency["Url"]) def test_ignore(self): # create an empty directory- and set that as the workspace @@ -282,14 +268,13 @@ def test_ignore(self): details = repo_resolver.repo_details(folder_path) - self.assertEqual(details["Url"], branch_dependency['Url']) + self.assertEqual(details["Url"], branch_dependency["Url"]) # first we checkout - repo_resolver.resolve( - test_dir, microsoft_branch_dependency, ignore=True) + repo_resolver.resolve(test_dir, microsoft_branch_dependency, ignore=True) details = repo_resolver.repo_details(folder_path) - self.assertEqual(details["Url"], branch_dependency['Url']) + self.assertEqual(details["Url"], branch_dependency["Url"]) def test_will_switch_urls(self): # create an empty directory- and set that as the workspace @@ -299,28 +284,26 @@ def test_will_switch_urls(self): details = repo_resolver.repo_details(folder_path) - self.assertEqual(details["Url"], branch_dependency['Url']) + self.assertEqual(details["Url"], branch_dependency["Url"]) # first we checkout try: - repo_resolver.resolve( - test_dir, microsoft_branch_dependency, force=True) - except: + repo_resolver.resolve(test_dir, microsoft_branch_dependency, force=True) + except Exception: self.fail("We shouldn't fail when we are forcing") details = repo_resolver.repo_details(folder_path) - self.assertEqual(details["Url"], microsoft_branch_dependency['Url']) + self.assertEqual(details["Url"], microsoft_branch_dependency["Url"]) def test_will_switch_branches(self): repo_resolver.resolve(test_dir, branch_dependency) folder_path = os.path.join(test_dir, branch_dependency["Path"]) repo_resolver.resolve(test_dir, sub_branch_dependency, force=True) details = repo_resolver.repo_details(folder_path) - self.assertEqual(details["Url"], branch_dependency['Url']) - self.assertEqual(details["Branch"], sub_branch_dependency['Branch']) + self.assertEqual(details["Url"], branch_dependency["Url"]) + self.assertEqual(details["Branch"], sub_branch_dependency["Branch"]) def test_submodule(self): - - class Submodule(): + class Submodule: def __init__(self, path, recursive): self.path = path self.recursive = recursive @@ -332,7 +315,7 @@ def __init__(self, path, recursive): os.mkdir(os.path.join(temp_folder, "Build")) tmp_file = os.path.join(temp_folder, "Build", "tempfile.txt") - with open(tmp_file, 'x') as f: + with open(tmp_file, "x") as f: f.write("Temp edit") self.assertTrue(os.path.isfile(tmp_file)) @@ -352,17 +335,17 @@ def test_resolve_all(tmpdir: pathlib.Path): { "Url": "https://github.com/octocat/Spoon-Knife", "Path": "repo1", - "Commit": "a30c19e3f13765a3b48829788bc1cb8b4e95cee4" + "Commit": "a30c19e3f13765a3b48829788bc1cb8b4e95cee4", }, { "Url": "https://github.com/octocat/Spoon-Knife", "Path": "repo2", - "Commit": "bb4cc8d3b2e14b3af5df699876dd4ff3acd00b7f" + "Commit": "bb4cc8d3b2e14b3af5df699876dd4ff3acd00b7f", }, { "Url": "https://github.com/octocat/Spoon-Knife", "Path": "repo3", - "Commit": "d0dd1f61b33d64e29d8bc1372a94ef6a2fee76a9" + "Commit": 
"d0dd1f61b33d64e29d8bc1372a94ef6a2fee76a9", }, ] diff --git a/tests.unit/test_rust_environment.py b/tests.unit/test_rust_environment.py index 25e7691c..d03427cf 100644 --- a/tests.unit/test_rust_environment.py +++ b/tests.unit/test_rust_environment.py @@ -307,9 +307,7 @@ def test_verify_rust_src_component_is_installed( mock_run_cmd: MagicMock, ): mock_run_cmd.side_effect = self._mock_run_cmd - mock_get_workspace_toolchain_version.side_effect = ( - self._mock_get_workspace_toolchain_version - ) + mock_get_workspace_toolchain_version.side_effect = self._mock_get_workspace_toolchain_version result = _verify_rust_src_component_is_installed() self.assertTrue(result) @@ -333,9 +331,7 @@ def test_get_required_tool_versions(self): assert tool_versions == {} @patch("edk2toolext.environment.rust.RunCmd") - def test_verify_workspace_rust_toolchain_is_installed( - self, mock_run_cmd: MagicMock - ): + def test_verify_workspace_rust_toolchain_is_installed(self, mock_run_cmd: MagicMock): mock_run_cmd.side_effect = self._mock_run_cmd # Test when the toolchain is not found @@ -344,9 +340,7 @@ def test_verify_workspace_rust_toolchain_is_installed( assert toolchain_info.toolchain is None # Test when the toolchain is found and stable - with patch( - "builtins.open", mock_open(read_data='[toolchain]\nchannel = "stable"') - ): + with patch("builtins.open", mock_open(read_data='[toolchain]\nchannel = "stable"')): toolchain_info = verify_workspace_rust_toolchain_is_installed() assert not toolchain_info.error assert toolchain_info.toolchain == "stable" diff --git a/tests.unit/test_secureboot_audit.py b/tests.unit/test_secureboot_audit.py index c9ae5922..18209e89 100644 --- a/tests.unit/test_secureboot_audit.py +++ b/tests.unit/test_secureboot_audit.py @@ -24,13 +24,12 @@ TEST_DATA_PARENT_DIRECTORY = os.path.join( os.path.dirname(os.path.abspath(__file__)), # Test folder is relative to the test "testdata", - "secureboot_audit" + "secureboot_audit", ) TEST_HASH = "80B4D96931BF0D02FD91A61E19D14F1DA452E66DB2408CA8604D411F92659F0A" class TestSecureBootReport(unittest.TestCase): - def test_parse_dbx(self): """Test that we can parse the dbx file""" dbx_file = os.path.join(TEST_DATA_PARENT_DIRECTORY, "dbx.bin") @@ -81,7 +80,7 @@ def test_filter_list_by_arch(self): for rev in filtered_revocations: self.assertEqual(filtered_revocations[rev]["arch"], "arm64") - def test_convert_uefi_org_revocation_file_to_dict(self): + def test_convert_uefi_org_revocation_file_to_dict1(self): """Test that we can convert the uefi.org revocation file to a dict""" xlsx_file = os.path.join(TEST_DATA_PARENT_DIRECTORY, "dbx_info_2020_2023_uefiorg_v3.xlsx") @@ -94,7 +93,7 @@ def test_convert_uefi_org_revocation_file_to_dict(self): self.assertEqual(revocations, expected_revocations) - def test_convert_uefi_org_revocation_file_to_dict(self): + def test_convert_uefi_org_revocation_file_to_dict2(self): """Test that we can convert the uefi.org revocation file to a dict""" csv_file = os.path.join(TEST_DATA_PARENT_DIRECTORY, "dbx_info_2020_2023_uefiorg_v3.csv") @@ -118,7 +117,7 @@ def test_write_xlsx_file(self): dbx_report = json.load(dbx_fs) with tempfile.TemporaryDirectory() as td: - test_file = os.path.join(td, 'test.xlsx') + test_file = os.path.join(td, "test.xlsx") write_xlsx_file(dbx_report, test_file) self.assertEqual(os.path.exists(test_file), True) @@ -131,7 +130,7 @@ def test_write_json_file(self): dbx_report = json.load(dbx_fs) with tempfile.TemporaryDirectory() as td: - test_file = os.path.join(td, 'test.json') + test_file = os.path.join(td, 
"test.json") write_json_file(dbx_report, test_file) self.assertEqual(os.path.exists(test_file), True) @@ -142,5 +141,5 @@ def test_write_json_file(self): self.assertEqual(test_report, dbx_report) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests.unit/test_self_describing_environment.py b/tests.unit/test_self_describing_environment.py index 9ca754ef..57877c6a 100644 --- a/tests.unit/test_self_describing_environment.py +++ b/tests.unit/test_self_describing_environment.py @@ -16,7 +16,6 @@ class Testself_describing_environment(unittest.TestCase): - def setUp(self): self.workspace = pathlib.Path(tempfile.mkdtemp()).resolve() # we need to make sure to tear down the version aggregator and the SDE @@ -28,24 +27,48 @@ def test_null_init(self): self.assertIsNotNone(sde) def test_unique_scopes_required(self): - ''' make sure the sde will throw exception if duplicate scopes are specified ''' + """make sure the sde will throw exception if duplicate scopes are specified""" scopes = ("corebuild", "corebuild", "testing", "CoreBuild") with self.assertRaises(ValueError): self_describing_environment.self_describing_environment(self.workspace, scopes) def test_collect_path_env(self): - ''' makes sure the SDE can collect path env ''' + """makes sure the SDE can collect path env""" scopes = ("global",) tree = uefi_tree(self.workspace, create_platform=False) - tree.create_path_env("testing_corebuild", var_name="hey", flags=["set_path", ]) - tree.create_path_env("testing_corebuild2", var_name="hey", flags=["set_pypath", ]) - tree.create_path_env("testing_corebuild3", var_name="hey", flags=["set_build_var", ]) - tree.create_path_env("testing_corebuild4", var_name="hey", flags=["set_shell_var", ]) + tree.create_path_env( + "testing_corebuild", + var_name="hey", + flags=[ + "set_path", + ], + ) + tree.create_path_env( + "testing_corebuild2", + var_name="hey", + flags=[ + "set_pypath", + ], + ) + tree.create_path_env( + "testing_corebuild3", + var_name="hey", + flags=[ + "set_build_var", + ], + ) + tree.create_path_env( + "testing_corebuild4", + var_name="hey", + flags=[ + "set_shell_var", + ], + ) build_env, shell_env = self_describing_environment.BootstrapEnvironment(self.workspace, scopes) self.assertEqual(len(build_env.paths), 4) def test_collect_path_env_scoped(self): - ''' makes sure the SDE can collect path env with the right scopes ''' + """makes sure the SDE can collect path env with the right scopes""" scopes = ("global", "testing") tree = uefi_tree(self.workspace, create_platform=False) tree.create_path_env("testing_corebuild", scope="testing") @@ -54,44 +77,48 @@ def test_collect_path_env_scoped(self): self.assertEqual(len(build_env.paths), 1) def test_override_path_env(self): - ''' checks the SDE descriptor override system ''' + """checks the SDE descriptor override system""" custom_scope = "global" scopes = (custom_scope,) tree = uefi_tree(self.workspace, create_platform=False) tree.create_path_env("testing_corebuild", var_name="hey", dir_path="test1", scope=custom_scope) - tree.create_path_env("testing_corebuild2", var_name="jokes", scope=custom_scope, - extra_data={"override_id": "testing_corebuild"}) + tree.create_path_env( + "testing_corebuild2", var_name="jokes", scope=custom_scope, extra_data={"override_id": "testing_corebuild"} + ) build_env, shell_env = self_describing_environment.BootstrapEnvironment(self.workspace, scopes) self.assertEqual(len(build_env.paths), 1) def test_multiple_override_path_env(self): - ''' checks the SDE descriptor override 
-        ''' checks the SDE descriptor override system will throw an error on multiple overrides'''
+        """checks the SDE descriptor override system will throw an error on multiple overrides"""
         custom_scope = "global"
         scopes = (custom_scope,)
         tree = uefi_tree(self.workspace, create_platform=False)
         tree.create_path_env("testing_corebuild", var_name="hey", dir_path="test1", scope=custom_scope)
-        tree.create_path_env("testing_corebuild2", var_name="jokes", scope=custom_scope,
-                             extra_data={"override_id": "testing_corebuild"})
-        tree.create_path_env("testing_corebuild3", var_name="laughs", scope=custom_scope,
-                             extra_data={"override_id": "testing_corebuild"})
+        tree.create_path_env(
+            "testing_corebuild2", var_name="jokes", scope=custom_scope, extra_data={"override_id": "testing_corebuild"}
+        )
+        tree.create_path_env(
+            "testing_corebuild3", var_name="laughs", scope=custom_scope, extra_data={"override_id": "testing_corebuild"}
+        )
         # we should get an exception because we have two overrides
         with self.assertRaises(RuntimeError):
             build_env, shell_env = self_describing_environment.BootstrapEnvironment(self.workspace, scopes)
             self.fail()
 
     def test_override_path_env_swapped_order(self):
-        ''' checks the SDE descriptor override system with reversed paths so they are discovered in opposite order'''
+        """checks the SDE descriptor override system with reversed paths so they are discovered in opposite order"""
         custom_scope = "global"
         scopes = (custom_scope,)
         tree = uefi_tree(self.workspace, create_platform=False)
         tree.create_path_env("testing_corebuild", var_name="hey", scope=custom_scope)
-        tree.create_path_env(var_name="jokes", dir_path="test1", scope=custom_scope,
-                             extra_data={"override_id": "testing_corebuild"})
+        tree.create_path_env(
+            var_name="jokes", dir_path="test1", scope=custom_scope, extra_data={"override_id": "testing_corebuild"}
+        )
         build_env, shell_env = self_describing_environment.BootstrapEnvironment(self.workspace, scopes)
         self.assertEqual(len(build_env.paths), 1)
 
     def test_duplicate_id_path_env(self):
-        ''' check that the SDE will throw an exception if path_env have duplicate id's '''
+        """check that the SDE will throw an exception if path_env have duplicate id's"""
         custom_scope = "global"
         scopes = (custom_scope,)
         tree = uefi_tree(self.workspace, create_platform=False)
@@ -102,9 +129,9 @@ def test_duplicate_id_path_env(self):
         self.fail()
 
     def test_duplicate_id_path_env_2(self):
-        ''' check that the SDE will throw an exception if path env have duplicate id's.
+        """check that the SDE will throw an exception if path env have duplicate id's.
         Since id is not a required member of path env make sure it can handle case where one of the path
-        env files doesn't define an id'''
+        env files doesn't define an id"""
         custom_scope = "global"
         scopes = (custom_scope,)
         tree = uefi_tree(self.workspace, create_platform=False)
@@ -133,8 +160,8 @@ def test_git_worktree(self):
         self.assertEqual(len(repo.branches), 1)
         repo.git.worktree("add", "my_worktree")
 
-        self_describing_environment.BootstrapEnvironment(self.workspace, ('global',))
+        self_describing_environment.BootstrapEnvironment(self.workspace, ("global",))
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
diff --git a/tests.unit/test_shell_environment.py b/tests.unit/test_shell_environment.py
index 17a1cc38..e2f9b40a 100644
--- a/tests.unit/test_shell_environment.py
+++ b/tests.unit/test_shell_environment.py
@@ -14,7 +14,6 @@
 
 
 class TestShellEnvironmentAssumptions(unittest.TestCase):
-
     def test_shell_should_be_a_singleton(self):
         shell_a = SE.ShellEnvironment()
         shell_b = SE.ShellEnvironment()
@@ -26,46 +25,46 @@ def test_shell_tests_need_to_be_able_to_clear_singleton(self):
 
     def test_shell_should_always_have_an_initial_checkpoint(self):
         shell_env = SE.ShellEnvironment()
-        self.assertTrue((len(shell_env.checkpoints) > 0),
-                        "a new instance of ShellEnvironment should have at least one checkpoint")
+        self.assertTrue(
+            (len(shell_env.checkpoints) > 0), "a new instance of ShellEnvironment should have at least one checkpoint"
+        )
 
 
 class TestBasicEnvironmentManipulation(unittest.TestCase):
-
     def test_can_set_os_vars(self):
         shell_env = SE.ShellEnvironment()
 
         # Remove the test var, if it exists.
         os.environ.pop("SE-TEST-VAR-1", None)
 
         # Set a new value and get it directly from the environment.
-        new_value = 'Dummy'
-        shell_env.set_shell_var('SE-TEST-VAR-1', new_value)
-        self.assertEqual(os.environ['SE-TEST-VAR-1'], new_value)
+        new_value = "Dummy"
+        shell_env.set_shell_var("SE-TEST-VAR-1", new_value)
+        self.assertEqual(os.environ["SE-TEST-VAR-1"], new_value)
 
         with self.assertRaises(ValueError):
-            shell_env.set_shell_var('SE-TEST-VAR-FAIL', None)
+            shell_env.set_shell_var("SE-TEST-VAR-FAIL", None)
 
     def test_can_get_os_vars(self):
         shell_env = SE.ShellEnvironment()
 
-        new_value = 'Dummy2'
-        shell_env.set_shell_var('SE-TEST-VAR-2', new_value)
-        self.assertEqual(shell_env.get_shell_var('SE-TEST-VAR-2'), new_value)
+        new_value = "Dummy2"
+        shell_env.set_shell_var("SE-TEST-VAR-2", new_value)
+        self.assertEqual(shell_env.get_shell_var("SE-TEST-VAR-2"), new_value)
 
     def test_set_path_string(self):
         shell_env = SE.ShellEnvironment()
 
         # Test pass 1.
-        testpath_elems = ['MY_PATH']
+        testpath_elems = ["MY_PATH"]
         testpath_string = os.pathsep.join(testpath_elems)
         shell_env.set_path(testpath_string)
-        self.assertEqual(os.environ['PATH'], testpath_string, "the final string should be correct")
+        self.assertEqual(os.environ["PATH"], testpath_string, "the final string should be correct")
         for elem in testpath_elems:
             self.assertIn(elem, shell_env.active_path, "the active path should contain all elements")
 
         # Test pass 2.
-        testpath_elems = ['/bin/bash', 'new_path', '/root']
+        testpath_elems = ["/bin/bash", "new_path", "/root"]
         testpath_string = os.pathsep.join(testpath_elems)
         shell_env.set_path(testpath_string)
-        self.assertEqual(os.environ['PATH'], testpath_string, "the final string should be correct")
+        self.assertEqual(os.environ["PATH"], testpath_string, "the final string should be correct")
         for elem in testpath_elems:
             self.assertIn(elem, shell_env.active_path, "the active path should contain all elements")
 
@@ -73,18 +72,18 @@ def test_set_path_elements(self):
         shell_env = SE.ShellEnvironment()
 
         # Test pass 1.
-        testpath_elems = ['MY_PATH']
+        testpath_elems = ["MY_PATH"]
         testpath_string = os.pathsep.join(testpath_elems)
         shell_env.set_path(testpath_elems)
-        self.assertEqual(os.environ['PATH'], testpath_string, "the final string should be correct")
+        self.assertEqual(os.environ["PATH"], testpath_string, "the final string should be correct")
         for elem in testpath_elems:
             self.assertIn(elem, shell_env.active_path, "the active path should contain all elements")
 
         # Test pass 2.
-        testpath_elems = ['/bin/bash', 'new_path', '/root']
+        testpath_elems = ["/bin/bash", "new_path", "/root"]
         testpath_string = os.pathsep.join(testpath_elems)
         shell_env.set_path(testpath_elems)
-        self.assertEqual(os.environ['PATH'], testpath_string, "the final string should be correct")
+        self.assertEqual(os.environ["PATH"], testpath_string, "the final string should be correct")
         for elem in testpath_elems:
             self.assertIn(elem, shell_env.active_path, "the active path should contain all elements")
 
@@ -92,19 +91,19 @@ def test_set_pypath_string(self):
         shell_env = SE.ShellEnvironment()
 
         # Test pass 1.
-        testpath_elems = ['MY_PATH']
+        testpath_elems = ["MY_PATH"]
         testpath_string = os.pathsep.join(testpath_elems)
         shell_env.set_pypath(testpath_string)
-        self.assertEqual(os.environ['PYTHONPATH'], testpath_string, "the final string should be correct")
+        self.assertEqual(os.environ["PYTHONPATH"], testpath_string, "the final string should be correct")
         for elem in testpath_elems:
             self.assertIn(elem, shell_env.active_pypath, "the active path should contain all elements")
             self.assertIn(elem, sys.path, "the sys path should contain all elements")
 
         # Test pass 2.
-        testpath_elems = ['/bin/bash', 'new_path', '/root']
+        testpath_elems = ["/bin/bash", "new_path", "/root"]
         testpath_string = os.pathsep.join(testpath_elems)
         shell_env.set_pypath(testpath_string)
-        self.assertEqual(os.environ['PYTHONPATH'], testpath_string, "the final string should be correct")
+        self.assertEqual(os.environ["PYTHONPATH"], testpath_string, "the final string should be correct")
         for elem in testpath_elems:
             self.assertIn(elem, shell_env.active_pypath, "the active path should contain all elements")
             self.assertIn(elem, sys.path, "the sys path should contain all elements")
 
@@ -113,19 +112,19 @@ def test_set_pypath_elements(self):
         shell_env = SE.ShellEnvironment()
 
         # Test pass 1.
-        testpath_elems = ['MY_PATH']
+        testpath_elems = ["MY_PATH"]
         testpath_string = os.pathsep.join(testpath_elems)
         shell_env.set_pypath(testpath_elems)
-        self.assertEqual(os.environ['PYTHONPATH'], testpath_string, "the final string should be correct")
+        self.assertEqual(os.environ["PYTHONPATH"], testpath_string, "the final string should be correct")
         for elem in testpath_elems:
             self.assertIn(elem, shell_env.active_pypath, "the active path should contain all elements")
             self.assertIn(elem, sys.path, "the sys path should contain all elements")
 
         # Test pass 2.
-        testpath_elems = ['/bin/bash', 'new_path', '/root']
+        testpath_elems = ["/bin/bash", "new_path", "/root"]
         testpath_string = os.pathsep.join(testpath_elems)
         shell_env.set_pypath(testpath_elems)
-        self.assertEqual(os.environ['PYTHONPATH'], testpath_string, "the final string should be correct")
+        self.assertEqual(os.environ["PYTHONPATH"], testpath_string, "the final string should be correct")
         for elem in testpath_elems:
             self.assertIn(elem, shell_env.active_pypath, "the active path should contain all elements")
             self.assertIn(elem, sys.path, "the sys path should contain all elements")
 
@@ -134,15 +133,15 @@ def test_insert_append_remove_replace_path(self):
         shell_env = SE.ShellEnvironment()
 
         # Start with a known PATH
-        mid_elem = 'MIDDLEPATH'
+        mid_elem = "MIDDLEPATH"
         shell_env.set_path(mid_elem)
         self.assertEqual(1, len(shell_env.active_path))
         self.assertIn(mid_elem, shell_env.active_path)
         # Add an element to the end.
-        end_elem = 'ENDPATH'
+        end_elem = "ENDPATH"
         shell_env.append_path(end_elem)
         # Add an element to the beginning.
-        start_elem = 'STARTPATH'
+        start_elem = "STARTPATH"
         shell_env.insert_path(start_elem)
 
         # Test for the realities.
@@ -168,7 +167,7 @@ def test_insert_append_remove_replace_path(self):
         self.assertEqual(shell_env.active_path[2], end_elem)
 
         # Test replacing an element on the path
-        new_mid_elem = 'NEWMIDDLEPATH'
+        new_mid_elem = "NEWMIDDLEPATH"
         shell_env.replace_path_element(mid_elem, new_mid_elem)
         self.assertEqual(shell_env.active_path[1], new_mid_elem)
 
@@ -193,15 +192,15 @@ def test_insert_append_remove_replace_pypath(self):
         shell_env = SE.ShellEnvironment()
 
         # Start with a known PATH
-        mid_elem = 'MIDDLEPATH'
+        mid_elem = "MIDDLEPATH"
         shell_env.set_pypath(mid_elem)
         self.assertEqual(1, len(shell_env.active_pypath))
         self.assertIn(mid_elem, shell_env.active_pypath)
         # Add an element to the end.
-        end_elem = 'ENDPATH'
+        end_elem = "ENDPATH"
         shell_env.append_pypath(end_elem)
         # Add an element to the beginning.
-        start_elem = 'STARTPATH'
+        start_elem = "STARTPATH"
         shell_env.insert_pypath(start_elem)
 
         # Test for the realities.
@@ -214,7 +213,7 @@ def test_insert_append_remove_replace_pypath(self):
             self.assertIn(elem, sys.path)
 
         # Test replacing an element on the pypath
-        new_mid_elem = 'NEWMIDDLEPATH'
+        new_mid_elem = "NEWMIDDLEPATH"
         shell_env.replace_pypath_element(mid_elem, new_mid_elem)
         self.assertEqual(shell_env.active_pypath[1], new_mid_elem)
 
@@ -230,8 +229,8 @@ def test_insert_append_remove_replace_pypath(self):
     def test_can_set_and_get_build_vars(self):
         shell_env = SE.ShellEnvironment()
 
-        var_name = 'SE-TEST-VAR-3'
-        var_data = 'Dummy3'
+        var_name = "SE-TEST-VAR-3"
+        var_data = "Dummy3"
         # Make sure it doesn't exist beforehand.
         self.assertIs(shell_env.get_build_var(var_name), None, "test var should not exist before creation")
         shell_env.set_build_var(var_name, var_data)
@@ -240,9 +239,9 @@ def test_can_set_and_get_build_vars(self):
     def test_set_build_vars_should_default_overrideable(self):
         shell_env = SE.ShellEnvironment()
 
-        var_name = 'SE_TEST_VAR_4'
-        var_data = 'NewData1'
-        var_data2 = 'NewerData1'
+        var_name = "SE_TEST_VAR_4"
+        var_data = "NewData1"
+        var_data2 = "NewerData1"
 
         self.assertIs(shell_env.get_build_var(var_name), None, "test var should not exist before creation")
         shell_env.set_build_var(var_name, var_data)
@@ -252,7 +251,6 @@ def test_set_build_vars_should_default_overrideable(self):
 
 
 class TestShellEnvironmenCheckpoints(unittest.TestCase):
-
     def setUp(self):
         # Grab the singleton and restore the initial checkpoint.
        shell_env = SE.ShellEnvironment()
@@ -264,7 +262,7 @@ def test_restore_initial_checkpoint_should_erase_changes(self):
         shell_env = SE.ShellEnvironment()
 
         # Check to make sure the change doesn't exist.
-        test_path_change = '/SE/TEST/PATH/1'
+        test_path_change = "/SE/TEST/PATH/1"
         self.assertNotIn(test_path_change, shell_env.active_path, "starting condition should not have the test change")
 
         # Make the change and verify.
@@ -272,20 +270,20 @@ def test_restore_initial_checkpoint_should_erase_changes(self):
         self.assertIn(test_path_change, shell_env.active_path)
 
         # Add a shell_var while we're at it.
-        self.assertEqual(shell_env.get_shell_var('i_should_not_exist'), None)
-        shell_env.set_shell_var('i_should_not_exist', 'a_value')
-        self.assertEqual(shell_env.get_shell_var('i_should_not_exist'), 'a_value')
+        self.assertEqual(shell_env.get_shell_var("i_should_not_exist"), None)
+        shell_env.set_shell_var("i_should_not_exist", "a_value")
+        self.assertEqual(shell_env.get_shell_var("i_should_not_exist"), "a_value")
 
         # Restore initial checkpoint and verify change is gone.
         shell_env.restore_initial_checkpoint()
         self.assertNotIn(test_path_change, shell_env.active_path, "restoring checkpoint should remove test change")
-        self.assertEqual(shell_env.get_shell_var('i_should_not_exist'), None)
+        self.assertEqual(shell_env.get_shell_var("i_should_not_exist"), None)
 
     def test_checkpoint_indices_should_be_unique(self):
         shell_env = SE.ShellEnvironment()
-        shell_env.append_path('/SE/TEST/PATH/1')
+        shell_env.append_path("/SE/TEST/PATH/1")
         check_point1 = shell_env.checkpoint()
-        shell_env.append_path('/SE/TEST/PATH/2')
+        shell_env.append_path("/SE/TEST/PATH/2")
         check_point2 = shell_env.checkpoint()
 
         self.assertNotEqual(check_point1, SE.ShellEnvironment.INITIAL_CHECKPOINT)
@@ -296,7 +294,7 @@ def test_restore_new_checkpoint_should_contain_new_changes(self):
         shell_env = SE.ShellEnvironment()
 
         # Check to make sure the change doesn't exist.
-        test_path_change = '/SE/TEST/PATH/3'
+        test_path_change = "/SE/TEST/PATH/3"
         self.assertNotIn(test_path_change, shell_env.active_path, "starting condition should not have the test change")
 
         # Make the change and checkpoint.
@@ -306,8 +304,9 @@ def test_restore_new_checkpoint_should_contain_new_changes(self):
 
         # Restore initial checkpoint and verify change is gone.
         shell_env.restore_initial_checkpoint()
-        self.assertNotIn(test_path_change, shell_env.active_path,
-                         "restoring initial checkpoint should remove test change")
+        self.assertNotIn(
+            test_path_change, shell_env.active_path, "restoring initial checkpoint should remove test change"
+        )
 
         # Restore new checkpoint and verify change is back.
         shell_env.restore_checkpoint(check_point1)
@@ -318,13 +317,13 @@ def test_checkpointed_objects_should_behave_correctly(self):
 
         # This test is to make sure that pass-by-reference elements don't persist unexpectedly.
 
-        test_var1_name = 'SE_TEST_VAR_3'
-        test_var1_data = 'MyData1'
-        test_var1_data2 = 'RevisedData1'
-        test_var1_data3 = 'MoreRevisedData1'
+        test_var1_name = "SE_TEST_VAR_3"
+        test_var1_data = "MyData1"
+        test_var1_data2 = "RevisedData1"
+        test_var1_data3 = "MoreRevisedData1"
 
-        test_var2_name = 'SE_TEST_VAR_4'
-        test_var2_data = 'MyData2'
+        test_var2_name = "SE_TEST_VAR_4"
+        test_var2_data = "MyData2"
 
         # Set the first data and make a checkpoint.
        shell_env.set_build_var(test_var1_name, test_var1_data)
@@ -354,7 +353,6 @@ def test_checkpointed_objects_should_behave_correctly(self):
 
 
 class TestShellEnvironmenSpecialBuildVars(unittest.TestCase):
-
     def setUp(self):
         # Grab the singleton and restore the initial checkpoint.
         shell_env = SE.ShellEnvironment()
@@ -366,10 +364,10 @@ def test_get_build_vars_should_update_vars(self):
         shell_env = SE.ShellEnvironment()
         build_vars = SE.GetBuildVars()
 
-        test_var_name = 'SE_TEST_VAR_4'
-        test_var_data = 'NewData1'
+        test_var_name = "SE_TEST_VAR_4"
+        test_var_data = "NewData1"
 
-        build_vars.SetValue(test_var_name, test_var_data, 'random set')
+        build_vars.SetValue(test_var_name, test_var_data, "random set")
 
         self.assertEqual(shell_env.get_build_var(test_var_name), test_var_data)
 
@@ -377,12 +375,12 @@ def test_special_build_vars_should_default_non_overrideable(self):
         shell_env = SE.ShellEnvironment()
         build_vars = SE.GetBuildVars()
 
-        test_var_name = 'SE_TEST_VAR_4'
-        test_var_data = 'NewData1'
-        test_var_data2 = 'NewerData1'
+        test_var_name = "SE_TEST_VAR_4"
+        test_var_data = "NewData1"
+        test_var_data2 = "NewerData1"
 
-        build_vars.SetValue(test_var_name, test_var_data, 'random set')
-        build_vars.SetValue(test_var_name, test_var_data2, 'another random set')
+        build_vars.SetValue(test_var_name, test_var_data, "random set")
+        build_vars.SetValue(test_var_name, test_var_data2, "another random set")
 
         self.assertEqual(shell_env.get_build_var(test_var_name), test_var_data)
 
@@ -390,20 +388,20 @@ def test_special_build_vars_should_always_update_current(self):
         shell_env = SE.ShellEnvironment()
         build_vars = SE.GetBuildVars()
 
-        test_var1_name = 'SE_TEST_VAR_update_current1'
-        test_var1_data = 'NewData1'
-        test_var1_data2 = 'NewerData1'
+        test_var1_name = "SE_TEST_VAR_update_current1"
+        test_var1_data = "NewData1"
+        test_var1_data2 = "NewerData1"
 
-        test_var2_name = 'SE_TEST_VAR_update_current2'
-        test_var2_data = 'NewData2'
+        test_var2_name = "SE_TEST_VAR_update_current2"
+        test_var2_data = "NewData2"
 
         # Make a change and checkpoint.
-        build_vars.SetValue(test_var1_name, test_var1_data, 'var1 set', overridable=True)
+        build_vars.SetValue(test_var1_name, test_var1_data, "var1 set", overridable=True)
         shell_env.checkpoint()
 
         # Make a couple more changes.
-        build_vars.SetValue(test_var1_name, test_var1_data2, 'var1 set', overridable=True)
-        build_vars.SetValue(test_var2_name, test_var2_data, 'var2 set', overridable=True)
+        build_vars.SetValue(test_var1_name, test_var1_data2, "var1 set", overridable=True)
+        build_vars.SetValue(test_var2_name, test_var2_data, "var2 set", overridable=True)
 
         # Make sure that the newer changes are valid.
         self.assertEqual(shell_env.get_build_var(test_var1_name), test_var1_data2)
@@ -416,21 +414,21 @@ def test_special_build_vars_should_be_checkpointable(self):
 
         # This test is basically a rehash of the object checkpointing test,
         # but this time with the special vars.
 
-        test_var1_name = 'SE_TEST_VAR_3'
-        test_var1_data = 'MyData1'
-        test_var1_data2 = 'RevisedData1'
-        test_var1_data3 = 'MoreRevisedData1'
+        test_var1_name = "SE_TEST_VAR_3"
+        test_var1_data = "MyData1"
+        test_var1_data2 = "RevisedData1"
+        test_var1_data3 = "MoreRevisedData1"
 
-        test_var2_name = 'SE_TEST_VAR_4'
-        test_var2_data = 'MyData2'
+        test_var2_name = "SE_TEST_VAR_4"
+        test_var2_data = "MyData2"
 
         # Set the first data and make a checkpoint.
-        build_vars.SetValue(test_var1_name, test_var1_data, 'var1 set', overridable=True)
+        build_vars.SetValue(test_var1_name, test_var1_data, "var1 set", overridable=True)
         check_point1 = shell_env.checkpoint()
 
         # Update previous value and set second data. Then checkpoint.
-        build_vars.SetValue(test_var1_name, test_var1_data2, 'var1 set', overridable=True)
-        build_vars.SetValue(test_var2_name, test_var2_data, 'var2 set', overridable=True)
+        build_vars.SetValue(test_var1_name, test_var1_data2, "var1 set", overridable=True)
+        build_vars.SetValue(test_var2_name, test_var2_data, "var2 set", overridable=True)
         check_point2 = shell_env.checkpoint()
 
         # Restore the first checkpoint and verify values.
@@ -439,9 +437,12 @@ def test_special_build_vars_should_be_checkpointable(self):
         self.assertIs(shell_env.get_build_var(test_var2_name), None)
 
         # Make a change to be tested later.
-        build_vars.SetValue(test_var1_name, test_var1_data3, 'var1 set', overridable=True)
-        self.assertEqual(shell_env.get_build_var(test_var1_name), test_var1_data3,
-                         'even after restore, special build vars should always update current')
+        build_vars.SetValue(test_var1_name, test_var1_data3, "var1 set", overridable=True)
+        self.assertEqual(
+            shell_env.get_build_var(test_var1_name),
+            test_var1_data3,
+            "even after restore, special build vars should always update current",
+        )
 
         # Restore the second checkpoint and verify values.
         shell_env.restore_checkpoint(check_point2)
@@ -453,5 +454,5 @@ def test_special_build_vars_should_be_checkpointable(self):
         self.assertEqual(shell_env.get_build_var(test_var1_name), test_var1_data)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
diff --git a/tests.unit/test_signtool_signer.py b/tests.unit/test_signtool_signer.py
index 15df359c..0d3c7fd4 100644
--- a/tests.unit/test_signtool_signer.py
+++ b/tests.unit/test_signtool_signer.py
@@ -12,7 +12,6 @@
 
 
 class Test_signtool_signer(unittest.TestCase):
-
     @unittest.skipUnless(sys.platform.startswith("win"), "requires Windows")
     def test_get_path(self):
         path = signtool_signer.get_signtool_path()
@@ -20,58 +19,36 @@ def test_sign_with_bad_options(self):
-        signature = {
-            "type": "test"
-        }
+        signature = {"type": "test"}
         signer = {}
         with self.assertRaises(ValueError):
             signtool_signer.sign(None, signature, signer)
 
     @unittest.skipUnless(sys.platform.startswith("win"), "requires Windows")
     def test_sign_with_good_options(self):
-        signature = {
-            "type": "pkcs7",
-            "type_options": ["embedded"],
-            "encoding": "DER",
-            "hash_alg": "sha256"
-        }
-        signer = {
-            "key_file": "file.txt",
-            "key_file_format": "pkcs12"
-        }
+        signature = {"type": "pkcs7", "type_options": ["embedded"], "encoding": "DER", "hash_alg": "sha256"}
+        signer = {"key_file": "file.txt", "key_file_format": "pkcs12"}
         with self.assertRaises(RuntimeError):
             signtool_signer.sign(b"data", signature, signer)
 
     @unittest.skipUnless(sys.platform.startswith("win"), "requires Windows")
     def test_sign_with_mutually_exclusive_options(self):
-        signature = {
-            "type": "pkcs7",
-            "type_options": ["embedded", "detachedSignedData"]
-        }
+        signature = {"type": "pkcs7", "type_options": ["embedded", "detachedSignedData"]}
         signer = {}
         with self.assertRaises(ValueError):
             signtool_signer.sign(b"data", signature, signer)
 
-        signature = {
-            "type": "pkcs7",
-            "type_options": ["pkcs7DetachedSignedData", "detachedSignedData"]
-        }
"detachedSignedData"]} signer = {} with self.assertRaises(ValueError): signtool_signer.sign(b"data", signature, signer) - signature = { - "type": "pkcs7", - "type_options": ["pkcs7DetachedSignedData", "embedded"] - } + signature = {"type": "pkcs7", "type_options": ["pkcs7DetachedSignedData", "embedded"]} signer = {} with self.assertRaises(ValueError): signtool_signer.sign(b"data", signature, signer) - signature = { - "type": "pkcs7", - "type_options": ["detachedSignedData", "pkcs7DetachedSignedData", "embedded"] - } + signature = {"type": "pkcs7", "type_options": ["detachedSignedData", "pkcs7DetachedSignedData", "embedded"]} signer = {} with self.assertRaises(ValueError): signtool_signer.sign(b"data", signature, signer) diff --git a/tests.unit/test_uefi_build.py b/tests.unit/test_uefi_build.py index 673fed1d..0a8c27b9 100644 --- a/tests.unit/test_uefi_build.py +++ b/tests.unit/test_uefi_build.py @@ -8,7 +8,6 @@ ## import unittest import logging -import pytest from edk2toolext.environment import uefi_build from edk2toolext.environment.plugintypes import uefi_helper_plugin from edk2toolext.environment.plugin_manager import PluginManager @@ -22,7 +21,6 @@ class TestUefiBuild(unittest.TestCase): - def setUp(self): self.WORKSPACE = tempfile.mkdtemp() TestUefiBuild.create_min_uefi_build_tree(self.WORKSPACE) @@ -56,16 +54,15 @@ def create_min_uefi_build_tree(cls, root): conf_folder = os.path.join(root, "Conf") os.makedirs(conf_folder) target_path = os.path.join(conf_folder, "target.template") - TestUefiBuild.write_to_file(target_path, ["ACTIVE_PLATFORM = Test.dsc\n", - "TOOL_CHAIN_TAG = test\n", - "TARGET = DEBUG\n"]) + TestUefiBuild.write_to_file( + target_path, ["ACTIVE_PLATFORM = Test.dsc\n", "TOOL_CHAIN_TAG = test\n", "TARGET = DEBUG\n"] + ) tools_path = os.path.join(conf_folder, "tools_def.template") TestUefiBuild.write_to_file(tools_path, ["*_VS2022_*_*_FAMILY = MSFT"]) build_path = os.path.join(conf_folder, "build_rule.template") TestUefiBuild.write_to_file(build_path, ["hello"]) platform_path = os.path.join(root, "Test.dsc") - TestUefiBuild.write_to_file(platform_path, ["[Defines]\n", - "OUTPUT_DIRECTORY = Build"]) + TestUefiBuild.write_to_file(platform_path, ["[Defines]\n", "OUTPUT_DIRECTORY = Build"]) def test_commandline_options(self): builder = uefi_build.UefiBuilder() @@ -79,7 +76,7 @@ def test_commandline_options(self): ["--UPDATECONF"], ["--FLASHONLY"], ["--SKIPPREBUILD"], - ["--SKIPPOSTBUILD"] + ["--SKIPPOSTBUILD"], ] for argpart in args: results = parserObj.parse_args(argpart) @@ -105,12 +102,8 @@ def test_build_wrapper(self): # Some basic build variables need to be set to make it through # the build preamble to the point the wrapper gets called. 
-        shell_environment.GetBuildVars().SetValue("TARGET_ARCH",
-                                                  "IA32",
-                                                  "Set in build wrapper test")
-        shell_environment.GetBuildVars().SetValue("EDK_TOOLS_PATH",
-                                                  self.WORKSPACE,
-                                                  "Set in build wrapper test")
+        shell_environment.GetBuildVars().SetValue("TARGET_ARCH", "IA32", "Set in build wrapper test")
+        shell_environment.GetBuildVars().SetValue("EDK_TOOLS_PATH", self.WORKSPACE, "Set in build wrapper test")
 
         # "build_wrapper" -> The actual build_wrapper script
         # "test_file" -> An empty file written by build_wrapper
@@ -135,21 +128,16 @@ def test_build_wrapper(self):
         build_wrapper_cmd = "python"
         build_wrapper_params = os.path.normpath(build_wrapper_path)
 
-        TestUefiBuild.write_to_file(
-            build_wrapper_path,
-            cleandoc(build_wrapper_file_content))
+        TestUefiBuild.write_to_file(build_wrapper_path, cleandoc(build_wrapper_file_content))
 
         if GetHostInfo().os == "Linux":
-            os.chmod(build_wrapper_path,
-                     os.stat(build_wrapper_path).st_mode | stat.S_IEXEC)
+            os.chmod(build_wrapper_path, os.stat(build_wrapper_path).st_mode | stat.S_IEXEC)
 
         # This is the main point of this test. The wrapper file should be
         # executed instead of the build command. In real scenarios, the wrapper
         # script would subsequently call the build command.
-        shell_environment.GetBuildVars().SetValue(
-            "EDK_BUILD_CMD", build_wrapper_cmd, "Set in build wrapper test")
-        shell_environment.GetBuildVars().SetValue(
-            "EDK_BUILD_PARAMS", build_wrapper_params, "Set in build wrapper test")
+        shell_environment.GetBuildVars().SetValue("EDK_BUILD_CMD", build_wrapper_cmd, "Set in build wrapper test")
+        shell_environment.GetBuildVars().SetValue("EDK_BUILD_PARAMS", build_wrapper_params, "Set in build wrapper test")
 
         manager = PluginManager()
         helper = uefi_helper_plugin.HelperFunctions()
@@ -164,6 +152,7 @@ def test_build_wrapper(self):
 
 # TODO finish unit test
 
+
 def test_missing_ENV_variables(tmp_path, caplog):
     with caplog.at_level(logging.ERROR):
         TestUefiBuild().create_min_uefi_build_tree(tmp_path)
@@ -171,13 +160,11 @@ def test_missing_ENV_variables(tmp_path, caplog):
         builder = uefi_build.UefiBuilder()
         manager = PluginManager()
         helper = uefi_helper_plugin.HelperFunctions()
-    
+
         #
         # 1. Make sure we error and log a clean message when TOOL_CHAIN_TAG is missing
         #
-        shell_environment.GetBuildVars().SetValue("EDK_TOOLS_PATH",
-                                                  str(tmp_path),
-                                                  "Set in build wrapper test")
+        shell_environment.GetBuildVars().SetValue("EDK_TOOLS_PATH", str(tmp_path), "Set in build wrapper test")
         os.remove(target_template)
         TestUefiBuild.write_to_file(target_template, ["ACTIVE_PLATFORM = Test.dsc\n"])
         ret = builder.Go(str(tmp_path), "", helper, manager)
@@ -193,13 +180,12 @@ def test_missing_ENV_variables(tmp_path, caplog):
         for file in (tmp_path / "Conf").glob("**/*.txt"):
             file.unlink()
         caplog.clear()
-    
+
         #
        # 3. Make sure we error and log a clean message when TARGET is missing
         #
         os.remove(target_template)
-        TestUefiBuild.write_to_file(target_template, ["ACTIVE_PLATFORM = Test.dsc\n",
-                                                      "TOOL_CHAIN_TAG = VS2022\n"])
+        TestUefiBuild.write_to_file(target_template, ["ACTIVE_PLATFORM = Test.dsc\n", "TOOL_CHAIN_TAG = VS2022\n"])
         ret = builder.Go(str(tmp_path), "", helper, manager)
 
         # two error messages are logged when the environment variable is missing
@@ -208,5 +194,5 @@ def test_missing_ENV_variables(tmp_path, caplog):
     assert len(list(filter(lambda r: "TARGET" in r.message, caplog.records))) == 1
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
diff --git a/tests.unit/test_var_dict.py b/tests.unit/test_var_dict.py
index 439b36bd..d7d3cf65 100644
--- a/tests.unit/test_var_dict.py
+++ b/tests.unit/test_var_dict.py
@@ -210,5 +210,5 @@ def test_var_dict_non_valued_var(self):
         self.assertTrue(v.GetValue("var2"), "Should return True")
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
diff --git a/tests.unit/test_version_aggregator.py b/tests.unit/test_version_aggregator.py
index b2f780d0..73230753 100644
--- a/tests.unit/test_version_aggregator.py
+++ b/tests.unit/test_version_aggregator.py
@@ -81,5 +81,5 @@ def test_global_reset(self):
         self.assertEqual(len(version1.GetAggregatedVersionInformation()), 0)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
diff --git a/tests.unit/test_versioninfo.py b/tests.unit/test_versioninfo.py
index 244999ce..854ba29f 100644
--- a/tests.unit/test_versioninfo.py
+++ b/tests.unit/test_versioninfo.py
@@ -18,10 +18,10 @@
 from edk2toolext.versioninfo import versioninfo_tool
 from edk2toollib.utility_functions import RunCmd
 
-DUMMY_EXE_FILE_NAME = 'dummy_exe'
-DUMMY_JSON_FILE_NAME = 'dummy_json_file'
-VERSIONINFO_JSON_FILE_NAME = 'VERSIONINFO'
-BAD_JSON_FILE_NAME = 'bad_json'
+DUMMY_EXE_FILE_NAME = "dummy_exe"
+DUMMY_JSON_FILE_NAME = "dummy_json_file"
+VERSIONINFO_JSON_FILE_NAME = "VERSIONINFO"
+BAD_JSON_FILE_NAME = "bad_json"
 
 DUMMY_VALID_JSON = {
     "Minimal": "False",
@@ -40,18 +40,12 @@
         "InternalName": "Test Name",
         "LegalCopyright": "(C) 2048 Dummy Driver",
         "OriginalFilename": "Dummy.sys",
-        "ProductName": "Dummy Driver"
+        "ProductName": "Dummy Driver",
     },
-    "VarFileInfo": {
-        "Translation": "0x0409 0x04b0"
-    }
+    "VarFileInfo": {"Translation": "0x0409 0x04b0"},
 }
 
-DUMMY_MINIMAL_JSON = {
-    "Fileversion": "1,0,0,0",
-    "OriginalFilename": "Test Name",
-    "CompanyName": "Test Company"
-}
+DUMMY_MINIMAL_JSON = {"Fileversion": "1,0,0,0", "OriginalFilename": "Test Name", "CompanyName": "Test Company"}
 
 DUMMY_MINIMAL_DECODED = {
     "FileVersion": "1.0.0.0",
@@ -61,13 +55,8 @@
     "FileOS": "VOS_UNKNOWN",
     "FileType": "VFT_UNKNOWN",
     "FileSubtype": "VFT2_UNKNOWN",
-    "StringFileInfo": {
-        "CompanyName": "Test Company",
-        "OriginalFilename": "Test Name"
-    },
-    "VarFileInfo": {
-        "Translation": "0x0409 0x04b0"
-    }
+    "StringFileInfo": {"CompanyName": "Test Company", "OriginalFilename": "Test Name"},
+    "VarFileInfo": {"Translation": "0x0409 0x04b0"},
 }
 
 
@@ -84,7 +73,7 @@ def check_for_err_helper(cls, temp_dir, json_input, err_msg, decode=False):
         returned_error = not versioninfo_tool.encode_version_info_dump_rc(json_input, temp_dir)
 
     logging.getLogger().removeHandler(log_handler)
-    cls.assertFalse(os.path.isfile(os.path.join(temp_dir, 'VERSIONINFO.rc')))
+    cls.assertFalse(os.path.isfile(os.path.join(temp_dir, "VERSIONINFO.rc")))
     cls.assertTrue(err_msg in log_stream.getvalue())
     cls.assertTrue(returned_error)
@@ -93,14 +82,14 @@ def compared_decoded_version_info(self, json_file_path, reference):
     try:
         generated_json = open(json_file_path)
         generated_dict = json.load(generated_json)
-        self.assertTrue('Signature' in generated_dict)
-        del generated_dict['Signature']
-        self.assertTrue('StrucVersion' in generated_dict)
-        del generated_dict['StrucVersion']
-        if 'FileDateMS' in generated_dict:
-            del generated_dict['FileDateMS']
-        if 'FileDateLS' in generated_dict:
-            del generated_dict['FileDateLS']
+        self.assertTrue("Signature" in generated_dict)
+        del generated_dict["Signature"]
+        self.assertTrue("StrucVersion" in generated_dict)
+        del generated_dict["StrucVersion"]
+        if "FileDateMS" in generated_dict:
+            del generated_dict["FileDateMS"]
+        if "FileDateLS" in generated_dict:
+            del generated_dict["FileDateLS"]
         ref = copy.deepcopy(reference)
         if "Minimal" in ref:
             del ref["Minimal"]
@@ -112,15 +101,14 @@ def compared_decoded_version_info(self, json_file_path, reference):
 
 
 class TestVersioninfo(unittest.TestCase):
-
     def test_encode_decode_full(self):
         temp_dir = tempfile.mkdtemp()
         # Create the EXE file
-        versioned_exe_path = os.path.join(temp_dir, DUMMY_EXE_FILE_NAME) + '.exe'
+        versioned_exe_path = os.path.join(temp_dir, DUMMY_EXE_FILE_NAME) + ".exe"
         source_exe_path = os.path.join(os.path.dirname(__file__), "testdata", "versioninfo_full_exe.data")
         shutil.copyfile(source_exe_path, versioned_exe_path)
         # Create the parameters that will go to the service request function
-        version_info_output_path = os.path.join(temp_dir, VERSIONINFO_JSON_FILE_NAME + '.json')
+        version_info_output_path = os.path.join(temp_dir, VERSIONINFO_JSON_FILE_NAME + ".json")
         versioninfo_tool.decode_version_info_dump_json(versioned_exe_path, version_info_output_path)
 
         # then we compare to make sure it matches what it should be
@@ -129,11 +117,11 @@ def test_encode_decode_full(self):
     def test_encode_decode_minimal(self):
         temp_dir = tempfile.mkdtemp()
         # Create the EXE file
-        versioned_exe_path = os.path.join(temp_dir, DUMMY_EXE_FILE_NAME) + '.exe'
+        versioned_exe_path = os.path.join(temp_dir, DUMMY_EXE_FILE_NAME) + ".exe"
         source_exe_path = os.path.join(os.path.dirname(__file__), "testdata", "versioninfo_minimal_exe.data")
         shutil.copyfile(source_exe_path, versioned_exe_path)
         # Create the parameters that will go to the service request function
-        version_info_output_path = os.path.join(temp_dir, VERSIONINFO_JSON_FILE_NAME + '.json')
+        version_info_output_path = os.path.join(temp_dir, VERSIONINFO_JSON_FILE_NAME + ".json")
         versioninfo_tool.decode_version_info_dump_json(versioned_exe_path, version_info_output_path)
 
         # then we compare to make sure it matches what it should be
@@ -150,211 +138,227 @@ def test_encode_minimal(self):
     def test_missing_varinfo(self):
         temp_dir = tempfile.mkdtemp()
-        bad_json_file = os.path.join(temp_dir, BAD_JSON_FILE_NAME + '.json')
+        bad_json_file = os.path.join(temp_dir, BAD_JSON_FILE_NAME + ".json")
         bad_json = copy.deepcopy(DUMMY_VALID_JSON)
-        del bad_json['VarFileInfo']
-        with open(bad_json_file, 'w') as bad_file:
+        del bad_json["VarFileInfo"]
+        with open(bad_json_file, "w") as bad_file:
             json.dump(bad_json, bad_file)
 
-        check_for_err_helper(self, temp_dir, bad_json_file, 'Missing required parameter: VARFILEINFO')
+        check_for_err_helper(self, temp_dir, bad_json_file, "Missing required parameter: VARFILEINFO")
 
     def test_missing_translation(self):
         temp_dir = tempfile.mkdtemp()
-        bad_json_file = os.path.join(temp_dir, BAD_JSON_FILE_NAME + '.json')
+        bad_json_file = os.path.join(temp_dir, BAD_JSON_FILE_NAME + ".json")
        bad_json = copy.deepcopy(DUMMY_VALID_JSON)
-        del bad_json['VarFileInfo']['Translation']
-        with open(bad_json_file, 'w') as bad_file:
+        del bad_json["VarFileInfo"]["Translation"]
+        with open(bad_json_file, "w") as bad_file:
             json.dump(bad_json, bad_file)
 
-        check_for_err_helper(self, temp_dir, bad_json_file, 'Missing required parameter in VarFileInfo: Translation')
+        check_for_err_helper(self, temp_dir, bad_json_file, "Missing required parameter in VarFileInfo: Translation")
 
     def test_invalid_varfileinfo(self):
         temp_dir = tempfile.mkdtemp()
-        bad_json_file = os.path.join(temp_dir, BAD_JSON_FILE_NAME + '.json')
+        bad_json_file = os.path.join(temp_dir, BAD_JSON_FILE_NAME + ".json")
         bad_json = copy.deepcopy(DUMMY_VALID_JSON)
-        bad_json['VarFileInfo']['FileVersion'] = "1.2.1.2"
-        with open(bad_json_file, 'w') as bad_file:
+        bad_json["VarFileInfo"]["FileVersion"] = "1.2.1.2"
+        with open(bad_json_file, "w") as bad_file:
             json.dump(bad_json, bad_file)
 
-        check_for_err_helper(self, temp_dir, bad_json_file, 'Invalid VarFileInfo parameter: FileVersion')
+        check_for_err_helper(self, temp_dir, bad_json_file, "Invalid VarFileInfo parameter: FileVersion")
 
     def test_missing_companyname(self):
         temp_dir = tempfile.mkdtemp()
-        bad_json_file = os.path.join(temp_dir, BAD_JSON_FILE_NAME + '.json')
+        bad_json_file = os.path.join(temp_dir, BAD_JSON_FILE_NAME + ".json")
         bad_json = copy.deepcopy(DUMMY_VALID_JSON)
-        del bad_json['StringFileInfo']['CompanyName']
-        with open(bad_json_file, 'w') as bad_file:
+        del bad_json["StringFileInfo"]["CompanyName"]
+        with open(bad_json_file, "w") as bad_file:
             json.dump(bad_json, bad_file)
 
-        check_for_err_helper(self, temp_dir, bad_json_file, 'Missing required StringFileInfo parameter: CompanyName')
+        check_for_err_helper(self, temp_dir, bad_json_file, "Missing required StringFileInfo parameter: CompanyName")
 
     def test_version_overflow(self):
         temp_dir = tempfile.mkdtemp()
-        bad_json_file = os.path.join(temp_dir, BAD_JSON_FILE_NAME + '.json')
+        bad_json_file = os.path.join(temp_dir, BAD_JSON_FILE_NAME + ".json")
         bad_json = copy.deepcopy(DUMMY_VALID_JSON)
-        bad_json['ProductVersion'] = '65536.0.0.0'
-        with open(bad_json_file, 'w') as bad_file:
+        bad_json["ProductVersion"] = "65536.0.0.0"
+        with open(bad_json_file, "w") as bad_file:
             json.dump(bad_json, bad_file)
 
-        check_for_err_helper(self, temp_dir, bad_json_file, 'Integer overflow in version string')
+        check_for_err_helper(self, temp_dir, bad_json_file, "Integer overflow in version string")
 
     def test_invalid_version(self):
         temp_dir = tempfile.mkdtemp()
-        bad_json_file = os.path.join(temp_dir, BAD_JSON_FILE_NAME + '.json')
+        bad_json_file = os.path.join(temp_dir, BAD_JSON_FILE_NAME + ".json")
         bad_json = copy.deepcopy(DUMMY_VALID_JSON)
-        bad_json['ProductVersion'] = 'Version 1.0.1.0'
-        with open(bad_json_file, 'w') as bad_file:
+        bad_json["ProductVersion"] = "Version 1.0.1.0"
+        with open(bad_json_file, "w") as bad_file:
             json.dump(bad_json, bad_file)
 
-        check_for_err_helper(self, temp_dir, bad_json_file, 'Version must be in form " INTEGER.INTEGER.INTEGER.INTEGER"')  # noqa
+        check_for_err_helper(
+            self, temp_dir, bad_json_file, 'Version must be in form " INTEGER.INTEGER.INTEGER.INTEGER"'
+        )  # noqa
 
     def test_bad_version_format(self):
         temp_dir = tempfile.mkdtemp()
-        bad_json_file = os.path.join(temp_dir, BAD_JSON_FILE_NAME + '.json')
+        bad_json_file = os.path.join(temp_dir, BAD_JSON_FILE_NAME + ".json")
         bad_json = copy.deepcopy(DUMMY_VALID_JSON)
-        bad_json['ProductVersion'] = '1.234'
-        with open(bad_json_file, 'w') as bad_file:
bad_json["ProductVersion"] = "1.234" + with open(bad_json_file, "w") as bad_file: json.dump(bad_json, bad_file) check_for_err_helper(self, temp_dir, bad_json_file, 'Version must be in form "INTEGER.INTEGER.INTEGER.INTEGER"') # noqa def test_invalid_language_code_value(self): temp_dir = tempfile.mkdtemp() - bad_json_file = os.path.join(temp_dir, BAD_JSON_FILE_NAME + '.json') + bad_json_file = os.path.join(temp_dir, BAD_JSON_FILE_NAME + ".json") bad_json = copy.deepcopy(DUMMY_VALID_JSON) - bad_json['VarFileInfo']['Translation'] = '"0x0009 0x04b0"' - with open(bad_json_file, 'w') as bad_file: + bad_json["VarFileInfo"]["Translation"] = '"0x0009 0x04b0"' + with open(bad_json_file, "w") as bad_file: json.dump(bad_json, bad_file) - check_for_err_helper(self, temp_dir, bad_json_file, - f"Invalid language code: {bad_json['VarFileInfo']['Translation']}") + check_for_err_helper( + self, temp_dir, bad_json_file, f"Invalid language code: {bad_json['VarFileInfo']['Translation']}" + ) def test_invalid_language_code_string(self): temp_dir = tempfile.mkdtemp() - bad_json_file = os.path.join(temp_dir, BAD_JSON_FILE_NAME + '.json') + bad_json_file = os.path.join(temp_dir, BAD_JSON_FILE_NAME + ".json") bad_json = copy.deepcopy(DUMMY_VALID_JSON) - bad_json['VarFileInfo']['Translation'] = '"utf-8 US"' - with open(bad_json_file, 'w') as bad_file: + bad_json["VarFileInfo"]["Translation"] = '"utf-8 US"' + with open(bad_json_file, "w") as bad_file: json.dump(bad_json, bad_file) - check_for_err_helper(self, temp_dir, bad_json_file, - f"Invalid language code: {bad_json['VarFileInfo']['Translation']}") + check_for_err_helper( + self, temp_dir, bad_json_file, f"Invalid language code: {bad_json['VarFileInfo']['Translation']}" + ) def test_invalid_language_code_format(self): temp_dir = tempfile.mkdtemp() - bad_json_file = os.path.join(temp_dir, BAD_JSON_FILE_NAME + '.json') + bad_json_file = os.path.join(temp_dir, BAD_JSON_FILE_NAME + ".json") bad_json = copy.deepcopy(DUMMY_VALID_JSON) - bad_json['VarFileInfo']['Translation'] = '"0x400904b0"' - with open(bad_json_file, 'w') as bad_file: + bad_json["VarFileInfo"]["Translation"] = '"0x400904b0"' + with open(bad_json_file, "w") as bad_file: json.dump(bad_json, bad_file) - check_for_err_helper(self, temp_dir, bad_json_file, 'Translation field must contain 2 space delimited hexidecimal bytes') # noqa + check_for_err_helper( + self, temp_dir, bad_json_file, "Translation field must contain 2 space delimited hexidecimal bytes" + ) # noqa def test_invalid_language_code_no_hex(self): temp_dir = tempfile.mkdtemp() - bad_json_file = os.path.join(temp_dir, BAD_JSON_FILE_NAME + '.json') + bad_json_file = os.path.join(temp_dir, BAD_JSON_FILE_NAME + ".json") bad_json = copy.deepcopy(DUMMY_VALID_JSON) - bad_json['VarFileInfo']['Translation'] = '"4009 04b0"' - with open(bad_json_file, 'w') as bad_file: + bad_json["VarFileInfo"]["Translation"] = '"4009 04b0"' + with open(bad_json_file, "w") as bad_file: json.dump(bad_json, bad_file) - check_for_err_helper(self, temp_dir, bad_json_file, 'Invalid language code: ' - + bad_json['VarFileInfo']['Translation']) + check_for_err_helper( + self, temp_dir, bad_json_file, "Invalid language code: " + bad_json["VarFileInfo"]["Translation"] + ) def test_invalid_fileos_hex(self): temp_dir = tempfile.mkdtemp() - bad_json_file = os.path.join(temp_dir, BAD_JSON_FILE_NAME + '.json') + bad_json_file = os.path.join(temp_dir, BAD_JSON_FILE_NAME + ".json") bad_json = copy.deepcopy(DUMMY_VALID_JSON) - bad_json['FileOS'] = '0x12391' - with open(bad_json_file, 'w') 
+        bad_json["VarFileInfo"]["Translation"] = '"4009 04b0"'
+        with open(bad_json_file, "w") as bad_file:
             json.dump(bad_json, bad_file)
 
-        check_for_err_helper(self, temp_dir, bad_json_file, 'Invalid language code: '
-                             + bad_json['VarFileInfo']['Translation'])
+        check_for_err_helper(
+            self, temp_dir, bad_json_file, "Invalid language code: " + bad_json["VarFileInfo"]["Translation"]
+        )
 
     def test_invalid_fileos_hex(self):
         temp_dir = tempfile.mkdtemp()
-        bad_json_file = os.path.join(temp_dir, BAD_JSON_FILE_NAME + '.json')
+        bad_json_file = os.path.join(temp_dir, BAD_JSON_FILE_NAME + ".json")
         bad_json = copy.deepcopy(DUMMY_VALID_JSON)
-        bad_json['FileOS'] = '0x12391'
-        with open(bad_json_file, 'w') as bad_file:
+        bad_json["FileOS"] = "0x12391"
+        with open(bad_json_file, "w") as bad_file:
             json.dump(bad_json, bad_file)
 
-        check_for_err_helper(self, temp_dir, bad_json_file, 'Invalid FILEOS value: ' + bad_json['FileOS'])
+        check_for_err_helper(self, temp_dir, bad_json_file, "Invalid FILEOS value: " + bad_json["FileOS"])
 
     def test_invalid_fileos_string(self):
         temp_dir = tempfile.mkdtemp()
-        bad_json_file = os.path.join(temp_dir, BAD_JSON_FILE_NAME + '.json')
+        bad_json_file = os.path.join(temp_dir, BAD_JSON_FILE_NAME + ".json")
         bad_json = copy.deepcopy(DUMMY_VALID_JSON)
-        bad_json['FileOS'] = 'INVALID'
-        with open(bad_json_file, 'w') as bad_file:
+        bad_json["FileOS"] = "INVALID"
+        with open(bad_json_file, "w") as bad_file:
             json.dump(bad_json, bad_file)
 
-        check_for_err_helper(self, temp_dir, bad_json_file, 'Invalid FILEOS value: ' + bad_json['FileOS'])
+        check_for_err_helper(self, temp_dir, bad_json_file, "Invalid FILEOS value: " + bad_json["FileOS"])
 
     def test_invalid_filetype_hex(self):
         temp_dir = tempfile.mkdtemp()
-        bad_json_file = os.path.join(temp_dir, BAD_JSON_FILE_NAME + '.json')
+        bad_json_file = os.path.join(temp_dir, BAD_JSON_FILE_NAME + ".json")
         bad_json = copy.deepcopy(DUMMY_VALID_JSON)
-        bad_json['FileType'] = '0x12391'
-        with open(bad_json_file, 'w') as bad_file:
+        bad_json["FileType"] = "0x12391"
+        with open(bad_json_file, "w") as bad_file:
             json.dump(bad_json, bad_file)
 
-        check_for_err_helper(self, temp_dir, bad_json_file, 'Invalid FILESUBTYPE value for FILETYPE ' + bad_json['FileType'])  # noqa
+        check_for_err_helper(
+            self, temp_dir, bad_json_file, "Invalid FILESUBTYPE value for FILETYPE " + bad_json["FileType"]
+        )  # noqa
 
     def test_invalid_filetype_string(self):
         temp_dir = tempfile.mkdtemp()
-        bad_json_file = os.path.join(temp_dir, BAD_JSON_FILE_NAME + '.json')
+        bad_json_file = os.path.join(temp_dir, BAD_JSON_FILE_NAME + ".json")
         bad_json = copy.deepcopy(DUMMY_VALID_JSON)
-        bad_json['FileType'] = 'INVALID'
-        with open(bad_json_file, 'w') as bad_file:
+        bad_json["FileType"] = "INVALID"
+        with open(bad_json_file, "w") as bad_file:
             json.dump(bad_json, bad_file)
 
-        check_for_err_helper(self, temp_dir, bad_json_file, 'Invalid FILESUBTYPE value for FILETYPE ' + bad_json['FileType'])  # noqa
+        check_for_err_helper(
+            self, temp_dir, bad_json_file, "Invalid FILESUBTYPE value for FILETYPE " + bad_json["FileType"]
+        )  # noqa
 
     def test_invalid_filesubtype_drv(self):
         temp_dir = tempfile.mkdtemp()
-        bad_json_file = os.path.join(temp_dir, BAD_JSON_FILE_NAME + '.json')
+        bad_json_file = os.path.join(temp_dir, BAD_JSON_FILE_NAME + ".json")
         bad_json = copy.deepcopy(DUMMY_VALID_JSON)
-        bad_json['FileType'] = 'VFT_DRV'
-        bad_json['FileSubtype'] = 'VFT2_FONT_RASTER'
-        with open(bad_json_file, 'w') as bad_file:
+        bad_json["FileType"] = "VFT_DRV"
+        bad_json["FileSubtype"] = "VFT2_FONT_RASTER"
+        with open(bad_json_file, "w") as bad_file:
             json.dump(bad_json, bad_file)
 
-        check_for_err_helper(self, temp_dir, bad_json_file, 'Invalid FILESUBTYPE value for FILETYPE VFT_DRV: VFT2_FONT_RASTER')  # noqa
+        check_for_err_helper(
+            self, temp_dir, bad_json_file, "Invalid FILESUBTYPE value for FILETYPE VFT_DRV: VFT2_FONT_RASTER"
+        )  # noqa
 
     def test_invalid_filesubtype_font(self):
         temp_dir = tempfile.mkdtemp()
-        bad_json_file = os.path.join(temp_dir, BAD_JSON_FILE_NAME + '.json')
+        bad_json_file = os.path.join(temp_dir, BAD_JSON_FILE_NAME + ".json")
         bad_json = copy.deepcopy(DUMMY_VALID_JSON)
-        bad_json['FileType'] = 'VFT_FONT'
-        bad_json['FileSubtype'] = 'VFT2_DRV_SOUND'
-        with open(bad_json_file, 'w') as bad_file:
+        bad_json["FileType"] = "VFT_FONT"
+        bad_json["FileSubtype"] = "VFT2_DRV_SOUND"
+        with open(bad_json_file, "w") as bad_file:
             json.dump(bad_json, bad_file)
 
-        check_for_err_helper(self, temp_dir, bad_json_file, 'Invalid FILESUBTYPE value for FILETYPE VFT_FONT: VFT2_DRV_SOUND')  # noqa
+        check_for_err_helper(
+            self, temp_dir, bad_json_file, "Invalid FILESUBTYPE value for FILETYPE VFT_FONT: VFT2_DRV_SOUND"
+        )  # noqa
 
     def test_missing_filetype(self):
         temp_dir = tempfile.mkdtemp()
-        bad_json_file = os.path.join(temp_dir, BAD_JSON_FILE_NAME + '.json')
+        bad_json_file = os.path.join(temp_dir, BAD_JSON_FILE_NAME + ".json")
         bad_json = copy.deepcopy(DUMMY_VALID_JSON)
-        del bad_json['FileType']
-        with open(bad_json_file, 'w') as bad_file:
+        del bad_json["FileType"]
+        with open(bad_json_file, "w") as bad_file:
             json.dump(bad_json, bad_file)
 
-        check_for_err_helper(self, temp_dir, bad_json_file,
-                             'Missing parameter: must have FileType if FileSubtype defined')
+        check_for_err_helper(
+            self, temp_dir, bad_json_file, "Missing parameter: must have FileType if FileSubtype defined"
+        )
 
     def test_no_stringinfo_header(self):
         temp_dir = tempfile.mkdtemp()
-        bad_json_file = os.path.join(temp_dir, BAD_JSON_FILE_NAME + '.json')
+        bad_json_file = os.path.join(temp_dir, BAD_JSON_FILE_NAME + ".json")
         bad_json = copy.deepcopy(DUMMY_VALID_JSON)
-        err_str = ''
-        for key in bad_json['StringFileInfo']:
-            if key == 'FileVersion':
+        err_str = ""
+        for key in bad_json["StringFileInfo"]:
+            if key == "FileVersion":
                 continue
-            bad_json[key] = bad_json['StringFileInfo'][key]
-            err_str += 'Invalid parameter: ' + key.upper() + '.\n'
+            bad_json[key] = bad_json["StringFileInfo"][key]
+            err_str += "Invalid parameter: " + key.upper() + ".\n"
 
-        del bad_json['StringFileInfo']
-        err_str += 'Missing required parameter: STRINGFILEINFO'
-        with open(bad_json_file, 'w') as bad_file:
+        del bad_json["StringFileInfo"]
+        err_str += "Missing required parameter: STRINGFILEINFO"
+        with open(bad_json_file, "w") as bad_file:
             json.dump(bad_json, bad_file)
 
         check_for_err_helper(self, temp_dir, bad_json_file, err_str)
 
     def test_bad_input_path(self):
-        ''' check to make sure we throw an exception '''
+        """check to make sure we throw an exception"""
         try:
             check_for_err_helper(self, ".", "bad/path/to/file.json", "Could not find bad/path/to/file.json\n")
             self.fail("We shouldn't have found the file")
@@ -362,22 +366,22 @@ def test_bad_input_path(self):
             pass
 
     def test_command_line_interface(self):
-        ''' makes sure the command line interface is working correctly '''
-        ret = RunCmd('versioninfo_tool', '-h', logging_level=logging.ERROR)
+        """makes sure the command line interface is working correctly"""
+        ret = RunCmd("versioninfo_tool", "-h", logging_level=logging.ERROR)
         self.assertEqual(ret, 0)
 
     def test_non_pe_file(self):
         temp_dir = tempfile.mkdtemp()
-        bad_pe = os.path.join(temp_dir, DUMMY_JSON_FILE_NAME + '.bad')
+        bad_pe = os.path.join(temp_dir, DUMMY_JSON_FILE_NAME + ".bad")
 
-        with open(bad_pe, 'w') as bad_file:
+        with open(bad_pe, "w") as bad_file:
             json.dump(DUMMY_VALID_JSON, bad_file)
 
         check_for_err_helper(self, temp_dir, bad_pe, "DOS Header magic not found", True)
 
     def test_invalid_json_format1(self):
         temp_dir = tempfile.mkdtemp()
-        bad_json_file = os.path.join(temp_dir, BAD_JSON_FILE_NAME + '.json')
+        bad_json_file = os.path.join(temp_dir, BAD_JSON_FILE_NAME + ".json")
         bad_json = """ "FileVersion": "1.2.3.4",
             "ProductVersion": "1.2.3.4",
@@ -401,14 +405,14 @@ def test_invalid_json_format1(self):
         }
     """
-        with open(bad_json_file, 'w') as bad_file:
+        with open(bad_json_file, "w") as bad_file:
             bad_file.write(bad_json)
 
         check_for_err_helper(self, temp_dir, bad_json_file, "Invalid JSON format, Extra data")
 
     def test_invalid_json_format2(self):
         temp_dir = tempfile.mkdtemp()
-        bad_json_file = os.path.join(temp_dir, BAD_JSON_FILE_NAME + '.json')
+        bad_json_file = os.path.join(temp_dir, BAD_JSON_FILE_NAME + ".json")
         bad_json = """ {
             FileVersion: 1.2.3.4,
             ProductVersion: 1.2.3.4,
@@ -433,14 +437,16 @@ def test_invalid_json_format2(self):
         }
     """
-        with open(bad_json_file, 'w') as bad_file:
+        with open(bad_json_file, "w") as bad_file:
             bad_file.write(bad_json)
 
-        check_for_err_helper(self, temp_dir, bad_json_file, "Invalid JSON format, Expecting property name enclosed in double quotes")  # noqa
+        check_for_err_helper(
+            self, temp_dir, bad_json_file, "Invalid JSON format, Expecting property name enclosed in double quotes"
+        )  # noqa
 
     def test_invalid_json_format3(self):
         temp_dir = tempfile.mkdtemp()
-        bad_json_file = os.path.join(temp_dir, BAD_JSON_FILE_NAME + '.json')
+        bad_json_file = os.path.join(temp_dir, BAD_JSON_FILE_NAME + ".json")
         bad_json = """ {
             "FileVersion": "1.2.3.4",
             "ProductVersion": "1.2.3.4",
@@ -465,35 +471,35 @@ def test_invalid_json_format3(self):
         }
     """
-        with open(bad_json_file, 'w') as bad_file:
+        with open(bad_json_file, "w") as bad_file:
             bad_file.write(bad_json)
 
         check_for_err_helper(self, temp_dir, bad_json_file, "Invalid JSON format, Expecting ',' delimiter")  # noqa
 
     def test_invalid_minimal_fields(self):
         temp_dir = tempfile.mkdtemp()
-        bad_json_file = os.path.join(temp_dir, BAD_JSON_FILE_NAME + '.json')
+        bad_json_file = os.path.join(temp_dir, BAD_JSON_FILE_NAME + ".json")
         bad_json = {
             "Minimal": "True",
             "FileVersion": "1.2.3.4",
             "CompanyName": "Test Company",
             "FileType": "VFT_DRV",
         }
-        with open(bad_json_file, 'w') as bad_file:
+        with open(bad_json_file, "w") as bad_file:
             json.dump(bad_json, bad_file)
 
-        check_for_err_helper(self, temp_dir, bad_json_file, 'Invalid minimal parameter: FILETYPE')
+        check_for_err_helper(self, temp_dir, bad_json_file, "Invalid minimal parameter: FILETYPE")
 
     def test_invalid_minimal_value(self):
         temp_dir = tempfile.mkdtemp()
-        bad_json_file = os.path.join(temp_dir, BAD_JSON_FILE_NAME + '.json')
+        bad_json_file = os.path.join(temp_dir, BAD_JSON_FILE_NAME + ".json")
         bad_json = {
             "Minimal": "Yes",
             "FileVersion": "1.2.3.4",
             "CompanyName": "Test Company",
             "FileType": "VFT_DRV",
        }
-        with open(bad_json_file, 'w') as bad_file:
+        with open(bad_json_file, "w") as bad_file:
             json.dump(bad_json, bad_file)
 
         check_for_err_helper(self, temp_dir, bad_json_file, "Invalid value for 'Minimal', must be boolean.")
diff --git a/tests.unit/test_web_dependency.py b/tests.unit/test_web_dependency.py
index d5799494..3e4b770f 100644
--- a/tests.unit/test_web_dependency.py
+++ b/tests.unit/test_web_dependency.py
@@ -25,7 +25,7 @@
 from edk2toollib.utility_functions import RemoveTree
 
 test_dir = None
-bad_json_file = '''
+bad_json_file = """
 {
   "scope": "global",
   "type": "web",
@@ -37,7 +37,7 @@
   "compression_type":"tar",
   "sha256":"68f2335344c3f7689f8d69125d182404a3515b8daa53a9c330f115739889f998"
 }
-'''
+"""
 # JSON file that describes a single file to download from the internet
 # bing.com was choosen as it's probably not going anywhere soon and it's small file to download
 single_file_extdep = {
@@ -47,7 +47,7 @@
     "source": "https://www.bing.com/",
     "version": "20190805",
     "flags": [],
-    "internal_path": "test.txt"
+    "internal_path": "test.txt",
 }
 # Use the github release
 zip_directory_extdep = {
@@ -58,7 +58,7 @@
"https://github.com/lexxmark/winflexbison/releases/download/v2.4.7/win_flex_bison-2.4.7.zip", "version": "2.4.7", "sha256": "7553a2d6738c799e101ec38a6ad073885ead892826f87bc1a24e78bcd7ac2a8c", - "internal_path": "/." + "internal_path": "/.", } # Use the GNU FTP tar_directory_extdep = { @@ -80,20 +80,20 @@ "version": "3.4.1", "flags": [], "sha256": "5A93A88493AA32AAB228BF4571C01207D3B42B0002409A454D404B4D8395BD55", - "internal_path": "jquery.js" + "internal_path": "jquery.js", } basetools_json_file = { - "scope": "global", - "type": "web", - "name": "Mu-Basetools", - "source": "https://github.com/microsoft/mu_basecore/releases/download/v2023020002.0.3/basetools-v2023020002.0.3.zip", - "version": "v2023020002.0.3", - "sha256": "6eaf5dc61690592e441c92c3150167c40315efb24a3805a05642d5b4f875b008", - "internal_path": "/basetools/", - "compression_type": "zip", - "flags": ["set_shell_var", "set_path", "host_specific"], - "var_name": "EDK_TOOLS_BIN" + "scope": "global", + "type": "web", + "name": "Mu-Basetools", + "source": "https://github.com/microsoft/mu_basecore/releases/download/v2023020002.0.3/basetools-v2023020002.0.3.zip", + "version": "v2023020002.0.3", + "sha256": "6eaf5dc61690592e441c92c3150167c40315efb24a3805a05642d5b4f875b008", + "internal_path": "/basetools/", + "compression_type": "zip", + "flags": ["set_shell_var", "set_path", "host_specific"], + "var_name": "EDK_TOOLS_BIN", } @@ -124,7 +124,7 @@ def setUp(self): @classmethod def setUpClass(cls): - logger = logging.getLogger('') + logger = logging.getLogger("") logger.addHandler(logging.NullHandler()) unittest.installHandler() @@ -154,8 +154,8 @@ def test_single_file(self): ext_dep = WebDependency(ext_dep_descriptor) ext_dep.fetch() - ext_dep_name = single_file_extdep['name'] + "_extdep" - file_path = os.path.join(test_dir, ext_dep_name, single_file_extdep['internal_path']) + ext_dep_name = single_file_extdep["name"] + "_extdep" + file_path = os.path.join(test_dir, ext_dep_name, single_file_extdep["internal_path"]) if not os.path.isfile(file_path): self.fail("The downloaded file isn't there") @@ -170,7 +170,7 @@ def test_sha256_whole_zip_directory(self): ext_dep = WebDependency(ext_dep_descriptor) ext_dep.fetch() - ext_dep_name = zip_directory_extdep['name'] + "_extdep" + ext_dep_name = zip_directory_extdep["name"] + "_extdep" folder_path = os.path.join(test_dir, ext_dep_name) if not os.path.exists(os.path.join(folder_path, "README.txt")): logging.warning(folder_path) @@ -187,7 +187,7 @@ def test_sha256_whole_tar_directory(self): ext_dep = WebDependency(ext_dep_descriptor) ext_dep.fetch() - ext_dep_name = tar_directory_extdep['name'] + "_extdep" + ext_dep_name = tar_directory_extdep["name"] + "_extdep" folder_path = os.path.join(test_dir, ext_dep_name) if not os.path.exists(os.path.join(folder_path, "README")): logging.warning(folder_path) @@ -207,8 +207,8 @@ def test_sha256_uppercase_single_file(self): ext_dep = WebDependency(ext_dep_descriptor) ext_dep.fetch() - ext_dep_name = jquery_json['name'] + "_extdep" - file_path = os.path.join(test_dir, ext_dep_name, jquery_json['internal_path']) + ext_dep_name = jquery_json["name"] + "_extdep" + file_path = os.path.join(test_dir, ext_dep_name, jquery_json["internal_path"]) if not os.path.isfile(file_path): self.fail("The downloaded file isn't there") @@ -224,8 +224,8 @@ def test_sha256_lowercase_single_file(self): ext_dep = WebDependency(ext_dep_descriptor) ext_dep.fetch() - ext_dep_name = jquery_json['name'] + "_extdep" - file_path = os.path.join(test_dir, ext_dep_name, 
jquery_json['internal_path']) + ext_dep_name = jquery_json["name"] + "_extdep" + file_path = os.path.join(test_dir, ext_dep_name, jquery_json["internal_path"]) if not os.path.isfile(file_path): self.fail("The downloaded file isn't there") @@ -241,7 +241,7 @@ def test_unpack_zip_file(self): with open(file_path, "w+") as ext_dep_file: ext_dep_file.write(bad_json_file) - with zipfile.ZipFile(compressed_file_path, 'w') as _zip: + with zipfile.ZipFile(compressed_file_path, "w") as _zip: _zip.write(file_path, arcname=os.path.basename(file_path)) os.remove(file_path) @@ -276,7 +276,6 @@ def test_unpack_tar_file(self): # Files in test_dir\first_dir\second_dir should be located. # Files in test_dir\first_dir should not be unpacked. def test_unpack_zip_directory(self): - first_level_dir_name = "first_dir" second_level_dir_name = "second_dir" first_level_path = os.path.join(test_dir, first_level_dir_name) @@ -290,14 +289,16 @@ def test_unpack_zip_directory(self): # only files inside internal_path should be there after unpack # (file path, is this file expected to be unpacked?) - test_files = [(os.path.join(test_dir, internal_path, "bad_json_file.json"), True), - (os.path.join(test_dir, first_level_dir_name, "json_file.json"), False)] + test_files = [ + (os.path.join(test_dir, internal_path, "bad_json_file.json"), True), + (os.path.join(test_dir, first_level_dir_name, "json_file.json"), False), + ] for test_file in test_files: with open(test_file[0], "w+") as ext_dep_file: ext_dep_file.write(bad_json_file) - with zipfile.ZipFile(compressed_file_path, 'w') as _zip: + with zipfile.ZipFile(compressed_file_path, "w") as _zip: for test_file in test_files: _zip.write(test_file[0], arcname=test_file[0].split(test_dir)[1]) @@ -330,8 +331,10 @@ def test_unpack_tar_directory(self): # only files inside internal_path should be there after unpack # (file path, is this file expected to be unpacked?) 
- test_files = [(os.path.join(test_dir, internal_path, "bad_json_file.json"), True), - (os.path.join(test_dir, first_level_dir_name, "json_file.json"), False)] + test_files = [ + (os.path.join(test_dir, internal_path, "bad_json_file.json"), True), + (os.path.join(test_dir, first_level_dir_name, "json_file.json"), False), + ] for test_file in test_files: with open(test_file[0], "w+") as ext_dep_file: @@ -371,7 +374,7 @@ def test_multi_level_directory(self): # >>> testtesttest/ # >>>> testtesttesttest/ for i in range(1, number_of_layers): - internal_path = (directory_name * i) + internal_path = directory_name * i if i - 1 > 0: internal_path = os.path.join(internal_paths[i - 1], internal_path) internal_paths.insert(i, internal_path) @@ -388,8 +391,10 @@ def test_multi_level_directory(self): # create files in each folder files = [""] for file_list_counter in range(1, number_of_layers): - files.insert(file_list_counter, - os.path.join(test_dir, internal_paths[file_list_counter], file_name * file_list_counter)) + files.insert( + file_list_counter, + os.path.join(test_dir, internal_paths[file_list_counter], file_name * file_list_counter), + ) with open(files[file_list_counter], "w+") as ext_dep_file: ext_dep_file.write(bad_json_file) @@ -435,10 +440,10 @@ def test_zip_uses_linux_path_sep(self): with open(test_file, "w+") as ext_dep_file: ext_dep_file.write(bad_json_file) - with zipfile.ZipFile(compressed_file_path, 'w') as _zip: + with zipfile.ZipFile(compressed_file_path, "w") as _zip: _zip.write(test_file, arcname=test_file.split(test_dir)[1]) - with zipfile.ZipFile(compressed_file_path, 'r') as _zip: + with zipfile.ZipFile(compressed_file_path, "r") as _zip: namelist = _zip.namelist() self.assertTrue(len(namelist) == 1) @@ -498,5 +503,6 @@ def test_unpack_zip_file_attr(self): for file in [path for path in extdep_dir.rglob("*") if "Linux" in str(path) and path.is_file()]: assert file.stat().st_mode & OWNER_EXE -if __name__ == '__main__': + +if __name__ == "__main__": unittest.main() diff --git a/tests.unit/uefi_tree.py b/tests.unit/uefi_tree.py index ec17410b..5eb4961e 100644 --- a/tests.unit/uefi_tree.py +++ b/tests.unit/uefi_tree.py @@ -5,6 +5,7 @@ # SPDX-License-Identifier: BSD-2-Clause-Patent """Used for creating a minimal uefi tree for testing.""" + import os import tempfile import json @@ -31,7 +32,7 @@ def __init__(self, workspace=None, create_platform=True, with_repo=False): if workspace is None: workspace = os.path.abspath(tempfile.mkdtemp()) self.workspace = workspace - if (create_platform): + if create_platform: self._create_tree() if with_repo: self._create_repo() @@ -50,12 +51,12 @@ def get_workspace(self): def _create_repo(self): repo = git.Repo.init(self.workspace) - repo.create_remote('origin', 'https://github.com/username/repo.git') - repo.git.config('--global', 'user.email', '"johndoe@example.com"') - repo.git.config('--global', 'user.name', '"John Doe"') - repo.git.checkout('-b', "master") - repo.git.add('.') - repo.git.commit('-m', '"Initial commit"') + repo.create_remote("origin", "https://github.com/username/repo.git") + repo.git.config("--global", "user.email", '"johndoe@example.com"') + repo.git.config("--global", "user.name", '"John Doe"') + repo.git.checkout("-b", "master") + repo.git.add(".") + repo.git.commit("-m", '"Initial commit"') def _create_tree(self): """Creates a settings.py, test.dsc, Conf folder (with build_rule, target, and tools_def).""" @@ -124,14 +125,7 @@ def create_ext_dep(self, dep_type, name, version, source=None, scope="global", d source = 
"https://api.nuget.org/v3/index.json" if source is None: raise ValueError("Source was not provided") - data = { - "scope": scope, - "type": dep_type, - "name": name, - "version": version, - "source": source, - "flags": [] - } + data = {"scope": scope, "type": dep_type, "name": name, "version": version, "source": source, "flags": []} if extra_data is not None: data.update(extra_data) text = json.dumps(data) @@ -144,7 +138,7 @@ def create_ext_dep(self, dep_type, name, version, source=None, scope="global", d uefi_tree.write_to_file(output_path, text) return output_path - _settings_file_text = ''' + _settings_file_text = """ # @file settings.py # This contains a settingsmanger for testing ## @@ -205,16 +199,16 @@ class TestBuilder(UefiBuilder): def SetPlatformEnv(self): self.env.SetValue("EDK2_BASE_TOOLS_DIR", self.ws, "empty") return 0 - ''' + """ - _dsc_file_text = ''' + _dsc_file_text = """ [Defines] OUTPUT_DIRECTORY = Build - ''' + """ - _target_file_text = ''' + _target_file_text = """ ACTIVE_PLATFORM = Test.dsc TOOL_CHAIN_TAG = test TARGET_ARCH = X64 TARGET = DEBUG - ''' + """