diff --git a/.github/workflows/pre-commit.yml b/.github/workflows/pre-commit.yml new file mode 100644 index 000000000..729132604 --- /dev/null +++ b/.github/workflows/pre-commit.yml @@ -0,0 +1,27 @@ +--- +name: pre-commit + +on: # yamllint disable-line rule:truthy + pull_request: + push: + branches: + - main + +jobs: + pre-commit: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + id: setup-python + with: + python-version: '3.11' + cache: 'pip' + - uses: actions/cache@v4 + with: + path: ~/.cache/pre-commit + key: pre-commit-${{ hashFiles('.pre-commit-config.yaml') }} + - run: pip install pre-commit + - name: "run pre-commit" + run: | + pre-commit run --all-files --show-diff-on-failure diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index b13a77422..e7ab0d99f 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -3,7 +3,9 @@ repos: rev: v4.5.0 hooks: - id: trailing-whitespace + exclude: '\.patch' - id: end-of-file-fixer + exclude: '^docs/overrides' - id: fix-byte-order-marker - id: mixed-line-ending - id: check-yaml @@ -16,22 +18,6 @@ repos: hooks: - id: yamllint args: [-c, .yamllint.yaml] - - repo: https://github.com/PyCQA/flake8 - rev: 7.0.0 - hooks: - - id: flake8 - exclude: '^python/' - args: - - --max-line-length=120 - - repo: https://github.com/psf/black - rev: 24.4.2 - hooks: - - id: black - exclude: '^python/' - args: - - --line-length=120 - - --check - - --diff - repo: https://github.com/python-jsonschema/check-jsonschema rev: 0.28.4 hooks: @@ -54,10 +40,8 @@ repos: rev: v0.5.4 hooks: - id: ruff - files: '^python/' args: [--fix] - id: ruff-format - files: '^python/' - repo: https://github.com/python-poetry/poetry rev: '1.7.1' hooks: diff --git a/components/nautobot/dexauth.py b/components/nautobot/dexauth.py index b3c3b3017..aed821f44 100644 --- a/components/nautobot/dexauth.py +++ b/components/nautobot/dexauth.py @@ -21,9 +21,7 @@ def _env_list(field: str, default: str) -> list[str]: STAFF_GROUPS = _env_list("DEX_STAFF_GROUPS", "ucadmin") -def group_sync( - uid, user=None, response=None, *args, **kwargs -): # pylint: disable=keyword-arg-before-vararg, unused-argument +def group_sync(uid, user=None, response=None, *args, **kwargs): # pylint: disable=keyword-arg-before-vararg, unused-argument """Sync the users groups from the response and set staff/superuser as appropriate""" if user and response and response.get(GROUPS_ATTR_NAME, False): diff --git a/components/nautobot/nautobot_config.py b/components/nautobot/nautobot_config.py index 32158feeb..d79b7eb8a 100644 --- a/components/nautobot/nautobot_config.py +++ b/components/nautobot/nautobot_config.py @@ -348,7 +348,9 @@ # Send anonymized installation metrics when `nautobot-server post_upgrade` command is run. # -INSTALLATION_METRICS_ENABLED = is_truthy(os.getenv("NAUTOBOT_INSTALLATION_METRICS_ENABLED", "True")) +INSTALLATION_METRICS_ENABLED = is_truthy( + os.getenv("NAUTOBOT_INSTALLATION_METRICS_ENABLED", "True") +) # Storage backend to use for Job input files and Job output files. 
# @@ -467,7 +469,9 @@ def _read_cred(filename): # configure our SSO login from our secret -SOCIAL_AUTH_OIDC_OIDC_ENDPOINT = _read_cred("/opt/nautobot/sso/issuer") or os.getenv("SOCIAL_AUTH_OIDC_OIDC_ENDPOINT") +SOCIAL_AUTH_OIDC_OIDC_ENDPOINT = _read_cred("/opt/nautobot/sso/issuer") or os.getenv( + "SOCIAL_AUTH_OIDC_OIDC_ENDPOINT" +) SOCIAL_AUTH_OIDC_KEY = _read_cred("/opt/nautobot/sso/client-id") or "nautobot" SOCIAL_AUTH_OIDC_SECRET = _read_cred("/opt/nautobot/sso/client-secret") # The “openid”, “profile” and “email” are requested by default, diff --git a/containers/argo_utils/code/argo_python/__init__.py b/containers/argo_utils/code/argo_python/__init__.py index 378952273..091af98ec 100644 --- a/containers/argo_utils/code/argo_python/__init__.py +++ b/containers/argo_utils/code/argo_python/__init__.py @@ -6,7 +6,14 @@ class ArgoWorkflow: - def __init__(self, namespace: str, name: str, uid: str, api_version="argoproj.io/v1alpha1", config_file=None): + def __init__( + self, + namespace: str, + name: str, + uid: str, + api_version="argoproj.io/v1alpha1", + config_file=None, + ): if config_file: config.load_kube_config(config_file) else: diff --git a/containers/bmc-utils/code/bmc_firmware_update.py b/containers/bmc-utils/code/bmc_firmware_update.py index 330514613..583d31da1 100644 --- a/containers/bmc-utils/code/bmc_firmware_update.py +++ b/containers/bmc-utils/code/bmc_firmware_update.py @@ -18,8 +18,12 @@ if __name__ == "__main__": - parser = argparse.ArgumentParser(prog=os.path.basename(__file__), description="Update BMC firmware") - parser.add_argument("--host", required=True, help="The address of the BMC interface") + parser = argparse.ArgumentParser( + prog=os.path.basename(__file__), description="Update BMC firmware" + ) + parser.add_argument( + "--host", required=True, help="The address of the BMC interface" + ) parser.add_argument("--firmware-url", required=True, help="URL of firmware") args = parser.parse_args() @@ -41,7 +45,12 @@ "Content-Type": "multipart/form-data", } - update_data = {"UpdateRepository": True, "UpdateTarget": True, "ETag": "atag", "Section": 0} + update_data = { + "UpdateRepository": True, + "UpdateTarget": True, + "ETag": "atag", + "Section": 0, + } upd_url = urlparse.urljoin(c._conn._url, updsvc.http_push_uri) @@ -57,7 +66,9 @@ ("parameters", json.dumps(update_data)), ("file", (filename, r.raw, "application/octet-stream")), ] - rsp = c._conn._session.post(upd_url, files=multipart, verify=False, headers=headers) + rsp = c._conn._session.post( + upd_url, files=multipart, verify=False, headers=headers + ) logger.info(rsp.json()) except Exception as e: logger.error(e) diff --git a/containers/bmc-utils/code/bmc_sync_creds.py b/containers/bmc-utils/code/bmc_sync_creds.py index 9c4e93aca..a657bfab5 100644 --- a/containers/bmc-utils/code/bmc_sync_creds.py +++ b/containers/bmc-utils/code/bmc_sync_creds.py @@ -19,11 +19,21 @@ def redfish_request( - host: str, uri: str, username: str, password: str, method: str = "GET", payload: Dict | None = None + host: str, + uri: str, + username: str, + password: str, + method: str = "GET", + payload: Dict | None = None, ) -> dict: try: r = requests.request( - method, f"https://{host}{uri}", verify=False, auth=(username, password), timeout=15, json=payload + method, + f"https://{host}{uri}", + verify=False, + auth=(username, password), + timeout=15, + json=payload, ) r.raise_for_status() return r.json() @@ -67,7 +77,13 @@ def get_bmc_accounts(host: str, username: str, password: str) -> List[Dict]: raise -def set_bmc_creds(host: str, 
username: str, password: str, expected_username: str, expected_password: str) -> bool: +def set_bmc_creds( + host: str, + username: str, + password: str, + expected_username: str, + expected_password: str, +) -> bool: """Find the account associated with the username in question""" try: accounts = get_bmc_accounts(host, username, password) @@ -97,9 +113,12 @@ def set_bmc_creds(host: str, username: str, password: str, expected_username: st if __name__ == "__main__": parser = argparse.ArgumentParser( - prog=os.path.basename(__file__), description="Attempts to find the correct BMC credentials for a device" + prog=os.path.basename(__file__), + description="Attempts to find the correct BMC credentials for a device", + ) + parser.add_argument( + "--host", required=True, help="the address of the bmc interface for the device" ) - parser.add_argument("--host", required=True, help="the address of the bmc interface for the device") args = parser.parse_args() host = args.host @@ -117,17 +136,23 @@ def set_bmc_creds(host: str, username: str, password: str, expected_username: st logger.info("BMC credentials are in sync.") sys.exit(0) else: - logger.info("BMC credentials are NOT in sync. Trying known legacy/vendor credentials ...") + logger.info( + "BMC credentials are NOT in sync. Trying known legacy/vendor credentials ..." + ) # iDRAC defaults to blocking an IP address after 3 bad login attempts within 60 second. Since we have the # initial attempt above, we will sleep 35 seconds between any additional attempts. delay = 60 username = os.getenv("BMC_LEGACY_USER", "root") for password in legacy_passwords: - logger.info(f"Delaying for {delay} seconds to prevent failed auth lockouts ...") + logger.info( + f"Delaying for {delay} seconds to prevent failed auth lockouts ..." + ) time.sleep(delay) if verify_auth(host, username, password): - if set_bmc_creds(host, username, password, expected_username, expected_password): + if set_bmc_creds( + host, username, password, expected_username, expected_password + ): logger.info("BMC password has been synced.") sys.exit(0) diff --git a/python/ironic-understack/ironic_understack/conf.py b/python/ironic-understack/ironic_understack/conf.py index c6ff909fd..6ea1bba64 100644 --- a/python/ironic-understack/ironic_understack/conf.py +++ b/python/ironic-understack/ironic_understack/conf.py @@ -1,4 +1,5 @@ from oslo_config import cfg + CONF = cfg.CONF diff --git a/python/ironic-understack/ironic_understack/flavor_spec.py b/python/ironic-understack/ironic_understack/flavor_spec.py index d5277a133..a56448a15 100644 --- a/python/ironic-understack/ironic_understack/flavor_spec.py +++ b/python/ironic-understack/ironic_understack/flavor_spec.py @@ -61,12 +61,11 @@ def score_machine(self, machine: Machine): # but more memory space it is less desirable than the machine that # matches exactly on both disk and memory. 
- # Rule 1: 100% match gets the highest priority if ( - machine.memory_gb == self.memory_gb and - machine.disk_gb in self.drives and - machine.cpu in self.cpu_models + machine.memory_gb == self.memory_gb + and machine.disk_gb in self.drives + and machine.cpu in self.cpu_models ): return 100 diff --git a/python/ironic-understack/ironic_understack/machine.py b/python/ironic-understack/ironic_understack/machine.py index 019ff976c..d1fd5c1cc 100644 --- a/python/ironic-understack/ironic_understack/machine.py +++ b/python/ironic-understack/ironic_understack/machine.py @@ -10,4 +10,3 @@ class Machine: @property def memory_gb(self) -> int: return self.memory_mb // 1024 - diff --git a/python/ironic-understack/ironic_understack/redfish_inspect_understack.py b/python/ironic-understack/ironic_understack/redfish_inspect_understack.py index 06d8db6a4..3df194c17 100644 --- a/python/ironic-understack/ironic_understack/redfish_inspect_understack.py +++ b/python/ironic-understack/ironic_understack/redfish_inspect_understack.py @@ -44,7 +44,7 @@ def inspect_hardware(self, task): :returns: The resulting state of inspection. """ - upstream_state = super().inspect_hardware(task) # pyright: ignore reportAttributeAccessIssue + upstream_state = super().inspect_hardware(task) # pyright: ignore reportAttributeAccessIssue inspection_data = get_inspection_data(task.node, task.context) diff --git a/python/ironic-understack/ironic_understack/tests/test_flavor_spec.py b/python/ironic-understack/ironic_understack/tests/test_flavor_spec.py index a3e7e70a5..a575e68f2 100644 --- a/python/ironic-understack/ironic_understack/tests/test_flavor_spec.py +++ b/python/ironic-understack/ironic_understack/tests/test_flavor_spec.py @@ -93,13 +93,12 @@ def test_empty_directory(tmp_path): specs = FlavorSpec.from_directory(str(tmp_path)) assert len(specs) == 0 + @pytest.fixture def machines(): return [ # 1024 GB, exact CPU, medium - Machine( - memory_mb=102400, cpu="AMD EPYC 9254 245-Core Processor", disk_gb=1000 - ), + Machine(memory_mb=102400, cpu="AMD EPYC 9254 245-Core Processor", disk_gb=1000), # 800 GB, non-matching CPU Machine(memory_mb=800000, cpu="Intel Xeon E5-2676 v3", disk_gb=500), # 200 GB, exact CPU, medium @@ -140,19 +139,26 @@ def flavors(): ), ] + def test_exact_match(flavors): - machine = Machine(memory_mb=102400, cpu="AMD EPYC 9254 245-Core Processor", disk_gb=500) + machine = Machine( + memory_mb=102400, cpu="AMD EPYC 9254 245-Core Processor", disk_gb=500 + ) assert flavors[0].score_machine(machine) == 100 assert flavors[1].score_machine(machine) == 0 def test_memory_too_small(flavors): - machine = Machine(memory_mb=51200, cpu="AMD EPYC 9254 245-Core Processor", disk_gb=500) + machine = Machine( + memory_mb=51200, cpu="AMD EPYC 9254 245-Core Processor", disk_gb=500 + ) assert all(flavor.score_machine(machine) for flavor in flavors) == 0 def test_disk_too_small(flavors): - machine = Machine(memory_mb=204800, cpu="AMD EPYC 9254 245-Core Processor", disk_gb=100) + machine = Machine( + memory_mb=204800, cpu="AMD EPYC 9254 245-Core Processor", disk_gb=100 + ) assert all(flavor.score_machine(machine) for flavor in flavors) == 0 @@ -162,35 +168,46 @@ def test_cpu_model_not_matching(flavors): def test_memory_match_but_more_disk(flavors): - machine = Machine(memory_mb=102400, cpu="AMD EPYC 9254 245-Core Processor", disk_gb=1000) + machine = Machine( + memory_mb=102400, cpu="AMD EPYC 9254 245-Core Processor", disk_gb=1000 + ) assert flavors[0].score_machine(machine) > 0 def test_disk_match_but_more_memory(flavors): - machine = 
Machine(memory_mb=204800, cpu="AMD EPYC 9254 245-Core Processor", disk_gb=500) + machine = Machine( + memory_mb=204800, cpu="AMD EPYC 9254 245-Core Processor", disk_gb=500 + ) assert flavors[0].score_machine(machine) > 0 assert flavors[1].score_machine(machine) == 0 assert flavors[2].score_machine(machine) == 0 + # Edge cases def test_memory_slightly_less(flavors): # Machine with slightly less memory than required by the smallest flavor - machine = Machine(memory_mb=102300, cpu="AMD EPYC 9254 245-Core Processor", disk_gb=500) + machine = Machine( + memory_mb=102300, cpu="AMD EPYC 9254 245-Core Processor", disk_gb=500 + ) # Should not match because memory is slightly less assert all(flavor.score_machine(machine) for flavor in flavors) == 0 def test_disk_slightly_less(flavors): # Machine with slightly less disk space than required by the smallest flavor - machine = Machine(memory_mb=102400, cpu="AMD EPYC 9254 245-Core Processor", disk_gb=499) + machine = Machine( + memory_mb=102400, cpu="AMD EPYC 9254 245-Core Processor", disk_gb=499 + ) # Should not match because disk space is slightly less assert all(flavor.score_machine(machine) for flavor in flavors) == 0 def test_memory_exact_disk_slightly_more(flavors): # Machine with exact memory but slightly more disk space than required - machine = Machine(memory_mb=102400, cpu="AMD EPYC 9254 245-Core Processor", disk_gb=501) + machine = Machine( + memory_mb=102400, cpu="AMD EPYC 9254 245-Core Processor", disk_gb=501 + ) assert flavors[0].score_machine(machine) > 0 assert flavors[1].score_machine(machine) == 0 assert flavors[2].score_machine(machine) == 0 @@ -198,7 +215,9 @@ def test_memory_exact_disk_slightly_more(flavors): def test_disk_exact_memory_slightly_more(flavors): # Machine with exact disk space but slightly more memory than required - machine = Machine(memory_mb=102500, cpu="AMD EPYC 9254 245-Core Processor", disk_gb=500) + machine = Machine( + memory_mb=102500, cpu="AMD EPYC 9254 245-Core Processor", disk_gb=500 + ) assert flavors[0].score_machine(machine) > 0 assert flavors[1].score_machine(machine) == 0 assert flavors[2].score_machine(machine) == 0 @@ -206,7 +225,9 @@ def test_disk_exact_memory_slightly_more(flavors): def test_cpu_model_not_exact_but_memory_and_disk_match(flavors): # Machine with exact memory and disk space but CPU model is close but not exact - machine = Machine(memory_mb=102400, cpu="AMD EPYC 9254 245-Core Processor v2", disk_gb=500) + machine = Machine( + memory_mb=102400, cpu="AMD EPYC 9254 245-Core Processor v2", disk_gb=500 + ) # Should not match because CPU model is not exactly listed assert all(flavor.score_machine(machine) for flavor in flavors) == 0 diff --git a/python/ironic-understack/ironic_understack/tests/test_machine.py b/python/ironic-understack/ironic_understack/tests/test_machine.py index a17bb6b43..86d7937e6 100644 --- a/python/ironic-understack/ironic_understack/tests/test_machine.py +++ b/python/ironic-understack/ironic_understack/tests/test_machine.py @@ -1,6 +1,6 @@ -import pytest from ironic_understack.machine import Machine + def test_memory_gb_property(): # Test a machine with exactly 1 GB of memory machine = Machine(memory_mb=1024, cpu="x86", disk_gb=50) diff --git a/python/ironic-understack/ironic_understack/tests/test_matcher.py b/python/ironic-understack/ironic_understack/tests/test_matcher.py index fbc7558fc..b32561b23 100644 --- a/python/ironic-understack/ironic_understack/tests/test_matcher.py +++ b/python/ironic-understack/ironic_understack/tests/test_matcher.py @@ -3,22 +3,47 @@ 
from ironic_understack.machine import Machine from ironic_understack.matcher import Matcher + @pytest.fixture def sample_flavors(): return [ - FlavorSpec(name="small", memory_gb=4, cpu_cores=2, cpu_models=["x86", "ARM"], drives=[20], devices=[]), - FlavorSpec(name="medium", memory_gb=8, cpu_cores=4, cpu_models=["x86"], drives=[40], devices=[]), - FlavorSpec(name="large", memory_gb=16, cpu_cores=8, cpu_models=["x86"], drives=[80], devices=[]), + FlavorSpec( + name="small", + memory_gb=4, + cpu_cores=2, + cpu_models=["x86", "ARM"], + drives=[20], + devices=[], + ), + FlavorSpec( + name="medium", + memory_gb=8, + cpu_cores=4, + cpu_models=["x86"], + drives=[40], + devices=[], + ), + FlavorSpec( + name="large", + memory_gb=16, + cpu_cores=8, + cpu_models=["x86"], + drives=[80], + devices=[], + ), ] + @pytest.fixture def matcher(sample_flavors): return Matcher(flavors=sample_flavors) + @pytest.fixture def machine(): return Machine(memory_mb=8192, cpu="x86", disk_gb=50) + def test_match(matcher, machine): # This machine should match the small and medium flavors results = matcher.match(machine) @@ -26,18 +51,21 @@ def test_match(matcher, machine): assert results[0].name == "small" assert results[1].name == "medium" + def test_match_no_flavor(matcher): # A machine that does not meet any flavor specs machine = Machine(memory_mb=2048, cpu="x86", disk_gb=10) results = matcher.match(machine) assert len(results) == 0 + def test_pick_best_flavor2(matcher, machine): # This machine should pick the medium flavor as the best best_flavor = matcher.pick_best_flavor(machine) assert best_flavor is not None assert best_flavor.name == "medium" + def test_pick_best_flavor_no_match(matcher): # A machine that does not meet any flavor specs machine = Machine(memory_mb=1024, cpu="ARM", disk_gb=10) diff --git a/python/neutron-understack/neutron_understack/argo/workflows.py b/python/neutron-understack/neutron_understack/argo/workflows.py index 4e6b97d74..8ed93beb2 100644 --- a/python/neutron-understack/neutron_understack/argo/workflows.py +++ b/python/neutron-understack/neutron_understack/argo/workflows.py @@ -17,6 +17,7 @@ def __init__( api_url="https://argo-server.argo.svc.cluster.local:2746", logger=None, ): + """Simple Argo Workflows Client.""" if token is None: with open(DEFAULT_TOKEN_FILENAME) as token_file: token = token_file.read() @@ -26,7 +27,6 @@ def __init__( self.headers = {"Authorization": f"Bearer {self.token}"} self.logger = logger - def submit( self, template_name: str, diff --git a/python/understack-workflows/tests/conftest.py b/python/understack-workflows/tests/conftest.py index 4907c656f..34fd2df36 100644 --- a/python/understack-workflows/tests/conftest.py +++ b/python/understack-workflows/tests/conftest.py @@ -25,12 +25,12 @@ def project_id() -> uuid.UUID: @pytest.fixture def bmc_username() -> str: - return 'root' + return "root" @pytest.fixture def bmc_password() -> str: - return 'password' + return "password" @pytest.fixture diff --git a/python/understack-workflows/tests/test_sync_bmc_creds.py b/python/understack-workflows/tests/test_sync_bmc_creds.py index 49753457f..1f8ccbf13 100644 --- a/python/understack-workflows/tests/test_sync_bmc_creds.py +++ b/python/understack-workflows/tests/test_sync_bmc_creds.py @@ -1,7 +1,8 @@ +import json +import pathlib import sys + import pytest -import pathlib -import json from understack_workflows.main.sync_bmc_creds import get_args from understack_workflows.node_configuration import IronicNodeConfiguration @@ -16,8 +17,11 @@ def read_json_samples(file_path): 
@pytest.fixture(autouse=True) def mock_args(monkeypatch): - monkeypatch.setattr(sys, "argv", ["pytest", - read_json_samples("json_samples/event-interface-update.json")]) + monkeypatch.setattr( + sys, + "argv", + ["pytest", read_json_samples("json_samples/event-interface-update.json")], + ) @pytest.fixture @@ -26,32 +30,36 @@ def fake_ironic_client(mocker): def get_ironic_node_state(fake_ironic_client, node_data): - node = IronicNodeConfiguration.from_event(json.loads(read_json_samples("json_samples/event-interface-update.json"))) + node = IronicNodeConfiguration.from_event( + json.loads(read_json_samples("json_samples/event-interface-update.json")) + ) ironic_node = fake_ironic_client.get_node(node.uuid) ironic_node.return_value = node_data - return ironic_node.return_value['provision_state'] + return ironic_node.return_value["provision_state"] def test_args(): var = get_args() - assert var['data']['ip_addresses'][0]['host'] == "10.46.96.156" + assert var["data"]["ip_addresses"][0]["host"] == "10.46.96.156" def test_ironic_non_allowing_states(fake_ironic_client): - ironic_node_state = get_ironic_node_state(fake_ironic_client, - json.loads(read_json_samples( - "json_samples/ironic-active-node-data.json"))) + ironic_node_state = get_ironic_node_state( + fake_ironic_client, + json.loads(read_json_samples("json_samples/ironic-active-node-data.json")), + ) with pytest.raises(SystemExit) as sys_exit: if ironic_node_state not in ["enroll", "manageable"]: - print('checking') + print("checking") sys.exit(0) assert sys_exit.value.code == 0 def test_ironic_node_allowing_states(fake_ironic_client): - ironic_node_state = get_ironic_node_state(fake_ironic_client, - json.loads(read_json_samples( - "json_samples/ironic-enroll-node-data.json"))) + ironic_node_state = get_ironic_node_state( + fake_ironic_client, + json.loads(read_json_samples("json_samples/ironic-enroll-node-data.json")), + ) assert ironic_node_state in ["enroll", "manageable"] diff --git a/python/understack-workflows/tests/test_sync_nautobot_system_info.py b/python/understack-workflows/tests/test_sync_nautobot_system_info.py index 47ba46ff0..07aeed3ad 100644 --- a/python/understack-workflows/tests/test_sync_nautobot_system_info.py +++ b/python/understack-workflows/tests/test_sync_nautobot_system_info.py @@ -1,7 +1,6 @@ import pytest -from understack_workflows.main.sync_nautobot_system_info import argument_parser, do_sync -from understack_workflows.models import Systeminfo +from understack_workflows.main.sync_nautobot_system_info import argument_parser @pytest.fixture @@ -12,17 +11,31 @@ def fakebot(mocker): def test_parse_device_name(): parser = argument_parser(__name__) with pytest.raises(SystemExit): - parser.parse_args(["--device-id", "FOO", "--bmc_username", "root", "--bmc_password", "password"]) + parser.parse_args( + [ + "--device-id", + "FOO", + "--bmc_username", + "root", + "--bmc_password", + "password", + ] + ) def test_parse_device_id(device_id, bmc_username, bmc_password): parser = argument_parser(__name__) - args = parser.parse_args(["--device-id", str(device_id), "--bmc_username", bmc_username, - "--bmc_password", bmc_password]) + args = parser.parse_args( + [ + "--device-id", + str(device_id), + "--bmc_username", + bmc_username, + "--bmc_password", + bmc_password, + ] + ) assert args.device_id == device_id assert args.bmc_username == bmc_username assert args.bmc_password == bmc_password - - - diff --git a/python/understack-workflows/tests/test_sync_server.py b/python/understack-workflows/tests/test_sync_server.py index 
65c7b0899..fa8859bd8 100644 --- a/python/understack-workflows/tests/test_sync_server.py +++ b/python/understack-workflows/tests/test_sync_server.py @@ -1,9 +1,12 @@ +import json +import pathlib import sys + import pytest -import pathlib -import json -from understack_workflows.main.sync_server import get_args, get_ironic_node, update_ironic_node +from understack_workflows.main.sync_server import get_args +from understack_workflows.main.sync_server import get_ironic_node +from understack_workflows.main.sync_server import update_ironic_node from understack_workflows.node_configuration import IronicNodeConfiguration @@ -16,8 +19,11 @@ def read_json_samples(file_path): @pytest.fixture(autouse=True) def mock_args(monkeypatch): - monkeypatch.setattr(sys, "argv", ["pytest", - read_json_samples("json_samples/event-interface-update.json")]) + monkeypatch.setattr( + sys, + "argv", + ["pytest", read_json_samples("json_samples/event-interface-update.json")], + ) @pytest.fixture @@ -26,50 +32,62 @@ def fake_client(mocker): def get_ironic_node_state(fake_client, node_data): - node = IronicNodeConfiguration.from_event(json.loads(read_json_samples("json_samples/event-interface-update.json"))) + node = IronicNodeConfiguration.from_event( + json.loads(read_json_samples("json_samples/event-interface-update.json")) + ) ironic_node = get_ironic_node(node, fake_client) ironic_node.return_value = node_data - return ironic_node.return_value['provision_state'] + return ironic_node.return_value["provision_state"] def test_args(): var = get_args() - assert var['data']['ip_addresses'][0]['host'] == "10.46.96.156" + assert var["data"]["ip_addresses"][0]["host"] == "10.46.96.156" def test_ironic_node_allowing_states(fake_client): - ironic_node_state = get_ironic_node_state(fake_client, - json.loads(read_json_samples( - "json_samples/ironic-enroll-node-data.json"))) + ironic_node_state = get_ironic_node_state( + fake_client, + json.loads(read_json_samples("json_samples/ironic-enroll-node-data.json")), + ) assert ironic_node_state in ["enroll", "manageable"] def test_ironic_non_allowing_states(fake_client): - ironic_node_state = get_ironic_node_state(fake_client, - json.loads(read_json_samples( - "json_samples/ironic-active-node-data.json"))) + ironic_node_state = get_ironic_node_state( + fake_client, + json.loads(read_json_samples("json_samples/ironic-active-node-data.json")), + ) assert ironic_node_state not in ["enroll", "manageable"] def test_update_ironic_node(fake_client): - node = IronicNodeConfiguration.from_event(json.loads(read_json_samples("json_samples/event-interface-update.json"))) - drac_ip = json.loads(read_json_samples("json_samples/event-interface-update.json"))['data']["ip_addresses"][0]["host"] - - patches = [{'op': 'add', 'path': '/name', 'value': '1327198-GP2S.3.understack.iad3'}, - {'op': 'add', 'path': '/driver', 'value': 'idrac'}, - {'op': 'add', - 'path': '/driver_info/redfish_address', - 'value': 'https://10.46.96.156'}, - {'op': 'add', 'path': '/driver_info/redfish_verify_ca', 'value': False}, - {'op': 'remove', 'path': '/bios_interface'}, - {'op': 'remove', 'path': '/boot_interface'}, - {'op': 'remove', 'path': '/inspect_interface'}, - {'op': 'remove', 'path': '/management_interface'}, - {'op': 'remove', 'path': '/power_interface'}, - {'op': 'remove', 'path': '/vendor_interface'}, - {'op': 'remove', 'path': '/raid_interface'}, - {'op': 'remove', 'path': '/network_interface'}] + node = IronicNodeConfiguration.from_event( + 
json.loads(read_json_samples("json_samples/event-interface-update.json")) + ) + drac_ip = json.loads(read_json_samples("json_samples/event-interface-update.json"))[ + "data" + ]["ip_addresses"][0]["host"] + + patches = [ + {"op": "add", "path": "/name", "value": "1327198-GP2S.3.understack.iad3"}, + {"op": "add", "path": "/driver", "value": "idrac"}, + { + "op": "add", + "path": "/driver_info/redfish_address", + "value": "https://10.46.96.156", + }, + {"op": "add", "path": "/driver_info/redfish_verify_ca", "value": False}, + {"op": "remove", "path": "/bios_interface"}, + {"op": "remove", "path": "/boot_interface"}, + {"op": "remove", "path": "/inspect_interface"}, + {"op": "remove", "path": "/management_interface"}, + {"op": "remove", "path": "/power_interface"}, + {"op": "remove", "path": "/vendor_interface"}, + {"op": "remove", "path": "/raid_interface"}, + {"op": "remove", "path": "/network_interface"}, + ] update_ironic_node(node, drac_ip, fake_client) fake_client.update_node.assert_called_once_with(node.uuid, patches) diff --git a/python/understack-workflows/understack_workflows/main/undersync_device.py b/python/understack-workflows/understack_workflows/main/undersync_device.py index 525bec79f..7ffc4bc49 100644 --- a/python/understack-workflows/understack_workflows/main/undersync_device.py +++ b/python/understack-workflows/understack_workflows/main/undersync_device.py @@ -1,7 +1,7 @@ import argparse import os -from pprint import pprint import sys +from pprint import pprint from uuid import UUID import requests @@ -59,7 +59,7 @@ def update_nautobot_for_provisioning( def vlan_group_id_for(device_id, nautobot): result = nautobot.session.graphql.query( - '{device(id: "%s") { rel_vlan_group_to_devices {id}}}' % device_id + f'{{device(id: "{device_id}") {{ rel_vlan_group_to_devices {{id}}}}}}' ) if not result.json or result.json.get("errors"): raise Exception(f"Nautobot vlan_group graphql query failed: {result}") @@ -69,14 +69,13 @@ def vlan_group_id_for(device_id, nautobot): def update_nautobot_for_tenant( nb_url, nb_token, server_interface_mac: str, ucvni_id: UUID ) -> UUID: - """Runs a Nautobot Job to update a switch interface for tenant mode + """Runs a Nautobot Job to update a switch interface for tenant mode. The nautobot job will assign vlans as required and set the interface into the correct mode for "normal" tenant operation. The vlan group ID is returned. 
""" - # Making this http request directly because it was not clear how to get # the pynautobot api client to call an arbitrary endpoint: diff --git a/python/understack-workflows/understack_workflows/models.py b/python/understack-workflows/understack_workflows/models.py index db3d6db86..298c2a586 100644 --- a/python/understack-workflows/understack_workflows/models.py +++ b/python/understack-workflows/understack_workflows/models.py @@ -104,9 +104,11 @@ class Systeminfo: @classmethod def from_redfish(cls, chassis_data) -> Systeminfo: - return cls(asset_tag=chassis_data.sku, - serial_number=chassis_data.serial_number, - platform=chassis_data.model) + return cls( + asset_tag=chassis_data.sku, + serial_number=chassis_data.serial_number, + platform=chassis_data.model, + ) @dataclass diff --git a/python/understack-workflows/understack_workflows/nautobot.py b/python/understack-workflows/understack_workflows/nautobot.py index 529de2112..07ea6843a 100644 --- a/python/understack-workflows/understack_workflows/nautobot.py +++ b/python/understack-workflows/understack_workflows/nautobot.py @@ -122,11 +122,13 @@ def interface_by_id(self, interface_id: UUID) -> NautobotInterface: self.exit_with_error(f"Interface {interface_id!s} not found in Nautobot") return interface - def non_lag_interface_by_mac(self, device_id: UUID, mac_address: str) -> list[NautobotInterface]: + def non_lag_interface_by_mac( + self, device_id: UUID, mac_address: str + ) -> list[NautobotInterface]: interfaces = self.session.dcim.interfaces.filter( device_id=device_id, mac_address=mac_address, - type__n = "lag", + type__n="lag", ) if not interfaces: self.exit_with_error( @@ -147,7 +149,7 @@ def update_cf(self, device_id: UUID, field_name: str, field_value: str): def update_switch_interface_status( self, device_id: UUID, server_interface_mac: str, new_status: str ) -> NautobotInterface: - """Change the Interface Status in Nautobot for interfaces + """Change the Interface Status in Nautobot for interfaces. The device_id and interface MAC address parameters identify one or more server interfaces. @@ -160,12 +162,15 @@ def update_switch_interface_status( The interface is returned. 
""" - server_interface = self.non_lag_interface_by_mac(device_id, server_interface_mac) + server_interface = self.non_lag_interface_by_mac( + device_id, server_interface_mac + ) connected_endpoint = server_interface.connected_endpoint if not connected_endpoint: raise Exception( - f"Interface {server_interface_mac=} {server_interface.type} is not connected in Nautobot" + f"Interface {server_interface_mac=} {server_interface.type} " + "is not connected in Nautobot" ) switch_interface_id = connected_endpoint.id self.logger.debug( diff --git a/python/understack-workflows/understack_workflows/port_configuration.py b/python/understack-workflows/understack_workflows/port_configuration.py index 2ac369017..be78b01ad 100644 --- a/python/understack-workflows/understack_workflows/port_configuration.py +++ b/python/understack-workflows/understack_workflows/port_configuration.py @@ -14,6 +14,6 @@ class PortConfiguration(BaseModel): name: str # port name # Ironic requires the port names to be globally unique - @field_serializer('name') + @field_serializer("name") def serialize_name(self, name: str): return f"{self.uuid} {name}" diff --git a/python/understack-workflows/understack_workflows/undersync/client.py b/python/understack-workflows/understack_workflows/undersync/client.py index b58637d0e..8474c78d0 100644 --- a/python/understack-workflows/understack_workflows/undersync/client.py +++ b/python/understack-workflows/understack_workflows/undersync/client.py @@ -9,6 +9,7 @@ def __init__( auth_token: str, api_url="http://undersync-service.undersync.svc.cluster.local:8080", ) -> None: + """Simple client for Undersync.""" self.token = auth_token self.api_url = api_url diff --git a/scripts/argo-workflows-to-mkdocs.py b/scripts/argo-workflows-to-mkdocs.py index beff2d229..06cd44f81 100644 --- a/scripts/argo-workflows-to-mkdocs.py +++ b/scripts/argo-workflows-to-mkdocs.py @@ -11,7 +11,11 @@ log = logging.getLogger(__name__) -logging.basicConfig(stream=sys.stdout, level=logging.DEBUG, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s") +logging.basicConfig( + stream=sys.stdout, + level=logging.DEBUG, + format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", +) SHOW_ARTIFACTS = True @@ -108,15 +112,20 @@ def main(): for workflow in subfolders: log.debug(f"working on workflow: {workflow.name} - {workflow.path}") - included_extensions = ['.yml', '.yaml'] + included_extensions = [".yml", ".yaml"] templates_path = workflow.path + "/workflowtemplates/" log.debug(f"templates_path: {templates_path}") try: - workflow_files = [f"{templates_path}/{fn}" for fn in os.listdir(templates_path) - if any(fn.endswith(ext) for ext in included_extensions)] + workflow_files = [ + f"{templates_path}/{fn}" + for fn in os.listdir(templates_path) + if any(fn.endswith(ext) for ext in included_extensions) + ] log.debug(f"found workflow files: {workflow_files}") except FileNotFoundError: - log.warning(f"Directory: {templates_path} unable to find workflow templates") + log.warning( + f"Directory: {templates_path} unable to find workflow templates" + ) continue workflows = {} @@ -145,40 +154,63 @@ def main(): output_file = output_dir + f"/{workflow.name}.md" - generate_mermaid(workflows.values(), nodes, workflow.name, output_file, workflow_readme) + generate_mermaid( + workflows.values(), nodes, workflow.name, output_file, workflow_readme + ) def make_workflow(w_yaml): - w_name = w_yaml['metadata']['name'] - workflow_title = w_yaml['metadata'].get('annotations', {}).get('workflows.argoproj.io/title', 'Title not set in 
workflow') - workflow_description = w_yaml['metadata'].get('annotations', {}).get('workflows.argoproj.io/description', 'Description not set in workflow').strip("\n") - w = Workflow(id=w_name, name=w_name, title=workflow_title, description=workflow_description, nodes=[]) - for n_yaml in w_yaml['spec']['templates']: - n_id = w_name + "__" + n_yaml['name'] - image = n_yaml.get('container', {}).get('image', '').split("/")[-1].split(":")[0].replace("ai-workflow-", "") - script = n_yaml.get('script', {}).get('image', "") + w_name = w_yaml["metadata"]["name"] + workflow_title = ( + w_yaml["metadata"] + .get("annotations", {}) + .get("workflows.argoproj.io/title", "Title not set in workflow") + ) + workflow_description = ( + w_yaml["metadata"] + .get("annotations", {}) + .get("workflows.argoproj.io/description", "Description not set in workflow") + .strip("\n") + ) + w = Workflow( + id=w_name, + name=w_name, + title=workflow_title, + description=workflow_description, + nodes=[], + ) + for n_yaml in w_yaml["spec"]["templates"]: + n_id = w_name + "__" + n_yaml["name"] + image = ( + n_yaml.get("container", {}) + .get("image", "") + .split("/")[-1] + .split(":")[0] + .replace("ai-workflow-", "") + ) + script = n_yaml.get("script", {}).get("image", "") input_params = {} - for p_yaml in n_yaml.get('inputs', {}).get('parameters', []): - if 'value' in p_yaml and "{{" not in p_yaml['value']: - input_params[p_yaml['name']] = p_yaml['value'] + for p_yaml in n_yaml.get("inputs", {}).get("parameters", []): + if "value" in p_yaml and "{{" not in p_yaml["value"]: + input_params[p_yaml["name"]] = p_yaml["value"] output_params = {} - for p_yaml in n_yaml.get('outputs', {}).get('parameters', []): - if 'value' in p_yaml and "{{" not in p_yaml['value']: - output_params[p_yaml['name']] = p_yaml['value'] + for p_yaml in n_yaml.get("outputs", {}).get("parameters", []): + if "value" in p_yaml and "{{" not in p_yaml["value"]: + output_params[p_yaml["name"]] = p_yaml["value"] input_artifacts = [] - for a_yaml in n_yaml.get('inputs', {}).get('artifacts', []): - input_artifacts.append(a_yaml['name']) + for a_yaml in n_yaml.get("inputs", {}).get("artifacts", []): + input_artifacts.append(a_yaml["name"]) output_artifacts = [] - for a_yaml in n_yaml.get('outputs', {}).get('artifacts', []): - output_artifacts.append(a_yaml['name']) + for a_yaml in n_yaml.get("outputs", {}).get("artifacts", []): + output_artifacts.append(a_yaml["name"]) n = Node( id=n_id, - name=n_yaml['name'], + name=n_yaml["name"], image=image, script=script, incoming_count=0, @@ -189,141 +221,167 @@ def make_workflow(w_yaml): tasks=[], steps=[], ) - tasks_yaml = n_yaml.get('dag', {}).get('tasks', []) + tasks_yaml = n_yaml.get("dag", {}).get("tasks", []) for t_yaml in tasks_yaml or []: - t_id = n_id + "__" + t_yaml['name'] - dependencies = [n_id + "__" + d for d in t_yaml.get('dependencies', [])] - if 'depends' in t_yaml: - for d in parse_depends(t_yaml['depends']): + t_id = n_id + "__" + t_yaml["name"] + dependencies = [n_id + "__" + d for d in t_yaml.get("dependencies", [])] + if "depends" in t_yaml: + for d in parse_depends(t_yaml["depends"]): dependencies.append(n_id + "__" + d) dependencies = list(dict.fromkeys(dependencies)) ref_node = None - if 'templateRef' in t_yaml: - ref_node = t_yaml['templateRef']['name'] + "__" + t_yaml['templateRef']['template'] - elif 'template' in t_yaml: - ref_node = w_name + "__" + t_yaml['template'] + if "templateRef" in t_yaml: + ref_node = ( + t_yaml["templateRef"]["name"] + + "__" + + t_yaml["templateRef"]["template"] + ) 
+ elif "template" in t_yaml: + ref_node = w_name + "__" + t_yaml["template"] params = {} - for p_yaml in t_yaml.get('arguments', {}).get('parameters', []): - if 'value' in p_yaml and "{{" not in p_yaml['value']: - params[p_yaml['name']] = p_yaml['value'] + for p_yaml in t_yaml.get("arguments", {}).get("parameters", []): + if "value" in p_yaml and "{{" not in p_yaml["value"]: + params[p_yaml["name"]] = p_yaml["value"] def parse_artifacts(s): s = s.strip() if s.startswith("{{") and s.endswith("}}"): s = s[2:-2].strip() ret = dict() - matches = re.findall(r'inputs.artifacts.([\w\-_]+)\b', s) + matches = re.findall(r"inputs.artifacts.([\w\-_]+)\b", s) if matches: ret.update(dict.fromkeys(matches)) - matches = re.findall(r'input.artifacts.([\w\-_]+)', s) + matches = re.findall(r"input.artifacts.([\w\-_]+)", s) if matches: ret.update(dict.fromkeys(matches)) - matches = re.findall(r'tasks.([\w\-_]+).outputs.artifacts.([\w\-_]+)', s) + matches = re.findall( + r"tasks.([\w\-_]+).outputs.artifacts.([\w\-_]+)", s + ) if matches: - ret.update({m[0] + "__" + m[1] :"" for m in matches}) + ret.update({m[0] + "__" + m[1]: "" for m in matches}) return list(ret.keys()) if ret else [s] input_artifacts = [] - for i_yaml in t_yaml.get('arguments', {}).get('artifacts', []): - if i_yaml.get('from'): - input_artifacts.extend([n.id + "__" + a_id for a_id in parse_artifacts(i_yaml['from'])]) - elif i_yaml.get('fromExpression'): - match = re.match(r'(.*)\?(.*):(.*)', i_yaml['fromExpression']) + for i_yaml in t_yaml.get("arguments", {}).get("artifacts", []): + if i_yaml.get("from"): + input_artifacts.extend( + [n.id + "__" + a_id for a_id in parse_artifacts(i_yaml["from"])] + ) + elif i_yaml.get("fromExpression"): + match = re.match(r"(.*)\?(.*):(.*)", i_yaml["fromExpression"]) if match: _, a1, a2 = match.groups() - input_artifacts.extend([n.id + "__" + a_id for a_id in parse_artifacts(a1)]) - input_artifacts.extend([n.id + "__" + a_id for a_id in parse_artifacts(a2)]) + input_artifacts.extend( + [n.id + "__" + a_id for a_id in parse_artifacts(a1)] + ) + input_artifacts.extend( + [n.id + "__" + a_id for a_id in parse_artifacts(a2)] + ) else: raise ValueError("Unknown artifact type: " + str(i_yaml)) else: raise ValueError("Unknown artifact type: " + str(i_yaml)) t = Task( - id=t_id, name=t_yaml['name'], + id=t_id, + name=t_yaml["name"], dependencies=dependencies, - when=t_yaml.get('when'), + when=t_yaml.get("when"), ref_node=ref_node, input_params=params, - input_artifacts=input_artifacts + input_artifacts=input_artifacts, ) n.tasks.append(t) n.tasks.sort(key=lambda t: t.name) - steps_yaml = n_yaml.get('steps', []) + steps_yaml = n_yaml.get("steps", []) log.debug(f"steps_yaml: {steps_yaml}") counter = 0 - previous_yaml = None + _previous_yaml = None for t_yaml in steps_yaml or []: log.debug(f"processing step: {t_yaml}") - if type(t_yaml) == list: + if isinstance(t_yaml, list): t_yaml = t_yaml[0] - log.debug(f"t_yaml is a list? converted it.") + log.debug("t_yaml is a list? 
converted it.") - t_id = n_id + "__" + t_yaml['name'] - dependencies = [n_id + "__" + d for d in t_yaml.get('dependencies', [])] - if 'depends' in t_yaml: - for d in parse_depends(t_yaml['depends']): + t_id = n_id + "__" + t_yaml["name"] + dependencies = [n_id + "__" + d for d in t_yaml.get("dependencies", [])] + if "depends" in t_yaml: + for d in parse_depends(t_yaml["depends"]): dependencies.append(n_id + "__" + d) dependencies = list(dict.fromkeys(dependencies)) log.debug(f"steps: dependencies: {dependencies}") ref_node = None - if 'templateRef' in t_yaml: - ref_node = t_yaml['templateRef']['name'] + "__" + t_yaml['templateRef']['template'] - elif 'template' in t_yaml: - ref_node = w_name + "__" + t_yaml['template'] + if "templateRef" in t_yaml: + ref_node = ( + t_yaml["templateRef"]["name"] + + "__" + + t_yaml["templateRef"]["template"] + ) + elif "template" in t_yaml: + ref_node = w_name + "__" + t_yaml["template"] params = {} - for p_yaml in t_yaml.get('arguments', {}).get('parameters', []): - if 'value' in p_yaml and "{{" not in p_yaml['value']: - params[p_yaml['name']] = p_yaml['value'] + for p_yaml in t_yaml.get("arguments", {}).get("parameters", []): + if "value" in p_yaml and "{{" not in p_yaml["value"]: + params[p_yaml["name"]] = p_yaml["value"] def parse_artifacts(s): s = s.strip() if s.startswith("{{") and s.endswith("}}"): s = s[2:-2].strip() ret = dict() - matches = re.findall(r'inputs.artifacts.([\w\-_]+)\b', s) + matches = re.findall(r"inputs.artifacts.([\w\-_]+)\b", s) if matches: ret.update(dict.fromkeys(matches)) - matches = re.findall(r'input.artifacts.([\w\-_]+)', s) + matches = re.findall(r"input.artifacts.([\w\-_]+)", s) if matches: ret.update(dict.fromkeys(matches)) - matches = re.findall(r'tasks.([\w\-_]+).outputs.artifacts.([\w\-_]+)', s) + matches = re.findall( + r"tasks.([\w\-_]+).outputs.artifacts.([\w\-_]+)", s + ) if matches: - ret.update({m[0] + "__" + m[1] :"" for m in matches}) + ret.update({m[0] + "__" + m[1]: "" for m in matches}) return list(ret.keys()) if ret else [s] input_artifacts = [] - for i_yaml in t_yaml.get('arguments', {}).get('artifacts', []): - if i_yaml.get('from'): - input_artifacts.extend([n.id + "__" + a_id for a_id in parse_artifacts(i_yaml['from'])]) - elif i_yaml.get('fromExpression'): - match = re.match(r'(.*)\?(.*):(.*)', i_yaml['fromExpression']) + for i_yaml in t_yaml.get("arguments", {}).get("artifacts", []): + if i_yaml.get("from"): + input_artifacts.extend( + [n.id + "__" + a_id for a_id in parse_artifacts(i_yaml["from"])] + ) + elif i_yaml.get("fromExpression"): + match = re.match(r"(.*)\?(.*):(.*)", i_yaml["fromExpression"]) if match: _, a1, a2 = match.groups() - input_artifacts.extend([n.id + "__" + a_id for a_id in parse_artifacts(a1)]) - input_artifacts.extend([n.id + "__" + a_id for a_id in parse_artifacts(a2)]) + input_artifacts.extend( + [n.id + "__" + a_id for a_id in parse_artifacts(a1)] + ) + input_artifacts.extend( + [n.id + "__" + a_id for a_id in parse_artifacts(a2)] + ) else: raise ValueError("Unknown artifact type: " + str(i_yaml)) else: raise ValueError("Unknown artifact type: " + str(i_yaml)) t = Step( - id=t_id, name=t_yaml['name'], + id=t_id, + name=t_yaml["name"], dependencies=dependencies, - when=t_yaml.get('when'), + when=t_yaml.get("when"), ref_node=ref_node, input_params=params, - input_artifacts=input_artifacts + input_artifacts=input_artifacts, ) n.steps.append(t) n.steps.sort(key=lambda t: t.name) counter += 1 - previous_yaml = t_yaml + _previous_yaml = t_yaml - if w_yaml.get('spec', 
{}).get('entrypoint') == n_yaml['name']: + if w_yaml.get("spec", {}).get("entrypoint") == n_yaml["name"]: n.is_entrypoint = True n.tasks.sort(key=lambda t: t.name) @@ -331,6 +389,7 @@ def parse_artifacts(s): w.nodes.append(n) return w + def show_node(n): if n.is_entrypoint: return True @@ -344,13 +403,14 @@ def show_node(n): return n.incoming_count > 1 return False + def generate_mermaid(workflows, nodes, output_name, output_file, workflow_readme): bases = [] # render nodes and tasks for w in workflows: bases.append(f"subgraph {w.name}") bases.append(" direction TB") - bases.append(" style "+w.name+" fill:#fafaff;") + bases.append(" style " + w.name + " fill:#fafaff;") if SHOW_WORKFLOW_DESCRIPTIONS: bases.append(" subgraph Description") @@ -361,89 +421,101 @@ def generate_mermaid(workflows, nodes, output_name, output_file, workflow_readme for n in w.nodes: if show_node(n): # render the node itself - name = f"{n.name}" + name = f'{n.name}' if n.image: - name += f"\\nimage: {n.image}" + name += f'\\nimage: {n.image}' if n.script: - name += f"\\nscript: {n.script}" + name += f'\\nscript: {n.script}' if n.input_params: - name += f"
"
+                    name += '<hr>'
                     for p in n.input_params:
                         # some of our input param values are '{}' which mermaid doesn't like
-                        convert_input_params = n.input_params[p].replace("{", "&#123;").replace("}", "&#125;")
+                        convert_input_params = (
+                            n.input_params[p]
+                            .replace("{", "&#123;")
+                            .replace("}", "&#125;")
+                        )
                         name += f"{p}={convert_input_params}<br>"
                 name += "<hr>
" - bases.append(" "+n.id+"{{"+name+"}}") - bases.append(" style "+n.id+" fill:lightgray,stroke:#aaa;") + bases.append(" " + n.id + "{{" + name + "}}") + bases.append(" style " + n.id + " fill:lightgray,stroke:#aaa;") # render output artifacts if SHOW_ARTIFACTS: for a in n.input_artifacts: a_id = n.id + "__" + a - name = f"{a}" + name = f'{a}' bases.append(f" {a_id}({name})") - bases.append(" style "+a_id+" fill:gold,stroke:#222;") + bases.append(" style " + a_id + " fill:gold,stroke:#222;") for t in n.tasks: # render the task - name = f"{t.name}" + name = f'{t.name}' if t.when: when = t.when.replace("{{", "").replace("}}", "") - name += f"
<br>when: {when}"
-                if t.ref_node and nodes.get(t.ref_node) and not show_node(nodes[t.ref_node]):
+                    name += f'<br>when: {when}'
+                if (
+                    t.ref_node
+                    and nodes.get(t.ref_node)
+                    and not show_node(nodes[t.ref_node])
+                ):
                     if nodes[t.ref_node].image:
-                        name += f"<br>image: {nodes[t.ref_node].image}"
+                        name += f'<br>image: {nodes[t.ref_node].image}'
                     if nodes[t.ref_node].script:
-                        name += f"<br>script: {nodes[t.ref_node].script}"
+                        name += f'<br>script: {nodes[t.ref_node].script}'
                 if t.input_params:
-                    name += f"<hr>"
+                    name += '<hr>'
                     for p in t.input_params:
                         name += f"{p}={t.input_params[p]}<br>"
                 name += "<hr>
" bases.append(f" {t.id}[{name}]") - bases.append(" style "+t.id+" fill:white;") + bases.append(" style " + t.id + " fill:white;") # render artifacts if SHOW_ARTIFACTS: for a in t.input_artifacts: - name = f"{a}" + name = f'{a}' # bases.append(f" {a_id}({name})") # bases.append(" style "+a_id+" fill:gold,stroke:#222;") if SHOW_ARTIFACTS and t.ref_node and nodes.get(t.ref_node): for a in nodes[t.ref_node].output_artifacts: a_id = t.id + "__" + a - name = f"{a}" + name = f'{a}' bases.append(f" {a_id}({name})") - bases.append(" style "+a_id+" fill:gold,stroke:#222;") + bases.append(" style " + a_id + " fill:gold,stroke:#222;") for t in n.steps: log.debug(f"rendering step: {t}") # render the step - name = f"{t.name}" + name = f'{t.name}' if t.when: when = t.when.replace("{{", "").replace("}}", "") - name += f"
<br>when: {when}"
-                if t.ref_node and nodes.get(t.ref_node) and not show_node(nodes[t.ref_node]):
+                    name += f'<br>when: {when}'
+                if (
+                    t.ref_node
+                    and nodes.get(t.ref_node)
+                    and not show_node(nodes[t.ref_node])
+                ):
                     if nodes[t.ref_node].image:
-                        name += f"<br>image: {nodes[t.ref_node].image}"
+                        name += f'<br>image: {nodes[t.ref_node].image}'
                     if nodes[t.ref_node].script:
-                        name += f"<br>script: {nodes[t.ref_node].script}"
+                        name += f'<br>script: {nodes[t.ref_node].script}'
                 if t.input_params:
-                    name += f"<hr>"
+                    name += '<hr>'
                     for p in t.input_params:
                         name += f"{p}={t.input_params[p]}<br>"
                 name += "<hr>
" bases.append(f" {t.id}[{name}]") - bases.append(" style "+t.id+" fill:white;") + bases.append(" style " + t.id + " fill:white;") # render artifacts if SHOW_ARTIFACTS: for a in t.input_artifacts: - name = f"{a}" + name = f'{a}' # bases.append(f" {a_id}({name})") # bases.append(" style "+a_id+" fill:gold,stroke:#222;") if SHOW_ARTIFACTS and t.ref_node and nodes.get(t.ref_node): for a in nodes[t.ref_node].output_artifacts: a_id = t.id + "__" + a - name = f"{a}" + name = f'{a}' bases.append(f" {a_id}({name})") - bases.append(" style "+a_id+" fill:gold,stroke:#222;") + bases.append(" style " + a_id + " fill:gold,stroke:#222;") bases.append("end") @@ -532,9 +604,13 @@ def generate_mermaid(workflows, nodes, output_name, output_file, workflow_readme if flow_lines: out.append(f"linkStyle {','.join(flow_lines)} stroke:#888,stroke-width:2px;") if interpackage_lines: - out.append(f"linkStyle {','.join(interpackage_lines)} stroke:#888,stroke-width:2px;") + out.append( + f"linkStyle {','.join(interpackage_lines)} stroke:#888,stroke-width:2px;" + ) if artifact_lines: - out.append(f"linkStyle {','.join(artifact_lines)} stroke:#fa0,stroke-width:2px;") + out.append( + f"linkStyle {','.join(artifact_lines)} stroke:#fa0,stroke-width:2px;" + ) mermaid_output = "\n".join(out) with open(output_file, "w") as f: @@ -563,8 +639,8 @@ def parse_depends(depends): class Tokenizer: def __init__(self, input_string): - for op in ('||', '&&', '!', '(', ')'): - input_string = input_string.replace(op, ' ' + op + ' ') + for op in ("||", "&&", "!", "(", ")"): + input_string = input_string.replace(op, " " + op + " ") self.tokens = input_string.split() self.position = 0 @@ -592,16 +668,16 @@ def pr(self, *args): def parse_expression(self): token = self.tokenizer.peek() - if token == '(': + if token == "(": self.parse_parentheses() - elif token == '!': + elif token == "!": self.parse_not() else: self.parse_term() def parse_term(self): token = self.tokenizer.consume() - if token is None or token in ('&&', '||', '!', ')'): + if token is None or token in ("&&", "||", "!", ")"): raise ValueError("Unexpected token: " + str(token)) self.term_prefixes.append(token.split(".")[0]) self.pr(f"term({token})") @@ -610,7 +686,7 @@ def parse_parentheses(self): self.pr("(") self.tokenizer.consume() # Consume '(' self.parse_or() - if self.tokenizer.consume() != ')': + if self.tokenizer.consume() != ")": raise ValueError("Missing closing parenthesis") self.pr(")") @@ -620,15 +696,17 @@ def parse_not(self): self.parse_expression() def parse_and(self): - left = self.parse_expression() - while self.tokenizer.peek() == '&&': + # TODO: this doesn't make sense, the variable was unused + _ = self.parse_expression() + while self.tokenizer.peek() == "&&": self.pr(" and ") self.tokenizer.consume() # Consume 'AND' self.parse_expression() def parse_or(self): - left = self.parse_and() - while self.tokenizer.peek() == '||': + # TODO: this doesn't make sense, the variable was unused + _ = self.parse_and() + while self.tokenizer.peek() == "||": self.pr(" or ") self.tokenizer.consume() # Consume 'OR' self.parse_and() @@ -641,7 +719,7 @@ def parse_or(self): def load_readme(file_path): try: - with open(file_path, 'r') as stream: + with open(file_path, "r") as stream: return stream.read() except Exception: log.exception("Problem loading README.md file") @@ -649,7 +727,7 @@ def load_readme(file_path): def parse_yaml(file_path): - with open(file_path, 'r') as stream: + with open(file_path, "r") as stream: try: return yaml.safe_load(stream) except yaml.YAMLError as 
exc: