diff --git a/.github/workflows/Announcements.yaml b/.github/workflows/Announcements.yaml index 09999dcc..6876c084 100644 --- a/.github/workflows/Announcements.yaml +++ b/.github/workflows/Announcements.yaml @@ -104,4 +104,3 @@ jobs: do MM_CHANNEL_ID="${channel}" ./src/notifications/send_to_mattermost.sh done - diff --git a/.github/workflows/Documentation.yaml b/.github/workflows/Documentation.yaml index 0febb6c5..e67f1210 100644 --- a/.github/workflows/Documentation.yaml +++ b/.github/workflows/Documentation.yaml @@ -10,7 +10,7 @@ on: oci-image-name: description: 'OCI image to generate the documentation for' required: true - external_ref_id: #(1) + external_ref_id: # (1) description: 'Optional ID for unique run detection' required: false type: string @@ -34,7 +34,7 @@ jobs: oci-img-path: ${{ steps.validate-image.outputs.img-path }} oci-img-name: ${{ steps.validate-image.outputs.img-name }} steps: - - name: ${{ inputs.external_ref_id }} #(2) + - name: ${{ inputs.external_ref_id }} # (2) if: ${{ github.event_name == 'workflow_dispatch' }} run: echo 'Started by ${{ inputs.external_ref_id }}' >> "$GITHUB_STEP_SUMMARY" diff --git a/.github/workflows/Image.yaml b/.github/workflows/Image.yaml index 79bd6ed9..ae393492 100644 --- a/.github/workflows/Image.yaml +++ b/.github/workflows/Image.yaml @@ -24,7 +24,7 @@ on: required: true type: boolean default: false - external_ref_id: #(1) + external_ref_id: # (1) description: 'Optional ID for unique run detection' required: false type: string @@ -54,7 +54,7 @@ jobs: oci-img-path: ${{ steps.validate-image.outputs.img-path }} oci-img-name: ${{ steps.validate-image.outputs.img-name }} steps: - - name: ${{ inputs.external_ref_id }} #(2) + - name: ${{ inputs.external_ref_id }} # (2) if: ${{ github.event_name == 'workflow_dispatch' }} run: echo 'Started by ${{ inputs.external_ref_id }}' >> "$GITHUB_STEP_SUMMARY" diff --git a/.github/workflows/Release.yaml b/.github/workflows/Release.yaml index c73fa403..dbc6a249 100644 --- a/.github/workflows/Release.yaml +++ b/.github/workflows/Release.yaml @@ -11,7 +11,7 @@ on: description: 'Cache key (to fetch image trigger from cache)' required: false type: string - external_ref_id: #(1) + external_ref_id: # (1) description: 'Optional ID for unique run detection' required: false type: string @@ -34,7 +34,7 @@ jobs: outputs: oci-image-name: ${{ steps.get-image-name.outputs.img-name }} steps: - - name: ${{ inputs.external_ref_id }} #(2) + - name: ${{ inputs.external_ref_id }} # (2) run: echo 'Started by ${{ inputs.external_ref_id }}' >> "$GITHUB_STEP_SUMMARY" - uses: actions/checkout@v4 @@ -174,8 +174,8 @@ jobs: steps: - uses: actions/checkout@v4 with: - fetch-depth: 0 - ref: ${{ matrix.canonical-tag }} + fetch-depth: 0 + ref: ${{ matrix.canonical-tag }} - uses: dev-drprasad/delete-tag-and-release@v1.0 # We force delete an existing tag because otherwise we won't get diff --git a/.github/workflows/Tests.yaml b/.github/workflows/Tests.yaml index e2a3fce0..ecd58cec 100644 --- a/.github/workflows/Tests.yaml +++ b/.github/workflows/Tests.yaml @@ -40,8 +40,8 @@ on: default: 'cache' type: choice options: - - cache - - registry + - cache + - registry cache-key: description: 'Cache key (when fetching from cache)' required: false @@ -51,7 +51,7 @@ on: required: true type: string default: '.vulnerability-report.json' - external_ref_id: #(1) + external_ref_id: # (1) description: 'Optional ID for unique run detection' required: false type: string @@ -72,7 +72,7 @@ jobs: outputs: test-cache-key: ${{ steps.cache.outputs.key }} steps: - - 
name: ${{ inputs.external_ref_id }} #(2) + - name: ${{ inputs.external_ref_id }} # (2) run: echo 'Started by ${{ inputs.external_ref_id }}' >> "$GITHUB_STEP_SUMMARY" - uses: actions/cache/restore@v4 diff --git a/.github/workflows/_Test-OCI-Factory.yaml b/.github/workflows/_Test-OCI-Factory.yaml index a86751e4..e9708a04 100644 --- a/.github/workflows/_Test-OCI-Factory.yaml +++ b/.github/workflows/_Test-OCI-Factory.yaml @@ -12,10 +12,58 @@ on: - "examples/**" - "oci/mock*" - "src/**" + - "tools/**" + - "tests/**" - "!tools/workflow-engine/**" - "!tools/cli-client/**" +env: + # local path to clone the oci-factory to + + # path of pytest junit output + PYTEST_RESULT_PATH: pytest_results.xml + + jobs: + + pytest: + # Trigger python unit tests across the repository + name: pytest + runs-on: ubuntu-22.04 + steps: + + # Job Setup + - uses: actions/checkout@v4 + with: + fetch-depth: 1 + + - uses: actions/setup-python@v5 + with: + python-version: "3.x" + + # Note: Add additional dependency installation lines as required below + # test-oci-factory/pytest requirements + - run: pip install -r tests/etc/requirements.txt + + + - name: Run pytest + continue-on-error: true + run: | + python3 -m pytest --junit-xml "${{ env.PYTEST_RESULT_PATH }}" + + - name: Generate Summary + if: ${{ !cancelled() }} + run: | + python3 -m tools.junit_to_markdown --input-junit "${{ env.PYTEST_RESULT_PATH }}" >> $GITHUB_STEP_SUMMARY + + - name: Upload pytest Result + if: ${{ !cancelled() }} + uses: actions/upload-artifact@v4 + with: + name: ${{ env.PYTEST_RESULT_PATH }} + path: ${{ env.PYTEST_RESULT_PATH }} + if-no-files-found: error + test-workflows: name: Trigger internal tests for mock-rock uses: ./.github/workflows/Image.yaml diff --git a/src/docs/generate_oci_doc_yaml.py b/src/docs/generate_oci_doc_yaml.py index c159e2b0..5853d3cc 100755 --- a/src/docs/generate_oci_doc_yaml.py +++ b/src/docs/generate_oci_doc_yaml.py @@ -265,9 +265,7 @@ def build_releases_data( # Set the support date if all_tracks.get(track_base): eol = parser.parse(all_tracks[track_base]) - release_data["support"] = { - "until": eol.strftime("%m/%Y") - } + release_data["support"] = {"until": eol.strftime("%m/%Y")} releases.append(release_data) diff --git a/src/image/prepare_single_image_build_matrix.py b/src/image/prepare_single_image_build_matrix.py index 2c44b9fb..b2657354 100755 --- a/src/image/prepare_single_image_build_matrix.py +++ b/src/image/prepare_single_image_build_matrix.py @@ -67,16 +67,21 @@ def validate_image_trigger(data: dict) -> None: if args.infer_image_track: import sys + sys.path.append("src/") from git import Repo from tempfile import TemporaryDirectory as tempdir from uploads.infer_image_track import get_base_and_track + with tempdir() as d: url = f"https://github.com/{builds[img_number]['source']}.git" repo = Repo.clone_from(url, d) repo.git.checkout(builds[img_number]["commit"]) # get the base image from the rockcraft.yaml file - with open(f"{d}/{builds[img_number]['directory']}/rockcraft.yaml", encoding="UTF-8") as rockcraft_file: + with open( + f"{d}/{builds[img_number]['directory']}/rockcraft.yaml", + encoding="UTF-8", + ) as rockcraft_file: rockcraft_yaml = yaml.load(rockcraft_file, Loader=yaml.BaseLoader) base_release, track = get_base_and_track(rockcraft_yaml) @@ -86,14 +91,16 @@ def validate_image_trigger(data: dict) -> None: with open( f"{args.revision_data_dir}/{builds[img_number]['revision']}", "w", - encoding="UTF-8" + encoding="UTF-8", ) as data_file: json.dump(builds[img_number], data_file) # Add dir_identifier to 
assemble the cache key and artefact path # No need to write it to rev data file since it's only used in matrix - builds[img_number]["dir_identifier"] = builds[img_number]["directory"].rstrip("/").replace("/", "_") - + builds[img_number]["dir_identifier"] = ( + builds[img_number]["directory"].rstrip("/").replace("/", "_") + ) + # set an output as a marker for later knowing if we need to release if "release" in builds[img_number]: release_to = "true" diff --git a/src/tests/get_released_revisions.py b/src/tests/get_released_revisions.py index eb64ad24..083861d8 100755 --- a/src/tests/get_released_revisions.py +++ b/src/tests/get_released_revisions.py @@ -94,9 +94,7 @@ def get_image_name_in_registry(img_name: str, revision: str) -> str: ) continue elif not risks.get("end-of-life"): - logging.warning( - f"Track {track} is missing its end-of-life field" - ) + logging.warning(f"Track {track} is missing its end-of-life field") for key, targets in risks.items(): if key == "end-of-life": diff --git a/src/uploads/infer_image_track.py b/src/uploads/infer_image_track.py index bf277e63..14087d81 100755 --- a/src/uploads/infer_image_track.py +++ b/src/uploads/infer_image_track.py @@ -57,7 +57,6 @@ def get_base_and_track(rockcraft_yaml) -> tuple[str, str]: ) as rockcraft_file: rockcraft_yaml = yaml.load(rockcraft_file, Loader=yaml.BaseLoader) - base_release, track = get_base_and_track(rockcraft_yaml) print(f"rock track: {track}") diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 00000000..0b7f7c68 --- /dev/null +++ b/tests/__init__.py @@ -0,0 +1,3 @@ +from pathlib import Path + +DATA_DIR = Path(__file__).parent / "data" diff --git a/tests/data/junit_xml_failure.xml b/tests/data/junit_xml_failure.xml new file mode 100644 index 00000000..fb0ebe93 --- /dev/null +++ b/tests/data/junit_xml_failure.xml @@ -0,0 +1,18 @@ + + + + + + + + + + def test_example_failure(): + > assert False, "This is to exemplify the output of a failed unit test" + E AssertionError: This is to exemplify the output of a failed unit test + E assert False + oci-factory/src/docs/test/test_generate_oci_doc_yaml.py:8: AssertionError + + + + \ No newline at end of file diff --git a/tests/etc/requirements.txt b/tests/etc/requirements.txt new file mode 100644 index 00000000..fe93bd52 --- /dev/null +++ b/tests/etc/requirements.txt @@ -0,0 +1 @@ +pytest==8.3.2 diff --git a/tests/fixtures/__init__.py b/tests/fixtures/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/fixtures/buffers.py b/tests/fixtures/buffers.py new file mode 100644 index 00000000..694e9a44 --- /dev/null +++ b/tests/fixtures/buffers.py @@ -0,0 +1,9 @@ +import pytest +from io import StringIO + + +@pytest.fixture +def str_buff(): + """String IO fixture for simulating a file object""" + with StringIO() as buffer: + yield buffer diff --git a/tests/fixtures/junit_et.py b/tests/fixtures/junit_et.py new file mode 100644 index 00000000..31616c82 --- /dev/null +++ b/tests/fixtures/junit_et.py @@ -0,0 +1,13 @@ +import pytest +import xml.etree.ElementTree as ET +from .. 
import DATA_DIR
+
+
+@pytest.fixture
+def junit_with_failure():
+    """Load ET of junit xml report with failure"""
+    sample = DATA_DIR / "junit_xml_failure.xml"
+
+    tree = ET.parse(sample)
+    root = tree.getroot()
+    return root
diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/integration/test_junit_to_markdown_output.py b/tests/integration/test_junit_to_markdown_output.py
new file mode 100644
index 00000000..41938951
--- /dev/null
+++ b/tests/integration/test_junit_to_markdown_output.py
@@ -0,0 +1,18 @@
+from ..fixtures.buffers import str_buff
+from ..fixtures.junit_et import junit_with_failure
+import tools.junit_to_markdown.convert as report
+
+
+def test_print_redirection(junit_with_failure, str_buff, capsys):
+    """Ensure that the report is entirely redirected when needed"""
+
+    report.print_junit_report(junit_with_failure, str_buff)
+    report.print_junit_report(junit_with_failure, None)  # print report to stdout
+
+    str_buff.seek(0)
+    str_buff_content = str_buff.read()
+
+    captured = capsys.readouterr()
+    stdout_content = captured.out
+
+    assert stdout_content == str_buff_content, "Printing to multiple locations."
diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/unit/test_junit_to_markdown_formatting.py b/tests/unit/test_junit_to_markdown_formatting.py
new file mode 100644
index 00000000..d010c669
--- /dev/null
+++ b/tests/unit/test_junit_to_markdown_formatting.py
@@ -0,0 +1,148 @@
+from ..fixtures.buffers import str_buff
+import tools.junit_to_markdown.convert as report
+
+import xml.etree.ElementTree as ET
+
+
+def test_print_element(str_buff):
+    """Ensure printed elements match expected result"""
+
+    input_xml = """
+    <testsuite message="This is an example attr">
+        This is example content.
+    </testsuite>"""
+
+    expected_result = """<pre>
+message: This is an example attr
+text: 
+This is example content.
+</pre>
+"""
+
+    root = ET.fromstring(input_xml)
+
+    report.print_element(root, str_buff)
+
+    str_buff.seek(0)
+    result = str_buff.read()
+
+    assert result == expected_result
+
+
+def test_get_chart_data_order():
+    """Ensure chart wedges are ordered correctly"""
+
+    input_xml = """
+    <testsuite errors="3" failures="3" skipped="1" tests="10">
+        This is example content.
+
+    </testsuite>
+    """
+
+    # fmt: off
+    # name, value, colour, default_order
+    expected_result = [
+        ('pass', 3, '#0f0', 4),
+        ('error', 3, '#fa0', 2),
+        ('failed', 3, '#f00', 1),
+        ('skipped', 1, '#ff0', 3)
+    ]
+    # fmt: on
+
+    root = ET.fromstring(input_xml)
+
+    result = report.get_chart_data(root)
+    assert result == expected_result
+
+
+def test_get_chart_data_removal():
+    """Ensure zero width chart wedges are removed"""
+
+    input_xml = """
+    <testsuite errors="0" failures="0" skipped="0" tests="10"/>
+    """
+
+    # fmt: off
+    # name, value, colour, default_order
+    expected_result = [
+        ('pass', 10, '#0f0', 4),
+    ]
+    # fmt: on
+
+    root = ET.fromstring(input_xml)
+
+    result = report.get_chart_data(root)
+    assert result == expected_result
+
+
+def test_get_testcase_status_not_pass():
+    """Test correct status icon selection"""
+
+    for status, expected_result in report.STATUS_ICONS.items():
+
+        input_xml = f"""
+        <testcase><{status}/></testcase>
+        """
+
+        root = ET.fromstring(input_xml)
+        result = report.get_testcase_status(root)
+
+        assert result == expected_result
+
+
+def test_get_testcase_status_default():
+    """Test default status icon selection"""
+
+    input_xml = f"""
+    <testcase/>
+    """
+
+    root = ET.fromstring(input_xml)
+    result = report.get_testcase_status(root)
+
+    assert result == report.DEFAULT_STATUS_ICON
+
+
+def test_print_header(str_buff):
+    """Ensure header is printed correctly"""
+
+    input_xml = """
+    <testsuite errors="0" failures="0" name="pytest"/>
+    """
+
+    root = ET.fromstring(input_xml)
+    report.print_header(root, str_buff)
+    str_buff.seek(0)
+    result = str_buff.read()
+
+    result_split = result.split()
+
+    assert "#" == result_split[0], "result is not formatted as a level 1 header"
+    assert ":white_check_mark:" in result_split, "result is missing icon"
+    assert "pytest" in result_split, "result is missing name"
+
+
+def test_print_testsuite_report(str_buff):
+
+    input_xml = """
+    <testsuite errors="0" failures="0" name="pytest" skipped="0" tests="2">
+        <testcase classname="tests.unit.example" name="test_one">
+        </testcase>
+        <testcase classname="tests.unit.example" name="test_two">
+        </testcase>
+    </testsuite>
+    """
+
+    root = ET.fromstring(input_xml)
+    report.print_testsuite_report(root, str_buff)
+    str_buff.seek(0)
+    result = str_buff.read()
+    result_lines = result.splitlines()
+
+    assert "pytest" in result_lines[0], "result is missing header"
+    assert any("```mermaid" in line for line in result_lines), "result is missing chart"
+
+    # this may change if <details> is used for any other purpose than testcases
+    assert (
+        sum("<details>" in line for line in result_lines) == 2
+    ), "result has incorrect testcase test count"
diff --git a/tools/cli-client/snap/snapcraft.yaml b/tools/cli-client/snap/snapcraft.yaml
index e6f060d5..a41c8968 100644
--- a/tools/cli-client/snap/snapcraft.yaml
+++ b/tools/cli-client/snap/snapcraft.yaml
@@ -1,4 +1,4 @@
-name: oci-factory # TODO 'snapcraft register <name>'
+name: oci-factory  # TODO 'snapcraft register <name>'
 base: core22
 version: '0.0.1'
 summary: The OCI Factory CLI client to build, upload and release OCI images
@@ -6,7 +6,7 @@ description: |
   The OCI Factory CLI client is a tool that builds, tests, and releases the
   OCI images owned by Canonical using the Github workflow in the OCI Factory
   repository.
-grade: devel # must be 'stable' to release into candidate/stable channels
+grade: devel  # must be 'stable' to release into candidate/stable channels
 confinement: strict
 
 parts:
diff --git a/tools/junit_to_markdown/__init__.py b/tools/junit_to_markdown/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tools/junit_to_markdown/__main__.py b/tools/junit_to_markdown/__main__.py
new file mode 100644
index 00000000..2d55304e
--- /dev/null
+++ b/tools/junit_to_markdown/__main__.py
@@ -0,0 +1,25 @@
+import argparse, sys
+import xml.etree.ElementTree as ET
+from .convert import print_junit_report
+
+
+parser = argparse.ArgumentParser(
+    description="Generate markdown from a JUnit XML report for $GITHUB_STEP_SUMMARY"
+)
+
+parser.add_argument(
+    "--input-junit", help="Path to JUnit XML Report", required=True, type=str
+)
+
+
+def main():
+    args = parser.parse_args()
+
+    tree = ET.parse(args.input_junit)
+    root = tree.getroot()
+
+    print_junit_report(root, sys.stdout)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/tools/junit_to_markdown/convert.py b/tools/junit_to_markdown/convert.py
new file mode 100755
index 00000000..003281cd
--- /dev/null
+++ b/tools/junit_to_markdown/convert.py
@@ -0,0 +1,145 @@
+#! /bin/env python3
+import xml.etree.ElementTree as ET
+from io import TextIOBase
+import json
+
+DEFAULT_STATUS_ICON = ":white_check_mark:"
+STATUS_ICONS = {
+    "failure": ":x:",
+    "error": ":warning:",
+    "skipped": ":information_source:",
+    "information_source": ":x:",
+}
+
+
+def print_element(element: ET.Element, output: TextIOBase = None):
+    """Generically display attrs and text of an element"""
+    print(f"<pre>", file=output)
+
+    for key, value in element.attrib.items():
+        print(f"{key}: {value}", file=output)
+
+    if element.text is not None:
+        if content := element.text.strip():
+            print(f"text: \n{content}", file=output)
+
+    print(f"
", file=output) + + +def get_chart_data(testsuite: ET.Element): + """Extract and order data used in pie chart""" + + failed_tests = int(testsuite.attrib.get("failures", 0)) + error_tests = int(testsuite.attrib.get("errors", 0)) + skipped_tests = int(testsuite.attrib.get("skipped", 0)) + total_tests = int(testsuite.attrib.get("tests", 0)) + + # passed test has to be inferred + pass_tests = total_tests - failed_tests - error_tests - skipped_tests + + # disable black autoformatter for a moment + # fmt: off + + # name, value, colour, default_order + chart_data = [ + ("failed", failed_tests, "#f00", 1), + ("error", error_tests, "#fa0", 2), + ("skipped", skipped_tests, "#ff0", 3), + ("pass", pass_tests, "#0f0", 4), + ] + # note: default_order ensures color match if two wedges have the exact same value + # fmt: on + + # filter out wedges with 0 width + chart_data = list(filter(lambda w: w[1] != 0, chart_data)) + + # sort by value, then default order so colors match what we expect + chart_data = list(sorted(chart_data, key=lambda w: (w[1], w[3]), reverse=True)) + + return chart_data + + +def print_testsuite_pie_chart(testsuite: ET.Element, output: TextIOBase = None): + """Generate a pie chart showing test status from testsuite element""" + + chart_data = get_chart_data(testsuite) + + # create the chart theme + theme_dict = { + "theme": "base", + "themeVariables": {f"pie{n+1}": w[2] for n, w in enumerate(chart_data)}, + } + + # begin printing pie chart... + print("```mermaid", file=output) + + # theme colors in order: pass, failed, error, skipped + # Note: init cannot be in quotes + print(f"%%{{init:{json.dumps(theme_dict)}}}%%", file=output) + + print(f"pie", file=output) + for key, value, _, _ in chart_data: + print(f'"{key}" : {value}', file=output) + + print("```", file=output) + + +def get_testcase_status(testcase: ET.Element): + """Get status for individual testcase elements""" + + for key, value in STATUS_ICONS.items(): + if testcase.find(key) is not None: + return value + + return DEFAULT_STATUS_ICON + + +def print_header(testsuite: ET.Element, output: TextIOBase = None): + """Print a header for the summary""" + passed = ( + testsuite.attrib.get("failures") == "0" + and testsuite.attrib.get("errors") == "0" + ) + status = ":white_check_mark:" if passed else ":x:" + name = testsuite.attrib["name"] + + print(f"# {status} {name}", file=output) + + +def print_testsuite_report(testsuite: ET.Element, output: TextIOBase = None): + """Print complete testsuite element Report""" + + print_header(testsuite, output) + + # use pie chart header as title + print_testsuite_pie_chart(testsuite, output) + + # print testsuite info + print_element(testsuite, output) + + # print each test case in collapsable section + for testcase in testsuite.findall("testcase"): + + print("
", file=output) + + test_status = get_testcase_status(testcase) + test_name = ( + testcase.attrib["name"].replace("_", " ").title() + ) # make the title look better + test_class = testcase.attrib["classname"] + print( + f"{test_status} {test_name} - {test_class}", file=output + ) + + for child in testcase.iter(): + print(f"{child.tag}", file=output) + print_element(child, output) + + print("
", file=output) + + +def print_junit_report(root: ET.Element, output: TextIOBase = None): + """Print report by iterating over all elements in root""" + + for testsuite in root.findall("testsuite"): + print_testsuite_report(testsuite, output)