GitHub Actions workflows: parameterize over tests, add summary
Co-authored-by: Benjamin Prevor <[email protected]>
lschuermann and charles37 committed Nov 20, 2024
1 parent 4a930b8 commit a25f481
Showing 3 changed files with 217 additions and 50 deletions.
43 changes: 42 additions & 1 deletion .github/workflows/treadmill-ci-test.yml
@@ -26,7 +26,46 @@ permissions:
contents: read

jobs:
treadmill-ci:
analyze-changes:
runs-on: ubuntu-latest

outputs:
hwci-tests-json: ${{ steps.analyze-changes.outputs.hwci-tests-json }}

steps:
# This is not run within the context of a repository that contains actual
# kernel / userspace code, so there is nothing for us to analyze. Instead
# we clone this very repository and select all test definitions:
- name: Checkout the tock-hardware-ci repository
uses: actions/checkout@v4
with:
path: tock-hardware-ci

- name: Checkout the tock/tock repository
uses: actions/checkout@v4
with:
# Check out the upstream tock/tock repository at its default branch; this
# workflow is not triggered from within tock/tock, so there is no
# triggering commit in that repository to pin to.
repository: tock/tock
path: tock-tock

- name: Select all defined tests
id: analyze-changes
run: |
# Run the select_tests.py script
python3 tock-hardware-ci/hwci/select_tests.py \
--repo-path tock-tock \
--hwci-path tock-hardware-ci/hwci \
--output selected_tests.json
echo "Selected HWCI tests:"
cat selected_tests.json
# Output the tests JSON
hwci_tests_json=$(jq -c '.' selected_tests.json)
echo "hwci-tests-json=${hwci_tests_json}" >> "$GITHUB_OUTPUT"
run-treadmill-ci:
needs: [analyze-changes]
uses: ./.github/workflows/treadmill-ci.yml
with:
# Only run on a specific repository, as others will not have the right
@@ -38,12 +77,14 @@ jobs:
# appropriate environment (depending on the on: triggers above)
job-environment: ${{ github.event_name == 'pull_request' && 'treadmill-ci' || 'treadmill-ci-merged' }}


# This workflow tests the tock-hardware-ci scripts itself, so take the
# current GITHUB_SHA:
tock-hardware-ci-ref: ${{ github.sha }}

# Use the latest upstream Tock kernel / userspace components:
tock-kernel-ref: 'master'
libtock-c-ref: 'master'
tests-json: ${{ needs.analyze-changes.outputs.hwci-tests-json }}

secrets: inherit
167 changes: 118 additions & 49 deletions .github/workflows/treadmill-ci.yml
@@ -16,8 +16,10 @@
# [1]: https://book.treadmill.ci/
# [2]: https://github.com/treadmill-tb/treadmill
# [3]: https://book.treadmill.ci/user-guide/github-actions-integration.html
#

name: treadmill-ci

env:
TERM: xterm # Makes tput work in actions output

@@ -39,13 +41,18 @@ on:
libtock-c-ref:
required: true
type: string
tests-json:
required: false
type: string
default: '["tests/c_hello.py"]' # Default to single test for backward compatibility

jobs:
test-prepare:
runs-on: ubuntu-latest

# Do not run job on forks, as they will not have the correct environment set up
if: github.repository == inputs.repository-filter

environment: ${{ inputs.job-environment }}

outputs:
@@ -77,16 +84,6 @@ jobs:
popd
echo "$PWD/treadmill/target/debug" >> "$GITHUB_PATH"
# - uses: actions/checkout@v4
# with:
# repository: tock/tock
# ref: ${{ inputs.tock-kernel-ref }}
# path: tock

# - name: Analyze changes and determine types of tests to run
# run: |
# echo "TODO: implement this!"

- name: Generate a token to register new just-in-time runners
id: generate-token
uses: actions/create-github-app-token@v1
@@ -99,23 +96,21 @@
env:
GH_TOKEN: ${{ steps.generate-token.outputs.token }}
TML_API_TOKEN: ${{ secrets.TREADMILL_API_TOKEN }}

# Currently, all tests run only on hosts attached to an nRF52840DK
DUT_BOARD: nrf52840dk

# A Raspberry Pi OS netboot (NBD) image with a GitHub Actions
# self-hosted runner pre-configured.
#
# For the available images see
# https://book.treadmill.ci/treadmillci-deployment/images.html
IMAGE_ID: 1b6900eff30f37b6d012240f63aa77a22e20934e7f6ebf38e25310552dc08378

IMAGE_ID: df24da6c7a03d87b1b6b55162383a9dfdf48a129b5f3e648748f0f9d11cdb470
# Limit the supervisors to hosts that are compatible with this
# image. This is a hack until we introduce "image sets" which define
# multiple images for various supervisor hosts, but otherwise behave
# identically:
HOST_TYPE: nbd-netboot
HOST_ARCH: arm64
TESTS_JSON: ${{ inputs.tests-json }}
run: |
# When we eventually launch tests on multiple hardware platforms in
# parallel, we need to supply different SUB_TEST_IDs here:
@@ -126,6 +121,7 @@ jobs:
# runner (connected to the exact board we want to run tests on).
RUNNER_ID="tml-gh-actions-runner-${GITHUB_REPOSITORY_ID}-${GITHUB_RUN_ID}-${GITHUB_RUN_ATTEMPT}-${SUB_TEST_ID}"
# Obtain a new just-in-time runner registration token:
RUNNER_CONFIG_JSON="$(gh api \
-H "Accept: application/vnd.github+json" \
@@ -162,28 +158,33 @@
TML_JOB_ID="$(echo "$TML_JOB_ID_JSON" | jq -r .job_id)"
echo "Enqueued Treadmill job with ID $TML_JOB_ID"
# Pass the job IDs and other configuration data into the outputs of
# this step, such that we can run test-execute job instances for each
# Treadmill job we've started:
echo "tml-job-ids=[ \
\"$TML_JOB_ID\" \
]" >> "$GITHUB_OUTPUT"
echo "tml-jobs={ \
\"$TML_JOB_ID\": { \
\"runner-id\": \"$RUNNER_ID\", \
} \
}" >> "$GITHUB_OUTPUT"
TML_JOB_IDS_OUTPUT="[ \"$TML_JOB_ID\" ]"
echo "Setting tml-job-ids output to ${TML_JOB_IDS_OUTPUT}"
echo "tml-job-ids=${TML_JOB_IDS_OUTPUT}" >> "$GITHUB_OUTPUT"
TML_JOBS_OUTPUT="{ \"$TML_JOB_ID\": { \"runner-id\": \"$RUNNER_ID\", \"tests\": $TESTS_JSON } }"
echo "Setting tml-jobs output to ${TML_JOBS_OUTPUT}"
echo "tml-jobs=${TML_JOBS_OUTPUT}" >> "$GITHUB_OUTPUT"
# Generate an overview of the scheduled jobs and their respective tests:
cat <<GITHUB_STEP_SUMMARY >>"$GITHUB_STEP_SUMMARY"
## Test Overview
| Test | Board | Job |
|------|-------|-----|
GITHUB_STEP_SUMMARY
echo "$TESTS_JSON" | jq -r -c '.[]' | while read TEST; do
echo "| \`$TEST\` | \`$DUT_BOARD\` | [\`$TML_JOB_ID\`](#tml-job-summary-$TML_JOB_ID) |" >>"$GITHUB_STEP_SUMMARY"
done
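
To make the two step outputs easier to follow, here is a sketch of their shapes and how the `test-execute` job below indexes into them. The job ID and runner label are placeholders (the real values come from the enqueued Treadmill job and `$RUNNER_ID`), and with a single board there is currently exactly one entry in each:

```python
# Shape sketch of the tml-job-ids / tml-jobs outputs written above
# (placeholder IDs; not the workflow's actual values).
tml_job_ids = ["<treadmill-job-id>"]  # drives `matrix.tml-job-id` in test-execute

tml_jobs = {
    "<treadmill-job-id>": {
        # Label the just-in-time runner registers under; used as `runs-on`.
        "runner-id": "tml-gh-actions-runner-<repo-id>-<run-id>-<attempt>-<sub-test-id>",
        # The TESTS_JSON array assigned to this Treadmill job.
        "tests": ["tests/c_hello.py"],
    }
}

job_id = tml_job_ids[0]
print(tml_jobs[job_id]["runner-id"])  # resolved by `runs-on` in test-execute
print(tml_jobs[job_id]["tests"])      # consumed as JSON by the "Run tests" step
```
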
test-execute:
needs: test-prepare

strategy:
matrix:
tml-job-id: ${{ fromJSON(needs.test-prepare.outputs.tml-job-ids) }}

runs-on: ${{ fromJSON(needs.test-prepare.outputs.tml-jobs)[matrix.tml-job-id].runner-id }}

steps:
- name: Print Treadmill Job Context and Debug Information
run: |
@@ -207,30 +208,10 @@ jobs:
run: |
echo "verbose = off" >> $HOME/.wgetrc
- name: Checkout the Tock Hardware CI scripts
uses: actions/checkout@v4
with:
repository: tock/tock-hardware-ci
ref: ${{ inputs.tock-hardware-ci-ref }}

- name: Checkout the Tock kernel repository
uses: actions/checkout@v4
with:
path: tock
repository: tock/tock
ref: ${{ inputs.tock-kernel-ref }}

- name: Checkout the libtock-c repository
uses: actions/checkout@v4
with:
path: libtock-c
repository: tock/libtock-c
ref: ${{ inputs.libtock-c-ref }}

- uses: actions-rust-lang/setup-rust-toolchain@v1
with:
# Avoid overwriting the RUSTFLAGS environment variable
rustflags: ""
rustflags: ''

- name: Install required system packages
run: |
@@ -244,25 +225,113 @@
git cargo openocd python3 python3-pip python3-serial \
python3-pexpect gcc-arm-none-eabi libnewlib-arm-none-eabi \
pkg-config libudev-dev cmake libusb-1.0-0-dev udev make \
gdb-multiarch gcc-arm-none-eabi build-essential || true
gdb-multiarch gcc-arm-none-eabi build-essential jq || true
# Install probe-rs:
curl --proto '=https' --tlsv1.2 -LsSf \
https://github.com/probe-rs/probe-rs/releases/latest/download/probe-rs-tools-installer.sh \
| sh
- name: Checkout the Tock Hardware CI scripts
uses: actions/checkout@v4
with:
repository: tock/tock-hardware-ci
ref: ${{ inputs.tock-hardware-ci-ref }}

- name: Checkout the Tock kernel repository
uses: actions/checkout@v4
with:
path: tock
repository: tock/tock
ref: ${{ inputs.tock-kernel-ref }}

- name: Checkout the libtock-c repository
uses: actions/checkout@v4
with:
path: libtock-c
repository: tock/libtock-c
ref: ${{ inputs.libtock-c-ref }}
fetch-depth: 0
submodules: false
persist-credentials: true

- name: Create Python virtual environment and install required dependencies
run: |
python3 -m venv ./hwcienv
source ./hwcienv/bin/activate
pip install -r hwci/requirements.txt -c hwci/requirements-frozen.txt
- name: Run tests
env:
JSON_TEST_ARRAY: ${{ toJSON(fromJSON(needs.test-prepare.outputs.tml-jobs)[matrix.tml-job-id].tests) }}
run: |
source ./hwcienv/bin/activate
cd ./hwci
export PYTHONPATH="$PWD:$PYTHONPATH"
python3 core/main.py --board boards/nrf52dk.py --test tests/c_hello.py
STEP_FAIL=0
# Generate a summary of all the tests executed:
cat <<GITHUB_STEP_SUMMARY >>"$GITHUB_STEP_SUMMARY"
### <a id="tml-job-summary-${{ matrix.tml-job-id }}"></a>Tests executed on board \`nrf52840dk\`, job ID ${{ matrix.tml-job-id }}
| Result | Test |
|--------|------|
GITHUB_STEP_SUMMARY
while read TEST; do
# WHILE ---------->
# For markdown links, we want a sanitized test name without special characters
SANITIZED_TEST_NAME="${TEST//[^[:alnum:]]/_}"
# Prepare a collapsible section in the test output:
cat <<STEP_SUMMARY_DETAILS >>"./step-summary-details.txt"
<details>
<summary>Test \`$TEST\`</summary>
##### <a id="#test-$SANITIZED_TEST_NAME"></a> Test \`$TEST\`
\`\`\`
STEP_SUMMARY_DETAILS
# Run the test script, saving its output:
echo "===== RUNNING TEST $TEST ====="
FAIL=0
set -o pipefail
python3 core/main.py --board boards/nrf52dk.py --test "$TEST" 2>&1 | tee ./job-output.txt || FAIL=1
set +o pipefail
# Insert the result into the markdown table:
if [ "$FAIL" == "0" ]; then
echo "| ✅ | \`$TEST\` ([Output](#test-$SANITIZED_TEST_NAME)) |" >>"$GITHUB_STEP_SUMMARY"
else
echo "===== Test $TEST failed! ====="
echo "| ❌ | \`$TEST\` ([Output](#test-$SANITIZED_TEST_NAME)) |" >>"$GITHUB_STEP_SUMMARY"
STEP_FAIL=1
fi
# Sanitize the output (remove triple backticks) and copy it into the step summary details:
cat ./job-output.txt | sed 's/```//g' >>"./step-summary-details.txt"
# Finalize this section of the test output summary:
cat <<STEP_SUMMARY_DETAILS >>"./step-summary-details.txt"
\`\`\`
</details>
STEP_SUMMARY_DETAILS
# -----------> DONE
done < <(echo "$JSON_TEST_ARRAY" | jq -r -c '.[]')
# Finally, add all test output sections to the overall step summary:
echo "" >>"$GITHUB_STEP_SUMMARY"
cat "./step-summary-details.txt" >>"$GITHUB_STEP_SUMMARY"
# Exit with an error if at least one test failed:
if [ "$STEP_FAIL" != "0" ]; then
echo "At least one test failed, exiting with error."
exit 1
fi
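
The per-test loop above is all shell; as a rough Python mirror (simplified: no collapsible details file, and the board and `core/main.py` invocation are taken directly from the command above):

```python
# Simplified Python mirror of the per-test shell loop above (illustration only).
import json
import os
import re
import subprocess
import sys

tests = json.loads(os.environ["JSON_TEST_ARRAY"])  # e.g. ["tests/c_hello.py"]
any_failed = False
details = []
fence = "`" * 3  # three backticks

for test in tests:
    # Same sanitization as the shell parameter expansion: every non-alphanumeric
    # character becomes "_", so the name is safe to use as a markdown anchor.
    anchor = re.sub(r"[^0-9A-Za-z]", "_", test)

    result = subprocess.run(
        ["python3", "core/main.py", "--board", "boards/nrf52dk.py", "--test", test],
        capture_output=True,
        text=True,
    )
    ok = result.returncode == 0
    any_failed = any_failed or not ok

    # One markdown table row per test, linking to its output section.
    print(f"| {'✅' if ok else '❌'} | `{test}` ([Output](#test-{anchor})) |")

    # Strip literal triple backticks so the captured log cannot terminate the
    # fenced block in the step summary (the sed call above does the same).
    details.append((test, (result.stdout + result.stderr).replace(fence, "")))

# The collected logs would then be appended to $GITHUB_STEP_SUMMARY inside
# <details> sections, as the shell script does.
sys.exit(1 if any_failed else 0)
```
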
- name: Request shutdown after successful job completion
run: |
57 changes: 57 additions & 0 deletions hwci/select_tests.py
@@ -0,0 +1,57 @@
#!/usr/bin/env python3

# Licensed under the Apache License, Version 2.0 or the MIT License.
# SPDX-License-Identifier: Apache-2.0 OR MIT
# Copyright Tock Contributors 2024.

import os
import argparse
import json


def main():
parser = argparse.ArgumentParser(description="Select all HWCI tests.")
parser.add_argument(
"--repo-path",
type=str,
default=".",
help="Path to the tock/tock repository to analyze",
)
parser.add_argument(
"--hwci-path",
type=str,
required=True,
help="Path to the tock-hardware-ci repository",
)
parser.add_argument(
"--output",
type=str,
default="selected_tests.json",
help="Output JSON file for selected tests",
)
args = parser.parse_args()

# For now, we ignore the repo-path (tock/tock repository) since we are not analyzing changes yet
# In the future, we will use repo-path to analyze the changes and select tests accordingly

# Path to the tests directory within the tock-hardware-ci repository
tests_dir = os.path.join(args.hwci_path, "tests")

# Find all .py files in the tests directory
test_files = []
for root, dirs, files in os.walk(tests_dir):
for file in files:
if file.endswith(".py"):
# Get the relative path to the test file
test_path = os.path.relpath(os.path.join(root, file), args.hwci_path)
test_files.append(test_path)

# Output the list of test files as a JSON array
with open(args.output, "w") as f:
json.dump(test_files, f)

print(f"Selected HWCI tests: {test_files}")


if __name__ == "__main__":
main()
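
A quick local smoke test for the script, assuming the same checkout layout the workflow uses (`tock-hardware-ci/` and `tock-tock/` as sibling directories); this only illustrates the expected output shape:

```python
# Local smoke test for select_tests.py (illustration; paths assume the
# workflow's checkout layout with tock-hardware-ci/ and tock-tock/ side by side).
import json
import subprocess

subprocess.run(
    [
        "python3", "tock-hardware-ci/hwci/select_tests.py",
        "--repo-path", "tock-tock",
        "--hwci-path", "tock-hardware-ci/hwci",
        "--output", "selected_tests.json",
    ],
    check=True,
)

with open("selected_tests.json") as f:
    tests = json.load(f)

# Every .py file under tock-hardware-ci/hwci/tests/, relative to hwci/,
# e.g. "tests/c_hello.py".
print(tests)
```
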
