Amaranth 0.5.3 (#754) #903
Workflow file for this run

name: Core Benchmarks
on:
  push:
    branches:
      - master
  pull_request:
    types: [ synchronize, labeled ]
    branches:
      - master
  workflow_dispatch:
jobs:
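  # Synthesis benchmarks: run the ECP5 synthesis flow inside the amaranth-synth
  # container for both the basic and full core configurations, then upload the
  # extracted metrics as JSON artifacts. PYTHONHASHSEED=0 is presumably set to keep
  # Python's hashing deterministic so results are reproducible between runs. The job
  # runs on pushes to master and on pull requests carrying the 'benchmark' label.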
  synthesis:
    strategy:
      matrix:
        config: [basic, full]
    name: Run synthesis benchmarks
    if: >
      github.ref == 'refs/heads/master' ||
      (
        github.event_name == 'pull_request' &&
        (
          (github.event.action == 'synchronize' && contains(github.event.pull_request.labels.*.name, 'benchmark')) ||
          (github.event.action == 'labeled' && github.event.label.name == 'benchmark')
        )
      )
    runs-on: ubuntu-latest
    timeout-minutes: 40
    container: ghcr.io/kuznia-rdzeni/amaranth-synth:ecp5-2023.11.19_v
    steps:
      - uses: actions/checkout@v4
      - name: Set ownership (GitHub Actions workaround)
        run: |
          # https://github.com/actions/runner/issues/2033
          chown -R $(id -u):$(id -g) $PWD
      - name: Checkout submodules
        run: git submodule update --init --recursive amaranth-stubs
      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.11'
      - name: Install dependencies
        run: |
          python3 -m venv venv
          . venv/bin/activate
          python3 -m pip install --upgrade pip
          python3 -m pip install -r requirements-dev.txt
      - name: Synthesize
        run: |
          . venv/bin/activate
          PYTHONHASHSEED=0 ./scripts/synthesize.py --verbose --strip-debug --config ${{ matrix.config }}
      - name: Print synthesis information
        run: cat ./build/top.tim
      - name: Collect benchmark information
        run: |
          . venv/bin/activate
          ./scripts/parse_benchmark_info.py -o synth-benchmark-${{ matrix.config }}.json
          cat ./synth-benchmark-${{ matrix.config }}.json
      - uses: actions/upload-artifact@v4
        with:
          name: synth_benchmark_results-${{ matrix.config }}
          path: synth-benchmark-${{ matrix.config }}.json
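  # Performance benchmarks are split in two: this job cross-compiles the embench
  # suite using the riscv-toolchain container and uploads the binaries as an
  # artifact; run-perf-benchmarks below consumes them. The same gate applies as for
  # synthesis (pushes to master, or pull requests labeled 'benchmark').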
  build-perf-benchmarks:
    name: Build performance benchmarks
    runs-on: ubuntu-latest
    if: >
      github.ref == 'refs/heads/master' ||
      (
        github.event_name == 'pull_request' &&
        (
          (github.event.action == 'synchronize' && contains(github.event.pull_request.labels.*.name, 'benchmark')) ||
          (github.event.action == 'labeled' && github.event.label.name == 'benchmark')
        )
      )
    container: ghcr.io/kuznia-rdzeni/riscv-toolchain:2024.03.12
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          submodules: recursive
      - name: Build embench
        run: cd test/external/embench && make
      - uses: actions/upload-artifact@v4
        with:
          name: "embench"
          path: |
            test/external/embench/build
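  # Run the embench binaries built above against Verilog generated from the full
  # configuration, simulated with Verilator; run_benchmarks.py writes the results to
  # perf_benchmark.json and appends a Markdown summary to the job's step summary.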
  run-perf-benchmarks:
    name: Run performance benchmarks
    runs-on: ubuntu-latest
    timeout-minutes: 30
    container: ghcr.io/kuznia-rdzeni/verilator:v5.008-2023.11.19_v
    needs: build-perf-benchmarks
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Set ownership (GitHub Actions workaround)
        run: |
          # https://github.com/actions/runner/issues/2033
          chown -R $(id -u):$(id -g) $PWD
      - name: Checkout submodules
        run: git submodule update --init --recursive amaranth-stubs
      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.11'
      - name: Install dependencies
        run: |
          python3 -m venv venv
          . venv/bin/activate
          python3 -m pip install --upgrade pip
          python3 -m pip install -r requirements-dev.txt
      - name: Generate Verilog
        run: |
          . venv/bin/activate
          PYTHONHASHSEED=0 TRANSACTRON_VERBOSE=1 ./scripts/gen_verilog.py --verbose --config full
      - uses: actions/download-artifact@v4
        with:
          name: "embench"
          path: test/external/embench/build
      - name: Run benchmarks
        run: |
          . venv/bin/activate
          scripts/run_benchmarks.py -o perf_benchmark.json --summary perf.md
          cat perf.md >> $GITHUB_STEP_SUMMARY
      - uses: actions/upload-artifact@v4
        with:
          name: perf_benchmark_results
          path: perf_benchmark.json
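  # For pull requests only: restore the baseline results cached for the PR's base
  # commit (written by publish-benchmarks on master), compare them with the fresh
  # artifacts using ci/print_benchmark_summary.py, and upload the rendered comment
  # together with the PR number, presumably for a separate workflow to post on the PR.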
  pr-summary:
    name: Create benchmark summary for a pull request
    if: ${{ github.event_name == 'pull_request' }}
    runs-on: ubuntu-latest
    timeout-minutes: 5
    needs: [run-perf-benchmarks, synthesis]
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Set ownership (GitHub Actions workaround)
        run: |
          # https://github.com/actions/runner/issues/2033
          chown -R $(id -u):$(id -g) $PWD
      - name: Checkout submodules
        run: git submodule update --init --recursive amaranth-stubs
      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.11'
      - name: Install dependencies
        run: |
          python3 -m venv venv
          . venv/bin/activate
          python3 -m pip install --upgrade pip
          python3 -m pip install -r requirements-dev.txt
      - name: Get baseline benchmark results
        uses: actions/cache@v4
        env:
          cache-name: benchmark-results
        with:
          key: ${{ env.cache-name }}-${{ runner.os }}-${{ github.event.pull_request.base.sha }}
          path: |
            perf_benchmark.json
            synth-benchmark-basic.json
            synth-benchmark-full.json
      - uses: actions/download-artifact@v4
        with:
          name: perf_benchmark_results
          path: artifacts
      - uses: actions/download-artifact@v4
        with:
          name: synth_benchmark_results-basic
          path: artifacts
      - uses: actions/download-artifact@v4
        with:
          name: synth_benchmark_results-full
          path: artifacts
      - name: Save PR number
        run: |
          mkdir -p ./summary
          echo ${{ github.event.number }} > ./summary/pr_number
      - name: Create comment
        id: createComment
        run: |
          . venv/bin/activate
          {
            echo "## Benchmarks summary"
            echo "### Performance benchmarks"
            ci/print_benchmark_summary.py --precision 3 artifacts/perf_benchmark.json perf_benchmark.json
            echo
            echo "You can view all the metrics [here](${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }})."
            echo "### Synthesis benchmarks (basic)"
            ci/print_benchmark_summary.py --precision 0 artifacts/synth-benchmark-basic.json synth-benchmark-basic.json
            echo
            echo "### Synthesis benchmarks (full)"
            ci/print_benchmark_summary.py --precision 0 artifacts/synth-benchmark-full.json synth-benchmark-full.json
            echo
          } >> summary/comment.md
      - uses: actions/upload-artifact@v4
        with:
          name: benchmark-summary
          path: summary
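  # On master only: cache the fresh results under the current commit SHA so that
  # pr-summary can later pick them up as a baseline, and push them to the
  # dev/benchmark data directory used by github-action-benchmark.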
  publish-benchmarks:
    name: Publish benchmarks
    if: github.ref == 'refs/heads/master'
    runs-on: ubuntu-latest
    timeout-minutes: 5
    needs: [run-perf-benchmarks, synthesis]
    steps:
      - uses: actions/checkout@v4
      - uses: actions/download-artifact@v4
        with:
          name: perf_benchmark_results
          path: .
      - uses: actions/download-artifact@v4
        with:
          name: synth_benchmark_results-basic
          path: .
      - uses: actions/download-artifact@v4
        with:
          name: synth_benchmark_results-full
          path: .
      - name: Put results in a cache
        uses: actions/cache@v4
        env:
          cache-name: benchmark-results
        with:
          key: ${{ env.cache-name }}-${{ runner.os }}-${{ github.sha }}
          path: |
            perf_benchmark.json
            synth-benchmark-basic.json
            synth-benchmark-full.json
      - name: Store benchmark result (IPC)
        uses: benchmark-action/github-action-benchmark@v1
        with:
          name: Performance (IPC)
          tool: 'customBiggerIsBetter'
          output-file-path: './perf_benchmark.json'
          github-token: ${{ secrets.GITHUB_TOKEN }}
          auto-push: true
          benchmark-data-dir-path: "dev/benchmark"
      - name: Store synthesis benchmark result (basic)
        uses: benchmark-action/github-action-benchmark@v1
        with:
          name: Fmax and LCs (basic)
          tool: 'customBiggerIsBetter'
          output-file-path: './synth-benchmark-basic.json'
          github-token: ${{ secrets.GITHUB_TOKEN }}
          auto-push: true
          benchmark-data-dir-path: "dev/benchmark"
      - name: Store synthesis benchmark result (full)
        uses: benchmark-action/github-action-benchmark@v1
        with:
          name: Fmax and LCs (full)
          tool: 'customBiggerIsBetter'
          output-file-path: './synth-benchmark-full.json'
          github-token: ${{ secrets.GITHUB_TOKEN }}
          auto-push: true
          benchmark-data-dir-path: "dev/benchmark"
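# Note: the 'customBiggerIsBetter' tool of github-action-benchmark expects the JSON
# files (perf_benchmark.json and the synth-benchmark-*.json files) to contain a list
# of entries with "name", "unit" and "value" fields, for example (metric names here
# are illustrative only, not taken from this repository):
#   [ { "name": "embench IPC", "unit": "IPC", "value": 0.85 } ]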