Skip to content

Print bench outputs #18

Print bench outputs

Print bench outputs #18

Workflow file for this run

# Run final tests only when attempting to merge, shown as skipped status checks beforehand
name: Merge group tests
on:
  pull_request:
    types: [opened, synchronize, reopened, ready_for_review]
    branches: [main]
  merge_group:
# Cancel superseded runs: head_ref identifies the PR branch; it is empty for
# merge_group/non-PR events, so run_id makes each of those its own group
# (never cancelled).
concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true
jobs:
  gpu-benchmark:
    # Test working run
    #if: github.event_name != 'pull_request' || github.event.action == 'enqueued'
    name: Run fibonacci bench
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      # Install dependencies
      # NOTE(review): actions-rs/toolchain is archived/unmaintained and is used
      # here with no `toolchain` input — confirm it still resolves a default
      # toolchain, or migrate to dtolnay/rust-toolchain.
      - uses: actions-rs/toolchain@v1
      - uses: Swatinem/rust-cache@v2
      - uses: taiki-e/install-action@v2
        with:
          tool: [email protected]
      - name: Install criterion
        run: |
          cargo install cargo-criterion
          cargo install criterion-table
      # Checkout base branch for comparative bench
      - uses: actions/checkout@v4
        with:
          ref: main
          path: main
      # Copy the script so the base can bench with the same parameters
      - name: Copy source script to base branch
        run: cd benches && cp justfile bench.env ../main/benches
      - name: Set base ref variable
        run: cd main && echo "BASE_REF=$(git rev-parse HEAD)" >> $GITHUB_ENV
      - run: echo ${{ env.BASE_REF }}
      - name: Run bench on base branch
        run: cd main/benches && just --dotenv-filename bench.env bench fibonacci
      - name: Copy bench output to PR branch for comparison
        #cp main/${{ env.BASE_REF }}.json .
        # mkdir -p guards the case where ./target does not exist yet (the PR
        # branch has not built): a bare `cp -r main/target/criterion ./target`
        # would then RENAME criterion -> target instead of nesting it, and the
        # PR bench would not see the baseline at target/criterion.
        run: |
          mkdir -p target
          cp -r main/target/criterion ./target/
      #- name: Print base bench
      #  run: cat ${{ env.BASE_REF }}.json
      - name: Run bench on PR branch
        run: cd benches && just --dotenv-filename bench.env bench fibonacci
      - name: Print PR bench
        run: cat ${{ github.sha }}.json
      # Create a `criterion-table` and write in commit comment
      - name: Run `criterion-table`
        run: cat ${{ github.sha }}.json | criterion-table > BENCHMARKS.md
      #- name: Write bench on commit comment
      #  uses: peter-evans/commit-comment@v3
      #  with:
      #    body-path: BENCHMARKS.md
      # TODO: Use jq for JSON parsing if needed
      # Check for benchmark regression based on Criterion's configured noise threshold
      # (grep -c prints 0 on no match, so the output is always a number; its
      # non-zero exit status inside $() does not fail the step).
      - name: Performance regression check
        id: check-regression
        run: |
          echo $(grep -c 'Regressed' ${{ github.sha }}.json)
          echo "regress_count=$(grep -c 'Regressed' ${{ github.sha }}.json)" >> $GITHUB_OUTPUT
      # Fail job if regression found (expression coerces the string output to a
      # number for the comparison)
      - uses: actions/github-script@v6
        if: ${{ steps.check-regression.outputs.regress_count > 0 }}
        with:
          script: |
            core.setFailed('Fibonacci bench regression detected')