From efd894b687074c950e592928e24fa0ba3b58a70e Mon Sep 17 00:00:00 2001
From: Oleg Smirnov
Date: Sun, 30 Jun 2024 18:38:48 +0400
Subject: [PATCH] Add benchmark workflow for PR to monitor performance
 degradation (#149)

---
 .github/workflows/benchmark.yml               | 178 +++++++++++++++---
 .github/workflows/platform-benchmark.yml      | 103 ----------
 .../json/schema/benchmark/CommonBenchmarks.kt |   7 +-
 .../schema/benchmark/ComparisonBenchmark.kt   |   4 +-
 4 files changed, 156 insertions(+), 136 deletions(-)
 delete mode 100644 .github/workflows/platform-benchmark.yml

diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml
index 83edda62..1a8ce34b 100644
--- a/.github/workflows/benchmark.yml
+++ b/.github/workflows/benchmark.yml
@@ -11,7 +11,7 @@ on:
       iterations:
         description: 'number of iterations in the benchmark'
         type: number
-        default: 3
+        default: 10
         required: false
       iteration-time:
         description: 'duration of individual integration in benchmark'
@@ -25,31 +25,155 @@ on:
         required: false
   schedule:
     - cron: "0 2 * * 1"
+  push:
+    branches:
+      - main
+  pull_request:
+
+env:
+  REPORT_FORMAT: ${{ (github.event_name == 'push' || github.event_name == 'pull_request' ) && 'json' || 'csv' }}
+
+concurrency:
+  cancel-in-progress: true
+  group: bench-${{ github.event_name }}-${{ github.event.pull_request.number || github.event.after }}
 
 jobs:
-  check-linux:
-    uses: ./.github/workflows/platform-benchmark.yml
-    with:
-      run-on: ubuntu-latest
-      warmups: ${{ inputs.warmups }}
-      iterations: ${{ inputs.iterations }}
-      iteration-time: ${{ inputs.iteration-time }}
-      iteration-time-unit: ${{ inputs.iteration-time-unit }}
-  check-macos:
-    uses: ./.github/workflows/platform-benchmark.yml
-    with:
-      run-on: macos-latest
-      additional-task: "-x :benchmark:jvmBenchmark"
-      warmups: ${{ inputs.warmups }}
-      iterations: ${{ inputs.iterations }}
-      iteration-time: ${{ inputs.iteration-time }}
-      iteration-time-unit: ${{ inputs.iteration-time-unit }}
-  check-windows:
-    uses: ./.github/workflows/platform-benchmark.yml
-    with:
-      run-on: windows-latest
-      additional-task: "-x :benchmark:jvmBenchmark"
-      warmups: ${{ inputs.warmups }}
-      iterations: ${{ inputs.iterations }}
-      iteration-time: ${{ inputs.iteration-time }}
-      iteration-time-unit: ${{ inputs.iteration-time-unit }}
+  benchmark-matrix:
+    strategy:
+      matrix:
+        include:
+          # - os: ubuntu-latest
+          #   additional-task: ''
+          - os: macos-latest
+            additional-task: '-x :benchmark:jvmBenchmark'
+          - os: macos-13 # for macosX64
+            additional-task: '-x :benchmark:jvmBenchmark'
+          - os: windows-latest
+            additional-task: '-x :benchmark:jvmBenchmark'
+    runs-on: ${{ matrix.os }}
+    name: Run benchmarks on ${{ matrix.os }}
+    steps:
+      - name: 'Install native dependencies'
+        run: sudo apt-get install -y libunistring-dev
+        if: matrix.os == 'ubuntu-latest'
+      - name: 'Checkout Repository'
+        uses: actions/checkout@v4
+      - uses: actions/setup-java@v4
+        with:
+          distribution: temurin
+          java-version-file: .java-version
+      - uses: actions/setup-python@v5
+        with:
+          python-version-file: .python-version
+      - name: Validate Gradle Wrapper
+        uses: gradle/actions/wrapper-validation@v3
+      - name: Cache konan
+        uses: actions/cache@v4
+        with:
+          path: ~/.konan
+          key: ${{ runner.os }}-gradle-${{ hashFiles('*.gradle.kts') }}
+          restore-keys: |
+            ${{ runner.os }}-gradle-
+      - name: Cache unicode data
+        uses: actions/cache@v4
+        with:
+          path: unicode_dump
+          key: unicode-dump-${{ hashFiles('unicode_dump/*') }}
+          restore-keys: |
+            unicode-dump-
+      - name: Setup Gradle
+        uses: gradle/actions/setup-gradle@v3
+        with:
+          gradle-version: wrapper
+      - name: Run benchmarks
+        run: >
+          ./gradlew --no-daemon :benchmark:benchmark ${{ matrix.additional-task }}
+          -Pbenchmark_warmups=${{ inputs.warmups }}
+          -Pbenchmark_iterations=${{ inputs.iterations }}
+          -Pbenchmark_iteration_time=${{ inputs.iteration-time }}
+          -Pbenchmark_iteration_time_unit=${{ inputs.iteration-time-unit }}
+          -Pbenchmark_report_format=${{ env.REPORT_FORMAT }}
+      - name: Install CSV to MD converter
+        if: env.REPORT_FORMAT == 'csv'
+        run: pip install csv2md
+      - name: Add benchmark results to summary
+        shell: bash
+        if: env.REPORT_FORMAT == 'csv'
+        run: |
+          for report in $(find ./benchmark/build/reports/benchmarks/main -type f -name "*.csv")
+          do
+            file_name=$(basename "$report")
+            platform="${file_name%.*}"
+            echo "File $file_name"
+            # remove empty lines
+            sed -i -e '/^[[:space:]]*$/d' $report
+            echo "::group::Report CSV"
+            cat "$report"
+            echo "::endgroup::"
+            markdown_table=$(csv2md "$report")
+            echo "::group::Report Markdown"
+            echo "$markdown_table"
+            echo "::endgroup::"
+            echo "# Platform ${platform}" >> $GITHUB_STEP_SUMMARY
+            echo "$markdown_table" >> $GITHUB_STEP_SUMMARY
+          done
+      - name: Store results as artifact
+        if: env.REPORT_FORMAT == 'json'
+        uses: actions/upload-artifact@v4
+        with:
+          name: bench-result-${{ matrix.os }}
+          path: benchmark/build/reports/benchmarks/main/**/*.json
+
+  upload-benchmark-results:
+    if: (github.event_name == 'push' || github.event_name == 'pull_request') && github.repository == 'OptimumCode/json-schema-validator'
+    needs:
+      - benchmark-matrix
+    runs-on: ubuntu-latest
+    env:
+      RESULTS_DIR: bench-results
+    permissions:
+      # deployments permission to deploy GitHub pages website
+      deployments: write
+      # contents permission to update benchmark contents in gh-pages branch
+      contents: write
+      # pull-requests permission to create comments on PR in case of alert
+      pull-requests: write
+    steps:
+      - name: 'Checkout Repository'
+        uses: actions/checkout@v4
+      - name: Download benchmark results
+        uses: actions/download-artifact@v4
+        with:
+          path: ${{ env.RESULTS_DIR }}
+          merge-multiple: true
+      - name: Show downloaded artifacts
+        run: tree ${{ env.RESULTS_DIR }}
+      - name: Prepare and join benchmark reports
+        id: prep
+        run: |
+          for report in $(find ./${{ env.RESULTS_DIR }} -type f -name "*.json")
+          do
+            file_name=$(basename "$report")
+            platform="${file_name%.*}"
+            jq "[ .[] | .benchmark |= \"${platform}.\" + ltrimstr(\"io.github.optimumcode.json.schema.benchmark.\") ]" $report > ${{ env.RESULTS_DIR }}/$platform.json
+          done
+          AGGREGATED_REPORT=aggregated.json
+          # Joined reports look like this: [[{},{}], [{},{}]]
+          # We need to transform them into this: [{},{}]
+          ls ${{ env.RESULTS_DIR }}/*.json
+          jq -s '[ .[] | .[] ]' ${{ env.RESULTS_DIR }}/*.json > $AGGREGATED_REPORT
+          echo "report=$AGGREGATED_REPORT" >> $GITHUB_OUTPUT
+      - name: Store benchmark result
+        uses: benchmark-action/github-action-benchmark@v1
+        with:
+          name: KMP JSON schema validator
+          tool: 'jmh'
+          output-file-path: ${{ steps.prep.outputs.report }}
+          alert-comment-cc-users: "@OptimumCode"
+          comment-on-alert: true
+          summary-always: true
+          alert-threshold: '50%'
+          fail-threshold: '100%'
+          github-token: ${{ secrets.GITHUB_TOKEN }}
+          # Push and deploy GitHub pages branch automatically only if run in main repo and not in PR
+          auto-push: ${{ github.event_name != 'pull_request' }}
diff --git a/.github/workflows/platform-benchmark.yml b/.github/workflows/platform-benchmark.yml
deleted file mode 100644
index de464485..00000000
--- a/.github/workflows/platform-benchmark.yml
+++ /dev/null
@@ -1,103 +0,0 @@
-name: Reusable workflow to run a benchmark on the platform
-
-on:
-  workflow_call:
-    inputs:
-      run-on:
-        type: string
-        required: true
-        description: "runner to check the project"
-      additional-task:
-        type: string
-        description: additional task to add to gradle call
-        required: false
-        default: ""
-      warmups:
-        description: 'number of warmups run before the actual benchmark'
-        type: string
-        required: false
-      iterations:
-        description: 'number of iterations in the benchmark'
-        type: string
-        required: false
-      iteration-time:
-        description: 'duration of individual integration in benchmark'
-        type: string
-        required: false
-      iteration-time-unit:
-        description: 'timeunit for iteration-time parameter'
-        default: 's'
-        type: string
-        required: false
-      report-format:
-        description: 'format of the report'
-        default: 'csv'
-        type: string
-        required: false
-
-jobs:
-  benchmark:
-    runs-on: ${{ inputs.run-on }}
-    steps:
-      - name: 'Install native dependencies'
-        run: sudo apt-get install -y libunistring-dev
-        if: runner.os == 'Linux'
-      - name: 'Checkout Repository'
-        uses: actions/checkout@v4
-      - uses: actions/setup-java@v4
-        with:
-          distribution: temurin
-          java-version-file: .java-version
-      - uses: actions/setup-python@v5
-        with:
-          python-version-file: .python-version
-      - name: Validate Gradle Wrapper
-        uses: gradle/actions/wrapper-validation@v3
-      - name: Cache konan
-        uses: actions/cache@v4
-        with:
-          path: ~/.konan
-          key: ${{ runner.os }}-gradle-${{ hashFiles('*.gradle.kts') }}
-          restore-keys: |
-            ${{ runner.os }}-gradle-
-      - name: Cache unicode data
-        uses: actions/cache@v4
-        with:
-          path: unicode_dump
-          key: unicode-dump-${{ hashFiles('unicode_dump/*') }}
-          restore-keys: |
-            unicode-dump-
-      - name: Setup Gradle
-        uses: gradle/actions/setup-gradle@v3
-        with:
-          gradle-version: wrapper
-      - name: Run benchmarks
-        run: >
-          ./gradlew --no-daemon :benchmark:benchmark ${{ inputs.additional-task }}
-          -Pbenchmark_warmups=${{ inputs.warmups }}
-          -Pbenchmark_iterations=${{ inputs.iterations }}
-          -Pbenchmark_iteration_time=${{ inputs.iteration-time }}
-          -Pbenchmark_iteration_time_unit=${{ inputs.iteration-time-unit }}
-          -Pbenchmark_report_format=${{ inputs.report-format }}
-      - name: Install CSV to MD converter
-        run: pip install csv2md
-      - name: Add benchmark results to summary
-        shell: bash
-        run: |
-          for report in $(find ./benchmark/build/reports/benchmarks/main -type f -name "*.csv")
-          do
-            file_name=$(basename "$report")
-            platform="${file_name%.*}"
-            echo "File $file_name"
-            # remove empty lines
-            sed -i -e '/^[[:space:]]*$/d' $report
-            echo "::group::Report CSV"
-            cat "$report"
-            echo "::endgroup::"
-            markdown_table=$(csv2md "$report")
-            echo "::group::Report Markdown"
-            echo "$markdown_table"
-            echo "::endgroup::"
-            echo "# Platform ${platform}" >> $GITHUB_STEP_SUMMARY
-            echo "$markdown_table" >> $GITHUB_STEP_SUMMARY
-          done
diff --git a/benchmark/src/commonMain/kotlin/io/github/optimumcode/json/schema/benchmark/CommonBenchmarks.kt b/benchmark/src/commonMain/kotlin/io/github/optimumcode/json/schema/benchmark/CommonBenchmarks.kt
index 853ca4b8..791f6032 100644
--- a/benchmark/src/commonMain/kotlin/io/github/optimumcode/json/schema/benchmark/CommonBenchmarks.kt
+++ b/benchmark/src/commonMain/kotlin/io/github/optimumcode/json/schema/benchmark/CommonBenchmarks.kt
@@ -1,8 +1,7 @@
 package io.github.optimumcode.json.schema.benchmark
 
 import kotlinx.benchmark.BenchmarkMode
-import kotlinx.benchmark.BenchmarkTimeUnit.MILLISECONDS
-import kotlinx.benchmark.BenchmarkTimeUnit.SECONDS
+import kotlinx.benchmark.BenchmarkTimeUnit
 import kotlinx.benchmark.Mode.AverageTime
 import kotlinx.benchmark.Mode.Throughput
 import kotlinx.benchmark.OutputTimeUnit
@@ -11,7 +10,7 @@ import kotlinx.benchmark.Scope
 import kotlinx.benchmark.State
 
 @State(Scope.Benchmark)
-@OutputTimeUnit(SECONDS)
+@OutputTimeUnit(BenchmarkTimeUnit.SECONDS)
 @BenchmarkMode(Throughput)
 class CommonThroughputBench : AbstractCommonBenchmark() {
   @Param("object")
@@ -22,7 +21,7 @@ class CommonThroughputBench : AbstractCommonBenchmark() {
 }
 
 @State(Scope.Benchmark)
-@OutputTimeUnit(MILLISECONDS)
+@OutputTimeUnit(BenchmarkTimeUnit.MICROSECONDS)
 @BenchmarkMode(AverageTime)
 class CommonAvgTimeBench : AbstractCommonBenchmark() {
   @Param("object")
diff --git a/benchmark/src/jvmMain/kotlin/io/github/optimumcode/json/schema/benchmark/ComparisonBenchmark.kt b/benchmark/src/jvmMain/kotlin/io/github/optimumcode/json/schema/benchmark/ComparisonBenchmark.kt
index 5e8e089f..a6a4eb1b 100644
--- a/benchmark/src/jvmMain/kotlin/io/github/optimumcode/json/schema/benchmark/ComparisonBenchmark.kt
+++ b/benchmark/src/jvmMain/kotlin/io/github/optimumcode/json/schema/benchmark/ComparisonBenchmark.kt
@@ -6,7 +6,7 @@ import kotlinx.benchmark.OutputTimeUnit
 import kotlinx.benchmark.Param
 import kotlinx.benchmark.Scope
 import kotlinx.benchmark.State
-import java.util.concurrent.TimeUnit.MILLISECONDS
+import java.util.concurrent.TimeUnit.MICROSECONDS
 import java.util.concurrent.TimeUnit.SECONDS
 
 @State(Scope.Benchmark)
@@ -21,7 +21,7 @@ class ComparisonThroughputBenchmark : AbstractComparisonBenchmark() {
 }
 
 @State(Scope.Benchmark)
-@OutputTimeUnit(MILLISECONDS)
+@OutputTimeUnit(MICROSECONDS)
 @BenchmarkMode(Mode.AverageTime)
 class ComparisonAvgTimeBenchmark : AbstractComparisonBenchmark() {
   @Param("object")
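
Note on the "Run benchmarks" step: the inputs.* context is only populated for
workflow_dispatch runs, so on push and pull_request events the -Pbenchmark_*
flags are passed with empty values and the benchmark build presumably falls
back to its own defaults. A rough local equivalent of the CI invocation looks
like this (a sketch: the property names are taken from the workflow, the
values below are illustrative, not the project defaults):

    # Sketch: reproduce the CI benchmark invocation locally.
    # Values are examples only; adjust to taste.
    ./gradlew --no-daemon :benchmark:benchmark \
      -Pbenchmark_warmups=5 \
      -Pbenchmark_iterations=10 \
      -Pbenchmark_iteration_time=1 \
      -Pbenchmark_iteration_time_unit=s \
      -Pbenchmark_report_format=json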
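
Note on the jq calls in the "Prepare and join benchmark reports" step: the
"${platform}." fragment inside the double-quoted program is expanded by the
shell, not by jq. A worked example with a hypothetical per-platform report
(the file name and benchmark entry below are made up for illustration):

    # Hypothetical kotlinx-benchmark JSON report for one platform.
    echo '[{"benchmark":"io.github.optimumcode.json.schema.benchmark.CommonAvgTimeBench.validate"}]' > macosX64.json

    platform=macosX64
    # Pass 1: strip the shared package prefix and prepend the platform name.
    jq "[ .[] | .benchmark |= \"${platform}.\" + ltrimstr(\"io.github.optimumcode.json.schema.benchmark.\") ]" macosX64.json
    # -> [ { "benchmark": "macosX64.CommonAvgTimeBench.validate" } ]

    # Pass 2: -s slurps every report into an array of arrays; flatten it so
    # github-action-benchmark receives a single list of results.
    jq -s '[ .[] | .[] ]' *.json
    # -> [ { "benchmark": "macosX64.CommonAvgTimeBench.validate" }, ... ]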