From a316f3219fd3d98e74b25882ddfab3d9d4150a03 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Thu, 21 Sep 2023 15:04:34 -0500 Subject: [PATCH 01/13] Move increment of acknowledged to after logging of ack data. --- tests/trx_generator/trx_provider.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/trx_generator/trx_provider.cpp b/tests/trx_generator/trx_provider.cpp index 1d26316b3a..5d122ee291 100644 --- a/tests/trx_generator/trx_provider.cpp +++ b/tests/trx_generator/trx_provider.cpp @@ -133,7 +133,6 @@ namespace eosio::testing { params, std::move(msg_body), [this, trx_id = trx.id()](boost::beast::error_code ec, boost::beast::http::response response) { - ++this->_acknowledged; trx_acknowledged(trx_id, fc::time_point::now()); if (this->needs_response_trace_info() && response.result() == boost::beast::http::status::ok) { @@ -178,6 +177,7 @@ namespace eosio::testing { elog("async_http_request Failed with response http status code: ${status}", ("status", response.result_int())); } + ++this->_acknowledged; }); ++_sent; } From e26f0a29d740c52270d890d5d822bc5d156593cf Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Thu, 21 Sep 2023 16:00:03 -0500 Subject: [PATCH 02/13] Add command line config parameters for read-only-write-window-time-us and read-only-read-window-time-us. Make default write window smaller and read window larger during read only tests (testApiOpMode). --- tests/CMakeLists.txt | 2 +- tests/PerformanceHarness/README.md | 6 ++++++ tests/PerformanceHarness/performance_test_basic.py | 12 ++++++++---- 3 files changed, 15 insertions(+), 5 deletions(-) diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 233824062b..3d530505c1 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -282,7 +282,7 @@ add_test(NAME performance_test_basic_transfer_trx_spec COMMAND tests/Performance add_test(NAME performance_test_basic_new_acct_trx_spec COMMAND tests/PerformanceHarnessScenarioRunner.py singleTest -v --producer-nodes 1 --validation-nodes 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --chain-state-db-size-mb 200 --user-trx-data-file tests/PerformanceHarness/userTrxDataNewAccount.json ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) add_test(NAME performance_test_basic_cpu_trx_spec COMMAND tests/PerformanceHarnessScenarioRunner.py singleTest -v --producer-nodes 1 --validation-nodes 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --chain-state-db-size-mb 200 --account-name "c" --abi-file eosmechanics.abi --wasm-file eosmechanics.wasm --contract-dir unittests/contracts/eosio.mechanics --user-trx-data-file tests/PerformanceHarness/cpuTrxData.json ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) add_test(NAME performance_test_basic_ram_trx_spec COMMAND tests/PerformanceHarnessScenarioRunner.py singleTest -v --producer-nodes 1 --validation-nodes 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --chain-state-db-size-mb 200 --account-name "r" --abi-file eosmechanics.abi --wasm-file eosmechanics.wasm --contract-dir unittests/contracts/eosio.mechanics --user-trx-data-file tests/PerformanceHarness/ramTrxData.json ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -add_test(NAME performance_test_basic_read_only_trxs COMMAND tests/PerformanceHarnessScenarioRunner.py singleTest -v --endpoint-mode http --producer-nodes 1 --validation-nodes 1 --api-nodes 1 --api-nodes-read-only-threads 2 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --chain-state-db-size-mb 200 --account-name 
"payloadless" --abi-file payloadless.abi --wasm-file payloadless.wasm --contract-dir unittests/test-contracts/payloadless --user-trx-data-file tests/PerformanceHarness/readOnlyTrxData.json ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME performance_test_basic_read_only_trxs COMMAND tests/PerformanceHarnessScenarioRunner.py singleTest -v --endpoint-mode http --producer-nodes 1 --validation-nodes 1 --api-nodes 1 --api-nodes-read-only-threads 2 --read-only-write-window-time-us 1000 --read-only-read-window-time-us 165000 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --chain-state-db-size-mb 200 --account-name "payloadless" --abi-file payloadless.abi --wasm-file payloadless.wasm --contract-dir unittests/test-contracts/payloadless --user-trx-data-file tests/PerformanceHarness/readOnlyTrxData.json ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST performance_test_bp PROPERTY LABELS long_running_tests) set_property(TEST performance_test_api PROPERTY LABELS long_running_tests) set_property(TEST performance_test_read_only_trxs PROPERTY LABELS long_running_tests) diff --git a/tests/PerformanceHarness/README.md b/tests/PerformanceHarness/README.md index 29c84f5536..ff848198f0 100644 --- a/tests/PerformanceHarness/README.md +++ b/tests/PerformanceHarness/README.md @@ -506,6 +506,8 @@ usage: PerformanceHarnessScenarioRunner.py findMax testBpOpMode overrideBasicTes [--disable-subjective-billing DISABLE_SUBJECTIVE_BILLING] [--cpu-effort-percent CPU_EFFORT_PERCENT] [--producer-threads PRODUCER_THREADS] + [--read-only-write-window-time-us READ_ONLY_WRITE_WINDOW_TIME_US] + [--read-only-read-window-time-us READ_ONLY_READ_WINDOW_TIME_US] [--http-max-in-flight-requests HTTP_MAX_IN_FLIGHT_REQUESTS] [--http-max-response-time-ms HTTP_MAX_RESPONSE_TIME_MS] [--http-max-bytes-in-flight-mb HTTP_MAX_BYTES_IN_FLIGHT_MB] @@ -581,6 +583,10 @@ Performance Test Basic Base: Percentage of cpu block production time used to produce block. Whole number percentages, e.g. 80 for 80% --producer-threads PRODUCER_THREADS Number of worker threads in producer thread pool + --read-only-write-window-time-us READ_ONLY_WRITE_WINDOW_TIME_US + Time in microseconds the write window lasts. + --read-only-read-window-time-us READ_ONLY_READ_WINDOW_TIME_US + Time in microseconds the read window lasts. --http-max-in-flight-requests HTTP_MAX_IN_FLIGHT_REQUESTS Maximum number of requests http_plugin should use for processing http requests. 429 error response when exceeded. 
-1 for unlimited --http-max-response-time-ms HTTP_MAX_RESPONSE_TIME_MS diff --git a/tests/PerformanceHarness/performance_test_basic.py b/tests/PerformanceHarness/performance_test_basic.py index d2d3b72fa0..7e4b59346e 100755 --- a/tests/PerformanceHarness/performance_test_basic.py +++ b/tests/PerformanceHarness/performance_test_basic.py @@ -639,7 +639,9 @@ def setupClusterConfig(args) -> ClusterConfig: producerPluginArgs = ProducerPluginArgs(disableSubjectiveApiBilling=args.disable_subjective_billing, disableSubjectiveP2pBilling=args.disable_subjective_billing, cpuEffortPercent=args.cpu_effort_percent, - producerThreads=args.producer_threads, maxTransactionTime=-1) + producerThreads=args.producer_threads, maxTransactionTime=-1, + readOnlyWriteWindowTimeUs=args.read_only_write_window_time_us, + readOnlyReadWindowTimeUs=args.read_only_read_window_time_us) httpPluginArgs = HttpPluginArgs(httpMaxBytesInFlightMb=args.http_max_bytes_in_flight_mb, httpMaxInFlightRequests=args.http_max_in_flight_requests, httpMaxResponseTimeMs=args.http_max_response_time_ms, httpThreads=args.http_threads) netPluginArgs = NetPluginArgs(netThreads=args.net_threads, maxClients=0) @@ -659,7 +661,7 @@ def setupClusterConfig(args) -> ClusterConfig: class PtbArgumentsHandler(object): @staticmethod - def _createBaseArgumentParser(defEndpointApiDef: str, defProdNodeCnt: int, defValidationNodeCnt: int, defApiNodeCnt: int, suppressHelp: bool=False): + def _createBaseArgumentParser(defEndpointApiDef: str, defProdNodeCnt: int, defValidationNodeCnt: int, defApiNodeCnt: int, defRoWriteWindowTimeUs: int, defRoReadWindowTimeUs: int, suppressHelp: bool=False): testHelperArgParser=TestHelper.createArgumentParser(includeArgs={"-d","--dump-error-details","-v","--leave-running" ,"--unshared"}, suppressHelp=suppressHelp) ptbBaseParser = argparse.ArgumentParser(parents=[testHelperArgParser], add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter) @@ -695,6 +697,8 @@ def _createBaseArgumentParser(defEndpointApiDef: str, defProdNodeCnt: int, defVa ptbBaseParserGroup.add_argument("--disable-subjective-billing", type=bool, help=argparse.SUPPRESS if suppressHelp else "Disable subjective CPU billing for API/P2P transactions", default=True) ptbBaseParserGroup.add_argument("--cpu-effort-percent", type=int, help=argparse.SUPPRESS if suppressHelp else "Percentage of cpu block production time used to produce block. Whole number percentages, e.g. 80 for 80%%", default=100) ptbBaseParserGroup.add_argument("--producer-threads", type=int, help=argparse.SUPPRESS if suppressHelp else "Number of worker threads in producer thread pool", default=2) + ptbBaseParserGroup.add_argument("--read-only-write-window-time-us", type=int, help=argparse.SUPPRESS if suppressHelp else "Time in microseconds the write window lasts.", default=defRoWriteWindowTimeUs) + ptbBaseParserGroup.add_argument("--read-only-read-window-time-us", type=int, help=argparse.SUPPRESS if suppressHelp else "Time in microseconds the read window lasts.", default=defRoReadWindowTimeUs) ptbBaseParserGroup.add_argument("--http-max-in-flight-requests", type=int, help=argparse.SUPPRESS if suppressHelp else "Maximum number of requests http_plugin should use for processing http requests. 429 error response when exceeded. 
-1 for unlimited", default=-1) ptbBaseParserGroup.add_argument("--http-max-response-time-ms", type=int, help=argparse.SUPPRESS if suppressHelp else "Maximum time for processing a request, -1 for unlimited", default=-1) ptbBaseParserGroup.add_argument("--http-max-bytes-in-flight-mb", type=int, help=argparse.SUPPRESS if suppressHelp else "Maximum size in megabytes http_plugin should use for processing http requests. -1 for unlimited. 429\ @@ -728,11 +732,11 @@ def _createBaseArgumentParser(defEndpointApiDef: str, defProdNodeCnt: int, defVa @staticmethod def createBaseBpP2pArgumentParser(suppressHelp: bool=False): - return PtbArgumentsHandler._createBaseArgumentParser(defEndpointApiDef="p2p", defProdNodeCnt=1, defValidationNodeCnt=1, defApiNodeCnt=0, suppressHelp=suppressHelp) + return PtbArgumentsHandler._createBaseArgumentParser(defEndpointApiDef="p2p", defProdNodeCnt=1, defValidationNodeCnt=1, defApiNodeCnt=0, defRoWriteWindowTimeUs=200000, defRoReadWindowTimeUs=60000, suppressHelp=suppressHelp) @staticmethod def createBaseApiHttpArgumentParser(suppressHelp: bool=False): - return PtbArgumentsHandler._createBaseArgumentParser(defEndpointApiDef="http", defProdNodeCnt=1, defValidationNodeCnt=1, defApiNodeCnt=1, suppressHelp=suppressHelp) + return PtbArgumentsHandler._createBaseArgumentParser(defEndpointApiDef="http", defProdNodeCnt=1, defValidationNodeCnt=1, defApiNodeCnt=1, defRoWriteWindowTimeUs=1000, defRoReadWindowTimeUs=165000, suppressHelp=suppressHelp) @staticmethod def createArgumentParser(): From da223ca18f36c7d5073d644f09cffe4ad4392c56 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Thu, 21 Sep 2023 20:01:37 -0500 Subject: [PATCH 03/13] Use nodeos --full-version for better traceability. --- .github/workflows/ph_backward_compatibility.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ph_backward_compatibility.yaml b/.github/workflows/ph_backward_compatibility.yaml index 0abe6b6400..d8f6640253 100644 --- a/.github/workflows/ph_backward_compatibility.yaml +++ b/.github/workflows/ph_backward_compatibility.yaml @@ -67,7 +67,8 @@ jobs: rm build/bin/cleos cp /usr/bin/nodeos build/bin cp /usr/bin/cleos build/bin - ./build/bin/nodeos --version + ./build/bin/nodeos --full-version + ./build/bin/cleos --full-version - if: ${{ matrix.release == '3.1' || matrix.release == '3.2' }} name: Run Performance Tests ( Date: Thu, 21 Sep 2023 20:05:47 -0500 Subject: [PATCH 04/13] Add configurable leap-target override. Allows the workflow to be run on a specific branch to get a specific version of the Performance Harness while allowing the tester to override which version of nodeos and cleos should be tested. 
--- .../workflows/performance_harness_run.yaml | 44 ++++++++++++++++++- 1 file changed, 43 insertions(+), 1 deletion(-) diff --git a/.github/workflows/performance_harness_run.yaml b/.github/workflows/performance_harness_run.yaml index 7282bb1316..49c48356ab 100644 --- a/.github/workflows/performance_harness_run.yaml +++ b/.github/workflows/performance_harness_run.yaml @@ -12,6 +12,16 @@ on: override-test-params: description: 'Override perf harness params' type: string + override-leap: + description: Override leap target + type: string + override-leap-prerelease: + type: choice + description: Override leap prelease + options: + - default + - true + - false permissions: packages: read @@ -28,15 +38,25 @@ jobs: runs-on: ubuntu-latest outputs: test-params: ${{steps.overrides.outputs.test-params}} + leap-target: ${{steps.overrides.outputs.leap-target}} + leap-prerelease: ${{steps.overrides.outputs.leap-prerelease}} steps: - name: Setup Input Params id: overrides run: | echo test-params=findMax testBpOpMode >> $GITHUB_OUTPUT + echo leap-target="DEFAULT" >> $GITHUB_OUTPUT + echo leap-prerelease="DEFAULT" >> $GITHUB_OUTPUT if [[ "${{inputs.override-test-params}}" != "" ]]; then echo test-params=${{inputs.override-test-params}} >> $GITHUB_OUTPUT fi + if [[ "${{inputs.override-leap}}" != "" ]]; then + echo leap-target=${{inputs.override-leap}} >> $GITHUB_OUTPUT + fi + if [[ "${{inputs.override-leap-prerelease}}" == +(true|false) ]]; then + echo leap-prerelease=${{inputs.override-leap-prerelease}} >> $GITHUB_OUTPUT + fi platforms: name: Run Platforms Workflow @@ -99,9 +119,31 @@ jobs: uses: actions/download-artifact@v3 with: name: ${{github.event.inputs.platform-choice}}-build - - name: Run Performance Test + - name: Extract Build Directory run: | zstdcat build.tar.zst | tar x + - if: ${{ needs.v.outputs.leap-target != 'DEFAULT' }} + name: Download Prev Leap Version + uses: AntelopeIO/asset-artifact-download-action@v3 + with: + owner: AntelopeIO + repo: leap + target: '${{needs.v.outputs.leap-target}}' + prereleases: ${{fromJSON(needs.v.outputs.leap-prerelease)}} + file: 'leap.*${{github.event.inputs.platform-choice}}.*(x86_64|amd64).deb' + - if: ${{ needs.v.outputs.leap-target != 'DEFAULT' }} + name: Install leap & replace binaries for PH use + run: | + apt-get update + apt-get install -y ./leap*.deb + rm build/bin/nodeos + rm build/bin/cleos + cp /usr/bin/nodeos build/bin + cp /usr/bin/cleos build/bin + ./build/bin/nodeos --full-version + ./build/bin/cleos --full-version + - name: Run Performance Test + run: | cd build ./tests/PerformanceHarnessScenarioRunner.py ${{needs.v.outputs.test-params}} - name: Prepare results From 207abdc6df8348e0c23942e790f58ed43bfb99bf Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Thu, 21 Sep 2023 20:25:10 -0500 Subject: [PATCH 05/13] Set default to false. 
--- .github/workflows/performance_harness_run.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/performance_harness_run.yaml b/.github/workflows/performance_harness_run.yaml index 49c48356ab..4927faac2f 100644 --- a/.github/workflows/performance_harness_run.yaml +++ b/.github/workflows/performance_harness_run.yaml @@ -46,7 +46,7 @@ jobs: run: | echo test-params=findMax testBpOpMode >> $GITHUB_OUTPUT echo leap-target="DEFAULT" >> $GITHUB_OUTPUT - echo leap-prerelease="DEFAULT" >> $GITHUB_OUTPUT + echo leap-prerelease="false" >> $GITHUB_OUTPUT if [[ "${{inputs.override-test-params}}" != "" ]]; then echo test-params=${{inputs.override-test-params}} >> $GITHUB_OUTPUT From d20903fc6b412cddecdf1c9b9722e4c9ce670b16 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Thu, 21 Sep 2023 20:37:26 -0500 Subject: [PATCH 06/13] Fix cleos version full subcommand structure. --- .github/workflows/performance_harness_run.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/performance_harness_run.yaml b/.github/workflows/performance_harness_run.yaml index 4927faac2f..a7cd9085f7 100644 --- a/.github/workflows/performance_harness_run.yaml +++ b/.github/workflows/performance_harness_run.yaml @@ -141,7 +141,7 @@ jobs: cp /usr/bin/nodeos build/bin cp /usr/bin/cleos build/bin ./build/bin/nodeos --full-version - ./build/bin/cleos --full-version + ./build/bin/cleos version full - name: Run Performance Test run: | cd build From 1dcf366fa357c8b95e7105a11dc98cb27631912a Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Thu, 21 Sep 2023 20:55:26 -0500 Subject: [PATCH 07/13] Fix ph_backward_compatibility workflow to use version full subcommand structure for cleos version. --- .github/workflows/ph_backward_compatibility.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ph_backward_compatibility.yaml b/.github/workflows/ph_backward_compatibility.yaml index d8f6640253..a5cdd04b10 100644 --- a/.github/workflows/ph_backward_compatibility.yaml +++ b/.github/workflows/ph_backward_compatibility.yaml @@ -68,7 +68,7 @@ jobs: cp /usr/bin/nodeos build/bin cp /usr/bin/cleos build/bin ./build/bin/nodeos --full-version - ./build/bin/cleos --full-version + ./build/bin/cleos version full - if: ${{ matrix.release == '3.1' || matrix.release == '3.2' }} name: Run Performance Tests ( Date: Thu, 21 Sep 2023 22:15:01 -0500 Subject: [PATCH 08/13] Revert addition of defaults for read and write window configuration in testApiOpMode as it breaks backward compatibility. 
--- tests/CMakeLists.txt | 2 +- tests/PerformanceHarness/performance_test_basic.py | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 3d530505c1..f2aaf8d04e 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -274,7 +274,7 @@ set_property(TEST gelf_test PROPERTY LABELS nonparallelizable_tests) add_test(NAME performance_test_bp COMMAND tests/PerformanceHarnessScenarioRunner.py findMax testBpOpMode --max-tps-to-test 50 --test-iteration-min-step 10 --test-iteration-duration-sec 10 --final-iterations-duration-sec 10 --calc-chain-threads lmax overrideBasicTestConfig -v --tps-limit-per-generator 25 --chain-state-db-size-mb 200 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) add_test(NAME performance_test_api COMMAND tests/PerformanceHarnessScenarioRunner.py findMax testApiOpMode --max-tps-to-test 50 --test-iteration-min-step 10 --test-iteration-duration-sec 10 --final-iterations-duration-sec 10 --calc-chain-threads lmax overrideBasicTestConfig -v --tps-limit-per-generator 25 --chain-state-db-size-mb 200 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -add_test(NAME performance_test_read_only_trxs COMMAND tests/PerformanceHarnessScenarioRunner.py findMax testApiOpMode --max-tps-to-test 50 --test-iteration-min-step 10 --test-iteration-duration-sec 10 --final-iterations-duration-sec 10 overrideBasicTestConfig -v --tps-limit-per-generator 25 --api-nodes-read-only-threads 2 --account-name "payloadless" --abi-file payloadless.abi --wasm-file payloadless.wasm --contract-dir unittests/test-contracts/payloadless --user-trx-data-file tests/PerformanceHarness/readOnlyTrxData.json --chain-state-db-size-mb 200 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME performance_test_read_only_trxs COMMAND tests/PerformanceHarnessScenarioRunner.py findMax testApiOpMode --max-tps-to-test 50 --test-iteration-min-step 10 --test-iteration-duration-sec 10 --final-iterations-duration-sec 10 overrideBasicTestConfig -v --tps-limit-per-generator 25 --api-nodes-read-only-threads 2 --read-only-write-window-time-us 1000 --read-only-read-window-time-us 165000 --account-name "payloadless" --abi-file payloadless.abi --wasm-file payloadless.wasm --contract-dir unittests/test-contracts/payloadless --user-trx-data-file tests/PerformanceHarness/readOnlyTrxData.json --chain-state-db-size-mb 200 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) add_test(NAME performance_test_cpu_trx_spec COMMAND tests/PerformanceHarnessScenarioRunner.py findMax testBpOpMode --max-tps-to-test 50 --test-iteration-min-step 10 --test-iteration-duration-sec 10 --final-iterations-duration-sec 10 overrideBasicTestConfig -v --tps-limit-per-generator 25 --chain-state-db-size-mb 200 --account-name "c" --abi-file eosmechanics.abi --wasm-file eosmechanics.wasm --contract-dir unittests/contracts/eosio.mechanics --user-trx-data-file tests/PerformanceHarness/cpuTrxData.json ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) add_test(NAME performance_test_basic_p2p COMMAND tests/PerformanceHarnessScenarioRunner.py singleTest -v --producer-nodes 1 --validation-nodes 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --chain-state-db-size-mb 200 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) add_test(NAME performance_test_basic_http COMMAND tests/PerformanceHarnessScenarioRunner.py singleTest -v --endpoint-mode http --producer-nodes 1 --validation-nodes 1 --api-nodes 1 --target-tps 10 --tps-limit-per-generator 10 --test-duration-sec 5 
--chain-state-db-size-mb 200 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) diff --git a/tests/PerformanceHarness/performance_test_basic.py b/tests/PerformanceHarness/performance_test_basic.py index 7e4b59346e..02f9e9b3bf 100755 --- a/tests/PerformanceHarness/performance_test_basic.py +++ b/tests/PerformanceHarness/performance_test_basic.py @@ -661,7 +661,7 @@ def setupClusterConfig(args) -> ClusterConfig: class PtbArgumentsHandler(object): @staticmethod - def _createBaseArgumentParser(defEndpointApiDef: str, defProdNodeCnt: int, defValidationNodeCnt: int, defApiNodeCnt: int, defRoWriteWindowTimeUs: int, defRoReadWindowTimeUs: int, suppressHelp: bool=False): + def _createBaseArgumentParser(defEndpointApiDef: str, defProdNodeCnt: int, defValidationNodeCnt: int, defApiNodeCnt: int, suppressHelp: bool=False): testHelperArgParser=TestHelper.createArgumentParser(includeArgs={"-d","--dump-error-details","-v","--leave-running" ,"--unshared"}, suppressHelp=suppressHelp) ptbBaseParser = argparse.ArgumentParser(parents=[testHelperArgParser], add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter) @@ -697,8 +697,8 @@ def _createBaseArgumentParser(defEndpointApiDef: str, defProdNodeCnt: int, defVa ptbBaseParserGroup.add_argument("--disable-subjective-billing", type=bool, help=argparse.SUPPRESS if suppressHelp else "Disable subjective CPU billing for API/P2P transactions", default=True) ptbBaseParserGroup.add_argument("--cpu-effort-percent", type=int, help=argparse.SUPPRESS if suppressHelp else "Percentage of cpu block production time used to produce block. Whole number percentages, e.g. 80 for 80%%", default=100) ptbBaseParserGroup.add_argument("--producer-threads", type=int, help=argparse.SUPPRESS if suppressHelp else "Number of worker threads in producer thread pool", default=2) - ptbBaseParserGroup.add_argument("--read-only-write-window-time-us", type=int, help=argparse.SUPPRESS if suppressHelp else "Time in microseconds the write window lasts.", default=defRoWriteWindowTimeUs) - ptbBaseParserGroup.add_argument("--read-only-read-window-time-us", type=int, help=argparse.SUPPRESS if suppressHelp else "Time in microseconds the read window lasts.", default=defRoReadWindowTimeUs) + ptbBaseParserGroup.add_argument("--read-only-write-window-time-us", type=int, help=argparse.SUPPRESS if suppressHelp else "Time in microseconds the write window lasts.", default=200000) + ptbBaseParserGroup.add_argument("--read-only-read-window-time-us", type=int, help=argparse.SUPPRESS if suppressHelp else "Time in microseconds the read window lasts.", default=60000) ptbBaseParserGroup.add_argument("--http-max-in-flight-requests", type=int, help=argparse.SUPPRESS if suppressHelp else "Maximum number of requests http_plugin should use for processing http requests. 429 error response when exceeded. -1 for unlimited", default=-1) ptbBaseParserGroup.add_argument("--http-max-response-time-ms", type=int, help=argparse.SUPPRESS if suppressHelp else "Maximum time for processing a request, -1 for unlimited", default=-1) ptbBaseParserGroup.add_argument("--http-max-bytes-in-flight-mb", type=int, help=argparse.SUPPRESS if suppressHelp else "Maximum size in megabytes http_plugin should use for processing http requests. -1 for unlimited. 
429\ @@ -732,11 +732,11 @@ def _createBaseArgumentParser(defEndpointApiDef: str, defProdNodeCnt: int, defVa @staticmethod def createBaseBpP2pArgumentParser(suppressHelp: bool=False): - return PtbArgumentsHandler._createBaseArgumentParser(defEndpointApiDef="p2p", defProdNodeCnt=1, defValidationNodeCnt=1, defApiNodeCnt=0, defRoWriteWindowTimeUs=200000, defRoReadWindowTimeUs=60000, suppressHelp=suppressHelp) + return PtbArgumentsHandler._createBaseArgumentParser(defEndpointApiDef="p2p", defProdNodeCnt=1, defValidationNodeCnt=1, defApiNodeCnt=0, suppressHelp=suppressHelp) @staticmethod def createBaseApiHttpArgumentParser(suppressHelp: bool=False): - return PtbArgumentsHandler._createBaseArgumentParser(defEndpointApiDef="http", defProdNodeCnt=1, defValidationNodeCnt=1, defApiNodeCnt=1, defRoWriteWindowTimeUs=1000, defRoReadWindowTimeUs=165000, suppressHelp=suppressHelp) + return PtbArgumentsHandler._createBaseArgumentParser(defEndpointApiDef="http", defProdNodeCnt=1, defValidationNodeCnt=1, defApiNodeCnt=1, suppressHelp=suppressHelp) @staticmethod def createArgumentParser(): From 3ed0b74dc8cf5e1af5ab2ec624f6068b7b948b3b Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Fri, 22 Sep 2023 10:12:17 -0500 Subject: [PATCH 09/13] Try running on self-hosted runner. --- .github/workflows/performance_harness_run.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/performance_harness_run.yaml b/.github/workflows/performance_harness_run.yaml index a7cd9085f7..befad5a29b 100644 --- a/.github/workflows/performance_harness_run.yaml +++ b/.github/workflows/performance_harness_run.yaml @@ -111,7 +111,7 @@ jobs: name: Tests needs: [v, platforms, reuse-build, build-base] if: always() && needs.platforms.result == 'success' && (needs.build-base.result == 'success' || needs.reuse-build.result == 'success') - runs-on: ubuntu-latest + runs-on: ["self-hosted", "enf-x86-beefy-long"] container: image: ${{fromJSON(needs.platforms.outputs.p)[github.event.inputs.platform-choice].image}} steps: From 1be11b91158a4c358c7277b9d1689b50ef883592 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Fri, 22 Sep 2023 10:49:50 -0500 Subject: [PATCH 10/13] Shorten logs directory naming. --- tests/PerformanceHarness/README.md | 46 ++++++++++---------- tests/PerformanceHarness/performance_test.py | 6 +-- 2 files changed, 26 insertions(+), 26 deletions(-) diff --git a/tests/PerformanceHarness/README.md b/tests/PerformanceHarness/README.md index ff848198f0..3c79c5f877 100644 --- a/tests/PerformanceHarness/README.md +++ b/tests/PerformanceHarness/README.md @@ -32,19 +32,19 @@ Please refer to [Leap: Build and Install from Source](https://github.com/Antelop 3. Collect Results - By default the Performance Harness will capture and save logs. To delete logs, use `--del-perf-logs`. Additionally, final reports will be collected by default. To omit final reports, use `--del-report` and/or `--del-test-report`. 1. Navigate to performance test logs directory ```bash - cd ./build/PerformanceHarnessScenarioRunnerLogs/ + cd ./build/PHSRLogs/ ``` 2. Log Directory Structure is hierarchical with each run of the `PerformanceHarnessScenarioRunner` reporting into a timestamped directory where it includes the full performance report as well as a directory containing output from each test type run (here, `PerformanceTestBasic`) and each individual test run outputs into a timestamped directory within `testRunLogs` that may contain block data logs and transaction generator logs as well as the test's basic report. 
An example directory structure follows:
Expand Example Directory Structure ``` bash - PerformanceHarnessScenarioRunnerLogs/ + PHSRLogs/ └── 2023-04-05_14-35-59 ├── pluginThreadOptRunLogs │   ├── chainThreadResults.txt │   ├── netThreadResults.txt - │   ├── PerformanceHarnessScenarioRunnerLogs + │   ├── PHSRLogs │   │   ├── 2023-04-05_14-35-59-50000 │   │   │   ├── blockDataLogs │   │   │   │   ├── blockData.txt @@ -163,7 +163,7 @@ Please refer to [Leap: Build and Install from Source](https://github.com/Antelop │   └── producerThreadResults.txt ├── report.json └── testRunLogs - └── PerformanceHarnessScenarioRunnerLogs + └── PHSRLogs ├── 2023-04-05_16-14-31-50000 │   ├── blockDataLogs │   │   ├── blockData.txt @@ -929,7 +929,7 @@ Next, a summary of the search scenario conducted and respective results is inclu "expectedTxns": 140010, "resultTxns": 140010, "testAnalysisBlockCnt": 17, - "logsDir": "PerformanceHarnessScenarioRunnerLogs/2023-08-18_16-16-57/testRunLogs/PerformanceHarnessScenarioRunnerLogs/2023-08-18_17-49-42-14001" + "logsDir": "PHSRLogs/2023-08-18_16-16-57/testRunLogs/PHSRunLogs/2023-08-18_17-49-42-14001" } } ``` @@ -1007,7 +1007,7 @@ Finally, the full detail test report for each of the determined max TPS throughp "expectedTxns": 500000, "resultTxns": 295339, "testAnalysisBlockCnt": 41, - "logsDir": "PerformanceHarnessScenarioRunnerLogs/2023-08-18_16-16-57/testRunLogs/PerformanceHarnessScenarioRunnerLogs/2023-08-18_17-39-08-50000" + "logsDir": "PHSRLogs/2023-08-18_16-16-57/testRunLogs/PHSRunLogs/2023-08-18_17-39-08-50000" } }, "1": { @@ -1029,7 +1029,7 @@ Finally, the full detail test report for each of the determined max TPS throughp "expectedTxns": 250010, "resultTxns": 249933, "testAnalysisBlockCnt": 34, - "logsDir": "PerformanceHarnessScenarioRunnerLogs/2023-08-18_16-16-57/testRunLogs/PerformanceHarnessScenarioRunnerLogs/2023-08-18_17-40-45-25001" + "logsDir": "PHSRLogs/2023-08-18_16-16-57/testRunLogs/PHSRunLogs/2023-08-18_17-40-45-25001" } }, "2": { @@ -1051,7 +1051,7 @@ Finally, the full detail test report for each of the determined max TPS throughp "expectedTxns": 125010, "resultTxns": 125010, "testAnalysisBlockCnt": 17, - "logsDir": "PerformanceHarnessScenarioRunnerLogs/2023-08-18_16-16-57/testRunLogs/PerformanceHarnessScenarioRunnerLogs/2023-08-18_17-42-10-12501" + "logsDir": "PHSRLogs/2023-08-18_16-16-57/testRunLogs/PHSRunLogs/2023-08-18_17-42-10-12501" } }, "3": { @@ -1073,7 +1073,7 @@ Finally, the full detail test report for each of the determined max TPS throughp "expectedTxns": 190010, "resultTxns": 190010, "testAnalysisBlockCnt": 23, - "logsDir": "PerformanceHarnessScenarioRunnerLogs/2023-08-18_16-16-57/testRunLogs/PerformanceHarnessScenarioRunnerLogs/2023-08-18_17-43-23-19001" + "logsDir": "PHSRLogs/2023-08-18_16-16-57/testRunLogs/PHSRunLogs/2023-08-18_17-43-23-19001" } }, "4": { @@ -1095,7 +1095,7 @@ Finally, the full detail test report for each of the determined max TPS throughp "expectedTxns": 160010, "resultTxns": 160010, "testAnalysisBlockCnt": 19, - "logsDir": "PerformanceHarnessScenarioRunnerLogs/2023-08-18_16-16-57/testRunLogs/PerformanceHarnessScenarioRunnerLogs/2023-08-18_17-44-44-16001" + "logsDir": "PHSRLogs/2023-08-18_16-16-57/testRunLogs/PHSRunLogs/2023-08-18_17-44-44-16001" } }, "5": { @@ -1117,7 +1117,7 @@ Finally, the full detail test report for each of the determined max TPS throughp "expectedTxns": 145010, "resultTxns": 144898, "testAnalysisBlockCnt": 17, - "logsDir": 
"PerformanceHarnessScenarioRunnerLogs/2023-08-18_16-16-57/testRunLogs/PerformanceHarnessScenarioRunnerLogs/2023-08-18_17-46-01-14501" + "logsDir": "PHSRLogs/2023-08-18_16-16-57/testRunLogs/PHSRunLogs/2023-08-18_17-46-01-14501" } }, "6": { @@ -1139,7 +1139,7 @@ Finally, the full detail test report for each of the determined max TPS throughp "expectedTxns": 135010, "resultTxns": 135010, "testAnalysisBlockCnt": 17, - "logsDir": "PerformanceHarnessScenarioRunnerLogs/2023-08-18_16-16-57/testRunLogs/PerformanceHarnessScenarioRunnerLogs/2023-08-18_17-47-15-13501" + "logsDir": "PHSRLogs/2023-08-18_16-16-57/testRunLogs/PHSRunLogs/2023-08-18_17-47-15-13501" } }, "7": { @@ -1161,7 +1161,7 @@ Finally, the full detail test report for each of the determined max TPS throughp "expectedTxns": 140010, "resultTxns": 140010, "testAnalysisBlockCnt": 17, - "logsDir": "PerformanceHarnessScenarioRunnerLogs/2023-08-18_16-16-57/testRunLogs/PerformanceHarnessScenarioRunnerLogs/2023-08-18_17-48-29-14001" + "logsDir": "PHSRLogs/2023-08-18_16-16-57/testRunLogs/PHSRunLogs/2023-08-18_17-48-29-14001" } } }, @@ -1201,7 +1201,7 @@ Finally, the full detail test report for each of the determined max TPS throughp "expectedTxns": 140010, "resultTxns": 140010, "testAnalysisBlockCnt": 17, - "logsDir": "PerformanceHarnessScenarioRunnerLogs/2023-08-18_16-16-57/testRunLogs/PerformanceHarnessScenarioRunnerLogs/2023-08-18_17-49-42-14001" + "logsDir": "PHSRLogs/2023-08-18_16-16-57/testRunLogs/PHSRunLogs/2023-08-18_17-49-42-14001" } } }, @@ -1779,11 +1779,11 @@ Finally, the full detail test report for each of the determined max TPS throughp "userTrxDataFile": null, "endpointMode": "p2p", "opModeCmd": "testBpOpMode", - "logDirBase": "PerformanceHarnessScenarioRunnerLogs", + "logDirBase": "PHSRLogs", "logDirTimestamp": "2023-08-18_16-16-57", - "logDirPath": "PerformanceHarnessScenarioRunnerLogs/2023-08-18_16-16-57", - "ptbLogsDirPath": "PerformanceHarnessScenarioRunnerLogs/2023-08-18_16-16-57/testRunLogs", - "pluginThreadOptLogsDirPath": "PerformanceHarnessScenarioRunnerLogs/2023-08-18_16-16-57/pluginThreadOptRunLogs" + "logDirPath": "PHSRLogs/2023-08-18_16-16-57", + "ptbLogsDirPath": "PHSRLogs/2023-08-18_16-16-57/testRunLogs", + "pluginThreadOptLogsDirPath": "PHSRLogs/2023-08-18_16-16-57/pluginThreadOptRunLogs" }, "env": { "system": "Linux", @@ -1799,7 +1799,7 @@ Finally, the full detail test report for each of the determined max TPS throughp ## Performance Test Basic Report -The Performance Test Basic generates, by default, a report that details results of the test, statistics around metrics of interest, as well as diagnostic information about the test run. If `PerformanceHarnessScenarioRunner.py findMax` is run with `--del-test-report`, or `PerformanceHarnessScenarioRunner.py singleTest` is run with `--del-report`, the report described below will not be written. Otherwise the report will be written to the timestamped directory within the `PerformanceHarnessScenarioRunnerLogs` log directory for the test run with the file name `data.json`. +The Performance Test Basic generates, by default, a report that details results of the test, statistics around metrics of interest, as well as diagnostic information about the test run. If `PerformanceHarnessScenarioRunner.py findMax` is run with `--del-test-report`, or `PerformanceHarnessScenarioRunner.py singleTest` is run with `--del-report`, the report described below will not be written. 
Otherwise the report will be written to the timestamped directory within the `PHSRLogs` log directory for the test run with the file name `data.json`.
Expand for full sample report @@ -1822,7 +1822,7 @@ The Performance Test Basic generates, by default, a report that details results "expectedTxns": 140010, "resultTxns": 140010, "testAnalysisBlockCnt": 17, - "logsDir": "PerformanceHarnessScenarioRunnerLogs/2023-08-18_16-16-57/testRunLogs/PerformanceHarnessScenarioRunnerLogs/2023-08-18_17-49-42-14001" + "logsDir": "PHSRLogs/2023-08-18_16-16-57/testRunLogs/PHSRunLogs/2023-08-18_17-49-42-14001" }, "Analysis": { "BlockSize": { @@ -2430,7 +2430,7 @@ The Performance Test Basic generates, by default, a report that details results "testTrxGenDurationSec": 10, "tpsLimitPerGenerator": 4000, "numAddlBlocksToPrune": 2, - "logDirRoot": "PerformanceHarnessScenarioRunnerLogs/2023-08-18_16-16-57/testRunLogs", + "logDirRoot": "PHSRLogs/2023-08-18_16-16-57/testRunLogs", "delReport": false, "quiet": false, "delPerfLogs": false, @@ -2439,10 +2439,10 @@ The Performance Test Basic generates, by default, a report that details results "userTrxDataFile": null, "endpointMode": "p2p", "apiEndpoint": null, - "logDirBase": "PerformanceHarnessScenarioRunnerLogs/2023-08-18_16-16-57/testRunLogs/PerformanceHarnessScenarioRunnerLogs", + "logDirBase": "PHSRLogs/2023-08-18_16-16-57/testRunLogs/PHSRunLogs", "logDirTimestamp": "2023-08-18_17-49-42", "logDirTimestampedOptSuffix": "-14001", - "logDirPath": "PerformanceHarnessScenarioRunnerLogs/2023-08-18_16-16-57/testRunLogs/PerformanceHarnessScenarioRunnerLogs/2023-08-18_17-49-42-14001", + "logDirPath": "PHSRLogs/2023-08-18_16-16-57/testRunLogs/PHSRunLogs/2023-08-18_17-49-42-14001", "userTrxData": "NOT CONFIGURED" }, "env": { diff --git a/tests/PerformanceHarness/performance_test.py b/tests/PerformanceHarness/performance_test.py index 4332450304..47e541c657 100755 --- a/tests/PerformanceHarness/performance_test.py +++ b/tests/PerformanceHarness/performance_test.py @@ -96,7 +96,7 @@ def __init__(self, testHelperConfig: PerformanceTestBasic.TestHelperConfig=Perfo self.testsStart = datetime.utcnow() - self.loggingConfig = PerformanceTest.LoggingConfig(logDirBase=Path(self.ptConfig.logDirRoot)/f"{os.path.basename(sys.argv[0]).rsplit('.',maxsplit=1)[0]}Logs", + self.loggingConfig = PerformanceTest.LoggingConfig(logDirBase=Path(self.ptConfig.logDirRoot)/f"PHSRLogs", logDirTimestamp=f"{self.testsStart.strftime('%Y-%m-%d_%H-%M-%S')}") def performPtbBinarySearch(self, clusterConfig: PerformanceTestBasic.ClusterConfig, logDirRoot: Path, delReport: bool, quiet: bool, delPerfLogs: bool) -> TpsTestResult.PerfTestSearchResults: @@ -117,7 +117,7 @@ def performPtbBinarySearch(self, clusterConfig: PerformanceTestBasic.ClusterConf quiet=quiet, userTrxDataFile=self.ptConfig.userTrxDataFile, endpointMode=self.ptConfig.endpointMode, trxGenerator=self.ptConfig.trxGenerator) - myTest = PerformanceTestBasic(testHelperConfig=self.testHelperConfig, clusterConfig=clusterConfig, ptbConfig=ptbConfig, testNamePath=os.path.basename(sys.argv[0]).rsplit('.',maxsplit=1)[0]) + myTest = PerformanceTestBasic(testHelperConfig=self.testHelperConfig, clusterConfig=clusterConfig, ptbConfig=ptbConfig, testNamePath="PHSRun") myTest.runTest() if myTest.testResult.testPassed: maxTpsAchieved = binSearchTarget @@ -160,7 +160,7 @@ def performPtbReverseLinearSearch(self, tpsInitial: int) -> TpsTestResult.PerfTe quiet=self.ptConfig.quiet, delPerfLogs=self.ptConfig.delPerfLogs, userTrxDataFile=self.ptConfig.userTrxDataFile, endpointMode=self.ptConfig.endpointMode, trxGenerator=self.ptConfig.trxGenerator) - myTest = PerformanceTestBasic(testHelperConfig=self.testHelperConfig, 
clusterConfig=self.clusterConfig, ptbConfig=ptbConfig, testNamePath=os.path.basename(sys.argv[0]).rsplit('.',maxsplit=1)[0]) + myTest = PerformanceTestBasic(testHelperConfig=self.testHelperConfig, clusterConfig=self.clusterConfig, ptbConfig=ptbConfig, testNamePath="PHSRun") myTest.runTest() if myTest.testResult.testPassed: maxTpsAchieved = searchTarget From 590fd51ba66a91e22773ec2f38fd6560fcc8ea98 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Fri, 22 Sep 2023 11:03:36 -0500 Subject: [PATCH 11/13] Fix name in workflow logs dir naming. --- .github/workflows/performance_harness_run.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/performance_harness_run.yaml b/.github/workflows/performance_harness_run.yaml index befad5a29b..b1cfb1ba3f 100644 --- a/.github/workflows/performance_harness_run.yaml +++ b/.github/workflows/performance_harness_run.yaml @@ -149,14 +149,14 @@ jobs: - name: Prepare results id: prep-results run: | - tar -pc build/PerformanceHarnessScenarioRunnerLogs | zstd --long -T0 -9 > PerformanceHarnessScenarioRunnerLogs.tar.zst + tar -pc build/PHSRLogs | zstd --long -T0 -9 > PHSRLogs.tar.zst - name: Upload results uses: AntelopeIO/upload-artifact-large-chunks-action@v1 with: name: performance-test-results - path: PerformanceHarnessScenarioRunnerLogs.tar.zst + path: PHSRLogs.tar.zst - name: Upload report uses: actions/upload-artifact@v3 with: name: performance-test-report - path: ./build/PerformanceHarnessScenarioRunnerLogs/**/report.json + path: ./build/PHSRLogs/**/report.json From 131f70540161af4c3557822fb15af8f364a662b5 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Fri, 22 Sep 2023 11:39:36 -0500 Subject: [PATCH 12/13] Need to use testUtil's log path here for nodeos logs. --- tests/PerformanceHarness/performance_test_basic.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/PerformanceHarness/performance_test_basic.py b/tests/PerformanceHarness/performance_test_basic.py index 02f9e9b3bf..d5e325adb7 100755 --- a/tests/PerformanceHarness/performance_test_basic.py +++ b/tests/PerformanceHarness/performance_test_basic.py @@ -226,7 +226,7 @@ def __init__(self, testHelperConfig: TestHelperConfig=TestHelperConfig(), cluste self.producerNodeId = self.clusterConfig._producerNodeIds[0] self.validationNodeId = self.clusterConfig._validationNodeIds[0] pid = os.getpid() - self.nodeosLogDir = Path(self.loggingConfig.logDirPath)/"var"/f"{self.testNamePath}{pid}" + self.nodeosLogDir = Path(self.loggingConfig.logDirPath)/"var"/f"{Utils.DataRoot}{Utils.PID}" self.nodeosLogPath = self.nodeosLogDir/f"node_{str(self.validationNodeId).zfill(2)}"/"stderr.txt" # Setup cluster and its wallet manager From 90cbd5ef5e1136667d8a89b35e279f08f55320d1 Mon Sep 17 00:00:00 2001 From: Peter Oschwald Date: Fri, 22 Sep 2023 11:40:00 -0500 Subject: [PATCH 13/13] Use new github large runner for performance harness runs. 
--- .github/workflows/performance_harness_run.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/performance_harness_run.yaml b/.github/workflows/performance_harness_run.yaml index b1cfb1ba3f..ea49f4375b 100644 --- a/.github/workflows/performance_harness_run.yaml +++ b/.github/workflows/performance_harness_run.yaml @@ -111,7 +111,7 @@ jobs: name: Tests needs: [v, platforms, reuse-build, build-base] if: always() && needs.platforms.result == 'success' && (needs.build-base.result == 'success' || needs.reuse-build.result == 'success') - runs-on: ["self-hosted", "enf-x86-beefy-long"] + runs-on: ["Leap-Perf-Ubuntu-22-16x64x600"] container: image: ${{fromJSON(needs.platforms.outputs.p)[github.event.inputs.platform-choice].image}} steps:
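
For reference, the read-only window flags introduced in patches 02 and 08 can be exercised directly against a local build. A minimal sketch, assuming a working directory of `build` and the payloadless contract under `unittests/test-contracts`; it simply mirrors the `performance_test_basic_read_only_trxs` CTest entry above rather than prescribing tuned values:

```bash
# Sketch only: mirrors the performance_test_basic_read_only_trxs CTest entry added in
# patches 02/08 (run from the build directory; ${UNSHARE} omitted). The window values
# are the ones used by that test, not a tuning recommendation.
./tests/PerformanceHarnessScenarioRunner.py singleTest -v --endpoint-mode http \
    --producer-nodes 1 --validation-nodes 1 --api-nodes 1 --api-nodes-read-only-threads 2 \
    --read-only-write-window-time-us 1000 --read-only-read-window-time-us 165000 \
    --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 \
    --chain-state-db-size-mb 200 --account-name "payloadless" \
    --abi-file payloadless.abi --wasm-file payloadless.wasm \
    --contract-dir unittests/test-contracts/payloadless \
    --user-trx-data-file tests/PerformanceHarness/readOnlyTrxData.json
```

As patch 02 shows, the two `--read-only-*` values are forwarded through `ProducerPluginArgs` (`readOnlyWriteWindowTimeUs` / `readOnlyReadWindowTimeUs`) in `setupClusterConfig`, and after patch 08 the argparse defaults revert to 200000/60000 µs, so read-only-focused runs that want the shorter write window and longer read window must pass these flags explicitly, as the CTest entries now do.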