diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 5b689ce757..4dccdddd5b 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -39,7 +39,9 @@ jobs: contents: read with: runs-on: '["self-hosted", "enf-x86-beefy"]' - platform-files: .cicd/platforms + platform-files: | + .cicd/platforms + tools/reproducible.Dockerfile:builder build-base: name: Run Build Workflow @@ -77,13 +79,13 @@ jobs: echo eos-system-contracts-ref=${{inputs.override-eos-system-contracts}} >> $GITHUB_OUTPUT fi - dev-package: - name: Build leap-dev package + package: + name: Build deb packages needs: [platform-cache, build-base] strategy: fail-fast: false matrix: - platform: [ubuntu20, ubuntu22] + platform: [ubuntu20, ubuntu22, reproducible] runs-on: ubuntu-latest container: ${{fromJSON(needs.platform-cache.outputs.platforms)[matrix.platform].image}} steps: @@ -94,41 +96,55 @@ jobs: uses: actions/download-artifact@v3 with: name: ${{matrix.platform}}-build - - name: Build dev package + - name: Build packages run: | zstdcat build.tar.zst | tar x cd build cpack + ../tools/tweak-deb.sh leap_*.deb - name: Install dev package + if: matrix.platform != 'reproducible' run: | apt-get update && apt-get upgrade -y apt-get install -y ./build/leap_*.deb ./build/leap-dev*.deb - name: Test using TestHarness + if: matrix.platform != 'reproducible' run: | python3 -c "from TestHarness import Cluster" - name: Upload dev package uses: actions/upload-artifact@v3 + if: matrix.platform != 'reproducible' with: name: leap-dev-${{matrix.platform}}-amd64 path: build/leap-dev*.deb + - name: Upload leap package + uses: actions/upload-artifact@v3 + if: matrix.platform == 'reproducible' + with: + name: leap-deb-amd64 + path: build/leap_*.deb tests: - name: Tests + name: Tests (${{matrix.cfg.name}}) needs: [platform-cache, build-base] strategy: fail-fast: false matrix: - platform: [ubuntu20, ubuntu22] + include: + - cfg: {name: 'ubuntu20', base: 'ubuntu20', builddir: 'ubuntu20'} + - cfg: {name: 'ubuntu22', base: 'ubuntu22', builddir: 'ubuntu22'} + - cfg: {name: 'ubuntu20repro', base: 'ubuntu20', builddir: 'reproducible'} + - cfg: {name: 'ubuntu22repro', base: 'ubuntu22', builddir: 'reproducible'} runs-on: ["self-hosted", "enf-x86-hightier"] container: - image: ${{fromJSON(needs.platform-cache.outputs.platforms)[matrix.platform].image}} + image: ${{fromJSON(needs.platform-cache.outputs.platforms)[matrix.cfg.base].image}} options: --security-opt seccomp=unconfined steps: - uses: actions/checkout@v3 - name: Download builddir uses: actions/download-artifact@v3 with: - name: ${{matrix.platform}}-build + name: ${{matrix.cfg.builddir}}-build - name: Run Parallel Tests run: | # https://github.com/actions/runner/issues/2033 -- need this because of full version label test looking at git revs @@ -140,66 +156,74 @@ jobs: run: awk 'BEGIN {err = 1} /bmi2/ && /adx/ {err = 0} END {exit err}' /proc/cpuinfo np-tests: - name: NP Tests + name: NP Tests (${{matrix.cfg.name}}) needs: [platform-cache, build-base] strategy: fail-fast: false matrix: - platform: [ubuntu20, ubuntu22] + include: + - cfg: {name: 'ubuntu20', base: 'ubuntu20', builddir: 'ubuntu20'} + - cfg: {name: 'ubuntu22', base: 'ubuntu22', builddir: 'ubuntu22'} + - cfg: {name: 'ubuntu20repro', base: 'ubuntu20', builddir: 'reproducible'} + - cfg: {name: 'ubuntu22repro', base: 'ubuntu22', builddir: 'reproducible'} runs-on: ["self-hosted", "enf-x86-midtier"] steps: - uses: actions/checkout@v3 - name: Download builddir uses: actions/download-artifact@v3 with: - name: 
${{matrix.platform}}-build + name: ${{matrix.cfg.builddir}}-build - name: Run tests in parallel containers uses: ./.github/actions/parallel-ctest-containers with: - container: ${{fromJSON(needs.platform-cache.outputs.platforms)[matrix.platform].image}} + container: ${{fromJSON(needs.platform-cache.outputs.platforms)[matrix.cfg.base].image}} error-log-paths: '["build/etc", "build/var", "build/leap-ignition-wd", "build/TestLogs"]' - log-tarball-prefix: ${{matrix.platform}} + log-tarball-prefix: ${{matrix.cfg.name}} tests-label: nonparallelizable_tests test-timeout: 420 - name: Upload logs from failed tests uses: actions/upload-artifact@v3 if: failure() with: - name: ${{matrix.platform}}-np-logs + name: ${{matrix.cfg.name}}-np-logs path: '*-logs.tar.gz' lr-tests: - name: LR Tests + name: LR Tests (${{matrix.cfg.name}}) needs: [platform-cache, build-base] strategy: fail-fast: false matrix: - platform: [ubuntu20, ubuntu22] + include: + - cfg: {name: 'ubuntu20', base: 'ubuntu20', builddir: 'ubuntu20'} + - cfg: {name: 'ubuntu22', base: 'ubuntu22', builddir: 'ubuntu22'} + - cfg: {name: 'ubuntu20repro', base: 'ubuntu20', builddir: 'reproducible'} + - cfg: {name: 'ubuntu22repro', base: 'ubuntu22', builddir: 'reproducible'} runs-on: ["self-hosted", "enf-x86-lowtier"] steps: - uses: actions/checkout@v3 - name: Download builddir uses: actions/download-artifact@v3 with: - name: ${{matrix.platform}}-build + name: ${{matrix.cfg.builddir}}-build - name: Run tests in parallel containers uses: ./.github/actions/parallel-ctest-containers with: - container: ${{fromJSON(needs.platform-cache.outputs.platforms)[matrix.platform].image}} + container: ${{fromJSON(needs.platform-cache.outputs.platforms)[matrix.cfg.base].image}} error-log-paths: '["build/etc", "build/var", "build/leap-ignition-wd", "build/TestLogs"]' - log-tarball-prefix: ${{matrix.platform}} + log-tarball-prefix: ${{matrix.cfg.name}} tests-label: long_running_tests test-timeout: 1800 - name: Upload logs from failed tests uses: actions/upload-artifact@v3 if: failure() with: - name: ${{matrix.platform}}-lr-logs + name: ${{matrix.cfg.name}}-lr-logs path: '*-logs.tar.gz' libtester-tests: name: libtester tests - needs: [platform-cache, build-base, v, dev-package] + needs: [platform-cache, build-base, v, package] strategy: fail-fast: false matrix: @@ -290,9 +314,9 @@ jobs: all-passing: name: All Required Tests Passed - needs: [dev-package, tests, np-tests, libtester-tests] + needs: [tests, np-tests, libtester-tests] if: always() runs-on: ubuntu-latest steps: - - if: needs.dev-package.result != 'success' || needs.tests.result != 'success' || needs.np-tests.result != 'success' || needs.libtester-tests.result != 'success' + - if: needs.tests.result != 'success' || needs.np-tests.result != 'success' || needs.libtester-tests.result != 'success' run: false diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml new file mode 100644 index 0000000000..dfcd19869d --- /dev/null +++ b/.github/workflows/release.yaml @@ -0,0 +1,52 @@ +name: Release Actions + +on: + release: + types: [published] + +jobs: + eb: + name: experimental-binaries + runs-on: ubuntu-latest + permissions: + contents: read + packages: write + actions: read + steps: + - name: Get ubuntu20 leap-dev.deb + uses: AntelopeIO/asset-artifact-download-action@v3 + with: + owner: ${{github.repository_owner}} + repo: ${{github.event.repository.name}} + file: 'leap-dev.*amd64.deb' + target: ${{github.sha}} + artifact-name: leap-dev-ubuntu20-amd64 + wait-for-exact-target-workflow: true + - 
name: Get ubuntu22 leap-dev.deb + uses: AntelopeIO/asset-artifact-download-action@v3 + with: + owner: ${{github.repository_owner}} + repo: ${{github.event.repository.name}} + file: 'leap-dev.*amd64.deb' + target: ${{github.sha}} + artifact-name: leap-dev-ubuntu22-amd64 + wait-for-exact-target-workflow: true + - name: Create Dockerfile + run: | + cat <<EOF > Dockerfile + FROM scratch + LABEL org.opencontainers.image.description="A collection of experimental Leap binary packages" + COPY *.deb / + EOF + - name: Login to ghcr + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{github.repository_owner}} + password: ${{github.token}} + - name: Build and push experimental-binaries + uses: docker/build-push-action@v3 + with: + push: true + tags: ghcr.io/${{github.repository_owner}}/experimental-binaries:${{github.ref_name}} + context: . diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 9b0c72fe4f..378b0a56fd 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -614,8 +614,6 @@ struct controller_impl { ilog( "Snapshot loaded, lib: ${lib}", ("lib", head->block_num) ); init(std::move(check_shutdown)); - if (conf.revert_to_private_mode) - db.revert_to_private_mode(); auto snapshot_load_time = (fc::time_point::now() - snapshot_load_start_time).to_seconds(); ilog( "Finished initialization from snapshot (snapshot load time was ${t}s)", ("t", snapshot_load_time) ); } catch (boost::interprocess::bad_alloc& e) { diff --git a/libraries/chain/include/eosio/chain/controller.hpp b/libraries/chain/include/eosio/chain/controller.hpp index 2f019a7205..3189209502 100644 --- a/libraries/chain/include/eosio/chain/controller.hpp +++ b/libraries/chain/include/eosio/chain/controller.hpp @@ -81,7 +81,6 @@ namespace eosio { namespace chain { bool disable_replay_opts = false; bool contracts_console = false; bool allow_ram_billing_in_notify = false; - bool revert_to_private_mode = false; uint32_t maximum_variable_signature_length = chain::config::default_max_variable_signature_length; bool disable_all_subjective_mitigations = false; //< for developer & testing purposes, can be configured using `disable-all-subjective-mitigations` when `EOSIO_DEVELOPER` build option is provided uint32_t terminate_at_block = 0; diff --git a/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/eos-vm-oc.h b/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/eos-vm-oc.h index 28a8bd962e..03efd64d1e 100644 --- a/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/eos-vm-oc.h +++ b/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/eos-vm-oc.h @@ -7,7 +7,9 @@ #ifdef __cplusplus #include #include -namespace eosio { namespace chain {class apply_context;}} +namespace eosio::chain { + class apply_context; +} #endif struct eos_vm_oc_control_block { @@ -38,3 +40,9 @@ struct eos_vm_oc_control_block { int64_t max_linear_memory_pages; void* globals; }; + +#ifdef __cplusplus +namespace eosio::chain::eosvmoc { + using control_block = eos_vm_oc_control_block; +} +#endif \ No newline at end of file diff --git a/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/eos-vm-oc.hpp b/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/eos-vm-oc.hpp index 6c8d6f9fb9..95583457bd 100644 --- a/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/eos-vm-oc.hpp +++ b/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/eos-vm-oc.hpp @@ -12,13 +12,7 @@ #include #include -namespace eosio { namespace chain { - -class apply_context; - -namespace 
eosvmoc { - -using control_block = eos_vm_oc_control_block; +namespace eosio::chain::eosvmoc { struct no_offset{}; struct code_offset { @@ -52,7 +46,7 @@ enum eosvmoc_exitcode : int { static constexpr uint8_t current_codegen_version = 1; -}}} +} FC_REFLECT(eosio::chain::eosvmoc::no_offset, ); FC_REFLECT(eosio::chain::eosvmoc::code_offset, (offset)); diff --git a/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/memory.hpp b/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/memory.hpp index 2197878d30..6819b58d74 100644 --- a/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/memory.hpp +++ b/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/memory.hpp @@ -1,14 +1,15 @@ #pragma once +#include #include -#include +#include #include #include #include #include -namespace eosio { namespace chain { namespace eosvmoc { +namespace eosio::chain::eosvmoc { class memory { static constexpr uint64_t intrinsic_count = intrinsic_table_size(); @@ -66,7+67,7 @@ class memory { uint8_t* fullpage_base; }; -}}} +} #define OFFSET_OF_CONTROL_BLOCK_MEMBER(M) (-(int)eosio::chain::eosvmoc::memory::cb_offset + (int)offsetof(eosio::chain::eosvmoc::control_block, M)) #define OFFSET_OF_FIRST_INTRINSIC ((int)-eosio::chain::eosvmoc::memory::first_intrinsic_offset) diff --git a/libraries/chain/webassembly/runtimes/eos-vm-oc/compile_monitor.cpp b/libraries/chain/webassembly/runtimes/eos-vm-oc/compile_monitor.cpp index d74fe26574..01ae7ecaba 100644 --- a/libraries/chain/webassembly/runtimes/eos-vm-oc/compile_monitor.cpp +++ b/libraries/chain/webassembly/runtimes/eos-vm-oc/compile_monitor.cpp @@ -114,7 +114,13 @@ struct compile_monitor_session { void read_message_from_compile_task(std::list>::iterator current_compile_it) { auto& [code, socket] = *current_compile_it; socket.async_wait(local::datagram_protocol::socket::wait_read, [this, current_compile_it](auto ec) { - //at this point we only expect 1 of 2 things to happen: we either get a reply (success), or we get no reply (failure) + //at this point we generally expect 1 of 2 things to happen: we either get a reply (success), or we get an error reading from the + // socket (failure). But there is also a third possibility that this compile_monitor_session is being destroyed and thus the + // socket is being destroyed by way of current_compiles being destroyed. Since this is an async_wait() and not an async_read(), + // for now just consider any error as being due to cancellation at dtor time and completely bail out (there aren't many other + // potential errors for an async_wait) + if(ec) + return; auto& [code, socket] = *current_compile_it; auto [success, message, fds] = read_message_with_fds(socket); diff --git a/libraries/chainbase b/libraries/chainbase index 13c9c35e39..7615ddab28 160000 --- a/libraries/chainbase +++ b/libraries/chainbase @@ -1 +1 @@ -Subproject commit 13c9c35e393f1739c053ff7a03edb4d9df30990d +Subproject commit 7615ddab287e06fd31f800e66fe39b3a19320ec8 diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp index aed684572e..52666f789c 100644 --- a/plugins/chain_plugin/chain_plugin.cpp +++ b/plugins/chain_plugin/chain_plugin.cpp @@ -931,13 +931,6 @@ void chain_plugin_impl::plugin_initialize(const variables_map& options) { chain_config->db_map_mode = options.at("database-map-mode").as(); - // when loading a snapshot, all the state will be modified, so temporarily use the `mapped` mode instead - // of `mapped_private` to lower memory requirements. 
- if (snapshot_path && chain_config->db_map_mode == pinnable_mapped_file::mapped_private) { - chain_config->db_map_mode = pinnable_mapped_file::mapped; - chain_config->revert_to_private_mode = true; // revert to `mapped_private` mode after loading snapshot. - } - #ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED if( options.count("eos-vm-oc-cache-size-mb") ) chain_config->eosvmoc_config.cache_size = options.at( "eos-vm-oc-cache-size-mb" ).as() * 1024u * 1024u; diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 8008025f73..ed27ad8ab1 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -861,7 +861,7 @@ namespace eosio { size_t get_bytes_sent() const { return bytes_sent.load(); } std::chrono::nanoseconds get_last_bytes_sent() const { return last_bytes_sent.load(); } size_t get_block_sync_bytes_received() const { return block_sync_bytes_received.load(); } - size_t get_block_sync_bytes_sent() const { return block_sync_bytes_sent.load(); } + size_t get_block_sync_bytes_sent() const { return block_sync_total_bytes_sent.load(); } bool get_block_sync_throttling() const { return block_sync_throttling.load(); } boost::asio::ip::port_type get_remote_endpoint_port() const { return remote_endpoint_port.load(); } void set_heartbeat_timeout(std::chrono::milliseconds msec) { @@ -900,7 +900,9 @@ namespace eosio { std::atomic last_bytes_received{0ns}; std::atomic bytes_sent{0}; std::atomic block_sync_bytes_received{0}; - std::atomic block_sync_bytes_sent{0}; + std::atomic block_sync_total_bytes_sent{0}; + std::chrono::nanoseconds block_sync_send_start{0ns}; // start of enqueue blocks + size_t block_sync_frame_bytes_sent{0}; // bytes sent in this set of enqueue blocks std::atomic block_sync_throttling{false}; std::atomic last_bytes_sent{0ns}; std::atomic remote_endpoint_port{0}; @@ -1475,6 +1477,9 @@ namespace eosio { latest_msg_time = std::chrono::system_clock::time_point::min(); latest_blk_time = std::chrono::system_clock::time_point::min(); set_state(connection_state::closed); + block_sync_send_start = 0ns; + block_sync_frame_bytes_sent = 0; + block_sync_throttling = false; if( reconnect && !shutdown ) { my_impl->connections.start_conn_timer( std::chrono::milliseconds( 100 ), @@ -1755,25 +1760,38 @@ namespace eosio { } FC_LOG_AND_DROP(); if( sb ) { // Skip transmitting block this loop if threshold exceeded - if( block_sync_rate_limit > 0 && peer_syncing_from_us ) { - auto elapsed = std::chrono::duration_cast(get_time() - connection_start_time); - auto current_rate = double(block_sync_bytes_sent) / elapsed.count(); - if( current_rate >= block_sync_rate_limit ) { + if (block_sync_send_start == 0ns) { // start of enqueue blocks + block_sync_send_start = get_time(); + block_sync_frame_bytes_sent = 0; + } + if( block_sync_rate_limit > 0 && block_sync_frame_bytes_sent > 0 && peer_syncing_from_us ) { + auto now = get_time(); + auto elapsed_us = std::chrono::duration_cast(now - block_sync_send_start); + double current_rate_sec = (double(block_sync_frame_bytes_sent) / elapsed_us.count()) * 1000000; // convert from bytes/us => bytes/sec + peer_dlog(this, "start enqueue block time ${st}, now ${t}, elapsed ${e}, rate ${r}, limit ${l}", + ("st", block_sync_send_start.count())("t", now.count())("e", elapsed_us.count())("r", current_rate_sec)("l", block_sync_rate_limit)); + if( current_rate_sec >= block_sync_rate_limit ) { block_sync_throttling = true; peer_dlog( this, "throttling block sync to peer ${host}:${port}", ("host", log_remote_endpoint_ip)("port", 
log_remote_endpoint_port)); return false; } } block_sync_throttling = false; - block_sync_bytes_sent += enqueue_block( sb, true ); + auto sent = enqueue_block( sb, true ); + block_sync_total_bytes_sent += sent; + block_sync_frame_bytes_sent += sent; ++peer_requested->last; if(num == peer_requested->end_block) { peer_requested.reset(); + block_sync_send_start = 0ns; + block_sync_frame_bytes_sent = 0; peer_dlog( this, "completing enqueue_sync_block ${num}", ("num", num) ); } } else { peer_ilog( this, "enqueue sync, unable to fetch block ${num}, sending benign_other go away", ("num", num) ); peer_requested.reset(); // unable to provide requested blocks + block_sync_send_start = 0ns; + block_sync_frame_bytes_sent = 0; no_retry = benign_other; enqueue( go_away_message( benign_other ) ); } diff --git a/plugins/net_plugin/tests/rate_limit_parse_unittest.cpp b/plugins/net_plugin/tests/rate_limit_parse_unittest.cpp index 01c84e8a67..efd0cc36b7 100644 --- a/plugins/net_plugin/tests/rate_limit_parse_unittest.cpp +++ b/plugins/net_plugin/tests/rate_limit_parse_unittest.cpp @@ -20,25 +20,25 @@ BOOST_AUTO_TEST_CASE(test_parse_rate_limit) { size_t which = 0; auto [listen_addr, block_sync_rate_limit] = plugin_impl.parse_listen_address(p2p_addresses[which++]); BOOST_CHECK_EQUAL(listen_addr, "0.0.0.0:9876"); - BOOST_CHECK_EQUAL(block_sync_rate_limit, 0); + BOOST_CHECK_EQUAL(block_sync_rate_limit, 0u); std::tie(listen_addr, block_sync_rate_limit) = plugin_impl.parse_listen_address(p2p_addresses[which++]); BOOST_CHECK_EQUAL(listen_addr, "0.0.0.0:9776"); - BOOST_CHECK_EQUAL(block_sync_rate_limit, 0); + BOOST_CHECK_EQUAL(block_sync_rate_limit, 0u); std::tie(listen_addr, block_sync_rate_limit) = plugin_impl.parse_listen_address(p2p_addresses[which++]); BOOST_CHECK_EQUAL(listen_addr, "0.0.0.0:9877"); - BOOST_CHECK_EQUAL(block_sync_rate_limit, 640000); + BOOST_CHECK_EQUAL(block_sync_rate_limit, 640000u); std::tie(listen_addr, block_sync_rate_limit) = plugin_impl.parse_listen_address(p2p_addresses[which++]); BOOST_CHECK_EQUAL(listen_addr, "192.168.0.1:9878"); - BOOST_CHECK_EQUAL(block_sync_rate_limit, 20971520); + BOOST_CHECK_EQUAL(block_sync_rate_limit, 20971520u); std::tie(listen_addr, block_sync_rate_limit) = plugin_impl.parse_listen_address(p2p_addresses[which++]); BOOST_CHECK_EQUAL(listen_addr, "localhost:9879"); - BOOST_CHECK_EQUAL(block_sync_rate_limit, 500); + BOOST_CHECK_EQUAL(block_sync_rate_limit, 500u); std::tie(listen_addr, block_sync_rate_limit) = plugin_impl.parse_listen_address(p2p_addresses[which++]); BOOST_CHECK_EQUAL(listen_addr, "[2001:db8:85a3:8d3:1319:8a2e:370:7348]:9876"); - BOOST_CHECK_EQUAL(block_sync_rate_limit, 250000); + BOOST_CHECK_EQUAL(block_sync_rate_limit, 250000u); std::tie(listen_addr, block_sync_rate_limit) = plugin_impl.parse_listen_address(p2p_addresses[which++]); BOOST_CHECK_EQUAL(listen_addr, "[::1]:9876"); - BOOST_CHECK_EQUAL(block_sync_rate_limit, 250000); + BOOST_CHECK_EQUAL(block_sync_rate_limit, 250000u); BOOST_CHECK_EXCEPTION(plugin_impl.parse_listen_address(p2p_addresses[which++]), eosio::chain::plugin_config_exception, [](const eosio::chain::plugin_config_exception& e) {return std::strstr(e.top_message().c_str(), "IPv6 addresses must be enclosed in square brackets");}); diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index c8411f07f1..c64d9493e3 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -396,7 +396,7 @@ class producer_plugin_impl : public 
std::enable_shared_from_thisactive_schedule.producers, - _producer_watermarks); - if (wake_time) - _pending_block_deadline = std::min(*wake_time, _pending_block_deadline); } const auto& preprocess_deadline = _pending_block_deadline; diff --git a/plugins/resource_monitor_plugin/test/test_resmon_plugin.cpp b/plugins/resource_monitor_plugin/test/test_resmon_plugin.cpp index 9e2aafbe27..2d865d0b25 100644 --- a/plugins/resource_monitor_plugin/test/test_resmon_plugin.cpp +++ b/plugins/resource_monitor_plugin/test/test_resmon_plugin.cpp @@ -144,29 +144,31 @@ BOOST_AUTO_TEST_SUITE(resmon_plugin_tests) BOOST_FIXTURE_TEST_CASE(startupNormal, resmon_fixture) { - BOOST_REQUIRE_NO_THROW( plugin_startup({"/tmp"})); + // do not use native "/tmp", as subdirectories in /tmp on test machine + // can be removed during a test run, causing file_space_handler::add_file_system + // to assert when doing get_stat on a removed directory + fc::temp_directory temp_dir; + BOOST_REQUIRE_NO_THROW(plugin_startup({temp_dir.path()})); } BOOST_FIXTURE_TEST_CASE(startupDuplicateDirs, resmon_fixture) { - BOOST_REQUIRE_NO_THROW( plugin_startup({"/tmp", "/tmp"})); + fc::temp_directory temp_dir; + BOOST_REQUIRE_NO_THROW(plugin_startup({temp_dir.path(), temp_dir.path()})); } BOOST_FIXTURE_TEST_CASE(startupMultDirs, resmon_fixture) { - // Under "/" are multiple file systems - BOOST_REQUIRE_NO_THROW( plugin_startup({"/", "/tmp"})); + fc::temp_directory temp_dir_1; + fc::temp_directory temp_dir_2; + BOOST_REQUIRE_NO_THROW(plugin_startup({temp_dir_1.path(), temp_dir_2.path()})); } BOOST_FIXTURE_TEST_CASE(startupNoExistingDirs, resmon_fixture) { - // "hsdfgd983" a random file and not existing - BOOST_REQUIRE_THROW( plugin_startup({"/tmp", "hsdfgd983"}), chain::plugin_config_exception); - } - - BOOST_FIXTURE_TEST_CASE(startupLongRun, resmon_fixture) - { - BOOST_REQUIRE_NO_THROW( plugin_startup({"/tmp"}, 5)); + fc::temp_directory temp_dir; + // temp_dir/hsdfgd983 does not exist in a just created temp directory + BOOST_REQUIRE_THROW(plugin_startup({temp_dir.path(), temp_dir.path() / "hsdfgd983"}), chain::plugin_config_exception); } BOOST_FIXTURE_TEST_CASE(warningIntervalTooBig, resmon_fixture) diff --git a/plugins/state_history_plugin/tests/session_test.cpp b/plugins/state_history_plugin/tests/session_test.cpp index 63b260040c..e1bc439ef2 100644 --- a/plugins/state_history_plugin/tests/session_test.cpp +++ b/plugins/state_history_plugin/tests/session_test.cpp @@ -485,7 +485,7 @@ BOOST_FIXTURE_TEST_CASE(test_split_log, state_history_test_fixture) { eosio::state_history::state_result result; // we should get 1023 consecutive block result eosio::chain::block_id_type prev_id; - for (int i = 0; i < head; ++i) { + for (uint32_t i = 0; i < head; ++i) { receive_result(result); BOOST_REQUIRE(std::holds_alternative(result)); auto r = std::get(result); diff --git a/tests/p2p_sync_throttle_test.py b/tests/p2p_sync_throttle_test.py index 9205b81086..421d411243 100755 --- a/tests/p2p_sync_throttle_test.py +++ b/tests/p2p_sync_throttle_test.py @@ -24,14 +24,14 @@ appArgs.add(flag='--plugin',action='append',type=str,help='Run nodes with additional plugins') appArgs.add(flag='--connection-cleanup-period',type=int,help='Interval in whole seconds to run the connection reaper and metric collection') -args=TestHelper.parse_args({"-p","-d","--keep-logs","--prod-count" +args=TestHelper.parse_args({"-d","--keep-logs" ,"--dump-error-details","-v","--leave-running" ,"--unshared"}, applicationSpecificArgs=appArgs) -pnodes=args.p +pnodes=1 delay=args.d debug=args.v 
-prod_count = args.prod_count +prod_count = 2 total_nodes=4 dumpErrorDetails=args.dump_error_details @@ -106,10 +106,11 @@ def extractPrometheusMetric(connID: str, metric: str, text: str): throttlingNode = cluster.unstartedNodes[0] i = throttlingNode.cmd.index('--p2p-listen-endpoint') throttleListenAddr = throttlingNode.cmd[i+1] - # Using 4000 bytes per second to allow syncing of ~250 transaction blocks resulting from - # the trx generators in a reasonable amount of time, while still being able to capture + # Using 40 Kilobytes per second to allow syncing of ~250 transaction blocks at ~175 bytes per transaction + # (250*175=43750 per block or 87500 per second) + # resulting from the trx generators in a reasonable amount of time, while still being able to capture # throttling state within the Prometheus update window (3 seconds in this test). - throttlingNode.cmd[i+1] = throttlingNode.cmd[i+1] + ':4000B/s' + throttlingNode.cmd[i+1] = throttlingNode.cmd[i+1] + ':40KB/s' throttleListenIP, throttleListenPort = throttleListenAddr.split(':') throttlingNode.cmd.append('--p2p-listen-endpoint') throttlingNode.cmd.append(f'{throttleListenIP}:{int(throttleListenPort)+100}:1TB/s') @@ -213,7 +214,7 @@ def extractPrometheusMetric(connID: str, metric: str, text: str): if throttledState: wasThrottled = True break - assert throttledNode.waitForBlock(endLargeBlocksHeadBlock, timeout=30), f'Wait for block {endLargeBlocksHeadBlock} on sync node timed out' + assert throttledNode.waitForBlock(endLargeBlocksHeadBlock, timeout=90), f'Wait for block {endLargeBlocksHeadBlock} on sync node timed out' endThrottledSync = time.time() response = throttledNode.processUrllibRequest('prometheus', 'metrics', exitOnError=True, returnType=ReturnType.raw, printReturnLimit=16).decode() Print('Throttled Node End State') diff --git a/tests/test_read_only_trx.cpp b/tests/test_read_only_trx.cpp index 27a6c6be1e..56b684da86 100644 --- a/tests/test_read_only_trx.cpp +++ b/tests/test_read_only_trx.cpp @@ -106,7 +106,16 @@ void test_trxs_common(std::vector& specific_args, bool test_disable std::thread app_thread( [&]() { try { fc::logger::get(DEFAULT_LOGGER).set_log_level(fc::log_level::debug); - std::vector argv = {"test", "--data-dir", temp_dir_str.c_str(), "--config-dir", temp_dir_str.c_str()}; + std::vector argv = { + "test", // dummy executable name + "-p", "eosio", "-e", // actual arguments follow + "--data-dir", temp_dir_str.c_str(), + "--config-dir", temp_dir_str.c_str(), + "--max-transaction-time=100", + "--abi-serializer-max-time-ms=999", + "--read-only-write-window-time-us=100000", + "--read-only-read-window-time-us=400000" + }; argv.insert(argv.end(), specific_args.begin(), specific_args.end()); app->initialize(argv.size(), (char**)&argv[0]); app->find_plugin()->chain(); @@ -118,6 +127,7 @@ void test_trxs_common(std::vector& specific_args, bool test_disable BOOST_CHECK(!"app threw exception see logged error"); } ); fc::scoped_exit> on_except = [&](){ + app->quit(); if (app_thread.joinable()) app_thread.join(); }; @@ -166,8 +176,6 @@ void test_trxs_common(std::vector& specific_args, bool test_disable while ( (next_calls < num_pushes || num_get_account_calls < num_pushes) && fc::time_point::now() < hard_deadline ){ std::this_thread::sleep_for( 100ms ); } - - app->quit(); } BOOST_CHECK_EQUAL( trace_with_except, 0u ); // should not have any traces with except in it @@ -180,62 +188,39 @@ void test_trxs_common(std::vector& specific_args, bool test_disable // test read-only trxs on 1 thread (with --read-only-threads) 
BOOST_AUTO_TEST_CASE(with_1_read_only_threads) { - std::vector specific_args = { "-p", "eosio", "-e", - "--read-only-threads=1", - "--max-transaction-time=10", - "--abi-serializer-max-time-ms=999", - "--read-only-write-window-time-us=100000", - "--read-only-read-window-time-us=40000" }; + std::vector specific_args = { "--read-only-threads=1" }; test_trxs_common(specific_args); } // test read-only trxs on 3 threads (with --read-only-threads) BOOST_AUTO_TEST_CASE(with_3_read_only_threads) { - std::vector specific_args = { "-p", "eosio", "-e", - "--read-only-threads=3", - "--max-transaction-time=10", - "--abi-serializer-max-time-ms=999", - "--read-only-write-window-time-us=100000", - "--read-only-read-window-time-us=40000" }; + std::vector specific_args = { "--read-only-threads=3" }; test_trxs_common(specific_args); } // test read-only trxs on 3 threads (with --read-only-threads) BOOST_AUTO_TEST_CASE(with_3_read_only_threads_no_tierup) { - std::vector specific_args = { "-p", "eosio", "-e", - "--read-only-threads=3", + std::vector specific_args = { "--read-only-threads=3", #ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED "--eos-vm-oc-enable=none", #endif - "--max-transaction-time=10", - "--abi-serializer-max-time-ms=999", - "--read-only-write-window-time-us=100000", - "--read-only-read-window-time-us=40000" }; + }; test_trxs_common(specific_args, true); } // test read-only trxs on 8 separate threads (with --read-only-threads) BOOST_AUTO_TEST_CASE(with_8_read_only_threads) { - std::vector specific_args = { "-p", "eosio", "-e", - "--read-only-threads=8", - "--max-transaction-time=10", - "--abi-serializer-max-time-ms=999", - "--read-only-write-window-time-us=10000", - "--read-only-read-window-time-us=400000" }; + std::vector specific_args = { "--read-only-threads=8" }; test_trxs_common(specific_args); } // test read-only trxs on 8 separate threads (with --read-only-threads) BOOST_AUTO_TEST_CASE(with_8_read_only_threads_no_tierup) { - std::vector specific_args = { "-p", "eosio", "-e", - "--read-only-threads=8", + std::vector specific_args = { "--read-only-threads=8", #ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED "--eos-vm-oc-enable=none", #endif - "--max-transaction-time=10", - "--abi-serializer-max-time-ms=999", - "--read-only-write-window-time-us=10000", - "--read-only-read-window-time-us=400000" }; + }; test_trxs_common(specific_args, true); } diff --git a/tests/test_snapshot_scheduler.cpp b/tests/test_snapshot_scheduler.cpp index ed1f5286d7..1a560ca079 100644 --- a/tests/test_snapshot_scheduler.cpp +++ b/tests/test_snapshot_scheduler.cpp @@ -60,6 +60,9 @@ BOOST_AUTO_TEST_CASE(snapshot_scheduler_test) { std::promise> plugin_promise; std::future> plugin_fut = plugin_promise.get_future(); + std::promise at_block_20_promise; + std::future at_block_20_fut = at_block_20_promise.get_future(); + std::thread app_thread([&]() { try { fc::logger::get(DEFAULT_LOGGER).set_log_level(fc::log_level::debug); @@ -68,8 +71,48 @@ BOOST_AUTO_TEST_CASE(snapshot_scheduler_test) { "-p", "eosio", "-e"}; app->initialize(argv.size(), (char**) &argv[0]); app->startup(); - plugin_promise.set_value( - {app->find_plugin(), app->find_plugin()}); + + producer_plugin* prod_plug = app->find_plugin(); + chain_plugin* chain_plug = app->find_plugin(); + plugin_promise.set_value({prod_plug, chain_plug}); + + auto bs = chain_plug->chain().block_start.connect([&prod_plug, &at_block_20_promise](uint32_t bn) { + if(bn == 20u) + at_block_20_promise.set_value(); + // catching pending snapshot + if 
(!prod_plug->get_snapshot_requests().snapshot_requests.empty()) { + const auto& snapshot_requests = prod_plug->get_snapshot_requests().snapshot_requests; + + auto validate_snapshot_request = [&](uint32_t sid, uint32_t block_num, uint32_t spacing = 0, bool fuzzy_start = false) { + auto it = find_if(snapshot_requests.begin(), snapshot_requests.end(), [sid](const snapshot_scheduler::snapshot_schedule_information& obj) {return obj.snapshot_request_id == sid;}); + if (it != snapshot_requests.end()) { + auto& pending = it->pending_snapshots; + if (pending.size()==1u) { + // pending snapshot block number + auto pbn = pending.begin()->head_block_num; + + // first pending snapshot + auto ps_start = (spacing != 0) ? (spacing + (pbn%spacing)) : pbn; + + if (!fuzzy_start) { + BOOST_CHECK_EQUAL(block_num, ps_start); + } + else { + int diff = block_num - ps_start; + BOOST_CHECK(std::abs(diff) <= 5); // accept +/- 5 blocks if start block not specified + } + } + return true; + } + return false; + }; + + BOOST_REQUIRE(validate_snapshot_request(0, 9, 8)); // snapshot #0 should have pending snapshot at block #9 (8 + 1) and it never expires + BOOST_REQUIRE(validate_snapshot_request(4, 12, 10, true)); // snapshot #4 should have pending snapshot at block # at the moment of scheduling (2) plus 10 = 12 + BOOST_REQUIRE(validate_snapshot_request(5, 10, 10)); // snapshot #5 should have pending snapshot at block #10, #20 etc + } + }); + app->exec(); return; } FC_LOG_AND_DROP() @@ -77,45 +120,6 @@ BOOST_AUTO_TEST_CASE(snapshot_scheduler_test) { }); auto [prod_plug, chain_plug] = plugin_fut.get(); - std::deque all_blocks; - std::promise empty_blocks_promise; - std::future empty_blocks_fut = empty_blocks_promise.get_future(); - auto pp = app->find_plugin(); - - auto bs = chain_plug->chain().block_start.connect([&pp](uint32_t bn) { - // catching pending snapshot - if (!pp->get_snapshot_requests().snapshot_requests.empty()) { - const auto& snapshot_requests = pp->get_snapshot_requests().snapshot_requests; - - auto validate_snapshot_request = [&](uint32_t sid, uint32_t block_num, uint32_t spacing = 0, bool fuzzy_start = false) { - auto it = find_if(snapshot_requests.begin(), snapshot_requests.end(), [sid](const snapshot_scheduler::snapshot_schedule_information& obj) {return obj.snapshot_request_id == sid;}); - if (it != snapshot_requests.end()) { - auto& pending = it->pending_snapshots; - if (pending.size()==1u) { - // pending snapshot block number - auto pbn = pending.begin()->head_block_num; - - // first pending snapshot - auto ps_start = (spacing != 0) ? 
(spacing + (pbn%spacing)) : pbn; - - if (!fuzzy_start) { - BOOST_CHECK_EQUAL(block_num, ps_start); - } - else { - int diff = block_num - ps_start; - BOOST_CHECK(std::abs(diff) <= 5); // accept +/- 5 blocks if start block not specified - } - } - return true; - } - return false; - }; - - BOOST_REQUIRE(validate_snapshot_request(0, 9, 8)); // snapshot #0 should have pending snapshot at block #9 (8 + 1) and it never expires - BOOST_REQUIRE(validate_snapshot_request(4, 12, 10, true)); // snapshot #4 should have pending snapshot at block # at the moment of scheduling (2) plus 10 = 12 - BOOST_REQUIRE(validate_snapshot_request(5, 10, 10)); // snapshot #5 should have pending snapshot at block #10, #20 etc - } - }); snapshot_request_params sri1 = {.block_spacing = 8, .start_block_num = 1, .end_block_num = 300000, .snapshot_description = "Example of recurring snapshot 1"}; snapshot_request_params sri2 = {.block_spacing = 5000, .start_block_num = 100000, .end_block_num = 300000, .snapshot_description = "Example of recurring snapshot 2 that wont happen in test"}; @@ -124,31 +128,35 @@ BOOST_AUTO_TEST_CASE(snapshot_scheduler_test) { snapshot_request_params sri5 = {.block_spacing = 10, .snapshot_description = "Recurring every 10 blocks snapshot starting now"}; snapshot_request_params sri6 = {.block_spacing = 10, .start_block_num = 0, .snapshot_description = "Recurring every 10 blocks snapshot starting from 0"}; - pp->schedule_snapshot(sri1); - pp->schedule_snapshot(sri2); - pp->schedule_snapshot(sri3); - pp->schedule_snapshot(sri4); - pp->schedule_snapshot(sri5); - pp->schedule_snapshot(sri6); + app->post(appbase::priority::medium_low, [&]() { + prod_plug->schedule_snapshot(sri1); + prod_plug->schedule_snapshot(sri2); + prod_plug->schedule_snapshot(sri3); + prod_plug->schedule_snapshot(sri4); + prod_plug->schedule_snapshot(sri5); + prod_plug->schedule_snapshot(sri6); - // all six snapshot requests should be present now - BOOST_CHECK_EQUAL(6u, pp->get_snapshot_requests().snapshot_requests.size()); + // all six snapshot requests should be present now + BOOST_CHECK_EQUAL(6u, prod_plug->get_snapshot_requests().snapshot_requests.size()); + }); - empty_blocks_fut.wait_for(std::chrono::seconds(10)); + at_block_20_fut.get(); - // two of the snapshots are done here and requests, corresponding to them should be deleted - BOOST_CHECK_EQUAL(4u, pp->get_snapshot_requests().snapshot_requests.size()); + app->post(appbase::priority::medium_low, [&]() { + // two of the snapshots are done here and requests, corresponding to them should be deleted + BOOST_CHECK_EQUAL(4u, prod_plug->get_snapshot_requests().snapshot_requests.size()); - // check whether no pending snapshots present for a snapshot with id 0 - const auto& snapshot_requests = pp->get_snapshot_requests().snapshot_requests; - auto it = find_if(snapshot_requests.begin(), snapshot_requests.end(),[](const snapshot_scheduler::snapshot_schedule_information& obj) {return obj.snapshot_request_id == 0;}); + // check whether no pending snapshots present for a snapshot with id 0 + const auto& snapshot_requests = prod_plug->get_snapshot_requests().snapshot_requests; + auto it = find_if(snapshot_requests.begin(), snapshot_requests.end(),[](const snapshot_scheduler::snapshot_schedule_information& obj) {return obj.snapshot_request_id == 0;}); - // snapshot request with id = 0 should be found and should not have any pending snapshots - BOOST_REQUIRE(it != snapshot_requests.end()); - BOOST_CHECK(!it->pending_snapshots.size()); + // snapshot request with id = 0 should be found and 
should not have any pending snapshots + BOOST_REQUIRE(it != snapshot_requests.end()); + BOOST_CHECK(!it->pending_snapshots.size()); - // quit app - app->quit(); + // quit app + app->quit(); + }); app_thread.join(); // let's check whether schedule can be read back after restart diff --git a/tools/reproducible.Dockerfile b/tools/reproducible.Dockerfile index 667913a892..59a19f9d51 100644 --- a/tools/reproducible.Dockerfile +++ b/tools/reproducible.Dockerfile @@ -97,10 +97,12 @@ FROM builder AS build ARG LEAP_BUILD_JOBS -COPY / /src -RUN cmake -S src -B build -DCMAKE_INSTALL_PREFIX=/usr -DCMAKE_BUILD_TYPE=Release -GNinja && \ +# Yuck: This places the source at the same location as leap's CI (build.yaml, build_base.yaml). Unfortunately this location only matches +# when build.yaml etc are being run from a repository named leap. +COPY / /__w/leap/leap +RUN cmake -S /__w/leap/leap -B build -DCMAKE_INSTALL_PREFIX=/usr -DCMAKE_BUILD_TYPE=Release -GNinja && \ cmake --build build -t package -- ${LEAP_BUILD_JOBS:+-j$LEAP_BUILD_JOBS} && \ - src/tools/tweak-deb.sh build/leap_*.deb + /__w/leap/leap/tools/tweak-deb.sh build/leap_*.deb FROM scratch AS exporter COPY --from=build /build/*.deb /build/*.tar.* / diff --git a/tutorials/bios-boot-tutorial/README.md b/tutorials/bios-boot-tutorial/README.md index 036a19401b..428db4b742 100644 --- a/tutorials/bios-boot-tutorial/README.md +++ b/tutorials/bios-boot-tutorial/README.md @@ -7,27 +7,22 @@ The `bios-boot-tutorial.py` script simulates the bios boot sequence. 1. Python 3.x 2. CMake 3. git -4. g++ -5. build-essentials -6. pip3 -7. openssl -8. curl -9. jq -10. psmisc +4. curl +5. libcurl4-gnutls-dev ## Steps -1. Install Leap 3.1 binaries by following the steps provided in the [Leap README](https://github.com/AntelopeIO/leap/tree/release/3.1#software-installation). +1. Install the latest [Leap binaries](https://github.com/AntelopeIO/leap/releases) by following the steps provided in the README. -2. Install CDT 3.0 binaries by following the steps provided in the [CDT README](https://github.com/AntelopeIO/cdt/tree/release/3.0#binary-releases). +2. Install the latest [CDT binaries](https://github.com/AntelopeIO/cdt/releases) by following the steps provided in the README. -3. Compile EOS System Contracts 3.1: +3. Compile the latest [EOS System Contracts](https://github.com/eosnetworkfoundation/eos-system-contracts/releases). Replace `release/*latest*` with the latest release branch. ```bash $ cd ~ -$ git clone https://github.com/eosnetworkfoundation/eos-system-contracts system-contracts-3.1 -$ cd ./system-contracts-3.1/ -$ git checkout release/3.1 +$ git clone https://github.com/eosnetworkfoundation/eos-system-contracts +$ cd ./eos-system-contracts/ +$ git checkout release/*latest* $ mkdir build $ cd ./build $ cmake -DCMAKE_BUILD_TYPE=Release .. @@ -42,8 +37,9 @@ The last command in the previous step printed the contracts directory. Make note 5. Launch the `bios-boot-tutorial.py` script: ```bash +$ pip install numpy $ cd ~ -$ git clone https://github.com/AntelopeIO/leap +$ git clone -b release/*latest* https://github.com/AntelopeIO/leap $ cd ./leap/tutorials/bios-boot-tutorial/ $ python3 bios-boot-tutorial.py --cleos=cleos --nodeos=nodeos --keosd=keosd --contracts-dir="${CONTRACTS_DIRECTORY}" -w -a ```
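
---

A note on the block-sync throttle arithmetic in the `net_plugin.cpp` hunk above: the limiter now measures only the bytes enqueued since the start of the current sync frame (`block_sync_send_start` / `block_sync_frame_bytes_sent`) and converts the observed bytes-per-microsecond figure into bytes per second before comparing it against `block_sync_rate_limit`; per its own `bytes/us => bytes/sec` comment, that conversion requires a factor of 1,000,000. The standalone sketch below illustrates just that calculation under those assumptions — it is not the plugin code, and the `throttle_state`/`should_throttle` names are invented for the example.

```cpp
#include <chrono>
#include <cstdint>
#include <iostream>

// Hypothetical per-frame throttle state, mirroring the idea of
// block_sync_send_start / block_sync_frame_bytes_sent in the diff above.
struct throttle_state {
   std::chrono::nanoseconds frame_start{0}; // 0ns acts as "no frame in progress"
   uint64_t                 frame_bytes_sent = 0;
};

// Returns true when sending should pause: the frame's observed rate in
// bytes/sec meets or exceeds limit_bytes_per_sec (0 disables throttling).
bool should_throttle(throttle_state& st, std::chrono::nanoseconds now, uint64_t limit_bytes_per_sec) {
   using namespace std::chrono;
   if (st.frame_start == nanoseconds{0}) { // start of a new frame
      st.frame_start      = now;
      st.frame_bytes_sent = 0;
   }
   if (limit_bytes_per_sec == 0 || st.frame_bytes_sent == 0)
      return false;
   auto elapsed_us = duration_cast<microseconds>(now - st.frame_start);
   if (elapsed_us.count() <= 0)
      return false;
   // bytes/us -> bytes/sec needs a factor of 1'000'000
   double rate = double(st.frame_bytes_sent) / elapsed_us.count() * 1'000'000;
   return rate >= double(limit_bytes_per_sec);
}

int main() {
   using namespace std::chrono_literals;
   throttle_state st;
   should_throttle(st, 1s, 40'000); // opens the frame at t = 1s
   st.frame_bytes_sent += 50'000;   // pretend a ~50 KB block was enqueued
   // One second later the frame rate is 50'000 B/s, above the 40 KB/s limit:
   std::cout << std::boolalpha << should_throttle(st, 2s, 40'000) << '\n'; // prints true
}
```

The factor matters: with a multiplier of 100,000 the computed rate would be an order of magnitude too low, so a listen endpoint configured as `:40KB/s` (as in `p2p_sync_throttle_test.py`) would not start throttling until the real rate reached roughly 400 KB/s.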