From 0c5ff7e739c7e3fde00aaf0697b2e326c527a9ff Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 19 Oct 2023 08:29:04 -0500 Subject: [PATCH 01/12] GH-1784 Rename cpu-effort-percent to produce-block-offset-ms and change meaning to be over complete round --- .../03_plugins/producer_plugin/index.md | 17 ++----- .../chain/include/eosio/chain/config.hpp | 2 +- .../producer_plugin/block_timing_util.hpp | 14 +++--- .../eosio/producer_plugin/producer_plugin.hpp | 4 +- plugins/producer_plugin/producer_plugin.cpp | 47 ++++++++++--------- .../test/test_block_timing_util.cpp | 22 ++++----- tests/PerformanceHarness/README.md | 26 +++++----- .../performance_test_basic.py | 4 +- tests/p2p_high_latency_test.py | 3 +- 9 files changed, 67 insertions(+), 72 deletions(-) diff --git a/docs/01_nodeos/03_plugins/producer_plugin/index.md b/docs/01_nodeos/03_plugins/producer_plugin/index.md index 533aaf5427..11157f5936 100644 --- a/docs/01_nodeos/03_plugins/producer_plugin/index.md +++ b/docs/01_nodeos/03_plugins/producer_plugin/index.md @@ -72,20 +72,9 @@ Config Options for eosio::producer_plugin: can extend during low usage (only enforced subjectively; use 1000 to not enforce any limit) - --produce-time-offset-us arg (=0) Offset of non last block producing time - in microseconds. Valid range 0 .. - -block_time_interval. - --last-block-time-offset-us arg (=-200000) - Offset of last block producing time in - microseconds. Valid range 0 .. - -block_time_interval. - --cpu-effort-percent arg (=80) Percentage of cpu block production time - used to produce block. Whole number - percentages, e.g. 80 for 80% - --last-block-cpu-effort-percent arg (=80) - Percentage of cpu block production time - used to produce last block. Whole - number percentages, e.g. 80 for 80% + --produce-block-offset-ms arg (=450) The number of milliseconds early the + last block of a production round should + be produced. 
--max-block-cpu-usage-threshold-us arg (=5000) Threshold of CPU block production to consider block full; when within diff --git a/libraries/chain/include/eosio/chain/config.hpp b/libraries/chain/include/eosio/chain/config.hpp index 6d81272944..48a1d4de94 100644 --- a/libraries/chain/include/eosio/chain/config.hpp +++ b/libraries/chain/include/eosio/chain/config.hpp @@ -76,7 +76,7 @@ const static uint32_t default_max_inline_action_size = 512 * 102 const static uint16_t default_max_inline_action_depth = 4; const static uint16_t default_max_auth_depth = 6; const static uint32_t default_sig_cpu_bill_pct = 50 * percent_1; // billable percentage of signature recovery -const static uint32_t default_block_cpu_effort_pct = 90 * percent_1; // percentage of block time used for producing block +const static uint32_t default_produce_block_offset_ms = 450; const static uint16_t default_controller_thread_pool_size = 2; const static uint32_t default_max_variable_signature_length = 16384u; const static uint32_t default_max_action_return_value_size = 256; diff --git a/plugins/producer_plugin/include/eosio/producer_plugin/block_timing_util.hpp b/plugins/producer_plugin/include/eosio/producer_plugin/block_timing_util.hpp index b4e3741874..f4d68061c7 100644 --- a/plugins/producer_plugin/include/eosio/producer_plugin/block_timing_util.hpp +++ b/plugins/producer_plugin/include/eosio/producer_plugin/block_timing_util.hpp @@ -42,19 +42,19 @@ namespace block_timing_util { // In the past, a producer would always start a block `config::block_interval_us` ahead of its block time. However, // it causes the last block in a block production round being released too late for the next producer to have // received it and start producing on schedule. To mitigate the problem, we leave no time gap in block producing. 
For - // example, given block_interval=500 ms and cpu effort=400 ms, assuming the our round start at time point 0; in the + // example, given block_interval=500 ms and cpu effort=400 ms, assuming our round starts at time point 0; in the // past, the block start time points would be at time point -500, 0, 500, 1000, 1500, 2000 .... With this new // approach, the block time points would become -500, -100, 300, 700, 1100 ... - inline fc::time_point production_round_block_start_time(uint32_t cpu_effort_us, chain::block_timestamp_type block_time) { + inline fc::time_point production_round_block_start_time(fc::microseconds cpu_effort_us, chain::block_timestamp_type block_time) { uint32_t block_slot = block_time.slot; uint32_t production_round_start_block_slot = (block_slot / chain::config::producer_repetitions) * chain::config::producer_repetitions; uint32_t production_round_index = block_slot % chain::config::producer_repetitions; return chain::block_timestamp_type(production_round_start_block_slot - 1).to_time_point() + - fc::microseconds(cpu_effort_us * production_round_index); + fc::microseconds(cpu_effort_us.count() * production_round_index); } - inline fc::time_point calculate_producing_block_deadline(uint32_t cpu_effort_us, chain::block_timestamp_type block_time) { + inline fc::time_point calculate_producing_block_deadline(fc::microseconds cpu_effort_us, chain::block_timestamp_type block_time) { auto estimated_deadline = production_round_block_start_time(cpu_effort_us, block_time) + fc::microseconds(cpu_effort_us); auto now = fc::time_point::now(); if (estimated_deadline > now) { @@ -62,8 +62,8 @@ namespace block_timing_util { } else { // This could only happen when the producer stop producing and then comes back alive in the middle of its own // production round. In this case, we just use the hard deadline. 
- const auto hard_deadline = block_time.to_time_point() - fc::microseconds(chain::config::block_interval_us - cpu_effort_us); - return std::min(hard_deadline, now + fc::microseconds(cpu_effort_us)); + const auto hard_deadline = block_time.to_time_point() - fc::microseconds(chain::config::block_interval_us - cpu_effort_us.count()); + return std::min(hard_deadline, now + cpu_effort_us); } } @@ -118,7 +118,7 @@ namespace block_timing_util { // Return the *next* block start time according to its block time slot. // Returns empty optional if no producers are in the active_schedule. // block_num is only used for watermark minimum offset. - inline std::optional calculate_producer_wake_up_time(uint32_t cpu_effort_us, uint32_t block_num, + inline std::optional calculate_producer_wake_up_time(fc::microseconds cpu_effort_us, uint32_t block_num, const chain::block_timestamp_type& ref_block_time, const std::set& producers, const std::vector& active_schedule, diff --git a/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp b/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp index f2c7e914f1..0f38ef35f6 100644 --- a/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp +++ b/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp @@ -17,7 +17,7 @@ class producer_plugin : public appbase::plugin { struct runtime_options { std::optional max_transaction_time; std::optional max_irreversible_block_age; - std::optional cpu_effort_us; + std::optional produce_block_offset_ms; std::optional subjective_cpu_leeway_us; std::optional greylist_limit; }; @@ -196,7 +196,7 @@ class producer_plugin : public appbase::plugin { } //eosio -FC_REFLECT(eosio::producer_plugin::runtime_options, (max_transaction_time)(max_irreversible_block_age)(cpu_effort_us)(subjective_cpu_leeway_us)(greylist_limit)); +FC_REFLECT(eosio::producer_plugin::runtime_options, 
(max_transaction_time)(max_irreversible_block_age)(produce_block_offset_ms)(subjective_cpu_leeway_us)(greylist_limit)); FC_REFLECT(eosio::producer_plugin::greylist_params, (accounts)); FC_REFLECT(eosio::producer_plugin::whitelist_blacklist, (actor_whitelist)(actor_blacklist)(contract_whitelist)(contract_blacklist)(action_blacklist)(key_blacklist) ) FC_REFLECT(eosio::producer_plugin::integrity_hash_information, (head_block_id)(integrity_hash)) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index d574254c73..7fc20de7cc 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -371,7 +371,6 @@ class producer_plugin_impl : public std::enable_shared_from_thischain().get_subjective_cpu_leeway() ? chain_plug->chain().get_subjective_cpu_leeway()->count() : std::optional(), chain_plug->chain().get_greylist_limit()}; @@ -501,14 +500,14 @@ class producer_plugin_impl : public std::enable_shared_from_this _max_transaction_time_ms; // modified by app thread, read by net_plugin thread pool std::atomic _received_block{0}; // modified by net_plugin thread pool fc::microseconds _max_irreversible_block_age_us; - int32_t _cpu_effort_us = 0; + // produce-block-offset is in terms of the complete round, internally use calculated value for each block of round + fc::microseconds _produce_block_cpu_effort_us; fc::time_point _pending_block_deadline; uint32_t _max_block_cpu_usage_threshold_us = 0; uint32_t _max_block_net_usage_threshold_bytes = 0; bool _disable_subjective_p2p_billing = true; bool _disable_subjective_api_billing = true; fc::time_point _irreversible_block_time; - fc::time_point _idle_trx_time{fc::time_point::now()}; std::vector _protocol_features_to_activate; bool _protocol_features_signaled = false; // to mark whether it has been signaled in start_block @@ -613,6 +612,17 @@ class producer_plugin_impl : public std::enable_shared_from_this next); + void 
set_produce_block_offset(uint32_t produce_block_offset_ms) { + EOS_ASSERT(produce_block_offset_ms < (config::producer_repetitions * config::block_interval_ms), plugin_config_exception, + "produce-block-offset-ms ${p} must be [0 - ${max})", ("p", produce_block_offset_ms)("max", config::producer_repetitions * config::block_interval_ms)); + _produce_block_cpu_effort_us = fc::milliseconds( config::block_interval_ms - produce_block_offset_ms / config::producer_repetitions ); + } + + fc::microseconds get_produce_block_offset() const { + return fc::milliseconds( (config::block_interval_ms * config::producer_repetitions) - + ((_produce_block_cpu_effort_us.count()/1000) * config::producer_repetitions) ); + } + void on_block(const block_state_ptr& bsp) { auto& chain = chain_plug->chain(); auto before = _unapplied_transactions.size(); @@ -1033,8 +1043,8 @@ void producer_plugin::set_program_options( "account that can not access to extended CPU/NET virtual resources") ("greylist-limit", boost::program_options::value()->default_value(1000), "Limit (between 1 and 1000) on the multiple that CPU/NET virtual resources can extend during low usage (only enforced subjectively; use 1000 to not enforce any limit)") - ("cpu-effort-percent", bpo::value()->default_value(config::default_block_cpu_effort_pct / config::percent_1), - "Percentage of cpu block production time used to produce block. Whole number percentages, e.g. 
80 for 80%") + ("produce-block-offset-ms", bpo::value()->default_value(config::default_produce_block_offset_ms), + "The number of milliseconds early the last block of a production round should be produced.") ("max-block-cpu-usage-threshold-us", bpo::value()->default_value( 5000 ), "Threshold of CPU block production to consider block full; when within threshold of max-block-cpu-usage block can be produced immediately") ("max-block-net-usage-threshold-bytes", bpo::value()->default_value( 1024 ), @@ -1126,12 +1136,7 @@ void producer_plugin_impl::plugin_initialize(const boost::program_options::varia _account_fails.set_max_failures_per_account(options.at("subjective-account-max-failures").as(), subjective_account_max_failures_window_size); - uint32_t cpu_effort_pct = options.at("cpu-effort-percent").as(); - EOS_ASSERT(cpu_effort_pct >= 0 && cpu_effort_pct <= 100, plugin_config_exception, - "cpu-effort-percent ${pct} must be 0 - 100", ("pct", cpu_effort_pct)); - cpu_effort_pct *= config::percent_1; - - _cpu_effort_us = EOS_PERCENT(config::block_interval_us, cpu_effort_pct); + set_produce_block_offset(options.at("produce-block-offset-ms").as()); _max_block_cpu_usage_threshold_us = options.at("max-block-cpu-usage-threshold-us").as(); EOS_ASSERT(_max_block_cpu_usage_threshold_us < config::block_interval_us, @@ -1441,8 +1446,8 @@ void producer_plugin_impl::update_runtime_options(const producer_plugin::runtime check_speculating = true; } - if (options.cpu_effort_us) { - _cpu_effort_us = *options.cpu_effort_us; + if (options.produce_block_offset_ms) { + set_produce_block_offset(*options.produce_block_offset_ms); } if (check_speculating && in_speculating_mode()) { @@ -1838,10 +1843,10 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { } } - _pending_block_deadline = block_timing_util::calculate_producing_block_deadline(_cpu_effort_us, block_time); + _pending_block_deadline = 
block_timing_util::calculate_producing_block_deadline(_produce_block_cpu_effort_us, block_time); } else if (!_producers.empty()) { // cpu effort percent doesn't matter for the first block of the round, use max (block_interval_us) for cpu effort - auto wake_time = block_timing_util::calculate_producer_wake_up_time(config::block_interval_us, chain.head_block_num(), chain.head_block_time(), + auto wake_time = block_timing_util::calculate_producer_wake_up_time(fc::microseconds(config::block_interval_us), chain.head_block_num(), chain.head_block_time(), _producers, chain.head_block_state()->active_schedule.producers, _producer_watermarks); if (wake_time) @@ -2004,8 +2009,8 @@ bool producer_plugin_impl::remove_expired_trxs(const fc::time_point& deadline) { }); if (exhausted && in_producing_mode()) { - fc_wlog(_log, "Unable to process all expired transactions of the ${n} transactions in the unapplied queue before deadline, " - "Expired ${expired}", ("n", orig_count)("expired", num_expired)); + fc_wlog(_log, "Unable to process all expired transactions of the ${n} transactions in the unapplied queue before deadline ${d}, " + "Expired ${expired}", ("n", orig_count)("d", deadline)("expired", num_expired)); } else { fc_dlog(_log, "Processed ${ex} expired transactions of the ${n} transactions in the unapplied queue.", ("n", orig_count)("ex", num_expired)); } @@ -2473,7 +2478,7 @@ void producer_plugin_impl::schedule_production_loop() { if (!_producers.empty() && !production_disabled_by_policy()) { chain::controller& chain = chain_plug->chain(); fc_dlog(_log, "Waiting till another block is received and scheduling Speculative/Production Change"); - auto wake_time = block_timing_util::calculate_producer_wake_up_time(_cpu_effort_us, chain.head_block_num(), calculate_pending_block_time(), + auto wake_time = block_timing_util::calculate_producer_wake_up_time(_produce_block_cpu_effort_us, chain.head_block_num(), calculate_pending_block_time(), _producers, 
chain.head_block_state()->active_schedule.producers, _producer_watermarks); schedule_delayed_production_loop(weak_from_this(), wake_time); @@ -2492,7 +2497,7 @@ void producer_plugin_impl::schedule_production_loop() { chain::controller& chain = chain_plug->chain(); fc_dlog(_log, "Speculative Block Created; Scheduling Speculative/Production Change"); EOS_ASSERT(chain.is_building_block(), missing_pending_block_state, "speculating without pending_block_state"); - auto wake_time = block_timing_util::calculate_producer_wake_up_time(_cpu_effort_us, chain.pending_block_num(), chain.pending_block_timestamp(), + auto wake_time = block_timing_util::calculate_producer_wake_up_time(_produce_block_cpu_effort_us, chain.pending_block_num(), chain.pending_block_timestamp(), _producers, chain.head_block_state()->active_schedule.producers, _producer_watermarks); schedule_delayed_production_loop(weak_from_this(), wake_time); @@ -2509,7 +2514,7 @@ void producer_plugin_impl::schedule_maybe_produce_block(bool exhausted) { assert(in_producing_mode()); // we succeeded but block may be exhausted static const boost::posix_time::ptime epoch(boost::gregorian::date(1970, 1, 1)); - auto deadline = block_timing_util::calculate_producing_block_deadline(_cpu_effort_us, chain.pending_block_time()); + auto deadline = block_timing_util::calculate_producing_block_deadline(_produce_block_cpu_effort_us, chain.pending_block_time()); if (!exhausted && deadline > fc::time_point::now()) { // ship this block off no later than its deadline diff --git a/plugins/producer_plugin/test/test_block_timing_util.cpp b/plugins/producer_plugin/test/test_block_timing_util.cpp index efb045b477..a3a43452e8 100644 --- a/plugins/producer_plugin/test/test_block_timing_util.cpp +++ b/plugins/producer_plugin/test/test_block_timing_util.cpp @@ -25,7 +25,7 @@ BOOST_AUTO_TEST_CASE(test_production_round_block_start_time) { for (int i = 0; i < eosio::chain::config::producer_repetitions; ++i, expected_start_time = expected_start_time 
+ cpu_effort) { auto block_time = eosio::chain::block_timestamp_type(production_round_1st_block_slot + i); - BOOST_CHECK_EQUAL(eosio::block_timing_util::production_round_block_start_time(cpu_effort_us, block_time), expected_start_time); + BOOST_CHECK_EQUAL(eosio::block_timing_util::production_round_block_start_time(cpu_effort, block_time), expected_start_time); } } @@ -43,7 +43,7 @@ BOOST_AUTO_TEST_CASE(test_calculate_block_deadline) { for (int i = 0; i < eosio::chain::config::producer_repetitions; ++i) { auto block_time = eosio::chain::block_timestamp_type(production_round_1st_block_slot + i); auto expected_deadline = block_time.to_time_point() - fc::milliseconds((i + 1) * 100); - BOOST_CHECK_EQUAL(calculate_producing_block_deadline(cpu_effort_us, block_time), + BOOST_CHECK_EQUAL(calculate_producing_block_deadline(cpu_effort, block_time), expected_deadline); fc::mock_time_traits::set_now(expected_deadline); } @@ -56,18 +56,18 @@ BOOST_AUTO_TEST_CASE(test_calculate_block_deadline) { auto second_block_time = eosio::chain::block_timestamp_type(production_round_1st_block_slot + 1); fc::mock_time_traits::set_now(second_block_time.to_time_point() - fc::milliseconds(200)); auto second_block_hard_deadline = second_block_time.to_time_point() - fc::milliseconds(100); - BOOST_CHECK_EQUAL(calculate_producing_block_deadline(cpu_effort_us, second_block_time), + BOOST_CHECK_EQUAL(calculate_producing_block_deadline(cpu_effort, second_block_time), second_block_hard_deadline); // use previous deadline as now fc::mock_time_traits::set_now(second_block_hard_deadline); auto third_block_time = eosio::chain::block_timestamp_type(production_round_1st_block_slot + 2); - BOOST_CHECK_EQUAL(calculate_producing_block_deadline(cpu_effort_us, third_block_time), + BOOST_CHECK_EQUAL(calculate_producing_block_deadline(cpu_effort, third_block_time), third_block_time.to_time_point() - fc::milliseconds(300)); // use previous deadline as now 
fc::mock_time_traits::set_now(third_block_time.to_time_point() - fc::milliseconds(300)); auto forth_block_time = eosio::chain::block_timestamp_type(production_round_1st_block_slot + 3); - BOOST_CHECK_EQUAL(calculate_producing_block_deadline(cpu_effort_us, forth_block_time), + BOOST_CHECK_EQUAL(calculate_producing_block_deadline(cpu_effort, forth_block_time), forth_block_time.to_time_point() - fc::milliseconds(400)); /////////////////////////////////////////////////////////////////////////////////////////////////// @@ -75,21 +75,21 @@ BOOST_AUTO_TEST_CASE(test_calculate_block_deadline) { auto seventh_block_time = eosio::chain::block_timestamp_type(production_round_1st_block_slot + 6); fc::mock_time_traits::set_now(seventh_block_time.to_time_point() - fc::milliseconds(500)); - BOOST_CHECK_EQUAL(calculate_producing_block_deadline(cpu_effort_us, seventh_block_time), + BOOST_CHECK_EQUAL(calculate_producing_block_deadline(cpu_effort, seventh_block_time), seventh_block_time.to_time_point() - fc::milliseconds(100)); // use previous deadline as now fc::mock_time_traits::set_now(seventh_block_time.to_time_point() - fc::milliseconds(100)); auto eighth_block_time = eosio::chain::block_timestamp_type(production_round_1st_block_slot + 7); - BOOST_CHECK_EQUAL(calculate_producing_block_deadline(cpu_effort_us, eighth_block_time), + BOOST_CHECK_EQUAL(calculate_producing_block_deadline(cpu_effort, eighth_block_time), eighth_block_time.to_time_point() - fc::milliseconds(200)); // use previous deadline as now fc::mock_time_traits::set_now(eighth_block_time.to_time_point() - fc::milliseconds(200)); auto ninth_block_time = eosio::chain::block_timestamp_type(production_round_1st_block_slot + 8); - BOOST_CHECK_EQUAL(calculate_producing_block_deadline(cpu_effort_us, ninth_block_time), + BOOST_CHECK_EQUAL(calculate_producing_block_deadline(cpu_effort, ninth_block_time), ninth_block_time.to_time_point() - fc::milliseconds(300)); } } @@ -102,7 +102,7 @@ 
BOOST_AUTO_TEST_CASE(test_calculate_producer_wake_up_time) { producer_watermarks empty_watermarks; // use full cpu effort for most of these tests since calculate_producing_block_deadline is tested above - constexpr uint32_t full_cpu_effort = eosio::chain::config::block_interval_us; + constexpr fc::microseconds full_cpu_effort = fc::microseconds{eosio::chain::config::block_interval_us}; { // no producers BOOST_CHECK_EQUAL(calculate_producer_wake_up_time(full_cpu_effort, 2, chain::block_timestamp_type{}, {}, {}, empty_watermarks), std::optional{}); @@ -206,7 +206,7 @@ BOOST_AUTO_TEST_CASE(test_calculate_producer_wake_up_time) { BOOST_CHECK_EQUAL(calculate_producer_wake_up_time(full_cpu_effort, 2, block_timestamp, producers, active_schedule, empty_watermarks), expected_block_time); // cpu_effort at 50%, initc - constexpr uint32_t half_cpu_effort = eosio::chain::config::block_interval_us / 2u; + constexpr fc::microseconds half_cpu_effort = fc::microseconds{eosio::chain::config::block_interval_us / 2u}; producers = std::set{ "initc"_n }; block_timestamp = block_timestamp_type(prod_round_1st_block_slot); expected_block_time = block_timestamp_type(prod_round_1st_block_slot + 2*config::producer_repetitions).to_time_point(); @@ -221,7 +221,7 @@ BOOST_AUTO_TEST_CASE(test_calculate_producer_wake_up_time) { block_timestamp = block_timestamp_type(prod_round_1st_block_slot + 2*config::producer_repetitions + 2); // second in round is 50% sooner expected_block_time = block_timestamp.to_time_point(); - expected_block_time -= fc::microseconds(2*half_cpu_effort); + expected_block_time -= fc::microseconds(2*half_cpu_effort.count()); BOOST_CHECK_EQUAL(calculate_producer_wake_up_time(half_cpu_effort, 2, block_timestamp, producers, active_schedule, empty_watermarks), expected_block_time); } { // test watermark diff --git a/tests/PerformanceHarness/README.md b/tests/PerformanceHarness/README.md index 15c2fadc00..42ff944ef0 100644 --- a/tests/PerformanceHarness/README.md +++ 
b/tests/PerformanceHarness/README.md @@ -504,7 +504,7 @@ usage: PerformanceHarnessScenarioRunner.py findMax testBpOpMode overrideBasicTes [--cluster-log-lvl {all,debug,info,warn,error,off}] [--net-threads NET_THREADS] [--disable-subjective-billing DISABLE_SUBJECTIVE_BILLING] - [--cpu-effort-percent CPU_EFFORT_PERCENT] + [--produce-block-offset-ms PRODUCE_BLOCK_OFFSET_MS] [--producer-threads PRODUCER_THREADS] [--read-only-write-window-time-us READ_ONLY_WRITE_WINDOW_TIME_US] [--read-only-read-window-time-us READ_ONLY_READ_WINDOW_TIME_US] @@ -579,8 +579,9 @@ Performance Test Basic Base: Number of worker threads in net_plugin thread pool --disable-subjective-billing DISABLE_SUBJECTIVE_BILLING Disable subjective CPU billing for API/P2P transactions - --cpu-effort-percent CPU_EFFORT_PERCENT - Percentage of cpu block production time used to produce block. Whole number percentages, e.g. 80 for 80% + --produce-block-offset-ms PRODUCE_BLOCK_OFFSET_MS + The number of milliseconds early the last block of a production round should + be produced. 
--producer-threads PRODUCER_THREADS Number of worker threads in producer thread pool --read-only-write-window-time-us READ_ONLY_WRITE_WINDOW_TIME_US @@ -663,7 +664,7 @@ The following classes and scripts are typically used by the Performance Harness [--cluster-log-lvl {all,debug,info,warn,error,off}] [--net-threads NET_THREADS] [--disable-subjective-billing DISABLE_SUBJECTIVE_BILLING] - [--cpu-effort-percent CPU_EFFORT_PERCENT] + [--produce-block-offset-ms PRODUCE_BLOCK_OFFSET_MS] [--producer-threads PRODUCER_THREADS] [--http-max-in-flight-requests HTTP_MAX_IN_FLIGHT_REQUESTS] [--http-max-response-time-ms HTTP_MAX_RESPONSE_TIME_MS] @@ -742,8 +743,9 @@ Performance Test Basic Base: Number of worker threads in net_plugin thread pool (default: 4) --disable-subjective-billing DISABLE_SUBJECTIVE_BILLING Disable subjective CPU billing for API/P2P transactions (default: True) - --cpu-effort-percent CPU_EFFORT_PERCENT - Percentage of cpu block production time used to produce block. Whole number percentages, e.g. 80 for 80% (default: 100) + --produce-block-offset-ms PRODUCE_BLOCK_OFFSET_MS + The number of milliseconds early the last block of a production round should + be produced. 
--producer-threads PRODUCER_THREADS Number of worker threads in producer thread pool (default: 2) --http-max-in-flight-requests HTTP_MAX_IN_FLIGHT_REQUESTS @@ -1586,9 +1588,9 @@ Finally, the full detail test report for each of the determined max TPS throughp "greylistLimit": null, "_greylistLimitNodeosDefault": 1000, "_greylistLimitNodeosArg": "--greylist-limit", - "cpuEffortPercent": 100, - "_cpuEffortPercentNodeosDefault": 90, - "_cpuEffortPercentNodeosArg": "--cpu-effort-percent", + "produceBlockOffsetMs": 0, + "_produceBlockOffsetMsDefault": 450, + "_produceBlockOffsetMsArg": "--produce-block-offset-ms", "maxBlockCpuUsageThresholdUs": null, "_maxBlockCpuUsageThresholdUsNodeosDefault": 5000, "_maxBlockCpuUsageThresholdUsNodeosArg": "--max-block-cpu-usage-threshold-us", @@ -2246,9 +2248,9 @@ The Performance Test Basic generates, by default, a report that details results "greylistLimit": null, "_greylistLimitNodeosDefault": 1000, "_greylistLimitNodeosArg": "--greylist-limit", - "cpuEffortPercent": 100, - "_cpuEffortPercentNodeosDefault": 90, - "_cpuEffortPercentNodeosArg": "--cpu-effort-percent", + "produceBlockOffsetMs": 0, + "_produceBlockOffsetMsDefault": 450, + "_produceBlockOffsetMsArg": "--produce-block-offset-ms", "maxBlockCpuUsageThresholdUs": null, "_maxBlockCpuUsageThresholdUsNodeosDefault": 5000, "_maxBlockCpuUsageThresholdUsNodeosArg": "--max-block-cpu-usage-threshold-us", diff --git a/tests/PerformanceHarness/performance_test_basic.py b/tests/PerformanceHarness/performance_test_basic.py index 5fc66778b9..f3448e56cd 100755 --- a/tests/PerformanceHarness/performance_test_basic.py +++ b/tests/PerformanceHarness/performance_test_basic.py @@ -663,7 +663,7 @@ def setupClusterConfig(args) -> ClusterConfig: producerPluginArgs = ProducerPluginArgs(disableSubjectiveApiBilling=args.disable_subjective_billing, disableSubjectiveP2pBilling=args.disable_subjective_billing, - cpuEffortPercent=args.cpu_effort_percent, + 
produceBlockOffsetMs=args.produce_block_offset_ms, producerThreads=args.producer_threads, maxTransactionTime=-1, readOnlyWriteWindowTimeUs=args.read_only_write_window_time_us, readOnlyReadWindowTimeUs=args.read_only_read_window_time_us) @@ -720,7 +720,7 @@ def _createBaseArgumentParser(defEndpointApiDef: str, defProdNodeCnt: int, defVa choices=["all", "debug", "info", "warn", "error", "off"], default="info") ptbBaseParserGroup.add_argument("--net-threads", type=int, help=argparse.SUPPRESS if suppressHelp else "Number of worker threads in net_plugin thread pool", default=4) ptbBaseParserGroup.add_argument("--disable-subjective-billing", type=bool, help=argparse.SUPPRESS if suppressHelp else "Disable subjective CPU billing for API/P2P transactions", default=True) - ptbBaseParserGroup.add_argument("--cpu-effort-percent", type=int, help=argparse.SUPPRESS if suppressHelp else "Percentage of cpu block production time used to produce block. Whole number percentages, e.g. 80 for 80%%", default=100) + ptbBaseParserGroup.add_argument("--produce-block-offset-ms", type=int, help=argparse.SUPPRESS if suppressHelp else "The number of milliseconds early the last block of a production round should be produced.", default=0) ptbBaseParserGroup.add_argument("--producer-threads", type=int, help=argparse.SUPPRESS if suppressHelp else "Number of worker threads in producer thread pool", default=2) ptbBaseParserGroup.add_argument("--read-only-write-window-time-us", type=int, help=argparse.SUPPRESS if suppressHelp else "Time in microseconds the write window lasts.", default=200000) ptbBaseParserGroup.add_argument("--read-only-read-window-time-us", type=int, help=argparse.SUPPRESS if suppressHelp else "Time in microseconds the read window lasts.", default=60000) diff --git a/tests/p2p_high_latency_test.py b/tests/p2p_high_latency_test.py index 861fade6a0..2b8028209c 100644 --- a/tests/p2p_high_latency_test.py +++ b/tests/p2p_high_latency_test.py @@ -68,8 +68,7 @@ def exec(cmd): try: 
TestHelper.printSystemInfo("BEGIN") - traceNodeosArgs=" --plugin eosio::producer_plugin --produce-time-offset-us 0 --last-block-time-offset-us 0 --cpu-effort-percent 100 \ - --last-block-cpu-effort-percent 100 --producer-threads 1 --plugin eosio::net_plugin --net-threads 1" + traceNodeosArgs=" --plugin eosio::producer_plugin --produce-block-offset-ms 0 --producer-threads 1 --plugin eosio::net_plugin --net-threads 1" if cluster.launch(pnodes=1, totalNodes=totalNodes, totalProducers=1, specificExtraNodeosArgs=specificExtraNodeosArgs, extraNodeosArgs=traceNodeosArgs) is False: Utils.cmdError("launcher") Utils.errorExit("Failed to stand up eos cluster.") From 5ca96dbb9ea4ebf41288ac5868517d2e581a45f8 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 20 Oct 2023 07:42:09 -0500 Subject: [PATCH 02/12] GH-1784 Update block producing doc --- .../10_block-producing-explained.md | 112 +++++++----------- 1 file changed, 43 insertions(+), 69 deletions(-) diff --git a/docs/01_nodeos/03_plugins/producer_plugin/10_block-producing-explained.md b/docs/01_nodeos/03_plugins/producer_plugin/10_block-producing-explained.md index 1c3d56ef14..147785d519 100644 --- a/docs/01_nodeos/03_plugins/producer_plugin/10_block-producing-explained.md +++ b/docs/01_nodeos/03_plugins/producer_plugin/10_block-producing-explained.md @@ -4,87 +4,61 @@ content_title: Block Production Explained For simplicity of the explanation let's consider the following notations: -m = max_block_cpu_usage +* `r` = `producer_repetitions = 12` (hard-coded value) +* `m` = `max_block_cpu_usage` (on-chain consensus value) +* `n` = `max_block_net_usage` (on-chain consensus value) +* `t` = `block-time` +* `e` = `produce-block-offset-ms` (nodeos configuration) +* `w` = `block-time-interval = 500ms` (hard-coded value) +* `a` = `produce-block-early-amount = w - (w - (e / r)) = e / r ms` (how much to release each block of round early by) +* `l` = `produce-block-time = t - a` +* `p` = `produce block time window = w - a` (amount 
of wall clock time to produce a block) +* `c` = `billed_cpu_in_block = minimum(m, w - a)` +* `n` = `network tcp/ip latency` +* `h` = `block header validation time ms` + +Peer validation for similar hardware/version/config will be <= `m` + +**Let's consider for exemplification the following two BPs and their network topology as depicted in the below diagram** -t = block-time - -e = last-block-cpu-effort-percent - -w = block_time_interval = 500ms - -a = produce-block-early-amount = (w - w*e/100) ms - -p = produce-block-time; p = t - a - -c = billed_cpu_in_block = minimum(m, w - a) - -n = network tcp/ip latency - -peer validation for similar hardware/eosio-version/config will be <= m - -**Let's consider for exemplification the following four BPs and their network topology as depicted in below diagram** - - -```dot-svg -#p2p_local_chain_prunning.dot - local chain prunning -# -#notes: * to see image copy/paste to https://dreampuf.github.io/GraphvizOnline -# * image will be rendered by gatsby-remark-graphviz plugin in eosio docs. - -digraph { - newrank=true #allows ranks inside subgraphs (important!) - compound=true #allows edges connecting nodes with subgraphs - graph [rankdir=LR] - node [style=filled, fillcolor=lightgray, shape=square, fixedsize=true, width=.55, fontsize=10] - edge [dir=both, arrowsize=.6, weight=100] - splines=false - - subgraph cluster_chain { - label="Block Producers Peers"; labelloc="b" - graph [color=invis] - b0 [label="...", color=invis, style=""] - b1 [label="BP-A"]; b2 [label="BP-A\nPeer"]; b3 [label="BP-B\nPeer"]; b4 [label="BP-B"] - b5 [label="...", color=invis, style=""] - b0 -> b1 -> b2 -> b3 -> b4 -> b5 - } //cluster_chain - -} //digraph +``` + +------+ +------+ +------+ +------+ + -->| BP-A |---->| BP-A |------>| BP-B |---->| BP-B | + +------+ | Peer | | Peer | +------+ + +------+ +------+ ``` -`BP-A` will send block at `p` and, - -`BP-B` needs block at time `t` or otherwise will drop it. 
+`BP-A` will send block at `l` and, `BP-B` needs block at time `t` or otherwise will drop it. If `BP-A`is producing 12 blocks as follows `b(lock) at t(ime) 1`, `bt 1.5`, `bt 2`, `bt 2.5`, `bt 3`, `bt 3.5`, `bt 4`, `bt 4.5`, `bt 5`, `bt 5.5`, `bt 6`, `bt 6.5` then `BP-B` needs `bt 6.5` by time `6.5` so it has `.5` to produce `bt 7`. Please notice that the time of `bt 7` minus `.5` equals the time of `bt 6.5` therefore time `t` is the last block time of `BP-A` and when `BP-B` needs to start its first block. -## Example 1 -`BP-A` has 50% e, m = 200ms, c = 200ms, n = 0ms, a = 250ms: -`BP-A` sends at (t-250ms) <-> `BP-A-Peer` processes for 200ms and sends at (t - 50ms) <-> `BP-B-Peer` processes for 200ms and sends at (t + 150ms) <-> arrive at `BP-B` 150ms too late. - -## Example 2 -`BP-A` has 40% e and m = 200ms, c = 200ms, n = 0ms, a = 300ms: -(t-300ms) <-> (+200ms) <-> (+200ms) <-> arrive at `BP-B` 100ms too late. +A block is produced and sent when either it reaches `m` or `n` or `p`. -## Example 3 -`BP-A` has 30% e and m = 200ms, c = 150ms, n = 0ms, a = 350ms: -(t-350ms) <-> (+150ms) <-> (+150ms) <-> arrive at `BP-B` with 50ms to spare. +Starting in Leap 4.0, blocks are propagated after block header validation. This means instead of `BP-A Peer` & `BP-B Peer` taking `m` time to validate and forward a block it only takes a small number of milliseconds to verify the block header and then forward the block. -## Example 4 -`BP-A` has 25% e and m = 200ms, c = 125ms, n = 0ms, a = 375ms: -(t-375ms) <-> (+125ms) <-> (+125ms) <-> arrive at `BP-B` with 125ms to spare. +Starting in Leap 5.0, blocks in a round are started immediately after the completion of the previous block. Before 5.0, blocks were always started on `w` intervals and a node would "sleep" between blocks if needed. In 5.0, the "sleeps" are all moved to the end of the block production round. 
-## Example 5 -`BP-A` has 10% e and m = 200ms, c = 50ms, n = 0ms, a = 450ms: -(t-450ms) <-> (+50ms) <-> (+50ms) <-> arrive at `BP-B` with 350ms to spare. +## Example 1: block arrives 110ms early (zero network latency between BP and immediate peer) +* `BP-A` has e = 120, n = 0ms, h = 5ms, a = 10ms +* `BP-A` sends b1 at `t1-10ms` => `BP-A-Peer` processes `h=5ms`, sends at `t-5ms` => `BP-B-Peer` processes `h=5ms`, sends at `t-0ms` => arrives at `BP-B` at `t`. +* `BP-A` starts b2 at `t1-10ms`, sends b2 at `t2-20ms` => `BP-A-Peer` processes `h=5ms`, sends at `t2-15ms` => `BP-B-Peer` processes `h=5ms`, sends at `t2-10ms` => arrives at `BP-B` at `t2-10ms`. +* `BP-A` starts b3 at `t2-20ms`, ... +* `BP-A` starts b12 at `t11-110ms`, sends b12 at `t12-120ms` => `BP-A-Peer` processes `h=5ms`, sends at `t12-115ms` => `BP-B-Peer` processes `h=5ms`, sends at `t12-110ms` => arrives at `BP-B` at `t12-110ms` -## Example 6 -`BP-A` has 10% e and m = 200ms, c = 50ms, n = 15ms, a = 450ms: -(t-450ms) <- +15ms -> (+50ms) <- +15ms -> (+50ms) <- +15ms -> `BP-B` <-> arrive with 305ms to spare. +## Example 2: block arrives 80ms early (zero network latency between BP and immediate peer) +* `BP-A` has e = 240, n = 150ms, h = 5ms, a = 20ms +* `BP-A` sends b1 at `t1-20ms` => `BP-A-Peer` processes `h=5ms`, sends at `t-15ms` =(150ms)> `BP-B-Peer` processes `h=5ms`, sends at `t+140ms` => arrives at `BP-B` at `t+140ms`. +* `BP-A` starts b2 at `t1-20ms`, sends b2 at `t2-40ms` => `BP-A-Peer` processes `h=5ms`, sends at `t2-35ms` =(150ms)> `BP-B-Peer` processes `h=5ms`, sends at `t2+120ms` => arrives at `BP-B` at `t2+120ms`. +* `BP-A` starts b3 at `t2-40ms`, ... 
+* `BP-A` starts b12 at `t11-220ms`, sends b12 at `t12-240ms` => `BP-A-Peer` processes `h=5ms`, sends at `t12-235ms` =(150ms)> `BP-B-Peer` processes `h=5ms`, sends at `t12-80ms` => arrives at `BP-B` at `t12-80ms` -## Example 7 -Example world-wide network:`BP-A`has 10% e and m = 200ms, c = 50ms, n = 15ms/250ms, a = 450ms: -(t-450ms) <- +15ms -> (+50ms) <- +250ms -> (+50ms) <- +15ms -> `BP-B` <-> arrive with 70ms to spare. +## Example 3: block arrives 16ms late and is dropped (zero network latency between BP and immediate peer) +* `BP-A` has e = 204, n = 200ms, h = 10ms, a = 17ms +* `BP-A` sends b1 at `t1-17ms` => `BP-A-Peer` processes `h=10ms`, sends at `t-7ms` =(200ms)> `BP-B-Peer` processes `h=10ms`, sends at `t+203ms` => arrives at `BP-B` at `t+203ms`. +* `BP-A` starts b2 at `t1-17ms`, sends b2 at `t2-34ms` => `BP-A-Peer` processes `h=10ms`, sends at `t2-24ms` =(200ms)> `BP-B-Peer` processes `h=10ms`, sends at `t2+186ms` => arrives at `BP-B` at `t2+186ms`. +* `BP-A` starts b3 at `t2-34ms`, ... +* `BP-A` starts b12 at `t11-187ms`, sends b12 at `t12-204ms` => `BP-A-Peer` processes `h=10ms`, sends at `t12-194ms` =(200ms)> `BP-B-Peer` processes `h=10ms`, sends at `t12+16ms` => arrives at `BP-B` at `t12-16ms` Running wasm-runtime=eos-vm-jit eos-vm-oc-enable on relay node will reduce the validation time. 
From 7bd985028978d6e46c942aafbb1967c3acf87fb7 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 20 Oct 2023 10:15:36 -0500 Subject: [PATCH 03/12] GH-1784 Add example with full blocks --- .../10_block-producing-explained.md | 28 ++++++++++++++++--- 1 file changed, 24 insertions(+), 4 deletions(-) diff --git a/docs/01_nodeos/03_plugins/producer_plugin/10_block-producing-explained.md b/docs/01_nodeos/03_plugins/producer_plugin/10_block-producing-explained.md index 147785d519..b2bac29133 100644 --- a/docs/01_nodeos/03_plugins/producer_plugin/10_block-producing-explained.md +++ b/docs/01_nodeos/03_plugins/producer_plugin/10_block-producing-explained.md @@ -19,7 +19,7 @@ For simplicity of the explanation let's consider the following notations: Peer validation for similar hardware/version/config will be <= `m` -**Let's consider for exemplification the following two BPs and their network topology as depicted in the below diagram** +**Let's consider the example of the following two BPs and their network topology as depicted in the below diagram** ``` +------+ +------+ +------+ +------+ @@ -40,25 +40,45 @@ Starting in Leap 4.0, blocks are propagated after block header validation. This Starting in Leap 5.0, blocks in a round are started immediately after the completion of the previous block. Before 5.0, blocks were always started on `w` intervals and a node would "sleep" between blocks if needed. In 5.0, the "sleeps" are all moved to the end of the block production round. -## Example 1: block arrives 110ms early (zero network latency between BP and immediate peer) +## Example 1: block arrives 110ms early +* Assuming zero network latency between BP and immediate BP peer. +* Assuming blocks do not reach `m` and therefore take `w - a` time to produce. +* Assume block completion including signing takes zero time. 
* `BP-A` has e = 120, n = 0ms, h = 5ms, a = 10ms * `BP-A` sends b1 at `t1-10ms` => `BP-A-Peer` processes `h=5ms`, sends at `t-5ms` => `BP-B-Peer` processes `h=5ms`, sends at `t-0ms` => arrives at `BP-B` at `t`. * `BP-A` starts b2 at `t1-10ms`, sends b2 at `t2-20ms` => `BP-A-Peer` processes `h=5ms`, sends at `t2-15ms` => `BP-B-Peer` processes `h=5ms`, sends at `t2-10ms` => arrives at `BP-B` at `t2-10ms`. * `BP-A` starts b3 at `t2-20ms`, ... * `BP-A` starts b12 at `t11-110ms`, sends b12 at `t12-120ms` => `BP-A-Peer` processes `h=5ms`, sends at `t12-115ms` => `BP-B-Peer` processes `h=5ms`, sends at `t12-110ms` => arrives at `BP-B` at `t12-110ms` -## Example 2: block arrives 80ms early (zero network latency between BP and immediate peer) +## Example 2: block arrives 80ms early +* Assuming zero network latency between BP and immediate BP peer. +* Assuming blocks do not reach `m` and therefore take `w - a` time to produce. +* Assume block completion including signing takes zero time. * `BP-A` has e = 240, n = 150ms, h = 5ms, a = 20ms * `BP-A` sends b1 at `t1-20ms` => `BP-A-Peer` processes `h=5ms`, sends at `t-15ms` =(150ms)> `BP-B-Peer` processes `h=5ms`, sends at `t+140ms` => arrives at `BP-B` at `t+140ms`. * `BP-A` starts b2 at `t1-20ms`, sends b2 at `t2-40ms` => `BP-A-Peer` processes `h=5ms`, sends at `t2-35ms` =(150ms)> `BP-B-Peer` processes `h=5ms`, sends at `t2+120ms` => arrives at `BP-B` at `t2+120ms`. * `BP-A` starts b3 at `t2-40ms`, ... * `BP-A` starts b12 at `t11-220ms`, sends b12 at `t12-240ms` => `BP-A-Peer` processes `h=5ms`, sends at `t12-235ms` =(150ms)> `BP-B-Peer` processes `h=5ms`, sends at `t12-80ms` => arrives at `BP-B` at `t12-80ms` -## Example 3: block arrives 16ms late and is dropped (zero network latency between BP and immediate peer) +## Example 3: block arrives 16ms late and is dropped +* Assuming zero network latency between BP and immediate BP peer. +* Assuming blocks do not reach `m` and therefore take `w - a` time to produce. 
+* Assume block completion including signing takes zero time. * `BP-A` has e = 204, n = 200ms, h = 10ms, a = 17ms * `BP-A` sends b1 at `t1-17ms` => `BP-A-Peer` processes `h=10ms`, sends at `t-7ms` =(200ms)> `BP-B-Peer` processes `h=10ms`, sends at `t+203ms` => arrives at `BP-B` at `t+203ms`. * `BP-A` starts b2 at `t1-17ms`, sends b2 at `t2-34ms` => `BP-A-Peer` processes `h=10ms`, sends at `t2-24ms` =(200ms)> `BP-B-Peer` processes `h=10ms`, sends at `t2+186ms` => arrives at `BP-B` at `t2+186ms`. * `BP-A` starts b3 at `t2-34ms`, ... * `BP-A` starts b12 at `t11-187ms`, sends b12 at `t12-204ms` => `BP-A-Peer` processes `h=10ms`, sends at `t12-194ms` =(200ms)> `BP-B-Peer` processes `h=10ms`, sends at `t12+16ms` => arrives at `BP-B` at `t12-16ms` +## Example 4: full blocks are produced early +* Assuming zero network latency between BP and immediate BP peer. +* Assume all blocks are full as there are enough queued up unapplied transactions ready to fill all blocks. +* Assume a block can be produced with 200ms worth of transactions in 225ms worth of time. There is overhead for producing the block. +* `BP-A` has e = 120, n = 200ms, h = 10ms, a = 10ms +* `BP-A` sends b1 at `t1-275ms` => `BP-A-Peer` processes `h=10ms`, sends at `t-265ms` =(200ms)> `BP-B-Peer` processes `h=10ms`, sends at `t-55ms` => arrives at `BP-B` at `t-55ms`. +* `BP-A` starts b2 at `t1-275ms`, sends b2 at `t2-550ms (t1-50ms)` => `BP-A-Peer` processes `h=10ms`, sends at `t2-540ms` => `BP-B-Peer` processes `h=10ms`, sends at `t2-530ms` => arrives at `BP-B` at `t2-530ms`. +* `BP-A` starts b3 at `t2-550ms`, ... +* `BP-A` starts b12 at `t11-3025ms`, sends b12 at `t12-3300ms` => `BP-A-Peer` processes `h=10ms`, sends at `t12-3290ms` => `BP-B-Peer` processes `h=10ms`, sends at `t12-3280ms` => arrives at `BP-B` at `t12-3280ms` + + Running wasm-runtime=eos-vm-jit eos-vm-oc-enable on relay node will reduce the validation time.
From 694b88463db7ac4119eabfcdf4797e11f7ad154a Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 20 Oct 2023 10:20:13 -0500 Subject: [PATCH 04/12] GH-1784 Add in network latency --- .../producer_plugin/10_block-producing-explained.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/01_nodeos/03_plugins/producer_plugin/10_block-producing-explained.md b/docs/01_nodeos/03_plugins/producer_plugin/10_block-producing-explained.md index b2bac29133..80dbf09abc 100644 --- a/docs/01_nodeos/03_plugins/producer_plugin/10_block-producing-explained.md +++ b/docs/01_nodeos/03_plugins/producer_plugin/10_block-producing-explained.md @@ -76,9 +76,9 @@ Starting in Leap 5.0, blocks in a round are started immediately after the comple * Assume a block can be produced with 200ms worth of transactions in 225ms worth of time. There is overhead for producing the block. * `BP-A` has e = 120, n = 200ms, h = 10ms, a = 10ms * `BP-A` sends b1 at `t1-275ms` => `BP-A-Peer` processes `h=10ms`, sends at `t-265ms` =(200ms)> `BP-B-Peer` processes `h=10ms`, sends at `t-55ms` => arrives at `BP-B` at `t-55ms`. -* `BP-A` starts b2 at `t1-275ms`, sends b2 at `t2-550ms (t1-50ms)` => `BP-A-Peer` processes `h=10ms`, sends at `t2-540ms` => `BP-B-Peer` processes `h=10ms`, sends at `t2-530ms` => arrives at `BP-B` at `t2-530ms`. +* `BP-A` starts b2 at `t1-275ms`, sends b2 at `t2-550ms (t1-50ms)` => `BP-A-Peer` processes `h=10ms`, sends at `t2-540ms` =(200ms)> `BP-B-Peer` processes `h=10ms`, sends at `t2-330ms` => arrives at `BP-B` at `t2-330ms`. * `BP-A` starts b3 at `t2-550ms`, ...
-* `BP-A` starts b12 at `t11-3025ms`, sends b12 at `t12-3300ms` => `BP-A-Peer` processes `h=10ms`, sends at `t12-3290ms` => `BP-B-Peer` processes `h=10ms`, sends at `t12-3280ms` => arrives at `BP-B` at `t12-3280ms` +* `BP-A` starts b12 at `t11-3025ms`, sends b12 at `t12-3300ms` => `BP-A-Peer` processes `h=10ms`, sends at `t12-3290ms` =(200ms)> `BP-B-Peer` processes `h=10ms`, sends at `t12-3080ms` => arrives at `BP-B` at `t12-3080ms` Running wasm-runtime=eos-vm-jit eos-vm-oc-enable on relay node will reduce the validation time. From fbc83d1a41617ce82a5f5231944cbb18f46fe583 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 20 Oct 2023 11:06:51 -0500 Subject: [PATCH 05/12] GH-1784 n was used for two different variables --- .../producer_plugin/10_block-producing-explained.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/01_nodeos/03_plugins/producer_plugin/10_block-producing-explained.md b/docs/01_nodeos/03_plugins/producer_plugin/10_block-producing-explained.md index 80dbf09abc..bb6cb9663e 100644 --- a/docs/01_nodeos/03_plugins/producer_plugin/10_block-producing-explained.md +++ b/docs/01_nodeos/03_plugins/producer_plugin/10_block-producing-explained.md @@ -6,7 +6,7 @@ For simplicity of the explanation let's consider the following notations: * `r` = `producer_repetitions = 12` (hard-coded value) * `m` = `max_block_cpu_usage` (on-chain consensus value) -* `n` = `max_block_net_usage` (on-chain consensus value) +* `u` = `max_block_net_usage` (on-chain consensus value) * `t` = `block-time` * `e` = `produce-block-offset-ms` (nodeos configuration) * `w` = `block-time-interval = 500ms` (hard-coded value) @@ -34,7 +34,7 @@ If `BP-A`is producing 12 blocks as follows `b(lock) at t(ime) 1`, `bt 1.5`, `bt Please notice that the time of `bt 7` minus `.5` equals the time of `bt 6.5` therefore time `t` is the last block time of `BP-A` and when `BP-B` needs to start its first block. 
-A block is produced and sent when either it reaches `m` or `n` or `p`. +A block is produced and sent when either it reaches `m` or `u` or `p`. Starting in Leap 4.0, blocks are propagated after block header validation. This means instead of `BP-A Peer` & `BP-B Peer` taking `m` time to validate and forward a block it only takes a small number of milliseconds to verify the block header and then forward the block. @@ -74,7 +74,7 @@ Starting in Leap 5.0, blocks in a round are started immediately after the comple * Assuming zero network latency between BP and immediate BP peer. * Assume all blocks are full as there are enough queued up unapplied transactions ready to fill all blocks. * Assume a block can be produced with 200ms worth of transactions in 225ms worth of time. There is overhead for producing the block. -* `BP-A` has e = 120, n = 200ms, h = 10ms, a = 10ms +* `BP-A` has e = 120, m = 200ms, n = 200ms, h = 10ms, a = 10ms * `BP-A` sends b1 at `t1-275ms` => `BP-A-Peer` processes `h=10ms`, sends at `t-265ms` =(200ms)> `BP-B-Peer` processes `h=10ms`, sends at `t-55ms` => arrives at `BP-B` at `t-55ms`. * `BP-A` starts b2 at `t1-275ms`, sends b2 at `t2-550ms (t1-50ms)` => `BP-A-Peer` processes `h=10ms`, sends at `t2-540ms` =(200ms)> `BP-B-Peer` processes `h=10ms`, sends at `t2-330ms` => arrives at `BP-B` at `t2-330ms`. * `BP-A` starts b3 at `t2-550ms`, ...
From 48422bf0d5233cbd23148837139b19c98026ee12 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 20 Oct 2023 12:30:45 -0500 Subject: [PATCH 06/12] GH-1784 Additional clarification and a fix --- .../10_block-producing-explained.md | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/docs/01_nodeos/03_plugins/producer_plugin/10_block-producing-explained.md b/docs/01_nodeos/03_plugins/producer_plugin/10_block-producing-explained.md index bb6cb9663e..454a2fb613 100644 --- a/docs/01_nodeos/03_plugins/producer_plugin/10_block-producing-explained.md +++ b/docs/01_nodeos/03_plugins/producer_plugin/10_block-producing-explained.md @@ -41,7 +41,7 @@ Starting in Leap 4.0, blocks are propagated after block header validation. This Starting in Leap 5.0, blocks in a round are started immediately after the completion of the previous block. Before 5.0, blocks were always started on `w` intervals and a node would "sleep" between blocks if needed. In 5.0, the "sleeps" are all moved to the end of the block production round. ## Example 1: block arrives 110ms early -* Assuming zero network latency between BP and immediate BP peer. +* Assuming zero network latency between all nodes. * Assuming blocks do not reach `m` and therefore take `w - a` time to produce. * Assume block completion including signing takes zero time. * `BP-A` has e = 120, n = 0ms, h = 5ms, a = 10ms @@ -51,30 +51,33 @@ Starting in Leap 5.0, blocks in a round are started immediately after the comple * `BP-A` starts b12 at `t11-110ms`, sends b12 at `t12-120ms` => `BP-A-Peer` processes `h=5ms`, sends at `t12-115ms` => `BP-B-Peer` processes `h=5ms`, sends at `t12-110ms` => arrives at `BP-B` at `t12-110ms` ## Example 2: block arrives 80ms early -* Assuming zero network latency between BP and immediate BP peer. +* Assuming zero network latency between `BP-A` & `BP-A Peer` and between `BP-B Peer` & `BP-B`. +* Assuming 150ms network latency between `BP-A Peer` & `BP-B Peer`.
* Assuming blocks do not reach `m` and therefore take `w - a` time to produce. * Assume block completion including signing takes zero time. -* `BP-A` has e = 240, n = 150ms, h = 5ms, a = 20ms +* `BP-A` has e = 240, n = 0ms/150ms, h = 5ms, a = 20ms * `BP-A` sends b1 at `t1-20ms` => `BP-A-Peer` processes `h=5ms`, sends at `t-15ms` =(150ms)> `BP-B-Peer` processes `h=5ms`, sends at `t+140ms` => arrives at `BP-B` at `t+140ms`. * `BP-A` starts b2 at `t1-20ms`, sends b2 at `t2-40ms` => `BP-A-Peer` processes `h=5ms`, sends at `t2-35ms` =(150ms)> `BP-B-Peer` processes `h=5ms`, sends at `t2+120ms` => arrives at `BP-B` at `t2+120ms`. * `BP-A` starts b3 at `t2-40ms`, ... * `BP-A` starts b12 at `t11-220ms`, sends b12 at `t12-240ms` => `BP-A-Peer` processes `h=5ms`, sends at `t12-235ms` =(150ms)> `BP-B-Peer` processes `h=5ms`, sends at `t12-80ms` => arrives at `BP-B` at `t12-80ms` ## Example 3: block arrives 16ms late and is dropped -* Assuming zero network latency between BP and immediate BP peer. +* Assuming zero network latency between `BP-A` & `BP-A Peer` and between `BP-B Peer` & `BP-B`. +* Assuming 200ms network latency between `BP-A Peer` & `BP-B Peer`. * Assuming blocks do not reach `m` and therefore take `w - a` time to produce. * Assume block completion including signing takes zero time. -* `BP-A` has e = 204, n = 200ms, h = 10ms, a = 17ms +* `BP-A` has e = 204, n = 0ms/200ms, h = 10ms, a = 17ms * `BP-A` sends b1 at `t1-17ms` => `BP-A-Peer` processes `h=10ms`, sends at `t-7ms` =(200ms)> `BP-B-Peer` processes `h=10ms`, sends at `t+203ms` => arrives at `BP-B` at `t+203ms`. * `BP-A` starts b2 at `t1-17ms`, sends b2 at `t2-34ms` => `BP-A-Peer` processes `h=10ms`, sends at `t2-24ms` =(200ms)> `BP-B-Peer` processes `h=10ms`, sends at `t2+186ms` => arrives at `BP-B` at `t2+186ms`. * `BP-A` starts b3 at `t2-34ms`, ... 
-* `BP-A` starts b12 at `t11-187ms`, sends b12 at `t12-204ms` => `BP-A-Peer` processes `h=10ms`, sends at `t12-194ms` =(200ms)> `BP-B-Peer` processes `h=10ms`, sends at `t12+16ms` => arrives at `BP-B` at `t12-16ms` +* `BP-A` starts b12 at `t11-187ms`, sends b12 at `t12-204ms` => `BP-A-Peer` processes `h=10ms`, sends at `t12-194ms` =(200ms)> `BP-B-Peer` processes `h=10ms`, sends at `t12+16ms` => arrives at `BP-B` at `t12+16ms` ## Example 4: full blocks are produced early -* Assuming zero network latency between BP and immediate BP peer. +* Assuming zero network latency between `BP-A` & `BP-A Peer` and between `BP-B Peer` & `BP-B`. +* Assuming 200ms network latency between `BP-A Peer` & `BP-B Peer`. * Assume all blocks are full as there are enough queued up unapplied transactions ready to fill all blocks. * Assume a block can be produced with 200ms worth of transactions in 225ms worth of time. There is overhead for producing the block. -* `BP-A` has e = 120, m = 200ms, n = 200ms, h = 10ms, a = 10ms +* `BP-A` has e = 120, m = 200ms, n = 0ms/200ms, h = 10ms, a = 10ms * `BP-A` sends b1 at `t1-275ms` => `BP-A-Peer` processes `h=10ms`, sends at `t-265ms` =(200ms)> `BP-B-Peer` processes `h=10ms`, sends at `t-55ms` => arrives at `BP-B` at `t-55ms`. * `BP-A` starts b2 at `t1-275ms`, sends b2 at `t2-550ms (t1-50ms)` => `BP-A-Peer` processes `h=10ms`, sends at `t2-540ms` =(200ms)> `BP-B-Peer` processes `h=10ms`, sends at `t2-330ms` => arrives at `BP-B` at `t2-330ms`. * `BP-A` starts b3 at `t2-550ms`, ...
From 17266b189e8b927221e30ce1dd963b461477f270 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 20 Oct 2023 13:31:28 -0500 Subject: [PATCH 07/12] GH-1784 Rename cpu_effort_us to cpu_effort --- .../eosio/producer_plugin/block_timing_util.hpp | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/plugins/producer_plugin/include/eosio/producer_plugin/block_timing_util.hpp b/plugins/producer_plugin/include/eosio/producer_plugin/block_timing_util.hpp index f4d68061c7..27ba83ff6c 100644 --- a/plugins/producer_plugin/include/eosio/producer_plugin/block_timing_util.hpp +++ b/plugins/producer_plugin/include/eosio/producer_plugin/block_timing_util.hpp @@ -45,25 +45,25 @@ namespace block_timing_util { // example, given block_interval=500 ms and cpu effort=400 ms, assuming our round starts at time point 0; in the // past, the block start time points would be at time point -500, 0, 500, 1000, 1500, 2000 .... With this new // approach, the block time points would become -500, -100, 300, 700, 1100 ... 
- inline fc::time_point production_round_block_start_time(fc::microseconds cpu_effort_us, chain::block_timestamp_type block_time) { + inline fc::time_point production_round_block_start_time(fc::microseconds cpu_effort, chain::block_timestamp_type block_time) { uint32_t block_slot = block_time.slot; uint32_t production_round_start_block_slot = (block_slot / chain::config::producer_repetitions) * chain::config::producer_repetitions; uint32_t production_round_index = block_slot % chain::config::producer_repetitions; return chain::block_timestamp_type(production_round_start_block_slot - 1).to_time_point() + - fc::microseconds(cpu_effort_us.count() * production_round_index); + fc::microseconds(cpu_effort.count() * production_round_index); } - inline fc::time_point calculate_producing_block_deadline(fc::microseconds cpu_effort_us, chain::block_timestamp_type block_time) { - auto estimated_deadline = production_round_block_start_time(cpu_effort_us, block_time) + fc::microseconds(cpu_effort_us); + inline fc::time_point calculate_producing_block_deadline(fc::microseconds cpu_effort, chain::block_timestamp_type block_time) { + auto estimated_deadline = production_round_block_start_time(cpu_effort, block_time) + cpu_effort; auto now = fc::time_point::now(); if (estimated_deadline > now) { return estimated_deadline; } else { // This could only happen when the producer stop producing and then comes back alive in the middle of its own // production round. In this case, we just use the hard deadline. 
- const auto hard_deadline = block_time.to_time_point() - fc::microseconds(chain::config::block_interval_us - cpu_effort_us.count()); - return std::min(hard_deadline, now + cpu_effort_us); + const auto hard_deadline = block_time.to_time_point() - fc::microseconds(chain::config::block_interval_us - cpu_effort.count()); + return std::min(hard_deadline, now + cpu_effort); } } @@ -118,7 +118,7 @@ namespace block_timing_util { // Return the *next* block start time according to its block time slot. // Returns empty optional if no producers are in the active_schedule. // block_num is only used for watermark minimum offset. - inline std::optional calculate_producer_wake_up_time(fc::microseconds cpu_effort_us, uint32_t block_num, + inline std::optional calculate_producer_wake_up_time(fc::microseconds cpu_effort, uint32_t block_num, const chain::block_timestamp_type& ref_block_time, const std::set& producers, const std::vector& active_schedule, @@ -141,7 +141,7 @@ namespace block_timing_util { return {}; } - return production_round_block_start_time(cpu_effort_us, chain::block_timestamp_type(wake_up_slot)); + return production_round_block_start_time(cpu_effort, chain::block_timestamp_type(wake_up_slot)); } } // namespace block_timing_util From 54e42bc1980d959b2348fc65d127ad9031514af5 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 20 Oct 2023 13:38:19 -0500 Subject: [PATCH 08/12] GH-1784 Update produce-block-offset-ms description --- docs/01_nodeos/03_plugins/producer_plugin/index.md | 6 +++--- plugins/producer_plugin/producer_plugin.cpp | 2 +- tests/PerformanceHarness/performance_test_basic.py | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/01_nodeos/03_plugins/producer_plugin/index.md b/docs/01_nodeos/03_plugins/producer_plugin/index.md index 11157f5936..3d36b24f04 100644 --- a/docs/01_nodeos/03_plugins/producer_plugin/index.md +++ b/docs/01_nodeos/03_plugins/producer_plugin/index.md @@ -72,9 +72,9 @@ Config Options for 
eosio::producer_plugin: can extend during low usage (only enforced subjectively; use 1000 to not enforce any limit) - --produce-block-offset-ms arg (=450) The number of milliseconds early the - last block of a production round should - be produced. + --produce-block-offset-ms arg (=450) The minimum time to reserve at the end + of a production round for blocks to + propagate to the next block producer. --max-block-cpu-usage-threshold-us arg (=5000) Threshold of CPU block production to consider block full; when within diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 7fc20de7cc..c425447e58 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -1044,7 +1044,7 @@ void producer_plugin::set_program_options( ("greylist-limit", boost::program_options::value()->default_value(1000), "Limit (between 1 and 1000) on the multiple that CPU/NET virtual resources can extend during low usage (only enforced subjectively; use 1000 to not enforce any limit)") ("produce-block-offset-ms", bpo::value()->default_value(config::default_produce_block_offset_ms), - "The number of milliseconds early the last block of a production round should be produced.") + "The minimum time to reserve at the end of a production round for blocks to propagate to the next block producer.") ("max-block-cpu-usage-threshold-us", bpo::value()->default_value( 5000 ), "Threshold of CPU block production to consider block full; when within threshold of max-block-cpu-usage block can be produced immediately") ("max-block-net-usage-threshold-bytes", bpo::value()->default_value( 1024 ), diff --git a/tests/PerformanceHarness/performance_test_basic.py b/tests/PerformanceHarness/performance_test_basic.py index f3448e56cd..0d56329985 100755 --- a/tests/PerformanceHarness/performance_test_basic.py +++ b/tests/PerformanceHarness/performance_test_basic.py @@ -720,7 +720,7 @@ def 
_createBaseArgumentParser(defEndpointApiDef: str, defProdNodeCnt: int, defVa choices=["all", "debug", "info", "warn", "error", "off"], default="info") ptbBaseParserGroup.add_argument("--net-threads", type=int, help=argparse.SUPPRESS if suppressHelp else "Number of worker threads in net_plugin thread pool", default=4) ptbBaseParserGroup.add_argument("--disable-subjective-billing", type=bool, help=argparse.SUPPRESS if suppressHelp else "Disable subjective CPU billing for API/P2P transactions", default=True) - ptbBaseParserGroup.add_argument("--produce-block-offset-ms", type=int, help=argparse.SUPPRESS if suppressHelp else "The number of milliseconds early the last block of a production round should be produced.", default=0) + ptbBaseParserGroup.add_argument("--produce-block-offset-ms", type=int, help=argparse.SUPPRESS if suppressHelp else "The minimum time to reserve at the end of a production round for blocks to propagate to the next block producer.", default=0) ptbBaseParserGroup.add_argument("--producer-threads", type=int, help=argparse.SUPPRESS if suppressHelp else "Number of worker threads in producer thread pool", default=2) ptbBaseParserGroup.add_argument("--read-only-write-window-time-us", type=int, help=argparse.SUPPRESS if suppressHelp else "Time in microseconds the write window lasts.", default=200000) ptbBaseParserGroup.add_argument("--read-only-read-window-time-us", type=int, help=argparse.SUPPRESS if suppressHelp else "Time in microseconds the read window lasts.", default=60000) From 1cc8b3d7529f8723dbbb7541b316275c615dd08f Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 20 Oct 2023 14:06:30 -0500 Subject: [PATCH 09/12] GH-1784 Added comment --- .../include/eosio/producer_plugin/producer_plugin.hpp | 1 + 1 file changed, 1 insertion(+) diff --git a/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp b/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp index 0f38ef35f6..495d1b91a6 100644 --- 
a/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp +++ b/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp @@ -17,6 +17,7 @@ class producer_plugin : public appbase::plugin { struct runtime_options { std::optional max_transaction_time; std::optional max_irreversible_block_age; + // minimum time to reserve at the end of a production round for blocks to propagate to the next block producer. std::optional produce_block_offset_ms; std::optional subjective_cpu_leeway_us; std::optional greylist_limit; From a336ef25a358d40a74b7b254caea15c731213343 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 20 Oct 2023 14:06:55 -0500 Subject: [PATCH 10/12] GH-1784 Renamed _produce_block_cpu_effort_us to _produce_block_cpu_effort --- plugins/producer_plugin/producer_plugin.cpp | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index c425447e58..3b90583acd 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -501,7 +501,7 @@ class producer_plugin_impl : public std::enable_shared_from_this _received_block{0}; // modified by net_plugin thread pool fc::microseconds _max_irreversible_block_age_us; // produce-block-offset is in terms of the complete round, internally use calculated value for each block of round - fc::microseconds _produce_block_cpu_effort_us; + fc::microseconds _produce_block_cpu_effort; fc::time_point _pending_block_deadline; uint32_t _max_block_cpu_usage_threshold_us = 0; uint32_t _max_block_net_usage_threshold_bytes = 0; @@ -615,12 +615,12 @@ class producer_plugin_impl : public std::enable_shared_from_thischain(); fc_dlog(_log, "Waiting till another block is received and scheduling Speculative/Production Change"); - auto wake_time = block_timing_util::calculate_producer_wake_up_time(_produce_block_cpu_effort_us, chain.head_block_num(), 
calculate_pending_block_time(), + auto wake_time = block_timing_util::calculate_producer_wake_up_time(_produce_block_cpu_effort, chain.head_block_num(), calculate_pending_block_time(), _producers, chain.head_block_state()->active_schedule.producers, _producer_watermarks); schedule_delayed_production_loop(weak_from_this(), wake_time); @@ -2497,7 +2497,7 @@ void producer_plugin_impl::schedule_production_loop() { chain::controller& chain = chain_plug->chain(); fc_dlog(_log, "Speculative Block Created; Scheduling Speculative/Production Change"); EOS_ASSERT(chain.is_building_block(), missing_pending_block_state, "speculating without pending_block_state"); - auto wake_time = block_timing_util::calculate_producer_wake_up_time(_produce_block_cpu_effort_us, chain.pending_block_num(), chain.pending_block_timestamp(), + auto wake_time = block_timing_util::calculate_producer_wake_up_time(_produce_block_cpu_effort, chain.pending_block_num(), chain.pending_block_timestamp(), _producers, chain.head_block_state()->active_schedule.producers, _producer_watermarks); schedule_delayed_production_loop(weak_from_this(), wake_time); @@ -2514,7 +2514,7 @@ void producer_plugin_impl::schedule_maybe_produce_block(bool exhausted) { assert(in_producing_mode()); // we succeeded but block may be exhausted static const boost::posix_time::ptime epoch(boost::gregorian::date(1970, 1, 1)); - auto deadline = block_timing_util::calculate_producing_block_deadline(_produce_block_cpu_effort_us, chain.pending_block_time()); + auto deadline = block_timing_util::calculate_producing_block_deadline(_produce_block_cpu_effort, chain.pending_block_time()); if (!exhausted && deadline > fc::time_point::now()) { // ship this block off no later than its deadline From 440d33a65debbf3bae5477685f8b41612970d93e Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 20 Oct 2023 15:00:59 -0500 Subject: [PATCH 11/12] GH-1784 Use ceil to take the most conservative approach to the user provided offset --- 
plugins/producer_plugin/producer_plugin.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 3b90583acd..08749dd47d 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -615,7 +615,8 @@ class producer_plugin_impl : public std::enable_shared_from_this(std::ceil(produce_block_offset_ms / static_cast(config::producer_repetitions))) ); } fc::microseconds get_produce_block_offset() const { From 015fc81a16f6719848098078b985f1f6f3adf90f Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 20 Oct 2023 15:26:55 -0500 Subject: [PATCH 12/12] GH-1784 Calculate in microseconds as to not lose precision --- plugins/producer_plugin/producer_plugin.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 08749dd47d..e57fe48f54 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -615,8 +615,7 @@ class producer_plugin_impl : public std::enable_shared_from_this(std::ceil(produce_block_offset_ms / static_cast(config::producer_repetitions))) ); + _produce_block_cpu_effort = fc::microseconds(config::block_interval_us - (produce_block_offset_ms*1000 / config::producer_repetitions) ); } fc::microseconds get_produce_block_offset() const {