From e68743ab5b2e2a1472a9eb663822ac3c134a4bba Mon Sep 17 00:00:00 2001
From: Jonathan Giszczak <jonrg@hypercubepc.com>
Date: Tue, 22 Aug 2023 03:16:45 -0500
Subject: [PATCH 01/61] Support throttling block syncing to peers.  WIP

---
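Notes: the throttle is configured per listen endpoint as a rate-cap string such
as "1MiB/s" or "0.5GB/s".  Below is a minimal standalone sketch of that parsing,
mirroring the regex and prefix-multiplier approach used in the net_plugin change
in this patch; the helper name parse_rate_limit and the exact map values here are
illustrative, not the plugin's API.

// Sketch only: turn a rate-cap string ("1MiB/s", "0.5GB/s", "250KB/s") into
// bytes per second.  Unparseable or non-positive input yields 0 (= unthrottled).
#include <cstddef>
#include <iostream>
#include <map>
#include <regex>
#include <sstream>
#include <string>

static const std::map<std::string, double> prefix_multipliers{
   {"", 1},       {"K", 1e3},       {"M", 1e6},          {"G", 1e9},       {"T", 1e12},
   {"Ki", 1024.}, {"Mi", 1048576.}, {"Gi", 1073741824.}, {"Ti", 1099511627776.}
};

std::size_t parse_rate_limit(const std::string& limit_str) {
   std::istringstream in(limit_str);
   double limit = 0;
   in >> limit;                                   // leading number, e.g. 1, 0.5, 250
   if (limit <= 0)
      return 0;
   std::string units;
   in >> units;                                   // trailing units, e.g. "MiB/s"
   static const std::regex units_regex{"([KMGT]?i?)B/s"};
   std::smatch m;
   if (!std::regex_match(units, m, units_regex))
      return 0;                                   // bad specification -> unthrottled
   auto it = prefix_multipliers.find(m[1].str());
   return it == prefix_multipliers.end() ? 0 : static_cast<std::size_t>(limit * it->second);
}

int main() {
   std::cout << parse_rate_limit("1MiB/s")  << "\n";   // 1048576
   std::cout << parse_rate_limit("0.5GB/s") << "\n";   // 500000000
   std::cout << parse_rate_limit("bogus")   << "\n";   // 0 (unthrottled)
}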
 .../include/eosio/net_plugin/net_plugin.hpp   |   1 +
 .../include/eosio/net_plugin/protocol.hpp     |   3 +-
 plugins/net_plugin/net_plugin.cpp             |  93 +++++++++++---
 plugins/prometheus_plugin/metrics.hpp         |   1 +
 tests/CMakeLists.txt                          |   3 +
 tests/p2p_sync_throttle_test.py               | 119 ++++++++++++++++++
 tools/net-util.py                             |  33 ++---
 7 files changed, 223 insertions(+), 30 deletions(-)
 create mode 100755 tests/p2p_sync_throttle_test.py

diff --git a/plugins/net_plugin/include/eosio/net_plugin/net_plugin.hpp b/plugins/net_plugin/include/eosio/net_plugin/net_plugin.hpp
index d0c482e5b1..1db805ac4f 100644
--- a/plugins/net_plugin/include/eosio/net_plugin/net_plugin.hpp
+++ b/plugins/net_plugin/include/eosio/net_plugin/net_plugin.hpp
@@ -54,6 +54,7 @@ namespace eosio {
                std::chrono::nanoseconds last_bytes_received{0};
                size_t bytes_sent{0};
                std::chrono::nanoseconds last_bytes_sent{0};
+               size_t block_sync_bytes_sent{0};
                std::chrono::nanoseconds connection_start_time{0};
                std::string log_p2p_address;
             };
diff --git a/plugins/net_plugin/include/eosio/net_plugin/protocol.hpp b/plugins/net_plugin/include/eosio/net_plugin/protocol.hpp
index 5ca2ba1456..7e292e7bf2 100644
--- a/plugins/net_plugin/include/eosio/net_plugin/protocol.hpp
+++ b/plugins/net_plugin/include/eosio/net_plugin/protocol.hpp
@@ -16,9 +16,10 @@ namespace eosio {
 
    // Longest domain name is 253 characters according to wikipedia.
    // Addresses include ":port" where max port is 65535, which adds 6 chars.
+   // Addresses may also include a ":rate-cap" suffix with units and separators, which adds 30 chars.
    // We also add our own extentions of "[:trx|:blk] - xxxxxxx", which adds 14 chars, total= 273.
    // Allow for future extentions as well, hence 384.
-   constexpr size_t max_p2p_address_length = 253 + 6;
+   constexpr size_t max_p2p_address_length = 253 + 6 + 30;
    constexpr size_t max_handshake_str_length = 384;
 
    struct handshake_message {
diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp
index f7f5d5d51c..0634d21557 100644
--- a/plugins/net_plugin/net_plugin.cpp
+++ b/plugins/net_plugin/net_plugin.cpp
@@ -29,6 +29,7 @@
 #include <cmath>
 #include <memory>
 #include <new>
+#include <regex>
 
 // should be defined for c++17, but clang++16 still has not implemented it
 #ifdef __cpp_lib_hardware_interference_size
@@ -535,7 +536,7 @@ namespace eosio {
       bool in_sync() const;
       fc::logger& get_logger() { return logger; }
 
-      void create_session(tcp::socket&& socket, const string listen_address);
+      void create_session(tcp::socket&& socket, const string listen_address, const string limit);
    };
 
    // peer_[x]log must be called from thread in connection strand
@@ -771,7 +772,7 @@ namespace eosio {
       /// @brief ctor
       /// @param socket created by boost::asio in fc::listener
       /// @param address identifier of listen socket which accepted this new connection
-      explicit connection( tcp::socket&& socket, const string& listen_address );
+      explicit connection( tcp::socket&& socket, const string& listen_address, const string& limit_str );
       ~connection() = default;
 
       connection( const connection& ) = delete;
@@ -800,6 +801,7 @@ namespace eosio {
       std::chrono::nanoseconds get_last_bytes_received() const { return last_bytes_received.load(); }
       size_t get_bytes_sent() const { return bytes_sent.load(); }
       std::chrono::nanoseconds get_last_bytes_sent() const { return last_bytes_sent.load(); }
+      size_t get_block_sync_bytes_sent() const { return block_sync_bytes_sent.load(); }
       boost::asio::ip::port_type get_remote_endpoint_port() const { return remote_endpoint_port.load(); }
       void set_heartbeat_timeout(std::chrono::milliseconds msec) {
          hb_timeout = msec;
@@ -809,6 +811,7 @@ namespace eosio {
 
    private:
       static const string unknown;
+      static const std::map<std::string, int> prefix_multipliers;
 
       std::atomic<uint64_t> peer_ping_time_ns = std::numeric_limits<uint64_t>::max();
 
@@ -827,6 +830,8 @@ namespace eosio {
          blocks_only
       };
 
+      size_t                          block_sync_rate_limit{0};  // bytes/second, default unlimited
+
       std::atomic<connection_types>   connection_type{both};
       std::atomic<uint32_t>           peer_start_block_num{0};
       std::atomic<uint32_t>           peer_head_block_num{0};
@@ -835,6 +840,7 @@ namespace eosio {
       std::atomic<size_t>             bytes_received{0};
       std::atomic<std::chrono::nanoseconds>   last_bytes_received{0ns};
       std::atomic<size_t>             bytes_sent{0};
+      std::atomic<size_t>             block_sync_bytes_sent{0};
       std::atomic<std::chrono::nanoseconds>   last_bytes_sent{0ns};
       std::atomic<boost::asio::ip::port_type> remote_endpoint_port{0};
 
@@ -980,7 +986,7 @@ namespace eosio {
       void stop_send();
 
       void enqueue( const net_message &msg );
-      void enqueue_block( const signed_block_ptr& sb, bool to_sync_queue = false);
+      size_t enqueue_block( const signed_block_ptr& sb, bool to_sync_queue = false);
       void enqueue_buffer( const std::shared_ptr<std::vector<char>>& send_buffer,
                            go_away_reason close_after_send,
                            bool to_sync_queue = false);
@@ -1055,6 +1061,10 @@ namespace eosio {
    }; // class connection
 
    const string connection::unknown = "<unknown>";
+   const std::map<std::string, int> connection::prefix_multipliers{
+      {"",1},{"K",pow(10,3)},{"M",pow(10,6)},{"G",pow(10, 9)},{"T",pow(10, 12)},
+             {"Ki",pow(2,10)},{"Mi",pow(2,20)},{"Gi",pow(2,30)},{"Ti",pow(2,40)}
+   };
 
    // called from connection strand
    struct msg_handler : public fc::visitor<void> {
@@ -1185,7 +1195,7 @@ namespace eosio {
       fc_ilog( logger, "created connection ${c} to ${n}", ("c", connection_id)("n", endpoint) );
    }
 
-   connection::connection(tcp::socket&& s, const string& listen_address)
+   connection::connection(tcp::socket&& s, const string& listen_address, const string& limit_str)
       : listen_address( listen_address ),
         peer_addr(),
         strand( my_impl->thread_pool.get_executor() ),
@@ -1195,6 +1205,27 @@ namespace eosio {
         last_handshake_recv(),
         last_handshake_sent()
    {
+      std::istringstream in(limit_str);
+      fc_dlog( logger, "parsing connection endpoint with locale ${l}", ("l", std::locale("").name()));
+      in.imbue(std::locale(""));
+      double limit{0};
+      in >> limit;
+      if( limit > 0.0f ) {
+         std::string units;
+         in >> units;
+         std::regex units_regex{"([KMGT]?[i]?)B/s"};
+         std::smatch units_match;
+         std::regex_match(units, units_match, units_regex);
+         if( units_match.size() == 2 ) {
+            block_sync_rate_limit = static_cast<size_t>(limit * prefix_multipliers.at(units_match[1].str()));
+            peer_dlog( this, "setting block_sync_rate_limit to ${limit}", ("limit", block_sync_rate_limit));
+         } else {
+            fc_wlog( logger, "listen address ${la} has invalid block sync limit specification, connection will not be throttled", ("la", listen_address));
+            block_sync_rate_limit = 0;
+         }
+      } else {
+         block_sync_rate_limit = static_cast<size_t>(limit);
+      }
       update_endpoints();
       fc_dlog( logger, "new connection object created for peer ${address}:${port} from listener ${addr}", ("address", log_remote_endpoint_ip)("port", log_remote_endpoint_port)("addr", listen_address) );
    }
@@ -1661,7 +1692,7 @@ namespace eosio {
          sb = cc.fetch_block_by_number( num ); // thread-safe
       } FC_LOG_AND_DROP();
       if( sb ) {
-         enqueue_block( sb, true );
+         block_sync_bytes_sent += enqueue_block( sb, true );
       } else {
          peer_ilog( this, "enqueue sync, unable to fetch block ${num}, sending benign_other go away", ("num", num) );
          peer_requested.reset(); // unable to provide requested blocks
@@ -1781,14 +1812,29 @@ namespace eosio {
    }
 
    // called from connection strand
-   void connection::enqueue_block( const signed_block_ptr& b, bool to_sync_queue) {
+   size_t connection::enqueue_block( const signed_block_ptr& b, bool to_sync_queue) {
       peer_dlog( this, "enqueue block ${num}", ("num", b->block_num()) );
       verify_strand_in_this_thread( strand, __func__, __LINE__ );
 
       block_buffer_factory buff_factory;
       auto sb = buff_factory.get_send_buffer( b );
       latest_blk_time = std::chrono::system_clock::now();
-      enqueue_buffer( sb, no_reason, to_sync_queue);
+      if( block_sync_rate_limit > 0 ) {
+         peer_dlog( this, "block_sync_rate_limit is set to ${l}", ("l", block_sync_rate_limit));
+         while( true) {
+            auto elapsed = std::chrono::time_point_cast<std::chrono::nanoseconds>(latest_blk_time) - connection_start_time;
+            auto current_rate = block_sync_bytes_sent / elapsed.time_since_epoch().count();
+            if( current_rate < block_sync_rate_limit ) {
+               enqueue_buffer( sb, no_reason, to_sync_queue);
+               break;
+            }
+            peer_dlog( this, "throttling sending to peer ${remote}", ("remote", log_remote_endpoint_ip));
+            usleep(100);
+         }
+      } else {
+         enqueue_buffer( sb, no_reason, to_sync_queue);
+      }
+      return sb->size();
    }
 
    // called from connection strand
@@ -2719,7 +2765,7 @@ namespace eosio {
    }
 
 
-   void net_plugin_impl::create_session(tcp::socket&& socket, const string listen_address) {
+   void net_plugin_impl::create_session(tcp::socket&& socket, const string listen_address, const string limit) {
       uint32_t                  visitors  = 0;
       uint32_t                  from_addr = 0;
       boost::system::error_code rec;
@@ -2745,7 +2791,7 @@ namespace eosio {
                visitors < connections.get_max_client_count())) {
             fc_ilog(logger, "Accepted new connection: " + paddr_str);
 
-            connection_ptr new_connection = std::make_shared<connection>(std::move(socket), listen_address);
+            connection_ptr new_connection = std::make_shared<connection>(std::move(socket), listen_address, limit);
             new_connection->strand.post([new_connection, this]() {
                if (new_connection->start_session()) {
                   connections.add(new_connection);
@@ -3927,16 +3973,24 @@ namespace eosio {
    void net_plugin::set_program_options( options_description& /*cli*/, options_description& cfg )
    {
       cfg.add_options()
-         ( "p2p-listen-endpoint", bpo::value< vector<string> >()->default_value( vector<string>(1, string("0.0.0.0:9876")) ), "The actual host:port used to listen for incoming p2p connections. May be used multiple times.")
+         ( "p2p-listen-endpoint", bpo::value< vector<string> >()->default_value( vector<string>(1, string("0.0.0.0:9876:0")) ), "The actual host:port used to listen for incoming p2p connections. May be used multiple times. "
+           "Block syncing to all peers connected via the port will be throttled to the specified rate. "
+           "See the 'p2p-peer-address' argument for format details.")
          ( "p2p-server-address", bpo::value< vector<string> >(), "An externally accessible host:port for identifying this node. Defaults to p2p-listen-endpoint. May be used as many times as p2p-listen-endpoint. If provided, the first address will be used in handshakes with other nodes. Otherwise the default is used.")
          ( "p2p-peer-address", bpo::value< vector<string> >()->composing(),
            "The public endpoint of a peer node to connect to. Use multiple p2p-peer-address options as needed to compose a network.\n"
-           "  Syntax: host:port[:<trx>|<blk>]\n"
+           "  Syntax: host:port[:<trx>|<blk>][:<rate-cap>]\n"
            "  The optional 'trx' and 'blk' indicates to node that only transactions 'trx' or blocks 'blk' should be sent."
+           "  The optional rate cap will limit block sync bandwidth to the specified rate.  A number alone will be "
+           "  interpreted as bytes per second.  The number may be suffixed with units.  Supported units are: "
+           "  'B/s', 'KB/s', 'MB/s, 'GB/s', and 'TB/s'. Transactions and blocks outside of sync mode are not throttled."
            "  Examples:\n"
            "    p2p.eos.io:9876\n"
            "    p2p.trx.eos.io:9876:trx\n"
-           "    p2p.blk.eos.io:9876:blk\n")
+           "    p2p.blk.eos.io:9876:blk\n"
+           "    p2p.eos.io:9876:1MB/s\n"
+           "    p2p.blk.eos.io:9876:blk:250KB/s\n"
+           "    p2p.eos.io:9876:0.5GB/s")
          ( "p2p-max-nodes-per-host", bpo::value<int>()->default_value(def_max_nodes_per_host), "Maximum number of client nodes from any single IP address")
          ( "p2p-accept-transactions", bpo::value<bool>()->default_value(true), "Allow transactions received over p2p network to be evaluated and relayed if valid.")
          ( "p2p-auto-bp-peer", bpo::value< vector<string> >()->composing(),
@@ -4192,10 +4246,18 @@ namespace eosio {
 
                std::string extra_listening_log_info =
                      ", max clients is " + std::to_string(my->connections.get_max_client_count());
-                     
+
+               auto listen_addr = address;
+               auto limit = string("0");
+               if( std::count(address.begin(), address.end(), ':') > 1 ) {
+                  auto last_colon_location = address.rfind(':');
+                  listen_addr = std::string(address, 0, last_colon_location);
+                  limit = std::string(address, last_colon_location+1);
+               }
+
                fc::create_listener<tcp>(
-                     my->thread_pool.get_executor(), logger, accept_timeout, address, extra_listening_log_info,
-                     [my = my, addr = p2p_addr](tcp::socket&& socket) { my->create_session(std::move(socket), addr); });
+                     my->thread_pool.get_executor(), logger, accept_timeout, listen_addr, extra_listening_log_info,
+                     [my = my, addr = p2p_addr, limit = limit](tcp::socket&& socket) { fc_dlog( logger, "start listening on ${addr} with peer sync throttle ${limit}", ("addr", addr)("limit", limit)); my->create_session(std::move(socket), addr, limit); });
             } catch (const std::exception& e) {
                fc_elog( logger, "net_plugin::plugin_startup failed to listen on ${addr}, ${what}",
                      ("addr", address)("what", e.what()) );
@@ -4479,6 +4541,7 @@ namespace eosio {
                , .last_bytes_received = (*it)->get_last_bytes_received()
                , .bytes_sent = (*it)->get_bytes_sent()
                , .last_bytes_sent = (*it)->get_last_bytes_sent()
+               , .block_sync_bytes_sent = (*it)->get_block_sync_bytes_sent()
                , .connection_start_time = (*it)->connection_start_time
                , .log_p2p_address = (*it)->log_p2p_address
             };
diff --git a/plugins/prometheus_plugin/metrics.hpp b/plugins/prometheus_plugin/metrics.hpp
index 873f65f3f4..ef82e5f2cb 100644
--- a/plugins/prometheus_plugin/metrics.hpp
+++ b/plugins/prometheus_plugin/metrics.hpp
@@ -190,6 +190,7 @@ struct catalog_type {
          add_and_set_gauge("last_bytes_received", peer.last_bytes_received.count());
          add_and_set_gauge("bytes_sent", peer.bytes_sent);
          add_and_set_gauge("last_bytes_sent", peer.last_bytes_sent.count());
+         add_and_set_gauge("block_sync_bytes_sent", peer.block_sync_bytes_sent);
          add_and_set_gauge("connection_start_time", peer.connection_start_time.count());
          add_and_set_gauge(peer.log_p2p_address, 0); // Empty gauge; we only want the label
       }
diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index 700abde685..e51901b817 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -51,6 +51,7 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/http_plugin_test.py ${CMAKE_CURRENT_B
 configure_file(${CMAKE_CURRENT_SOURCE_DIR}/p2p_high_latency_test.py ${CMAKE_CURRENT_BINARY_DIR}/p2p_high_latency_test.py COPYONLY)
 configure_file(${CMAKE_CURRENT_SOURCE_DIR}/p2p_multiple_listen_test.py ${CMAKE_CURRENT_BINARY_DIR}/p2p_multiple_listen_test.py COPYONLY)
 configure_file(${CMAKE_CURRENT_SOURCE_DIR}/p2p_no_listen_test.py ${CMAKE_CURRENT_BINARY_DIR}/p2p_no_listen_test.py COPYONLY)
+configure_file(${CMAKE_CURRENT_SOURCE_DIR}/p2p_sync_throttle_test.py ${CMAKE_CURRENT_BINARY_DIR}/p2p_sync_throttle_test.py COPYONLY)
 configure_file(${CMAKE_CURRENT_SOURCE_DIR}/compute_transaction_test.py ${CMAKE_CURRENT_BINARY_DIR}/compute_transaction_test.py COPYONLY)
 configure_file(${CMAKE_CURRENT_SOURCE_DIR}/subjective_billing_test.py ${CMAKE_CURRENT_BINARY_DIR}/subjective_billing_test.py COPYONLY)
 configure_file(${CMAKE_CURRENT_SOURCE_DIR}/get_account_test.py ${CMAKE_CURRENT_BINARY_DIR}/get_account_test.py COPYONLY)
@@ -186,6 +187,8 @@ add_test(NAME p2p_multiple_listen_test COMMAND tests/p2p_multiple_listen_test.py
 set_property(TEST p2p_multiple_listen_test PROPERTY LABELS nonparallelizable_tests)
 add_test(NAME p2p_no_listen_test COMMAND tests/p2p_no_listen_test.py -v ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
 set_property(TEST p2p_no_listen_test PROPERTY LABELS nonparallelizable_tests)
+add_test(NAME p2p_sync_throttle_test COMMAND tests/p2p_sync_throttle_test.py -v -d 2 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
+set_property(TEST p2p_sync_throttle_test PROPERTY LABELS nonparallelizable_tests)
 
 # needs iproute-tc or iproute2 depending on platform
 #add_test(NAME p2p_high_latency_test COMMAND tests/p2p_high_latency_test.py -v WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
diff --git a/tests/p2p_sync_throttle_test.py b/tests/p2p_sync_throttle_test.py
new file mode 100755
index 0000000000..13273b6efe
--- /dev/null
+++ b/tests/p2p_sync_throttle_test.py
@@ -0,0 +1,119 @@
+#!/usr/bin/env python3
+
+import signal
+import time
+
+from TestHarness import Cluster, TestHelper, Utils, WalletMgr, CORE_SYMBOL, createAccountKeys
+from TestHarness.TestHelper import AppArgs
+
+###############################################################
+# p2p_sync_throttle_test
+#
+# Test throttling of a peer during block syncing.
+#
+###############################################################
+
+
+Print=Utils.Print
+errorExit=Utils.errorExit
+
+appArgs = AppArgs()
+appArgs.add(flag='--plugin',action='append',type=str,help='Run nodes with additional plugins')
+appArgs.add(flag='--connection-cleanup-period',type=int,help='Interval in whole seconds to run the connection reaper and metric collection')
+
+args=TestHelper.parse_args({"-p","-d","--keep-logs","--prod-count"
+                            ,"--dump-error-details","-v","--leave-running"
+                            ,"--unshared"},
+                            applicationSpecificArgs=appArgs)
+pnodes=args.p
+delay=args.d
+debug=args.v
+prod_count = args.prod_count
+total_nodes=4
+dumpErrorDetails=args.dump_error_details
+
+Utils.Debug=debug
+testSuccessful=False
+
+cluster=Cluster(unshared=args.unshared, keepRunning=args.leave_running, keepLogs=args.keep_logs)
+walletMgr=WalletMgr(True)
+
+try:
+    TestHelper.printSystemInfo("BEGIN")
+
+    cluster.setWalletMgr(walletMgr)
+
+    Print(f'producing nodes: {pnodes}, delay between nodes launch: {delay} second{"s" if delay != 1 else ""}')
+
+    Print("Stand up cluster")
+    if args.plugin:
+        extraNodeosArgs = ''.join([i+j for i,j in zip([' --plugin '] * len(args.plugin), args.plugin)])
+    else:
+        extraNodeosArgs = ''
+    if cluster.launch(pnodes=pnodes, unstartedNodes=2, totalNodes=total_nodes, prodCount=prod_count, topo='line', 
+                      delay=delay, extraNodeosArgs=extraNodeosArgs) is False:
+        errorExit("Failed to stand up eos cluster.")
+
+    prodNode = cluster.getNode(0)
+    nonProdNode = cluster.getNode(1)
+
+    accounts=createAccountKeys(2)
+    if accounts is None:
+        Utils.errorExit("FAILURE - create keys")
+
+    accounts[0].name="tester111111"
+    accounts[1].name="tester222222"
+
+    account1PrivKey = accounts[0].activePrivateKey
+    account2PrivKey = accounts[1].activePrivateKey
+
+    testWalletName="test"
+
+    Print("Creating wallet \"%s\"." % (testWalletName))
+    testWallet=walletMgr.create(testWalletName, [cluster.eosioAccount,accounts[0],accounts[1]])
+
+    # create accounts via eosio as otherwise a bid is needed
+    for account in accounts:
+        Print("Create new account %s via %s" % (account.name, cluster.eosioAccount.name))
+        trans=nonProdNode.createInitializeAccount(account, cluster.eosioAccount, stakedDeposit=0, waitForTransBlock=True, stakeNet=1000, stakeCPU=1000, buyRAM=1000, exitOnError=True)
+        transferAmount="100000000.0000 {0}".format(CORE_SYMBOL)
+        Print("Transfer funds %s from account %s to %s" % (transferAmount, cluster.eosioAccount.name, account.name))
+        nonProdNode.transferFunds(cluster.eosioAccount, account, transferAmount, "test transfer", waitForTransBlock=True)
+        trans=nonProdNode.delegatebw(account, 20000000.0000, 20000000.0000, waitForTransBlock=True, exitOnError=True)
+
+    beginLargeBlocksHeadBlock = nonProdNode.getHeadBlockNum()
+
+    Print("Configure and launch txn generators")
+    targetTpsPerGenerator = 100
+    testTrxGenDurationSec=60
+    trxGeneratorCnt=1
+    cluster.launchTrxGenerators(contractOwnerAcctName=cluster.eosioAccount.name, acctNamesList=[accounts[0].name,accounts[1].name],
+                                acctPrivKeysList=[account1PrivKey,account2PrivKey], nodeId=prodNode.nodeId, tpsPerGenerator=targetTpsPerGenerator,
+                                numGenerators=trxGeneratorCnt, durationSec=testTrxGenDurationSec, waitToComplete=True)
+
+    endLargeBlocksHeadBlock = nonProdNode.getHeadBlockNum()
+
+    throttleNode = cluster.unstartedNodes[0]
+    i = throttleNode.cmd.index('--p2p-listen-endpoint')
+    throttleNode.cmd[i+1] = throttleNode.cmd[i+1] + ':100B/s'
+
+    cluster.biosNode.kill(signal.SIGTERM)
+    clusterStart = time.time()
+    cluster.launchUnstarted(2)
+
+    syncNode = cluster.getNode(3)
+    time.sleep(15)
+    throttleNode.waitForBlock(endLargeBlocksHeadBlock)
+    endUnthrottledSync = time.time()
+    syncNode.waitForBlock(endLargeBlocksHeadBlock)
+    endSync = time.time()
+    Print(f'Unthrottled sync time: {endUnthrottledSync - clusterStart} seconds')
+    Print(f'Throttled sync time: {endSync - clusterStart} seconds')
+    assert endSync - clusterStart > endUnthrottledSync - clusterStart + 10, 'Throttled sync time must be at least 10 seconds greater than unthrottled'
+
+    testSuccessful=True
+finally:
+    TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, dumpErrorDetails=dumpErrorDetails)
+
+exitCode = 0 if testSuccessful else 1
+exit(exitCode)
diff --git a/tools/net-util.py b/tools/net-util.py
index 01d0862749..66f48d8ee4 100755
--- a/tools/net-util.py
+++ b/tools/net-util.py
@@ -35,7 +35,7 @@ def humanReadableBytesPerSecond(bytes: int, telco:bool = False):
     while bytes > power:
         bytes /= power
         n += 1
-    return f'{"~0" if bytes < 0.01 else format(bytes, ".2f")} {labels[n]}B/s'
+    return f'{"-" if bytes == 0.0 else "~0" if bytes < 0.01 else format(bytes, ".2f")} {labels[n]}B/s'
 
 
 class TextSimpleFocusListWalker(urwid.SimpleFocusListWalker):
@@ -165,6 +165,7 @@ def __init__(self):
             ('\nRcv\nRate', 'receiveBandwidthLW'),
             ('Last\nRcv\nTime', 'lastBytesReceivedLW'),
             ('Last\nRcvd\nBlock', 'lastReceivedBlockLW'),
+            ('Blk\nSync\nRate', 'blockSyncBandwidthLW'),
             ('Unique\nFirst\nBlks', 'uniqueFirstBlockCountLW'),
             ('First\nAvail\nBlk', 'firstAvailableBlockLW'),
             ('Last\nAvail\nBlk', 'lastAvailableBlockLW'),
@@ -298,6 +299,7 @@ class bandwidthStats():
                 def __init__(self, bytesReceived=0, bytesSent=0, connectionStarted=0):
                     self.bytesReceived = 0
                     self.bytesSent = 0
+                    self.blockSyncBytesSent = 0
                     self.connectionStarted = 0
             for family in text_string_to_metric_families(response.text):
                 bandwidths = {}
@@ -327,19 +329,20 @@ def __init__(self, bytesReceived=0, bytesSent=0, connectionStarted=0):
                                 host = f'{str(addr.ipv4_mapped) if addr.ipv4_mapped else str(addr)}'
                                 listwalker[startOffset:endOffset] = [AttrMap(Text(host), None, 'reversed')]
                             elif fieldName == 'bytes_received':
-                                bytesReceived = int(sample.value)
                                 stats = bandwidths.get(connID, bandwidthStats())
-                                stats.bytesReceived = bytesReceived
+                                stats.bytesReceived = int(sample.value)
                                 bandwidths[connID] = stats
                             elif fieldName == 'bytes_sent':
-                                bytesSent = int(sample.value)
                                 stats = bandwidths.get(connID, bandwidthStats())
-                                stats.bytesSent = bytesSent
+                                stats.bytesSent = int(sample.value)
+                                bandwidths[connID] = stats
+                            elif fieldName == 'block_sync_bytes_sent':
+                                stats = bandwidths.get(connID, bandwidthStats())
+                                stats.blockSyncBytesSent = int(sample.value)
                                 bandwidths[connID] = stats
                             elif fieldName == 'connection_start_time':
-                                connectionStarted = int(sample.value)
                                 stats = bandwidths.get(connID, bandwidthStats())
-                                stats.connectionStarted = connectionStarted
+                                stats.connectionStarted = int(sample.value)
                                 bandwidths[connID] = stats
                             else:
                                 attrname = fieldName[:1] + fieldName.replace('_', ' ').title().replace(' ', '')[1:] + 'LW'
@@ -363,17 +366,19 @@ def __init__(self, bytesReceived=0, bytesSent=0, connectionStarted=0):
                 else:
                     if sample.name == 'nodeos_p2p_connections':
                         now = time.time_ns()
+                        def updateBandwidth(connectedSeconds, listwalker, byteCount, startOffset, endOffset):
+                            bps = byteCount/connectedSeconds
+                            listwalker[startOffset:endOffset] = [AttrMap(Text(humanReadableBytesPerSecond(bps)), None, 'reversed')]
                         connIDListwalker = getattr(self, 'connectionIDLW')
                         for connID, stats in bandwidths.items():
                             startOffset = connIDListwalker.index(connID)
                             endOffset = startOffset + 1
-                            connected_seconds = (now - stats.connectionStarted)/1000000000
-                            listwalker = getattr(self, 'receiveBandwidthLW')
-                            bps = stats.bytesReceived/connected_seconds
-                            listwalker[startOffset:endOffset] = [AttrMap(Text(humanReadableBytesPerSecond(bps)), None, 'reversed')]
-                            listwalker = getattr(self, 'sendBandwidthLW')
-                            bps = stats.bytesSent/connected_seconds
-                            listwalker[startOffset:endOffset] = [AttrMap(Text(humanReadableBytesPerSecond(bps)), None, 'reversed')]
+                            connectedSeconds = (now - stats.connectionStarted)/1000000000
+                            for listwalkerName, attrName in [('receiveBandwidthLW', 'bytesReceived'),
+                                                              ('sendBandwidthLW', 'bytesSent'),
+                                                              ('blockSyncBandwidthLW', 'blockSyncBytesSent')]:
+                                listwalker = getattr(self, listwalkerName)
+                                updateBandwidth(connectedSeconds, listwalker, getattr(stats, attrName), startOffset, endOffset)
         mainLoop.set_alarm_in(float(self.args.refresh_interval), self.update)
 
 def exitOnQ(key):

From 303c3d625631ef30409cddfd39f6a8a1ca656fb2 Mon Sep 17 00:00:00 2001
From: Jonathan Giszczak <jonrg@hypercubepc.com>
Date: Tue, 22 Aug 2023 19:52:36 -0500
Subject: [PATCH 02/61] Add exponential backoff to throttle.  Fix wretched
 math.

Add necessary custom topology for p2p_sync_throttle_test.
---
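Notes: the backoff here replaces the fixed usleep(100) with a doubling sleep,
starting at 100us and capped at 100ms, while the connection is over its block
sync budget.  A minimal sketch of that loop, with a stand-in predicate in place
of the plugin's rate check (wait_until_under_limit is an illustrative name):

#include <algorithm>
#include <chrono>
#include <functional>
#include <thread>

// Sleep in doubling steps (100us .. 100ms) until the caller's budget predicate
// reports that sending is allowed again.
void wait_until_under_limit(const std::function<bool()>& under_limit) {
   auto sleep_time = std::chrono::microseconds(100);
   constexpr auto max_sleep_time = std::chrono::microseconds(100000);
   while (!under_limit()) {
      std::this_thread::sleep_for(sleep_time);
      sleep_time = std::min(sleep_time * 2, max_sleep_time);
   }
}

int main() {
   // stand-in predicate: pretend the measured rate drops below the limit after ~250ms
   auto deadline = std::chrono::steady_clock::now() + std::chrono::milliseconds(250);
   wait_until_under_limit([&] { return std::chrono::steady_clock::now() >= deadline; });
}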
 plugins/net_plugin/net_plugin.cpp       |  20 ++--
 tests/CMakeLists.txt                    |   1 +
 tests/p2p_sync_throttle_test.py         |   9 +-
 tests/p2p_sync_throttle_test_shape.json | 142 ++++++++++++++++++++++++
 4 files changed, 159 insertions(+), 13 deletions(-)
 create mode 100644 tests/p2p_sync_throttle_test_shape.json

diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp
index 0634d21557..eeb22f6098 100644
--- a/plugins/net_plugin/net_plugin.cpp
+++ b/plugins/net_plugin/net_plugin.cpp
@@ -968,10 +968,11 @@ namespace eosio {
       void send_time(const time_message& msg);
       /** \brief Read system time and convert to a 64 bit integer.
        *
-       * There are five calls to this routine in the program.  One
+       * There are six calls to this routine in the program.  One
        * when a packet arrives from the network, one when a packet
-       * is placed on the send queue, one during start session, and
-       * one each when data is counted as received or sent.
+       * is placed on the send queue, one during start session, one
+       * when a sync block is queued and one each when data is
+       * counted as received or sent.
        * Calls the kernel time of day routine and converts to 
        * a (at least) 64 bit integer.
        */
@@ -1218,7 +1219,7 @@ namespace eosio {
          std::regex_match(units, units_match, units_regex);
          if( units_match.size() == 2 ) {
             block_sync_rate_limit = static_cast<size_t>(limit * prefix_multipliers.at(units_match[1].str()));
-            peer_dlog( this, "setting block_sync_rate_limit to ${limit}", ("limit", block_sync_rate_limit));
+            fc_dlog( logger, "setting block_sync_rate_limit to ${limit}", ("limit", block_sync_rate_limit));
          } else {
             fc_wlog( logger, "listen address ${la} has invalid block sync limit specification, connection will not be throttled", ("la", listen_address));
             block_sync_rate_limit = 0;
@@ -1820,16 +1821,17 @@ namespace eosio {
       auto sb = buff_factory.get_send_buffer( b );
       latest_blk_time = std::chrono::system_clock::now();
       if( block_sync_rate_limit > 0 ) {
-         peer_dlog( this, "block_sync_rate_limit is set to ${l}", ("l", block_sync_rate_limit));
+         int sleep_time_us = 100;
+         const int max_sleep_time_us = 100000;
          while( true) {
-            auto elapsed = std::chrono::time_point_cast<std::chrono::nanoseconds>(latest_blk_time) - connection_start_time;
-            auto current_rate = block_sync_bytes_sent / elapsed.time_since_epoch().count();
+            auto elapsed = std::chrono::duration_cast<std::chrono::seconds>(get_time() - connection_start_time);
+            auto current_rate = double(block_sync_bytes_sent) / elapsed.count();
             if( current_rate < block_sync_rate_limit ) {
                enqueue_buffer( sb, no_reason, to_sync_queue);
                break;
             }
-            peer_dlog( this, "throttling sending to peer ${remote}", ("remote", log_remote_endpoint_ip));
-            usleep(100);
+            usleep(sleep_time_us);
+            sleep_time_us = sleep_time_us*2 > max_sleep_time_us ? max_sleep_time_us : sleep_time_us*2;
          }
       } else {
          enqueue_buffer( sb, no_reason, to_sync_queue);
diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index e51901b817..ba86e17bf2 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -52,6 +52,7 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/p2p_high_latency_test.py ${CMAKE_CURR
 configure_file(${CMAKE_CURRENT_SOURCE_DIR}/p2p_multiple_listen_test.py ${CMAKE_CURRENT_BINARY_DIR}/p2p_multiple_listen_test.py COPYONLY)
 configure_file(${CMAKE_CURRENT_SOURCE_DIR}/p2p_no_listen_test.py ${CMAKE_CURRENT_BINARY_DIR}/p2p_no_listen_test.py COPYONLY)
 configure_file(${CMAKE_CURRENT_SOURCE_DIR}/p2p_sync_throttle_test.py ${CMAKE_CURRENT_BINARY_DIR}/p2p_sync_throttle_test.py COPYONLY)
+configure_file(${CMAKE_CURRENT_SOURCE_DIR}/p2p_sync_throttle_test_shape.json ${CMAKE_CURRENT_BINARY_DIR}/p2p_sync_throttle_test_shape.json COPYONLY)
 configure_file(${CMAKE_CURRENT_SOURCE_DIR}/compute_transaction_test.py ${CMAKE_CURRENT_BINARY_DIR}/compute_transaction_test.py COPYONLY)
 configure_file(${CMAKE_CURRENT_SOURCE_DIR}/subjective_billing_test.py ${CMAKE_CURRENT_BINARY_DIR}/subjective_billing_test.py COPYONLY)
 configure_file(${CMAKE_CURRENT_SOURCE_DIR}/get_account_test.py ${CMAKE_CURRENT_BINARY_DIR}/get_account_test.py COPYONLY)
diff --git a/tests/p2p_sync_throttle_test.py b/tests/p2p_sync_throttle_test.py
index 13273b6efe..4ee9b19fd7 100755
--- a/tests/p2p_sync_throttle_test.py
+++ b/tests/p2p_sync_throttle_test.py
@@ -50,8 +50,9 @@
         extraNodeosArgs = ''.join([i+j for i,j in zip([' --plugin '] * len(args.plugin), args.plugin)])
     else:
         extraNodeosArgs = ''
-    if cluster.launch(pnodes=pnodes, unstartedNodes=2, totalNodes=total_nodes, prodCount=prod_count, topo='line', 
-                      delay=delay, extraNodeosArgs=extraNodeosArgs) is False:
+    if cluster.launch(pnodes=pnodes, unstartedNodes=2, totalNodes=total_nodes, prodCount=prod_count, 
+                      topo='./tests/p2p_sync_throttle_test_shape.json', delay=delay, 
+                      extraNodeosArgs=extraNodeosArgs) is False:
         errorExit("Failed to stand up eos cluster.")
 
     prodNode = cluster.getNode(0)
@@ -95,7 +96,7 @@
 
     throttleNode = cluster.unstartedNodes[0]
     i = throttleNode.cmd.index('--p2p-listen-endpoint')
-    throttleNode.cmd[i+1] = throttleNode.cmd[i+1] + ':100B/s'
+    throttleNode.cmd[i+1] = throttleNode.cmd[i+1] + ':1000B/s'
 
     cluster.biosNode.kill(signal.SIGTERM)
     clusterStart = time.time()
@@ -109,7 +110,7 @@
     endSync = time.time()
     Print(f'Unthrottled sync time: {endUnthrottledSync - clusterStart} seconds')
     Print(f'Throttled sync time: {endSync - clusterStart} seconds')
-    assert endSync - clusterStart > endUnthrottledSync - clusterStart + 10, 'Throttled sync time must be at least 10 seconds greater than unthrottled'
+    assert endSync - clusterStart > endUnthrottledSync - clusterStart + 50, 'Throttled sync time must be at least 50 seconds greater than unthrottled'
 
     testSuccessful=True
 finally:
diff --git a/tests/p2p_sync_throttle_test_shape.json b/tests/p2p_sync_throttle_test_shape.json
new file mode 100644
index 0000000000..4252ab483a
--- /dev/null
+++ b/tests/p2p_sync_throttle_test_shape.json
@@ -0,0 +1,142 @@
+{
+  "name": "testnet_", 
+  "nodes": {
+    "bios": {
+      "index": -100, 
+      "name": "bios", 
+      "keys": [
+        {
+          "pubkey": "EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", 
+          "privkey": "5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3"
+        }
+      ], 
+      "peers": [
+        "testnet_00"
+      ], 
+      "producers": [
+        "eosio"
+      ], 
+      "dont_start": false, 
+      "config_dir_name": "/home/giszczakj/Dev/leap/build/TestLogs/p2p_sync_throttle_test1192292/node_bios", 
+      "data_dir_name": "/home/giszczakj/Dev/leap/build/TestLogs/p2p_sync_throttle_test1192292/node_bios", 
+      "p2p_port": 9776, 
+      "http_port": 8788, 
+      "host_name": "localhost", 
+      "public_name": "localhost", 
+      "listen_addr": "0.0.0.0", 
+      "_dot_label": "localhost:9776\nbios\nprod=eosio"
+    }, 
+    "testnet_00": {
+      "index": 0, 
+      "name": "testnet_00", 
+      "keys": [
+        {
+          "pubkey": "EOS7D6jfN6bbJD9cYheyhnBT4bmUWc3Qf4Yphf5GBeAAy58okcwHU", 
+          "privkey": "5KkmnyunnpCQzgFoLMEtU3j7BRBa5aWmsBNru49ke7LdnZKFhmt"
+        }
+      ], 
+      "peers": [],
+      "producers": [
+        "defproducera", 
+        "defproducerb", 
+        "defproducerc", 
+        "defproducerd", 
+        "defproducere", 
+        "defproducerf", 
+        "defproducerg", 
+        "defproducerh", 
+        "defproduceri", 
+        "defproducerj", 
+        "defproducerk", 
+        "defproducerl", 
+        "defproducerm", 
+        "defproducern", 
+        "defproducero", 
+        "defproducerp", 
+        "defproducerq", 
+        "defproducerr", 
+        "defproducers", 
+        "defproducert", 
+        "defproduceru"
+      ], 
+      "dont_start": false, 
+      "config_dir_name": "/home/giszczakj/Dev/leap/build/TestLogs/p2p_sync_throttle_test1192292/node_00", 
+      "data_dir_name": "/home/giszczakj/Dev/leap/build/TestLogs/p2p_sync_throttle_test1192292/node_00", 
+      "p2p_port": 9876, 
+      "http_port": 8888, 
+      "host_name": "localhost", 
+      "public_name": "localhost", 
+      "listen_addr": "0.0.0.0", 
+      "_dot_label": "localhost:9876\ntestnet_00\nprod=defproducera\ndefproducerb\ndefproducerc\ndefproducerd\ndefproducere\ndefproducerf\ndefproducerg\ndefproducerh\ndefproduceri\ndefproducerj\ndefproducerk\ndefproducerl\ndefproducerm\ndefproducern\ndefproducero\ndefproducerp\ndefproducerq\ndefproducerr\ndefproducers\ndefproducert\ndefproduceru"
+    }, 
+    "testnet_01": {
+      "index": 1, 
+      "name": "testnet_01", 
+      "keys": [
+        {
+          "pubkey": "EOS5tZqxLB8y9q2yHkgcXU4QFBEV6QKN3NQ54ygaFLWHJbjqYzFhw", 
+          "privkey": "5KBs4qR7T8shJjCJUeFQXd77iKrok5TCtZiQhWJpCpc1VRxpNAs"
+        }
+      ], 
+      "peers": [
+        "testnet_00"
+      ], 
+      "producers": [], 
+      "dont_start": false, 
+      "config_dir_name": "/home/giszczakj/Dev/leap/build/TestLogs/p2p_sync_throttle_test1192292/node_01", 
+      "data_dir_name": "/home/giszczakj/Dev/leap/build/TestLogs/p2p_sync_throttle_test1192292/node_01", 
+      "p2p_port": 9877, 
+      "http_port": 8889, 
+      "host_name": "localhost", 
+      "public_name": "localhost", 
+      "listen_addr": "0.0.0.0", 
+      "_dot_label": "localhost:9877\ntestnet_01\nprod=<none>"
+    }, 
+    "testnet_02": {
+      "index": 2, 
+      "name": "testnet_02", 
+      "keys": [
+        {
+          "pubkey": "EOS5FBPf5EN9bYEqmsKfPx9bxyUZ9grDiE24zqLFXtPa6UpVzMjE7", 
+          "privkey": "5HtVDiAsD24seDm5sdswTcdZpx672XbBW9gBkyrzbsj2j9Y9JeC"
+        }
+      ],
+      "peers": [
+        "testnet_01"
+      ], 
+      "producers": [], 
+      "dont_start": true, 
+      "config_dir_name": "/home/giszczakj/Dev/leap/build/TestLogs/p2p_sync_throttle_test1192292/node_02", 
+      "data_dir_name": "/home/giszczakj/Dev/leap/build/TestLogs/p2p_sync_throttle_test1192292/node_02", 
+      "p2p_port": 9878, 
+      "http_port": 8890, 
+      "host_name": "localhost", 
+      "public_name": "localhost", 
+      "listen_addr": "0.0.0.0", 
+      "_dot_label": "localhost:9878\ntestnet_02\nprod=<none>"
+    }, 
+    "testnet_03": {
+      "index": 3, 
+      "name": "testnet_03", 
+      "keys": [
+        {
+          "pubkey": "EOS8XH2gKxsef9zxmMHm4vaSvxQUhg7W4GC3nK2KSRxyYrNG5gZFS", 
+          "privkey": "5JcoRRhDcgm51dkBrRTmErceTqrYhrq22UnmUjTZToMpH91B9N1"
+        }
+      ], 
+      "peers": [
+        "testnet_02"
+      ],
+      "producers": [],
+      "dont_start": true, 
+      "config_dir_name": "/home/giszczakj/Dev/leap/build/TestLogs/p2p_sync_throttle_test1192292/node_03", 
+      "data_dir_name": "/home/giszczakj/Dev/leap/build/TestLogs/p2p_sync_throttle_test1192292/node_03", 
+      "p2p_port": 9879, 
+      "http_port": 8891, 
+      "host_name": "localhost", 
+      "public_name": "localhost", 
+      "listen_addr": "0.0.0.0", 
+      "_dot_label": "localhost:9879\ntestnet_03\nprod=<none>"
+    }
+  }
+}
\ No newline at end of file

From 70b530bb4a750fce2aa3c9037b2bdcb64c7ddb0b Mon Sep 17 00:00:00 2001
From: Jonathan Giszczak <jonrg@hypercubepc.com>
Date: Thu, 24 Aug 2023 14:07:58 -0500
Subject: [PATCH 03/61] Experiment: How many tests fail if waitForObj default
 times out

---
 tests/TestHarness/testUtils.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/tests/TestHarness/testUtils.py b/tests/TestHarness/testUtils.py
index c386953c4c..df320ca2ca 100644
--- a/tests/TestHarness/testUtils.py
+++ b/tests/TestHarness/testUtils.py
@@ -256,6 +256,9 @@ def waitForObj(lam, timeout=None, sleepTime=1, reporter=None):
                 if reporter is not None:
                     reporter()
                 time.sleep(sleepTime)
+            else:
+                if timeout == 60:
+                    raise RuntimeError('waitForObj reached 60 second timeout')
         finally:
             if needsNewLine:
                 Utils.Print()

From b92d84cae994b69cda28332edcc2aa3878533405 Mon Sep 17 00:00:00 2001
From: Jonathan Giszczak <jonrg@hypercubepc.com>
Date: Thu, 24 Aug 2023 16:14:16 -0500
Subject: [PATCH 04/61] Address review comments in net_plugin.

Clarify variable names in p2p throttled sync test and tweak numbers.
Fix p2p throttled test to actually function (waitForBlock has a hidden
default timeout).
Bump up timeout in block_log_util_test.
---
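Notes: among the review changes below, the static_cast used when converting the
parsed limit is replaced with boost::numeric_cast, so an oversized rate
specification degrades to "unthrottled" instead of silently wrapping.  A small
sketch of that guard, assuming only Boost.NumericConversion (the helper name
to_rate_limit_or_zero is illustrative):

#include <boost/numeric/conversion/cast.hpp>
#include <cstddef>
#include <iostream>

// Convert a (possibly huge) bytes-per-second value to size_t; on overflow fall
// back to 0, which the plugin treats as "do not throttle".
std::size_t to_rate_limit_or_zero(double bytes_per_second) {
   try {
      return boost::numeric_cast<std::size_t>(bytes_per_second);
   } catch (const boost::numeric::bad_numeric_cast&) {
      return 0;
   }
}

int main() {
   std::cout << to_rate_limit_or_zero(2.5e6) << "\n";   // 2500000
   std::cout << to_rate_limit_or_zero(1e300) << "\n";   // 0 (overflowed specification)
}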
 plugins/net_plugin/net_plugin.cpp | 74 ++++++++++++++++---------------
 tests/block_log_util_test.py      |  2 +-
 tests/p2p_sync_throttle_test.py   | 22 ++++-----
 3 files changed, 51 insertions(+), 47 deletions(-)

diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp
index eeb22f6098..8496bed571 100644
--- a/plugins/net_plugin/net_plugin.cpp
+++ b/plugins/net_plugin/net_plugin.cpp
@@ -536,7 +536,7 @@ namespace eosio {
       bool in_sync() const;
       fc::logger& get_logger() { return logger; }
 
-      void create_session(tcp::socket&& socket, const string listen_address, const string limit);
+      void create_session(tcp::socket&& socket, const string listen_address, const string& limit);
    };
 
    // peer_[x]log must be called from thread in connection strand
@@ -788,6 +788,7 @@ namespace eosio {
       static std::string state_str(connection_state s);
       const string& peer_address() const { return peer_addr; } // thread safe, const
 
+      void set_connection_limit( const string& limit_str );
       void set_connection_type( const string& peer_addr );
       bool is_transactions_only_connection()const { return connection_type == transactions_only; } // thread safe, atomic
       bool is_blocks_only_connection()const { return connection_type == blocks_only; }
@@ -1206,32 +1207,11 @@ namespace eosio {
         last_handshake_recv(),
         last_handshake_sent()
    {
-      std::istringstream in(limit_str);
-      fc_dlog( logger, "parsing connection endpoint with locale ${l}", ("l", std::locale("").name()));
-      in.imbue(std::locale(""));
-      double limit{0};
-      in >> limit;
-      if( limit > 0.0f ) {
-         std::string units;
-         in >> units;
-         std::regex units_regex{"([KMGT]?[i]?)B/s"};
-         std::smatch units_match;
-         std::regex_match(units, units_match, units_regex);
-         if( units_match.size() == 2 ) {
-            block_sync_rate_limit = static_cast<size_t>(limit * prefix_multipliers.at(units_match[1].str()));
-            fc_dlog( logger, "setting block_sync_rate_limit to ${limit}", ("limit", block_sync_rate_limit));
-         } else {
-            fc_wlog( logger, "listen address ${la} has invalid block sync limit specification, connection will not be throttled", ("la", listen_address));
-            block_sync_rate_limit = 0;
-         }
-      } else {
-         block_sync_rate_limit = static_cast<size_t>(limit);
-      }
+      set_connection_limit(limit_str);
       update_endpoints();
       fc_dlog( logger, "new connection object created for peer ${address}:${port} from listener ${addr}", ("address", log_remote_endpoint_ip)("port", log_remote_endpoint_port)("addr", listen_address) );
    }
 
-   // called from connection strand
    void connection::update_endpoints() {
       boost::system::error_code ec;
       boost::system::error_code ec2;
@@ -1258,6 +1238,30 @@ namespace eosio {
       }
    }
 
+   void connection::set_connection_limit( const std::string& limit_str) {
+      std::istringstream in(limit_str);
+      fc_dlog( logger, "parsing connection endpoint with locale ${l}", ("l", std::locale("").name()));
+      in.imbue(std::locale(""));
+      double limit{0};
+      in >> limit;
+      if( limit > 0.0f ) {
+         std::string units;
+         in >> units;
+         std::regex units_regex{"([KMGT]?[i]?)B/s"};
+         std::smatch units_match;
+         std::regex_match(units, units_match, units_regex);
+         try {
+            block_sync_rate_limit = boost::numeric_cast<size_t>(limit * prefix_multipliers.at(units_match[1].str()));
+            fc_dlog( logger, "setting block_sync_rate_limit to ${limit}", ("limit", block_sync_rate_limit));
+         } catch (boost::numeric::bad_numeric_cast&) {
+            fc_wlog( logger, "listen address ${la} block sync limit specification overflowed, connection will not be throttled", ("la", listen_address));
+            block_sync_rate_limit = 0;
+         }
+      } else {
+         block_sync_rate_limit = 0;
+      }
+   }
+
    // called from connection strand
    void connection::set_connection_type( const std::string& peer_add ) {      
       auto [host, port, type] = split_host_port_type(peer_add);
@@ -2767,7 +2771,7 @@ namespace eosio {
    }
 
 
-   void net_plugin_impl::create_session(tcp::socket&& socket, const string listen_address, const string limit) {
+   void net_plugin_impl::create_session(tcp::socket&& socket, const string listen_address, const string& limit) {
       uint32_t                  visitors  = 0;
       uint32_t                  from_addr = 0;
       boost::system::error_code rec;
@@ -3975,24 +3979,24 @@ namespace eosio {
    void net_plugin::set_program_options( options_description& /*cli*/, options_description& cfg )
    {
       cfg.add_options()
-         ( "p2p-listen-endpoint", bpo::value< vector<string> >()->default_value( vector<string>(1, string("0.0.0.0:9876:0")) ), "The actual host:port used to listen for incoming p2p connections. May be used multiple times. "
-           "Block syncing to all peers connected via the port will be throttled to the specified rate. "
-           "See the 'p2p-peer-address' argument for format details.")
+         ( "p2p-listen-endpoint", bpo::value< vector<string> >()->default_value( vector<string>(1, string("0.0.0.0:9876:0")) ), "The actual host:port[:<rate-cap>] used to listen for incoming p2p connections. May be used multiple times. "
+           "  The optional rate cap will limit block sync bandwidth to the specified rate.  A number alone will be "
+           "  interpreted as bytes per second.  The number may be suffixed with units.  Supported units are: "
+           "  'B/s', 'KB/s', 'MB/s, 'GB/s', 'TB/s', 'KiB/s', 'MiB/s', 'GiB/s', 'TiB/s'."
+           "  Transactions and blocks outside of sync mode are not throttled."
+           "  Examples:\n"
+           "    192.168.0.100:9876:1MiB/s\n"
+           "    node.eos.io:9876:250KB/s\n"
+           "    node.eos.io:9876:0.5GB/s")
          ( "p2p-server-address", bpo::value< vector<string> >(), "An externally accessible host:port for identifying this node. Defaults to p2p-listen-endpoint. May be used as many times as p2p-listen-endpoint. If provided, the first address will be used in handshakes with other nodes. Otherwise the default is used.")
          ( "p2p-peer-address", bpo::value< vector<string> >()->composing(),
            "The public endpoint of a peer node to connect to. Use multiple p2p-peer-address options as needed to compose a network.\n"
-           "  Syntax: host:port[:<trx>|<blk>][:<rate-cap>]\n"
+           "  Syntax: host:port[:<trx>|<blk>]\n"
            "  The optional 'trx' and 'blk' indicates to node that only transactions 'trx' or blocks 'blk' should be sent."
-           "  The optional rate cap will limit block sync bandwidth to the specified rate.  A number alone will be "
-           "  interpreted as bytes per second.  The number may be suffixed with units.  Supported units are: "
-           "  'B/s', 'KB/s', 'MB/s, 'GB/s', and 'TB/s'. Transactions and blocks outside of sync mode are not throttled."
            "  Examples:\n"
            "    p2p.eos.io:9876\n"
            "    p2p.trx.eos.io:9876:trx\n"
-           "    p2p.blk.eos.io:9876:blk\n"
-           "    p2p.eos.io:9876:1MB/s\n"
-           "    p2p.blk.eos.io:9876:blk:250KB/s\n"
-           "    p2p.eos.io:9876:0.5GB/s")
+           "    p2p.blk.eos.io:9876:blk\n")
          ( "p2p-max-nodes-per-host", bpo::value<int>()->default_value(def_max_nodes_per_host), "Maximum number of client nodes from any single IP address")
          ( "p2p-accept-transactions", bpo::value<bool>()->default_value(true), "Allow transactions received over p2p network to be evaluated and relayed if valid.")
          ( "p2p-auto-bp-peer", bpo::value< vector<string> >()->composing(),
diff --git a/tests/block_log_util_test.py b/tests/block_log_util_test.py
index bd7bff144e..042e1467aa 100755
--- a/tests/block_log_util_test.py
+++ b/tests/block_log_util_test.py
@@ -70,7 +70,7 @@ def verifyBlockLog(expected_block_num, trimmedBlockLog):
     node0.kill(signal.SIGTERM)
 
     Print("Wait for node0's head block to become irreversible")
-    node1.waitForBlock(headBlockNum, blockType=BlockType.lib)
+    node1.waitForBlock(headBlockNum, blockType=BlockType.lib, timeout=90)
     infoAfter=node1.getInfo(exitOnError=True)
     headBlockNumAfter=infoAfter["head_block_num"]
 
diff --git a/tests/p2p_sync_throttle_test.py b/tests/p2p_sync_throttle_test.py
index 4ee9b19fd7..380f3558e2 100755
--- a/tests/p2p_sync_throttle_test.py
+++ b/tests/p2p_sync_throttle_test.py
@@ -94,23 +94,23 @@
 
     endLargeBlocksHeadBlock = nonProdNode.getHeadBlockNum()
 
-    throttleNode = cluster.unstartedNodes[0]
-    i = throttleNode.cmd.index('--p2p-listen-endpoint')
-    throttleNode.cmd[i+1] = throttleNode.cmd[i+1] + ':1000B/s'
+    throttlingNode = cluster.unstartedNodes[0]
+    i = throttlingNode.cmd.index('--p2p-listen-endpoint')
+    throttlingNode.cmd[i+1] = throttlingNode.cmd[i+1] + ':40000B/s'
 
     cluster.biosNode.kill(signal.SIGTERM)
     clusterStart = time.time()
     cluster.launchUnstarted(2)
 
-    syncNode = cluster.getNode(3)
+    throttledNode = cluster.getNode(3)
     time.sleep(15)
-    throttleNode.waitForBlock(endLargeBlocksHeadBlock)
-    endUnthrottledSync = time.time()
-    syncNode.waitForBlock(endLargeBlocksHeadBlock)
-    endSync = time.time()
-    Print(f'Unthrottled sync time: {endUnthrottledSync - clusterStart} seconds')
-    Print(f'Throttled sync time: {endSync - clusterStart} seconds')
-    assert endSync - clusterStart > endUnthrottledSync - clusterStart + 50, 'Throttled sync time must be at least 50 seconds greater than unthrottled'
+    assert throttlingNode.waitForBlock(endLargeBlocksHeadBlock), f'Wait for block {endLargeBlocksHeadBlock} on throttling node timed out'
+    endThrottlingSync = time.time()
+    assert throttledNode.waitForBlock(endLargeBlocksHeadBlock, timeout=90), f'Wait for block {endLargeBlocksHeadBlock} on throttled node timed out'
+    endThrottledSync = time.time()
+    Print(f'Unthrottled sync time: {endThrottlingSync - clusterStart} seconds')
+    Print(f'Throttled sync time: {endThrottledSync - clusterStart} seconds')
+    assert endThrottledSync - clusterStart > endThrottlingSync - clusterStart + 30, 'Throttled sync time must be at least 30 seconds greater than unthrottled'
 
     testSuccessful=True
 finally:

From dc54d46bde8367f335305c4f332ca5db503dab8a Mon Sep 17 00:00:00 2001
From: Jonathan Giszczak <jonrg@hypercubepc.com>
Date: Thu, 24 Aug 2023 16:56:33 -0500
Subject: [PATCH 05/61] Further tweak the sync throttle test for machines
 faster than mine.

---
 tests/p2p_sync_throttle_test.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/p2p_sync_throttle_test.py b/tests/p2p_sync_throttle_test.py
index 380f3558e2..6c9d059bf0 100755
--- a/tests/p2p_sync_throttle_test.py
+++ b/tests/p2p_sync_throttle_test.py
@@ -110,7 +110,7 @@
     endThrottledSync = time.time()
     Print(f'Unthrottled sync time: {endThrottlingSync - clusterStart} seconds')
     Print(f'Throttled sync time: {endThrottledSync - clusterStart} seconds')
-    assert endThrottledSync - clusterStart > endThrottlingSync - clusterStart + 30, 'Throttled sync time must be at least 30 seconds greater than unthrottled'
+    assert endThrottledSync - clusterStart > endThrottlingSync - clusterStart + 15, 'Throttled sync time must be at least 15 seconds greater than unthrottled'
 
     testSuccessful=True
 finally:

From 3a508641e8a0db0d32f10110391cb4756802f6ce Mon Sep 17 00:00:00 2001
From: Jonathan Giszczak <jonrg@hypercubepc.com>
Date: Fri, 25 Aug 2023 15:23:32 -0500
Subject: [PATCH 06/61] Move block sync throttling to the correct layer in the
 call stack.

Remove exponential backoff in throttle and utilize existing retry
mechanism.
---
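Notes: with the throttle moved into enqueue_sync_block, the connection checks its
average block-sync send rate before queuing the next block and, when over budget,
simply reports "not sent" without advancing peer_requested->last, so the existing
sync retry drives the next attempt instead of a sleep loop.  A minimal sketch of
that control flow over simplified state; throttle_state, try_enqueue_sync_block
and the send callback are illustrative names, not the plugin's API.

#include <chrono>
#include <cstddef>
#include <iostream>

struct throttle_state {
   std::size_t rate_limit = 0;     // bytes/second, 0 = unlimited
   std::size_t bytes_sent = 0;     // block-sync bytes sent so far
   std::chrono::steady_clock::time_point start = std::chrono::steady_clock::now();
};

// Returns true if the block was handed to the send queue, false if the caller
// should retry later because the connection is over its rate limit.
template <typename SendFn>
bool try_enqueue_sync_block(throttle_state& st, std::size_t block_size, SendFn send_block) {
   if (st.rate_limit > 0) {
      auto elapsed = std::chrono::duration_cast<std::chrono::seconds>(
            std::chrono::steady_clock::now() - st.start);
      double current_rate = elapsed.count() > 0 ? double(st.bytes_sent) / elapsed.count()
                                                : double(st.bytes_sent);
      if (current_rate >= double(st.rate_limit))
         return false;             // over budget: leave the block for the retry path
   }
   send_block();
   st.bytes_sent += block_size;
   return true;
}

int main() {
   throttle_state st;
   st.rate_limit = 1000;           // bytes/second
   bool sent = try_enqueue_sync_block(st, 4096, [] { /* enqueue_buffer(...) would go here */ });
   std::cout << (sent ? "sent\n" : "throttled, retry later\n");
}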
 plugins/net_plugin/net_plugin.cpp | 34 ++++++++++++++-----------------
 1 file changed, 15 insertions(+), 19 deletions(-)

diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp
index 8496bed571..b8bd0e0217 100644
--- a/plugins/net_plugin/net_plugin.cpp
+++ b/plugins/net_plugin/net_plugin.cpp
@@ -1685,7 +1685,7 @@ namespace eosio {
       } else {
          peer_dlog( this, "enqueue sync block ${num}", ("num", peer_requested->last + 1) );
       }
-      uint32_t num = ++peer_requested->last;
+      uint32_t num = peer_requested->last + 1;
       if(num == peer_requested->end_block) {
          peer_requested.reset();
          peer_dlog( this, "completing enqueue_sync_block ${num}", ("num", num) );
@@ -1697,14 +1697,25 @@ namespace eosio {
          sb = cc.fetch_block_by_number( num ); // thread-safe
       } FC_LOG_AND_DROP();
       if( sb ) {
-         block_sync_bytes_sent += enqueue_block( sb, true );
+         if( block_sync_rate_limit > 0 ) {
+            auto elapsed = std::chrono::duration_cast<std::chrono::seconds>(get_time() - connection_start_time);
+            auto current_rate = double(block_sync_bytes_sent) / elapsed.count();
+            if( current_rate < block_sync_rate_limit ) {
+               block_sync_bytes_sent += enqueue_block( sb, true );
+               ++peer_requested->last;
+            } else {
+               return false;
+            }
+         } else {
+            block_sync_bytes_sent += enqueue_block( sb, true );
+            ++peer_requested->last;
+         }
       } else {
          peer_ilog( this, "enqueue sync, unable to fetch block ${num}, sending benign_other go away", ("num", num) );
          peer_requested.reset(); // unable to provide requested blocks
          no_retry = benign_other;
          enqueue( go_away_message( benign_other ) );
       }
-
       return true;
    }
 
@@ -1824,22 +1835,7 @@ namespace eosio {
       block_buffer_factory buff_factory;
       auto sb = buff_factory.get_send_buffer( b );
       latest_blk_time = std::chrono::system_clock::now();
-      if( block_sync_rate_limit > 0 ) {
-         int sleep_time_us = 100;
-         const int max_sleep_time_us = 100000;
-         while( true) {
-            auto elapsed = std::chrono::duration_cast<std::chrono::seconds>(get_time() - connection_start_time);
-            auto current_rate = double(block_sync_bytes_sent) / elapsed.count();
-            if( current_rate < block_sync_rate_limit ) {
-               enqueue_buffer( sb, no_reason, to_sync_queue);
-               break;
-            }
-            usleep(sleep_time_us);
-            sleep_time_us = sleep_time_us*2 > max_sleep_time_us ? max_sleep_time_us : sleep_time_us*2;
-         }
-      } else {
-         enqueue_buffer( sb, no_reason, to_sync_queue);
-      }
+      enqueue_buffer( sb, no_reason, to_sync_queue);
       return sb->size();
    }
 

From e1c1d429207e037ed95d1d42a0fc2756baa5344d Mon Sep 17 00:00:00 2001
From: Jonathan Giszczak <jonrg@hypercubepc.com>
Date: Fri, 25 Aug 2023 17:09:51 -0500
Subject: [PATCH 07/61] Move block sync rate limit parsing to plugin
 initialize.
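
As a rough illustration of what that parsing does, the hypothetical helper
below maps a rate specification such as "250KB/s" or "1MiB/s" to bytes per
second with a regex plus a prefix-multiplier table. It skips the locale-aware
number parsing the plugin performs (so no thousands separators here) and
assumes a 64-bit size_t; parse_rate_limit() is not the plugin's function.

    // sketch only: "<number><prefix>B/s" -> bytes per second
    #include <cstddef>
    #include <iostream>
    #include <map>
    #include <regex>
    #include <stdexcept>
    #include <string>

    std::size_t parse_rate_limit(const std::string& limit_str) {
       static const std::map<std::string, std::size_t> prefix_multipliers{
          {"", 1}, {"K", 1000}, {"M", 1000000}, {"G", 1000000000ull}, {"T", 1000000000000ull},
          {"Ki", 1ull << 10}, {"Mi", 1ull << 20}, {"Gi", 1ull << 30}, {"Ti", 1ull << 40}};
       // number, optional SI/IEC prefix, mandatory "B/s" unit, e.g. "0.5GB/s" or "250KiB/s"
       static const std::regex re{R"(([0-9]*\.?[0-9]+)([KMGT]?i?)B/s)"};
       std::smatch m;
       if (!std::regex_match(limit_str, m, re))
          throw std::invalid_argument("invalid rate limit specification: " + limit_str);
       return static_cast<std::size_t>(std::stod(m[1].str()) * prefix_multipliers.at(m[2].str()));
    }

    int main() {
       for (const auto* s : {"40000B/s", "250KB/s", "1MiB/s", "0.5GB/s"})
          std::cout << s << " -> " << parse_rate_limit(s) << " bytes/s\n";
    }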

---
 plugins/net_plugin/net_plugin.cpp | 69 ++++++++++++++++---------------
 1 file changed, 35 insertions(+), 34 deletions(-)

diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp
index b8bd0e0217..61e9b1a94a 100644
--- a/plugins/net_plugin/net_plugin.cpp
+++ b/plugins/net_plugin/net_plugin.cpp
@@ -476,6 +476,7 @@ namespace eosio {
       std::function<void()> increment_dropped_trxs;
       
    private:
+      static const std::map<std::string, int> prefix_multipliers;
       alignas(hardware_destructive_interference_size)
       mutable fc::mutex             chain_info_mtx; // protects chain_info_t
       chain_info_t                  chain_info GUARDED_BY(chain_info_mtx);
@@ -530,14 +531,15 @@ namespace eosio {
 
       constexpr static uint16_t to_protocol_version(uint16_t v);
 
+      size_t parse_connection_limit(const string& limit_str);
       void plugin_initialize(const variables_map& options);
       void plugin_startup();
       void plugin_shutdown();
       bool in_sync() const;
       fc::logger& get_logger() { return logger; }
 
-      void create_session(tcp::socket&& socket, const string listen_address, const string& limit);
-   };
+      void create_session(tcp::socket&& socket, const string listen_address, size_t limit);
+   }; //net_plugin_impl
 
    // peer_[x]log must be called from thread in connection strand
 #define peer_dlog( PEER, FORMAT, ... ) \
@@ -772,7 +774,7 @@ namespace eosio {
       /// @brief ctor
       /// @param socket created by boost::asio in fc::listener
       /// @param address identifier of listen socket which accepted this new connection
-      explicit connection( tcp::socket&& socket, const string& listen_address, const string& limit_str );
+      explicit connection( tcp::socket&& socket, const string& listen_address, size_t block_sync_rate_limit );
       ~connection() = default;
 
       connection( const connection& ) = delete;
@@ -788,7 +790,6 @@ namespace eosio {
       static std::string state_str(connection_state s);
       const string& peer_address() const { return peer_addr; } // thread safe, const
 
-      void set_connection_limit( const string& limit_str );
       void set_connection_type( const string& peer_addr );
       bool is_transactions_only_connection()const { return connection_type == transactions_only; } // thread safe, atomic
       bool is_blocks_only_connection()const { return connection_type == blocks_only; }
@@ -812,7 +813,6 @@ namespace eosio {
 
    private:
       static const string unknown;
-      static const std::map<std::string, int> prefix_multipliers;
 
       std::atomic<uint64_t> peer_ping_time_ns = std::numeric_limits<uint64_t>::max();
 
@@ -1063,7 +1063,7 @@ namespace eosio {
    }; // class connection
 
    const string connection::unknown = "<unknown>";
-   const std::map<std::string, int> connection::prefix_multipliers{
+   const std::map<std::string, int> net_plugin_impl::prefix_multipliers{
       {"",1},{"K",pow(10,3)},{"M",pow(10,6)},{"G",pow(10, 9)},{"T",pow(10, 12)},
              {"Ki",pow(2,10)},{"Mi",pow(2,20)},{"Gi",pow(2,30)},{"Ti",pow(2,40)}
    };
@@ -1197,9 +1197,10 @@ namespace eosio {
       fc_ilog( logger, "created connection ${c} to ${n}", ("c", connection_id)("n", endpoint) );
    }
 
-   connection::connection(tcp::socket&& s, const string& listen_address, const string& limit_str)
+   connection::connection(tcp::socket&& s, const string& listen_address, size_t block_sync_rate_limit)
       : listen_address( listen_address ),
         peer_addr(),
+        block_sync_rate_limit(block_sync_rate_limit),
         strand( my_impl->thread_pool.get_executor() ),
         socket( new tcp::socket( std::move(s) ) ),
         connection_id( ++my_impl->current_connection_id ),
@@ -1207,7 +1208,6 @@ namespace eosio {
         last_handshake_recv(),
         last_handshake_sent()
    {
-      set_connection_limit(limit_str);
       update_endpoints();
       fc_dlog( logger, "new connection object created for peer ${address}:${port} from listener ${addr}", ("address", log_remote_endpoint_ip)("port", log_remote_endpoint_port)("addr", listen_address) );
    }
@@ -1238,30 +1238,6 @@ namespace eosio {
       }
    }
 
-   void connection::set_connection_limit( const std::string& limit_str) {
-      std::istringstream in(limit_str);
-      fc_dlog( logger, "parsing connection endpoint with locale ${l}", ("l", std::locale("").name()));
-      in.imbue(std::locale(""));
-      double limit{0};
-      in >> limit;
-      if( limit > 0.0f ) {
-         std::string units;
-         in >> units;
-         std::regex units_regex{"([KMGT]?[i]?)B/s"};
-         std::smatch units_match;
-         std::regex_match(units, units_match, units_regex);
-         try {
-            block_sync_rate_limit = boost::numeric_cast<size_t>(limit * prefix_multipliers.at(units_match[1].str()));
-            fc_dlog( logger, "setting block_sync_rate_limit to ${limit}", ("limit", block_sync_rate_limit));
-         } catch (boost::numeric::bad_numeric_cast&) {
-            fc_wlog( logger, "listen address ${la} block sync limit specification overflowed, connection will not be throttled", ("la", listen_address));
-            block_sync_rate_limit = 0;
-         }
-      } else {
-         block_sync_rate_limit = 0;
-      }
-   }
-
    // called from connection strand
    void connection::set_connection_type( const std::string& peer_add ) {      
       auto [host, port, type] = split_host_port_type(peer_add);
@@ -2767,7 +2743,7 @@ namespace eosio {
    }
 
 
-   void net_plugin_impl::create_session(tcp::socket&& socket, const string listen_address, const string& limit) {
+   void net_plugin_impl::create_session(tcp::socket&& socket, const string listen_address, size_t limit) {
       uint32_t                  visitors  = 0;
       uint32_t                  from_addr = 0;
       boost::system::error_code rec;
@@ -4039,6 +4015,29 @@ namespace eosio {
       return fc::json::from_string(s).as<T>();
    }
 
+   size_t net_plugin_impl::parse_connection_limit( const std::string& limit_str) {
+      std::istringstream in(limit_str);
+      fc_dlog( logger, "parsing connection endpoint with locale ${l}", ("l", std::locale("").name()));
+      in.imbue(std::locale(""));
+      double limit{0};
+      in >> limit;
+      size_t block_sync_rate_limit = 0;
+      if( limit > 0.0f ) {
+         std::string units;
+         in >> units;
+         std::regex units_regex{"([KMGT]?[i]?)B/s"};
+         std::smatch units_match;
+         std::regex_match(units, units_match, units_regex);
+         try {
+            block_sync_rate_limit = boost::numeric_cast<size_t>(limit * prefix_multipliers.at(units_match[1].str()));
+            fc_dlog( logger, "setting block_sync_rate_limit to ${limit}", ("limit", block_sync_rate_limit));
+         } catch (boost::numeric::bad_numeric_cast&) {
+            EOS_ASSERT(false, plugin_config_exception, "block sync limit specification overflowed: ${limit}", ("limit", limit_str));
+         }
+      }
+      return block_sync_rate_limit;
+   }
+
    void net_plugin_impl::plugin_initialize( const variables_map& options ) {
       try {
          fc_ilog( logger, "Initialize net plugin" );
@@ -4257,9 +4256,11 @@ namespace eosio {
                   limit = std::string(address, last_colon_location+1);
                }
 
+               auto block_sync_rate_limit = my->parse_connection_limit(limit);
+
                fc::create_listener<tcp>(
                      my->thread_pool.get_executor(), logger, accept_timeout, listen_addr, extra_listening_log_info,
-                     [my = my, addr = p2p_addr, limit = limit](tcp::socket&& socket) { fc_dlog( logger, "start listening on ${addr} with peer sync throttle ${limit}", ("addr", addr)("limit", limit)); my->create_session(std::move(socket), addr, limit); });
+                     [my = my, addr = p2p_addr, block_sync_rate_limit = block_sync_rate_limit](tcp::socket&& socket) { fc_dlog( logger, "start listening on ${addr} with peer sync throttle ${limit}", ("addr", addr)("limit", block_sync_rate_limit)); my->create_session(std::move(socket), addr, block_sync_rate_limit); });
             } catch (const std::exception& e) {
                fc_elog( logger, "net_plugin::plugin_startup failed to listen on ${addr}, ${what}",
                      ("addr", address)("what", e.what()) );

From 92e4e7cefd8098f1da8f2f68158d4248387f57f2 Mon Sep 17 00:00:00 2001
From: Jonathan Giszczak <jonrg@hypercubepc.com>
Date: Tue, 29 Aug 2023 19:24:26 -0500
Subject: [PATCH 08/61] Require IPv6 addresses to be in square bracket format.

Fix parsing and overflow problems and address peer review comments.
Extend throttle test to add another throttle prefix.
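
The bracket rule can be summarised with the hypothetical splitter below: for a
plain host, everything after the second colon is the limit, while an IPv6
listen address must be bracketed so that only colons after ']' are treated as
the port and limit separators. split_listen_and_limit() is illustrative, not
the plugin's code.

    // sketch only: split "host:port[:limit]" / "[v6]:port[:limit]" into address and limit
    #include <algorithm>
    #include <iostream>
    #include <stdexcept>
    #include <string>
    #include <utility>

    std::pair<std::string, std::string> split_listen_and_limit(const std::string& address) {
       std::string listen_addr = address, limit = "0";
       auto last_colon = address.rfind(':');
       if (auto rbracket = address.find(']'); rbracket != std::string::npos) {
          // IPv6 in brackets: only colons after ']' can separate port and limit
          if (std::count(address.begin() + rbracket, address.end(), ':') > 1) {
             listen_addr = address.substr(0, last_colon);
             limit = address.substr(last_colon + 1);
          }
       } else {
          auto colons = std::count(address.begin(), address.end(), ':');
          if (colons > 2)
             throw std::invalid_argument("IPv6 addresses must be enclosed in square brackets: " + address);
          if (colons > 1) {
             listen_addr = address.substr(0, last_colon);
             limit = address.substr(last_colon + 1);
          }
       }
       return {listen_addr, limit};
    }

    int main() {
       for (const auto* a : {"0.0.0.0:9876", "node.eos.io:9876:250KB/s",
                             "[2001:db8::1]:9876:1MiB/s"}) {
          auto [addr, limit] = split_listen_and_limit(a);
          std::cout << a << " -> listen=" << addr << " limit=" << limit << '\n';
       }
    }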
---
 plugins/net_plugin/net_plugin.cpp | 39 +++++++++++++++++++------------
 tests/p2p_sync_throttle_test.py   |  4 ++++
 2 files changed, 28 insertions(+), 15 deletions(-)

diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp
index 61e9b1a94a..565538e6e5 100644
--- a/plugins/net_plugin/net_plugin.cpp
+++ b/plugins/net_plugin/net_plugin.cpp
@@ -476,7 +476,10 @@ namespace eosio {
       std::function<void()> increment_dropped_trxs;
       
    private:
-      static const std::map<std::string, int> prefix_multipliers;
+      inline static const std::map<std::string, size_t> prefix_multipliers{
+         {"",1},{"K",pow(10,3)},{"M",pow(10,6)},{"G",pow(10, 9)},{"T",pow(10, 12)},
+                {"Ki",pow(2,10)},{"Mi",pow(2,20)},{"Gi",pow(2,30)},{"Ti",pow(2,40)}
+      };
       alignas(hardware_destructive_interference_size)
       mutable fc::mutex             chain_info_mtx; // protects chain_info_t
       chain_info_t                  chain_info GUARDED_BY(chain_info_mtx);
@@ -531,7 +534,7 @@ namespace eosio {
 
       constexpr static uint16_t to_protocol_version(uint16_t v);
 
-      size_t parse_connection_limit(const string& limit_str);
+      size_t parse_connection_rate_limit(const string& limit_str);
       void plugin_initialize(const variables_map& options);
       void plugin_startup();
       void plugin_shutdown();
@@ -1063,10 +1066,6 @@ namespace eosio {
    }; // class connection
 
    const string connection::unknown = "<unknown>";
-   const std::map<std::string, int> net_plugin_impl::prefix_multipliers{
-      {"",1},{"K",pow(10,3)},{"M",pow(10,6)},{"G",pow(10, 9)},{"T",pow(10, 12)},
-             {"Ki",pow(2,10)},{"Mi",pow(2,20)},{"Gi",pow(2,30)},{"Ti",pow(2,40)}
-   };
 
    // called from connection strand
    struct msg_handler : public fc::visitor<void> {
@@ -4015,12 +4014,13 @@ namespace eosio {
       return fc::json::from_string(s).as<T>();
    }
 
-   size_t net_plugin_impl::parse_connection_limit( const std::string& limit_str) {
+   size_t net_plugin_impl::parse_connection_rate_limit( const std::string& limit_str) {
       std::istringstream in(limit_str);
-      fc_dlog( logger, "parsing connection endpoint with locale ${l}", ("l", std::locale("").name()));
+      fc_dlog( logger, "parsing connection endpoint limit ${limit} with locale ${l}", ("limit", limit_str)("l", std::locale("").name()));
       in.imbue(std::locale(""));
       double limit{0};
       in >> limit;
+      EOS_ASSERT(limit >= 0, plugin_config_exception, "block sync rate limit must be positive: ${limit}", ("limit", limit_str));
       size_t block_sync_rate_limit = 0;
       if( limit > 0.0f ) {
          std::string units;
@@ -4029,10 +4029,11 @@ namespace eosio {
          std::smatch units_match;
          std::regex_match(units, units_match, units_regex);
          try {
+            EOS_ASSERT(units_match.size() == 2, plugin_config_exception, "invalid block sync rate limit specification: ${limit}", ("limit", units));
             block_sync_rate_limit = boost::numeric_cast<size_t>(limit * prefix_multipliers.at(units_match[1].str()));
-            fc_dlog( logger, "setting block_sync_rate_limit to ${limit}", ("limit", block_sync_rate_limit));
+            fc_dlog( logger, "setting block_sync_rate_limit to ${limit} bytes per second", ("limit", block_sync_rate_limit));
          } catch (boost::numeric::bad_numeric_cast&) {
-            EOS_ASSERT(false, plugin_config_exception, "block sync limit specification overflowed: ${limit}", ("limit", limit_str));
+            EOS_ASSERT(false, plugin_config_exception, "block sync rate limit specification overflowed: ${limit}", ("limit", limit_str));
          }
       }
       return block_sync_rate_limit;
@@ -4250,13 +4251,21 @@ namespace eosio {
 
                auto listen_addr = address;
                auto limit = string("0");
-               if( std::count(address.begin(), address.end(), ':') > 1 ) {
-                  auto last_colon_location = address.rfind(':');
-                  listen_addr = std::string(address, 0, last_colon_location);
-                  limit = std::string(address, last_colon_location+1);
+               auto last_colon_location = address.rfind(':');
+               if( auto right_bracket_location = address.find(']'); right_bracket_location != address.npos ) {
+                  if( std::count(address.begin()+right_bracket_location, address.end(), ':') > 1 ) {
+                     listen_addr = std::string(address, 0, last_colon_location);
+                     limit = std::string(address, last_colon_location+1);
+                  }
+               } else {
+                  if( auto colon_count = std::count(address.begin(), address.end(), ':'); colon_count > 1 ) {
+                     EOS_ASSERT( colon_count <= 2, plugin_config_exception, "Invalid address specification ${addr}; IPv6 addresses must be enclosed in square brackets.", ("addr", address));
+                     listen_addr = std::string(address, 0, last_colon_location);
+                     limit = std::string(address, last_colon_location+1);
+                  }
                }
 
-               auto block_sync_rate_limit = my->parse_connection_limit(limit);
+               auto block_sync_rate_limit = my->parse_connection_rate_limit(limit);
 
                fc::create_listener<tcp>(
                      my->thread_pool.get_executor(), logger, accept_timeout, listen_addr, extra_listening_log_info,
diff --git a/tests/p2p_sync_throttle_test.py b/tests/p2p_sync_throttle_test.py
index 6c9d059bf0..a319d934f3 100755
--- a/tests/p2p_sync_throttle_test.py
+++ b/tests/p2p_sync_throttle_test.py
@@ -96,7 +96,11 @@
 
     throttlingNode = cluster.unstartedNodes[0]
     i = throttlingNode.cmd.index('--p2p-listen-endpoint')
+    throttleListenAddr = throttlingNode.cmd[i+1]
     throttlingNode.cmd[i+1] = throttlingNode.cmd[i+1] + ':40000B/s'
+    throttleListenIP, throttleListenPort = throttleListenAddr.split(':')
+    throttlingNode.cmd.append('--p2p-listen-endpoint')
+    throttlingNode.cmd.append(f'{throttleListenIP}:{int(throttleListenPort)+100}:1TB/s')
 
     cluster.biosNode.kill(signal.SIGTERM)
     clusterStart = time.time()

From 28bb38d2e52a389e68bb761f2a69442a233efff3 Mon Sep 17 00:00:00 2001
From: Jonathan Giszczak <jonrg@hypercubepc.com>
Date: Wed, 30 Aug 2023 16:05:40 -0500
Subject: [PATCH 09/61] Added throttle exception for configured
 p2p-peer-addresses.

Added additional code comments.
Addressed peer review comment.
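
The exemption amounts to zeroing the per-connection limit when the inbound
address matches a configured peer. A simplified stand-in (not the plugin's
create_session() code; split_host() here ignores IPv6 brackets) looks like
this:

    // sketch only: configured p2p-peer-addresses bypass the block sync throttle
    #include <cstddef>
    #include <iostream>
    #include <set>
    #include <string>

    std::string split_host(const std::string& peer_addr) {
       return peer_addr.substr(0, peer_addr.find(':'));
    }

    std::size_t effective_limit(std::size_t listen_limit, const std::string& remote_ip,
                                const std::set<std::string>& supplied_peers) {
       for (const auto& peer : supplied_peers)
          if (split_host(peer) == remote_ip)
             return 0;                   // configured peer: never throttled
       return listen_limit;              // everyone else gets the listen endpoint's cap
    }

    int main() {
       std::set<std::string> supplied{"10.0.0.5:9876", "node.eos.io:9876"};
       std::cout << effective_limit(40000, "10.0.0.5", supplied) << '\n';   // 0, exempt
       std::cout << effective_limit(40000, "10.0.0.9", supplied) << '\n';   // 40000, throttled
    }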
---
 .../include/eosio/net_plugin/protocol.hpp     |  3 +-
 plugins/net_plugin/net_plugin.cpp             | 35 +++++++++++++++----
 tests/p2p_sync_throttle_test.py               | 10 ++++++
 3 files changed, 40 insertions(+), 8 deletions(-)

diff --git a/plugins/net_plugin/include/eosio/net_plugin/protocol.hpp b/plugins/net_plugin/include/eosio/net_plugin/protocol.hpp
index 7e292e7bf2..d37fdbc18d 100644
--- a/plugins/net_plugin/include/eosio/net_plugin/protocol.hpp
+++ b/plugins/net_plugin/include/eosio/net_plugin/protocol.hpp
@@ -16,7 +16,8 @@ namespace eosio {
 
    // Longest domain name is 253 characters according to wikipedia.
    // Addresses include ":port" where max port is 65535, which adds 6 chars.
-   // Addresses may also include ":bitrate" with suffix and separators, which adds 30 chars.
+   // Addresses may also include ":bitrate" with suffix and separators, which adds 30 chars,
+   // for the maximum comma-separated value that fits in a size_t expressed in decimal plus a suffix.
    // We also add our own extentions of "[:trx|:blk] - xxxxxxx", which adds 14 chars, total= 273.
    // Allow for future extentions as well, hence 384.
    constexpr size_t max_p2p_address_length = 253 + 6 + 30;
diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp
index 565538e6e5..0dffbad4a9 100644
--- a/plugins/net_plugin/net_plugin.cpp
+++ b/plugins/net_plugin/net_plugin.cpp
@@ -387,6 +387,9 @@ namespace eosio {
       std::optional<connection_status> status(const string& host) const;
       vector<connection_status> connection_statuses() const;
 
+      template <typename Function>
+      void for_each_supplied_peer(Function&& f) const;
+
       template <typename Function>
       void for_each_connection(Function&& f) const;
 
@@ -1143,6 +1146,11 @@ namespace eosio {
    }
 
 
+   template<typename Function>
+   void connections_manager::for_each_supplied_peer( Function&& f ) const {
+      std::for_each(supplied_peers.begin(), supplied_peers.end(), std::forward<Function>(f));
+   }
+
    template<typename Function>
    void connections_manager::for_each_connection( Function&& f ) const {
       std::shared_lock g( connections_mtx );
@@ -2768,6 +2776,16 @@ namespace eosio {
                visitors < connections.get_max_client_count())) {
             fc_ilog(logger, "Accepted new connection: " + paddr_str);
 
+            connections.for_each_supplied_peer([&listen_address, &paddr_str, &limit](const string& peer_addr) {
+               auto [host, port, type] = split_host_port_type(peer_addr);
+               if (host == paddr_str) {
+                  if (limit > 0) {
+                     fc_dlog(logger, "Connection inbound to ${la} from ${a} is a configured p2p-peer-address and will not be throttled", ("la", listen_address)("a", paddr_str));
+                  }
+                  limit = 0;
+               }
+            });
+
             connection_ptr new_connection = std::make_shared<connection>(std::move(socket), listen_address, limit);
             new_connection->strand.post([new_connection, this]() {
                if (new_connection->start_session()) {
@@ -3951,14 +3969,17 @@ namespace eosio {
    {
       cfg.add_options()
          ( "p2p-listen-endpoint", bpo::value< vector<string> >()->default_value( vector<string>(1, string("0.0.0.0:9876:0")) ), "The actual host:port[:<rate-cap>] used to listen for incoming p2p connections. May be used multiple times. "
-           "  The optional rate cap will limit block sync bandwidth to the specified rate.  A number alone will be "
-           "  interpreted as bytes per second.  The number may be suffixed with units.  Supported units are: "
+           "  The optional rate cap will limit per connection block sync bandwidth to the specified rate.  Total "
+           "  allowed bandwidth is the rate-cap multiplied by the connection count limit.  A number alone will be "
+           "  interpreted as bytes per second.  The number is parsed locale-aware and may include thousands and "
+           "  decimal separators.  It may also be suffixed with units.  Supported units are: "
            "  'B/s', 'KB/s', 'MB/s, 'GB/s', 'TB/s', 'KiB/s', 'MiB/s', 'GiB/s', 'TiB/s'."
            "  Transactions and blocks outside of sync mode are not throttled."
            "  Examples:\n"
            "    192.168.0.100:9876:1MiB/s\n"
-           "    node.eos.io:9876:250KB/s\n"
-           "    node.eos.io:9876:0.5GB/s")
+           "    node.eos.io:9876:1,512KB/s\n"
+           "    node.eos.io:9876:0.5GB/s\n"
+           "    [2001:db8:85a3:8d3:1319:8a2e:370:7348]:9876:250KB/s")
          ( "p2p-server-address", bpo::value< vector<string> >(), "An externally accessible host:port for identifying this node. Defaults to p2p-listen-endpoint. May be used as many times as p2p-listen-endpoint. If provided, the first address will be used in handshakes with other nodes. Otherwise the default is used.")
          ( "p2p-peer-address", bpo::value< vector<string> >()->composing(),
            "The public endpoint of a peer node to connect to. Use multiple p2p-peer-address options as needed to compose a network.\n"
@@ -4020,9 +4041,9 @@ namespace eosio {
       in.imbue(std::locale(""));
       double limit{0};
       in >> limit;
-      EOS_ASSERT(limit >= 0, plugin_config_exception, "block sync rate limit must be positive: ${limit}", ("limit", limit_str));
+      EOS_ASSERT(limit >= 0.0, plugin_config_exception, "block sync rate limit must be positive: ${limit}", ("limit", limit_str));
       size_t block_sync_rate_limit = 0;
-      if( limit > 0.0f ) {
+      if( limit > 0.0 ) {
          std::string units;
          in >> units;
          std::regex units_regex{"([KMGT]?[i]?)B/s"};
@@ -4033,7 +4054,7 @@ namespace eosio {
             block_sync_rate_limit = boost::numeric_cast<size_t>(limit * prefix_multipliers.at(units_match[1].str()));
             fc_dlog( logger, "setting block_sync_rate_limit to ${limit} bytes per second", ("limit", block_sync_rate_limit));
          } catch (boost::numeric::bad_numeric_cast&) {
-            EOS_ASSERT(false, plugin_config_exception, "block sync rate limit specification overflowed: ${limit}", ("limit", limit_str));
+            EOS_THROW(plugin_config_exception, "block sync rate limit specification overflowed: ${limit}", ("limit", limit_str));
          }
       }
       return block_sync_rate_limit;
diff --git a/tests/p2p_sync_throttle_test.py b/tests/p2p_sync_throttle_test.py
index a319d934f3..0de560d44e 100755
--- a/tests/p2p_sync_throttle_test.py
+++ b/tests/p2p_sync_throttle_test.py
@@ -50,6 +50,8 @@
         extraNodeosArgs = ''.join([i+j for i,j in zip([' --plugin '] * len(args.plugin), args.plugin)])
     else:
         extraNodeosArgs = ''
+    # Custom topology is a line of singly connected nodes from highest node number in sequence to lowest,
+    # the reverse of the usual TestHarness line topology.
     if cluster.launch(pnodes=pnodes, unstartedNodes=2, totalNodes=total_nodes, prodCount=prod_count, 
                       topo='./tests/p2p_sync_throttle_test_shape.json', delay=delay, 
                       extraNodeosArgs=extraNodeosArgs) is False:
@@ -97,6 +99,9 @@
     throttlingNode = cluster.unstartedNodes[0]
     i = throttlingNode.cmd.index('--p2p-listen-endpoint')
     throttleListenAddr = throttlingNode.cmd[i+1]
+    # Using 40000 bytes per second to allow syncing of 10,000 byte blocks resulting from
+    # the trx generators in a reasonable amount of time, while still being reliably
+    # distinguishable from unthrottled throughput.
     throttlingNode.cmd[i+1] = throttlingNode.cmd[i+1] + ':40000B/s'
     throttleListenIP, throttleListenPort = throttleListenAddr.split(':')
     throttlingNode.cmd.append('--p2p-listen-endpoint')
@@ -108,12 +113,17 @@
 
     throttledNode = cluster.getNode(3)
     time.sleep(15)
+    # Throttling node was offline during block generation and once online receives blocks as fast as possible while
+    # transmitting blocks to the next node in line at the above throttle setting.
     assert throttlingNode.waitForBlock(endLargeBlocksHeadBlock), f'wait for block {endLargeBlocksHeadBlock}  on throttled node timed out'
     endThrottlingSync = time.time()
+    # Throttled node is connecting to a listen port with a block sync throttle applied so it will receive
+    # blocks more slowly during syncing than an unthrottled node.
     assert throttledNode.waitForBlock(endLargeBlocksHeadBlock, timeout=90), f'Wait for block {endLargeBlocksHeadBlock} on sync node timed out'
     endThrottledSync = time.time()
     Print(f'Unthrottled sync time: {endThrottlingSync - clusterStart} seconds')
     Print(f'Throttled sync time: {endThrottledSync - clusterStart} seconds')
+    # 15 seconds chosen as the minimum reasonable sync time differential given the throttle and the average block size.
     assert endThrottledSync - clusterStart > endThrottlingSync - clusterStart + 15, 'Throttled sync time must be at least 15 seconds greater than unthrottled'
 
     testSuccessful=True

From e998cc6ecdfd510a5cf63d89a2f7e819b19ed6e8 Mon Sep 17 00:00:00 2001
From: Jonathan Giszczak <jonrg@hypercubepc.com>
Date: Mon, 11 Sep 2023 11:27:26 -0500
Subject: [PATCH 10/61] Debug commit, doesn't build.

---
 .../eosio/net_plugin/auto_bp_peering.hpp      |   6 +-
 plugins/net_plugin/net_plugin.cpp             | 267 +++++++++++-------
 .../tests/auto_bp_peering_unittest.cpp        |  44 +--
 3 files changed, 185 insertions(+), 132 deletions(-)

diff --git a/plugins/net_plugin/include/eosio/net_plugin/auto_bp_peering.hpp b/plugins/net_plugin/include/eosio/net_plugin/auto_bp_peering.hpp
index b5122f80aa..8a4f736680 100644
--- a/plugins/net_plugin/include/eosio/net_plugin/auto_bp_peering.hpp
+++ b/plugins/net_plugin/include/eosio/net_plugin/auto_bp_peering.hpp
@@ -145,7 +145,7 @@ class bp_connection_manager {
    // Only called from connection strand
    std::size_t num_established_clients() const {
       uint32_t num_clients = 0;
-      self()->connections.for_each_connection([&num_clients](auto&& conn) {
+      self()->connections.for_each_connection([&num_clients](const std::shared_ptr<Connection>& conn) {
          if (established_client_connection(conn)) {
             ++num_clients;
          }
@@ -157,7 +157,7 @@ class bp_connection_manager {
    // Only called from connection strand
    // This should only be called after the first handshake message is received to check if an incoming connection
    // has exceeded the pre-configured max_client_count limit.
-   bool exceeding_connection_limit(Connection* new_connection) const {
+   bool exceeding_connection_limit(std::shared_ptr<Connection> new_connection) const {
       return auto_bp_peering_enabled() && self()->connections.get_max_client_count() != 0 &&
              established_client_connection(new_connection) && num_established_clients() > self()->connections.get_max_client_count();
    }
@@ -182,7 +182,7 @@ class bp_connection_manager {
 
                fc_dlog(self()->get_logger(), "pending_downstream_neighbors: ${pending_downstream_neighbors}",
                        ("pending_downstream_neighbors", to_string(pending_downstream_neighbors)));
-               for (auto neighbor : pending_downstream_neighbors) { self()->connections.connect(config.bp_peer_addresses[neighbor], *self()->p2p_addresses.begin() ); }
+               for (auto neighbor : pending_downstream_neighbors) { self()->connections.resolve_and_connect(config.bp_peer_addresses[neighbor], *self()->p2p_addresses.begin() ); }
 
                pending_neighbors = std::move(pending_downstream_neighbors);
                finder.add_upstream_neighbors(pending_neighbors);
diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp
index 0dffbad4a9..5021096ced 100644
--- a/plugins/net_plugin/net_plugin.cpp
+++ b/plugins/net_plugin/net_plugin.cpp
@@ -24,6 +24,7 @@
 #include <boost/asio/ip/tcp.hpp>
 #include <boost/asio/ip/host_name.hpp>
 #include <boost/asio/steady_timer.hpp>
+#include <boost/multi_index/key.hpp>
 
 #include <atomic>
 #include <cmath>
@@ -69,6 +70,7 @@ namespace eosio {
    using boost::asio::ip::address_v4;
    using boost::asio::ip::host_name;
    using boost::multi_index_container;
+   using namespace boost::multi_index;
 
    using fc::time_point;
    using fc::time_point_sec;
@@ -334,9 +336,37 @@ namespace eosio {
    constexpr uint32_t packed_transaction_which = fc::get_index<net_message, packed_transaction>(); // see protocol net_message
 
    class connections_manager {
+   public:
+      struct connection_details {
+         std::string host;
+         connection_ptr c;
+         tcp::endpoint active_ip;
+         std::vector<tcp::endpoint> ips;
+         operator const connection_ptr&() const { return c; }
+      };
+
+      using connection_details_index = multi_index_container<
+         connection_details,
+         indexed_by<
+            ordered_unique<
+               tag<struct by_host>,
+               key<&connection_details::host>
+            >,
+            ordered_unique<
+               tag<struct by_connection>,
+               key<&connection_details::c>
+            >,
+            ordered_non_unique<
+               tag<struct by_active_ip>,
+               key<&connection_details::active_ip>
+            >
+         >
+      >;
+   private:
       alignas(hardware_destructive_interference_size)
       mutable std::shared_mutex        connections_mtx;
-      chain::flat_set<connection_ptr>  connections;
+      //chain::flat_set<connection_ptr>  connections GUARDED_BY(connections_mtx);
+      connection_details_index         peer_ips GUARDED_BY(connections_mtx);
       chain::flat_set<string>          supplied_peers;
 
       alignas(hardware_destructive_interference_size)
@@ -352,8 +382,6 @@ namespace eosio {
 
    private: // must call with held mutex
       connection_ptr find_connection_i(const string& host) const;
-      void add_i(connection_ptr&& c);
-      void connect_i(const string& peer, const string& p2p_address);
 
       void connection_monitor(const std::weak_ptr<connection>& from_connection);
 
@@ -380,7 +408,7 @@ namespace eosio {
       void stop_conn_timer();
 
       void add(connection_ptr c);
-      string connect(const string& host, const string& p2p_address);
+      string resolve_and_connect(const string& host, const string& p2p_address);
       string disconnect(const string& host);
       void close_all();
 
@@ -401,7 +429,7 @@ namespace eosio {
 
       template <typename UnaryPredicate>
       bool any_of_block_connections(UnaryPredicate&& p) const;
-   };
+   }; // connections_manager
 
    class net_plugin_impl : public std::enable_shared_from_this<net_plugin_impl>,
                            public auto_bp_peering::bp_connection_manager<net_plugin_impl, connection> {
@@ -829,7 +857,6 @@ namespace eosio {
 
       std::atomic<connection_state> conn_state{connection_state::connecting};
 
-      string                  listen_address; // address sent to peer in handshake
       const string            peer_addr;
       enum connection_types : char {
          both,
@@ -862,6 +889,7 @@ namespace eosio {
 
       fc::sha256              conn_node_id;
       string                  short_conn_node_id;
+      string                  listen_address; // address sent to peer in handshake
       string                  log_p2p_address;
       string                  log_remote_endpoint_ip;
       string                  log_remote_endpoint_port;
@@ -940,13 +968,16 @@ namespace eosio {
 
       bool process_next_block_message(uint32_t message_length);
       bool process_next_trx_message(uint32_t message_length);
-      void update_endpoints();
+      void update_endpoints(const tcp::endpoint& endpoint = tcp::endpoint());
    public:
 
       bool populate_handshake( handshake_message& hello ) const;
 
-      bool resolve_and_connect();
-      void connect( const std::shared_ptr<tcp::resolver>& resolver, const tcp::resolver::results_type& endpoints );
+//      bool connect();
+      //typedef boost:multi_index::index<connections_manager,
+      void connect( const tcp::resolver::results_type& endpoints,
+                    connections_manager::connection_details_index& connections,
+                    connections_manager::connection_details_index::const_iterator conn_details );
       void start_read_message();
 
       /** \brief Process the next message from the pending message buffer
@@ -1148,19 +1179,22 @@ namespace eosio {
 
    template<typename Function>
    void connections_manager::for_each_supplied_peer( Function&& f ) const {
+      std::shared_lock g( connections_mtx );
       std::for_each(supplied_peers.begin(), supplied_peers.end(), std::forward<Function>(f));
    }
 
    template<typename Function>
    void connections_manager::for_each_connection( Function&& f ) const {
       std::shared_lock g( connections_mtx );
-      std::for_each(connections.begin(), connections.end(), std::forward<Function>(f));
+      auto& index = peer_ips.get<by_host>();
+      std::for_each(index.begin(), index.end(), std::forward<Function>(f));
    }
 
    template<typename Function>
    void connections_manager::for_each_block_connection( Function&& f ) const {
       std::shared_lock g( connections_mtx );
-      for( auto& c : connections ) {
+      auto& index = peer_ips.get<by_host>();
+      for( const connection_ptr& c : index ) {
          if (c->is_blocks_connection()) {
             f(c);
          }
@@ -1170,13 +1204,15 @@ namespace eosio {
    template <typename UnaryPredicate>
    bool connections_manager::any_of_connections(UnaryPredicate&& p) const {
       std::shared_lock g(connections_mtx);
-      return std::any_of(connections.cbegin(), connections.cend(), std::forward<UnaryPredicate>(p));
+      auto& index = peer_ips.get<by_host>();
+      return std::any_of(index.cbegin(), index.cend(), std::forward<UnaryPredicate>(p));
    }
 
    template <typename UnaryPredicate>
    bool connections_manager::any_of_block_connections(UnaryPredicate&& p) const {
       std::shared_lock g( connections_mtx );
-      for( auto& c : connections ) {
+      auto& index = peer_ips.get<by_host>();
+      for( const connection_ptr& c : index ) {
          if( c->is_blocks_connection() ) {
             if (p(c))
               return true;
@@ -1189,10 +1225,10 @@ namespace eosio {
    //---------------------------------------------------------------------------
 
    connection::connection( const string& endpoint, const string& listen_address )
-      : listen_address( listen_address ),
-        peer_addr( endpoint ),
+      : peer_addr( endpoint ),
         strand( my_impl->thread_pool.get_executor() ),
         socket( new tcp::socket( my_impl->thread_pool.get_executor() ) ),
+        listen_address( listen_address ),
         log_p2p_address( endpoint ),
         connection_id( ++my_impl->current_connection_id ),
         response_expected_timer( my_impl->thread_pool.get_executor() ),
@@ -1205,11 +1241,11 @@ namespace eosio {
    }
 
    connection::connection(tcp::socket&& s, const string& listen_address, size_t block_sync_rate_limit)
-      : listen_address( listen_address ),
-        peer_addr(),
+      : peer_addr(),
         block_sync_rate_limit(block_sync_rate_limit),
         strand( my_impl->thread_pool.get_executor() ),
         socket( new tcp::socket( std::move(s) ) ),
+        listen_address( listen_address ),
         connection_id( ++my_impl->current_connection_id ),
         response_expected_timer( my_impl->thread_pool.get_executor() ),
         last_handshake_recv(),
@@ -1219,10 +1255,10 @@ namespace eosio {
       fc_dlog( logger, "new connection object created for peer ${address}:${port} from listener ${addr}", ("address", log_remote_endpoint_ip)("port", log_remote_endpoint_port)("addr", listen_address) );
    }
 
-   void connection::update_endpoints() {
+   void connection::update_endpoints(const tcp::endpoint& endpoint) {
       boost::system::error_code ec;
       boost::system::error_code ec2;
-      auto rep = socket->remote_endpoint(ec);
+      auto rep = endpoint == tcp::endpoint() ? socket->remote_endpoint(ec) : endpoint;
       auto lep = socket->local_endpoint(ec2);
       remote_endpoint_port = ec ? 0 : rep.port();
       log_remote_endpoint_ip = ec ? unknown : rep.address().to_string();
@@ -2099,7 +2135,7 @@ namespace eosio {
 
    // static, thread safe
    void sync_manager::send_handshakes() {
-      my_impl->connections.for_each_connection( []( auto& ci ) {
+      my_impl->connections.for_each_connection( []( const connection_ptr& ci ) {
          if( ci->current() ) {
             ci->send_handshake();
          }
@@ -2575,7 +2611,7 @@ namespace eosio {
    void dispatch_manager::bcast_transaction(const packed_transaction_ptr& trx) {
       trx_buffer_factory buff_factory;
       const fc::time_point_sec now{fc::time_point::now()};
-      my_impl->connections.for_each_connection( [this, &trx, &now, &buff_factory]( auto& cp ) {
+      my_impl->connections.for_each_connection( [this, &trx, &now, &buff_factory]( const connection_ptr& cp ) {
          if( !cp->is_transactions_connection() || !cp->current() ) {
             return;
          }
@@ -2674,7 +2710,8 @@ namespace eosio {
    //------------------------------------------------------------------------
 
    // called from any thread
-   bool connection::resolve_and_connect() {
+#if 0
+   bool connection::connect() {
       switch ( no_retry ) {
          case no_reason:
          case wrong_version:
@@ -2686,12 +2723,6 @@ namespace eosio {
             return false;
       }
 
-      string::size_type colon = peer_address().find(':');
-      if (colon == std::string::npos || colon == 0) {
-         fc_elog( logger, "Invalid peer address. must be \"host:port[:<blk>|<trx>]\": ${p}", ("p", peer_address()) );
-         return false;
-      }
-
       connection_ptr c = shared_from_this();
 
       if( consecutive_immediate_connection_close > def_max_consecutive_immediate_connection_close || no_retry == benign_other ) {
@@ -2703,17 +2734,15 @@ namespace eosio {
       }
 
       strand.post([c]() {
-         auto [host, port, type] = split_host_port_type(c->peer_address());
          c->set_connection_type( c->peer_address() );
 
-         auto resolver = std::make_shared<tcp::resolver>( my_impl->thread_pool.get_executor() );
          connection_wptr weak_conn = c;
          resolver->async_resolve(host, port, boost::asio::bind_executor( c->strand,
             [resolver, weak_conn, host = host, port = port]( const boost::system::error_code& err, const tcp::resolver::results_type& endpoints ) {
                auto c = weak_conn.lock();
                if( !c ) return;
                if( !err ) {
-                  c->connect( resolver, endpoints );
+                  c->connect( endpoints );
                } else {
                   fc_elog( logger, "Unable to resolve ${host}:${port} ${error}",
                            ("host", host)("port", port)( "error", err.message() ) );
@@ -2724,17 +2753,23 @@ namespace eosio {
       } );
       return true;
    }
-
+#endif
    // called from connection strand
-   void connection::connect( const std::shared_ptr<tcp::resolver>& resolver, const tcp::resolver::results_type& endpoints ) {
+   void connection::connect( const tcp::resolver::results_type& endpoints, 
+                             connections_manager::connection_details_index& connections,
+                             connections_manager::connection_details_index::const_iterator conn_details ) {
       set_state(connection_state::connecting);
       pending_message_buffer.reset();
       buffer_queue.clear_out_queue();
       boost::asio::async_connect( *socket, endpoints,
          boost::asio::bind_executor( strand,
-               [resolver, c = shared_from_this(), socket=socket]( const boost::system::error_code& err, const tcp::endpoint& endpoint ) {
+               [c = shared_from_this(), socket=socket, connections, conn_details]( const boost::system::error_code& err, const tcp::endpoint& endpoint ) {
             if( !err && socket->is_open() && socket == c->socket ) {
-               c->update_endpoints();
+               auto& index = connections.get<by_active_ip>();
+               index.modify_key(connections.project<by_active_ip>(conn_details), [endpoint](tcp::endpoint& e) {
+                  e = endpoint;
+               });
+               c->update_endpoints(endpoint);
                if( c->start_session() ) {
                   c->send_handshake();
                   c->send_time();
@@ -2760,7 +2795,7 @@ namespace eosio {
          fc_elog(logger, "Error getting remote endpoint: ${m}", ("m", rec.message()));
       } else {
          paddr_str        = paddr_add.to_string();
-         connections.for_each_connection([&visitors, &from_addr, &paddr_str](auto& conn) {
+         connections.for_each_connection([&visitors, &from_addr, &paddr_str](const connection_ptr& conn) {
             if (conn->socket_is_open()) {
                if (conn->peer_address().empty()) {
                   ++visitors;
@@ -3216,7 +3251,7 @@ namespace eosio {
          log_p2p_address = msg.p2p_address;
 
          my_impl->mark_bp_connection(this);
-         if (my_impl->exceeding_connection_limit(this)) {
+         if (my_impl->exceeding_connection_limit(shared_from_this())) {
             // When auto bp peering is enabled, create_session() check doesn't have enough information to determine
             // if a client is a BP peer. In create_session(), it only has the peer address which a node is connecting
             // from, but it would be different from the address it is listening. The only way to make sure is when the
@@ -3233,7 +3268,7 @@ namespace eosio {
                set_connection_type( msg.p2p_address );
 
             peer_dlog( this, "checking for duplicate" );
-            auto is_duplicate = [&](const auto& check) {
+            auto is_duplicate = [&](const connection_ptr& check) {
                if(check.get() == this)
                   return false;
                fc::unique_lock g_check_conn( check->conn_mtx );
@@ -3787,7 +3822,7 @@ namespace eosio {
             }
 
             auto current_time = std::chrono::system_clock::now();
-            my->connections.for_each_connection( [current_time]( auto& c ) {
+            my->connections.for_each_connection( [current_time]( const connection_ptr& c ) {
                if( c->socket_is_open() ) {
                   c->strand.post([c, current_time]() {
                      c->check_heartbeat(current_time);
@@ -4340,7 +4375,7 @@ namespace eosio {
 
    /// RPC API
    string net_plugin::connect( const string& host ) {
-      return my->connections.connect( host, *my->p2p_addresses.begin() );
+      return my->connections.resolve_and_connect( host, *my->p2p_addresses.begin() );
    }
 
    /// RPC API
@@ -4386,7 +4421,7 @@ namespace eosio {
 
    size_t connections_manager::number_connections() const {
       std::lock_guard g(connections_mtx);
-      return connections.size();
+      return peer_ips.size();
    }
 
    void connections_manager::add_supplied_peers(const vector<string>& peers ) {
@@ -4415,9 +4450,8 @@ namespace eosio {
    }
 
    void connections_manager::connect_supplied_peers(const string& p2p_address) {
-      std::lock_guard g(connections_mtx);
       for (const auto& peer : supplied_peers) {
-         connect_i(peer, p2p_address);
+         resolve_and_connect(peer, p2p_address);
       }
    }
 
@@ -4427,23 +4461,54 @@ namespace eosio {
    }
 
    // called by API
-   string connections_manager::connect( const string& host, const string& p2p_address ) {
+   string connections_manager::resolve_and_connect( const string& peer_address, const string& listen_address ) {
+      string::size_type colon = peer_address.find(':');
+      if (colon == std::string::npos || colon == 0) {
+         fc_elog( logger, "Invalid peer address. must be \"host:port[:<blk>|<trx>]\": ${p}", ("p", peer_address) );
+         return "invalid peer address";
+      }
+
       std::lock_guard g( connections_mtx );
-      if( find_connection_i( host ) )
+      if( find_connection_i( peer_address ) )
          return "already connected";
 
-      connect_i( host, p2p_address );
-      supplied_peers.insert(host);
+      supplied_peers.insert(peer_address);
+      auto [host, port, type] = split_host_port_type(peer_address);
+
+      auto resolver = std::make_shared<tcp::resolver>( my_impl->thread_pool.get_executor() );
+
+      resolver->async_resolve(host, port, 
+         [resolver, host = host, port = port, peer_address = peer_address, listen_address = listen_address, this]( const boost::system::error_code& err, const tcp::resolver::results_type& results ) {
+            connection_ptr c = std::make_shared<connection>( peer_address, listen_address );
+            c->set_heartbeat_timeout( heartbeat_timeout );
+            vector<tcp::endpoint> eps{results.begin(), results.end()};
+            std::lock_guard g( connections_mtx );
+            auto [it, inserted] = peer_ips.insert( connection_details{
+               .host = peer_address,
+               .c = std::move(c),
+               .ips = std::move(eps)
+            });
+            if( !err ) {
+               c->connect( results, peer_ips, it );
+            } else {
+               fc_elog( logger, "Unable to resolve ${host}:${port} ${error}",
+                        ("host", host)("port", port)( "error", err.message() ) );
+               c->set_state(connection::connection_state::closed);
+               ++c->consecutive_immediate_connection_close;
+            }
+      } );
+
       return "added connection";
    }
 
    // called by API
    string connections_manager::disconnect( const string& host ) {
       std::lock_guard g( connections_mtx );
-      if( auto c = find_connection_i( host ) ) {
-         fc_ilog( logger, "disconnecting: ${cid}", ("cid", c->connection_id) );
-         c->close();
-         connections.erase(c);
+      auto& index = peer_ips.get<by_host>();
+      if( auto i = index.find( host ); i != index.end() ) {
+         fc_ilog( logger, "disconnecting: ${cid}", ("cid", i->c->connection_id) );
+         i->c->close();
+         peer_ips.erase(i);
          supplied_peers.erase(host);
          return "connection removed";
       }
@@ -4451,13 +4516,14 @@ namespace eosio {
    }
 
    void connections_manager::close_all() {
-      fc_ilog( logger, "close all ${s} connections", ("s", connections.size()) );
+      auto& index = peer_ips.get<by_host>();
+      fc_ilog( logger, "close all ${s} connections", ("s", index.size()) );
       std::lock_guard g( connections_mtx );
-      for( auto& con : connections ) {
-         fc_dlog( logger, "close: ${cid}", ("cid", con->connection_id) );
-         con->close( false, true );
+      for( const connection_ptr& c : index ) {
+         fc_dlog( logger, "close: ${cid}", ("cid", c->connection_id) );
+         c->close( false, true );
       }
-      connections.clear();
+      peer_ips.clear();
    }
 
    std::optional<connection_status> connections_manager::status( const string& host )const {
@@ -4472,8 +4538,9 @@ namespace eosio {
    vector<connection_status> connections_manager::connection_statuses()const {
       vector<connection_status> result;
       std::shared_lock g( connections_mtx );
-      result.reserve( connections.size() );
-      for( const auto& c : connections ) {
+      auto& index = peer_ips.get<by_host>();
+      result.reserve( index.size() );
+      for( const connection_ptr& c : index ) {
          result.push_back( c->get_status() );
       }
       return result;
@@ -4481,29 +4548,13 @@ namespace eosio {
 
    // call with connections_mtx
    connection_ptr connections_manager::find_connection_i( const string& host )const {
-      for( const auto& c : connections ) {
-         if (c->peer_address() == host)
-            return c;
-      }
+      auto& index = peer_ips.get<by_host>();
+      auto iter = index.find(host);
+      if(iter != index.end())
+         return iter->c;
       return {};
    }
 
-   // call with connections_mtx
-   void connections_manager::connect_i( const string& host, const string& p2p_address ) {
-      connection_ptr c = std::make_shared<connection>( host, p2p_address );
-      fc_dlog( logger, "calling active connector: ${h}", ("h", host) );
-      if( c->resolve_and_connect() ) {
-         fc_dlog( logger, "adding new connection to the list: ${host} ${cid}", ("host", host)("cid", c->connection_id) );
-         add_i( std::move(c) );
-      }
-   }
-
-   // call with connections_mtx
-   void connections_manager::add_i(connection_ptr&& c) {
-      c->set_heartbeat_timeout( heartbeat_timeout );
-      connections.insert( std::move(c) );
-   }
-
    // called from any thread
    void connections_manager::start_conn_timer() {
       start_conn_timer(connector_period, {}); // this locks mutex
@@ -4535,13 +4586,14 @@ namespace eosio {
       auto max_time = fc::time_point::now().safe_add(max_cleanup_time);
       auto from = from_connection.lock();
       std::unique_lock g( connections_mtx );
-      auto it = (from ? connections.find(from) : connections.begin());
-      if (it == connections.end()) it = connections.begin();
+      auto& index = peer_ips.get<by_connection>();
+      auto it = (from ? index.find(from) : index.begin());
+      if (it == index.end()) it = index.begin();
       size_t num_rm = 0, num_clients = 0, num_peers = 0, num_bp_peers = 0;
-      net_plugin::p2p_per_connection_metrics per_connection(connections.size());
-      while (it != connections.end()) {
+      net_plugin::p2p_per_connection_metrics per_connection(index.size());
+      while (it != index.end()) {
          if (fc::time_point::now() >= max_time) {
-            connection_wptr wit = *it;
+            connection_wptr wit = (*it).c;
             g.unlock();
             fc_dlog( logger, "Exiting connection monitor early, ran out of time: ${t}", ("t", max_time - fc::time_point::now()) );
             fc_ilog( logger, "p2p client connections: ${num}/${max}, peer connections: ${pnum}/${pmax}",
@@ -4549,42 +4601,43 @@ namespace eosio {
             start_conn_timer( std::chrono::milliseconds( 1 ), wit ); // avoid exhausting
             return;
          }
-         if ((*it)->is_bp_connection) {
+         const connection_ptr& c = it->c;
+         if (c->is_bp_connection) {
             ++num_bp_peers;
-         } else if ((*it)->incoming()) {
+         } else if (c->incoming()) {
             ++num_clients;
          } else {
             ++num_peers;
          }
          if (update_p2p_connection_metrics) {
-            fc::unique_lock g_conn((*it)->conn_mtx);
-            boost::asio::ip::address_v6::bytes_type addr = (*it)->remote_endpoint_ip_array;
+            fc::unique_lock g_conn(c->conn_mtx);
+            boost::asio::ip::address_v6::bytes_type addr = c->remote_endpoint_ip_array;
             g_conn.unlock();
             net_plugin::p2p_per_connection_metrics::connection_metric metrics{
-                 .connection_id = (*it)->connection_id
+                 .connection_id = c->connection_id
                , .address = addr
-               , .port = (*it)->get_remote_endpoint_port()
-               , .accepting_blocks = (*it)->is_blocks_connection()
-               , .last_received_block = (*it)->get_last_received_block_num()
-               , .first_available_block = (*it)->get_peer_start_block_num()
-               , .last_available_block = (*it)->get_peer_head_block_num()
-               , .unique_first_block_count = (*it)->get_unique_blocks_rcvd_count()
-               , .latency = (*it)->get_peer_ping_time_ns()
-               , .bytes_received = (*it)->get_bytes_received()
-               , .last_bytes_received = (*it)->get_last_bytes_received()
-               , .bytes_sent = (*it)->get_bytes_sent()
-               , .last_bytes_sent = (*it)->get_last_bytes_sent()
-               , .block_sync_bytes_sent = (*it)->get_block_sync_bytes_sent()
-               , .connection_start_time = (*it)->connection_start_time
-               , .log_p2p_address = (*it)->log_p2p_address
+               , .port = c->get_remote_endpoint_port()
+               , .accepting_blocks = c->is_blocks_connection()
+               , .last_received_block = c->get_last_received_block_num()
+               , .first_available_block = c->get_peer_start_block_num()
+               , .last_available_block = c->get_peer_head_block_num()
+               , .unique_first_block_count = c->get_unique_blocks_rcvd_count()
+               , .latency = c->get_peer_ping_time_ns()
+               , .bytes_received = c->get_bytes_received()
+               , .last_bytes_received = c->get_last_bytes_received()
+               , .bytes_sent = c->get_bytes_sent()
+               , .last_bytes_sent = c->get_last_bytes_sent()
+               , .block_sync_bytes_sent = c->get_block_sync_bytes_sent()
+               , .connection_start_time = c->connection_start_time
+               , .log_p2p_address = c->log_p2p_address
             };
             per_connection.peers.push_back(metrics);
          }
 
-         if (!(*it)->socket_is_open() && (*it)->state() != connection::connection_state::connecting) {
-            if (!(*it)->incoming()) {
-               if (!(*it)->resolve_and_connect()) {
-                  it = connections.erase(it);
+         if (!c->socket_is_open() && c->state() != connection::connection_state::connecting) {
+            if (!c->incoming()) {
+               if (!resolve_and_connect(c->peer_address(), c->listen_address)) {
+                  it = index.erase(it);
                   --num_peers;
                   ++num_rm;
                   continue;
@@ -4592,7 +4645,7 @@ namespace eosio {
             } else {
                --num_clients;
                ++num_rm;
-               it = connections.erase(it);
+               it = index.erase(it);
                continue;
             }
          }
diff --git a/plugins/net_plugin/tests/auto_bp_peering_unittest.cpp b/plugins/net_plugin/tests/auto_bp_peering_unittest.cpp
index ddfeba7b1c..d9e0594793 100644
--- a/plugins/net_plugin/tests/auto_bp_peering_unittest.cpp
+++ b/plugins/net_plugin/tests/auto_bp_peering_unittest.cpp
@@ -16,9 +16,9 @@ using namespace std::literals::string_literals;
 
 struct mock_connections_manager {
    uint32_t                     max_client_count = 0;
-   std::vector<mock_connection> connections;
+   std::vector<std::shared_ptr<mock_connection>> connections;
 
-   std::function<void(std::string, std::string)> connect;
+   std::function<void(std::string, std::string)> resolve_and_connect;
    std::function<void(std::string)> disconnect;
 
    uint32_t get_max_client_count() const { return max_client_count; }
@@ -26,7 +26,7 @@ struct mock_connections_manager {
    template <typename Function>
    void for_each_connection(Function&& func) const {
       for (auto c : connections) {
-         if (!func(&c))
+         if (!func(c))
             return;
       }
    }
@@ -166,7 +166,7 @@ BOOST_AUTO_TEST_CASE(test_on_pending_schedule) {
 
    std::vector<std::string> connected_hosts;
 
-   plugin.connections.connect = [&connected_hosts](std::string host, std::string p2p_address) { connected_hosts.push_back(host); };
+   plugin.connections.resolve_and_connect = [&connected_hosts](std::string host, std::string p2p_address) { connected_hosts.push_back(host); };
 
    // make sure nothing happens when it is not in_sync
    plugin.is_in_sync = false;
@@ -210,7 +210,7 @@ BOOST_AUTO_TEST_CASE(test_on_active_schedule1) {
    plugin.config.my_bp_accounts = { "prodd"_n, "produ"_n };
 
    plugin.active_neighbors = { "proda"_n, "prodh"_n, "prodn"_n };
-   plugin.connections.connect = [](std::string host, std::string p2p_address) {};
+   plugin.connections.resolve_and_connect = [](std::string host, std::string p2p_address) {};
 
    std::vector<std::string> disconnected_hosts;
    plugin.connections.disconnect = [&disconnected_hosts](std::string host) { disconnected_hosts.push_back(host); };
@@ -246,7 +246,7 @@ BOOST_AUTO_TEST_CASE(test_on_active_schedule2) {
    plugin.config.my_bp_accounts = { "prodd"_n, "produ"_n };
 
    plugin.active_neighbors = { "proda"_n, "prodh"_n, "prodn"_n };
-   plugin.connections.connect = [](std::string host, std::string p2p_address) {};
+   plugin.connections.resolve_and_connect = [](std::string host, std::string p2p_address) {};
    std::vector<std::string> disconnected_hosts;
    plugin.connections.disconnect = [&disconnected_hosts](std::string host) { disconnected_hosts.push_back(host); };
 
@@ -272,24 +272,24 @@ BOOST_AUTO_TEST_CASE(test_exceeding_connection_limit) {
    plugin.config.my_bp_accounts = { "prodd"_n, "produ"_n };
    plugin.connections.max_client_count = 1;
    plugin.connections.connections = {
-      { .is_bp_connection = true, .is_open = true, .handshake_received = true },   // 0
-      { .is_bp_connection = true, .is_open = true, .handshake_received = false },  // 1
-      { .is_bp_connection = true, .is_open = false, .handshake_received = true },  // 2
-      { .is_bp_connection = true, .is_open = false, .handshake_received = false }, // 3
-      { .is_bp_connection = false, .is_open = true, .handshake_received = true },  // 4
-      { .is_bp_connection = false, .is_open = true, .handshake_received = false }, // 5
-      { .is_bp_connection = false, .is_open = true, .handshake_received = true },  // 6
-      { .is_bp_connection = false, .is_open = false, .handshake_received = false } // 7
+      std::make_shared<mock_connection>( true, true, true ),   // 0
+      std::make_shared<mock_connection>( true, true, false ),  // 1
+      std::make_shared<mock_connection>( true, false, true ),  // 2
+      std::make_shared<mock_connection>( true, false, false ), // 3
+      std::make_shared<mock_connection>( false, true, true ),  // 4
+      std::make_shared<mock_connection>( false, true, false ), // 5
+      std::make_shared<mock_connection>( false, true, true ),  // 6
+      std::make_shared<mock_connection>( false, false, false ) // 7
    };
 
    BOOST_CHECK_EQUAL(plugin.num_established_clients(), 2u);
 
-   BOOST_CHECK(!plugin.exceeding_connection_limit(&plugin.connections.connections[0]));
-   BOOST_CHECK(!plugin.exceeding_connection_limit(&plugin.connections.connections[1]));
-   BOOST_CHECK(!plugin.exceeding_connection_limit(&plugin.connections.connections[2]));
-   BOOST_CHECK(!plugin.exceeding_connection_limit(&plugin.connections.connections[3]));
-   BOOST_CHECK(plugin.exceeding_connection_limit(&plugin.connections.connections[4]));
-   BOOST_CHECK(!plugin.exceeding_connection_limit(&plugin.connections.connections[5]));
-   BOOST_CHECK(plugin.exceeding_connection_limit(&plugin.connections.connections[6]));
-   BOOST_CHECK(!plugin.exceeding_connection_limit(&plugin.connections.connections[7]));
+   BOOST_CHECK(!plugin.exceeding_connection_limit(plugin.connections.connections[0]));
+   BOOST_CHECK(!plugin.exceeding_connection_limit(plugin.connections.connections[1]));
+   BOOST_CHECK(!plugin.exceeding_connection_limit(plugin.connections.connections[2]));
+   BOOST_CHECK(!plugin.exceeding_connection_limit(plugin.connections.connections[3]));
+   BOOST_CHECK(plugin.exceeding_connection_limit(plugin.connections.connections[4]));
+   BOOST_CHECK(!plugin.exceeding_connection_limit(plugin.connections.connections[5]));
+   BOOST_CHECK(plugin.exceeding_connection_limit(plugin.connections.connections[6]));
+   BOOST_CHECK(!plugin.exceeding_connection_limit(plugin.connections.connections[7]));
 }

From 92e402213e6c0d502f63a8088a5b6c6add5cdf0d Mon Sep 17 00:00:00 2001
From: Jonathan Giszczak <jonrg@hypercubepc.com>
Date: Mon, 11 Sep 2023 15:53:19 -0500
Subject: [PATCH 11/61] Fix build error.  Lambda captures by value are const.

Update connections_manager::add method.
Clean up cruft and rename connection data structure back to
'connections'.
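
The build error comes from the fact that a by-value lambda capture is
const inside the lambda body unless the lambda is declared mutable. A
reduced, standalone illustration (names here are only for the example):

    #include <memory>

    struct widget { int counter = 0; };

    int main() {
       auto w = std::make_shared<widget>();
       // The lambda's generated operator() is const, so a by-value capture
       // cannot itself be modified:
       // auto bad = [w]() { w.reset(); };        // error: 'w' is read-only here
       auto ok  = [w]() mutable { w.reset(); };   // 'mutable' lifts the const-ness
       auto alt = [w]() { ++w->counter; };        // mutating *through* the pointer is fine
       ok();
       alt();
       return 0;
    }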
---
 plugins/net_plugin/net_plugin.cpp | 86 +++++++++++--------------------
 1 file changed, 30 insertions(+), 56 deletions(-)

diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp
index 5021096ced..7f3159046b 100644
--- a/plugins/net_plugin/net_plugin.cpp
+++ b/plugins/net_plugin/net_plugin.cpp
@@ -365,8 +365,7 @@ namespace eosio {
    private:
       alignas(hardware_destructive_interference_size)
       mutable std::shared_mutex        connections_mtx;
-      //chain::flat_set<connection_ptr>  connections GUARDED_BY(connections_mtx);
-      connection_details_index         peer_ips GUARDED_BY(connections_mtx);
+      connection_details_index         connections GUARDED_BY(connections_mtx);
       chain::flat_set<string>          supplied_peers;
 
       alignas(hardware_destructive_interference_size)
@@ -973,8 +972,6 @@ namespace eosio {
 
       bool populate_handshake( handshake_message& hello ) const;
 
-//      bool connect();
-      //typedef boost:multi_index::index<connections_manager,
       void connect( const tcp::resolver::results_type& endpoints,
                     connections_manager::connection_details_index& connections,
                     connections_manager::connection_details_index::const_iterator conn_details );
@@ -1186,14 +1183,14 @@ namespace eosio {
    template<typename Function>
    void connections_manager::for_each_connection( Function&& f ) const {
       std::shared_lock g( connections_mtx );
-      auto& index = peer_ips.get<by_host>();
+      auto& index = connections.get<by_host>();
       std::for_each(index.begin(), index.end(), std::forward<Function>(f));
    }
 
    template<typename Function>
    void connections_manager::for_each_block_connection( Function&& f ) const {
       std::shared_lock g( connections_mtx );
-      auto& index = peer_ips.get<by_host>();
+      auto& index = connections.get<by_host>();
       for( const connection_ptr& c : index ) {
          if (c->is_blocks_connection()) {
             f(c);
@@ -1204,14 +1201,14 @@ namespace eosio {
    template <typename UnaryPredicate>
    bool connections_manager::any_of_connections(UnaryPredicate&& p) const {
       std::shared_lock g(connections_mtx);
-      auto& index = peer_ips.get<by_host>();
+      auto& index = connections.get<by_host>();
       return std::any_of(index.cbegin(), index.cend(), std::forward<UnaryPredicate>(p));
    }
 
    template <typename UnaryPredicate>
    bool connections_manager::any_of_block_connections(UnaryPredicate&& p) const {
       std::shared_lock g( connections_mtx );
-      auto& index = peer_ips.get<by_host>();
+      auto& index = connections.get<by_host>();
       for( const connection_ptr& c : index ) {
          if( c->is_blocks_connection() ) {
             if (p(c))
@@ -2709,9 +2706,10 @@ namespace eosio {
 
    //------------------------------------------------------------------------
 
-   // called from any thread
-#if 0
-   bool connection::connect() {
+   // called from connection strand
+   void connection::connect( const tcp::resolver::results_type& endpoints, 
+                             connections_manager::connection_details_index& connections,
+                             connections_manager::connection_details_index::const_iterator conn_details ) {
       switch ( no_retry ) {
          case no_reason:
          case wrong_version:
@@ -2720,50 +2718,21 @@ namespace eosio {
             break;
          default:
             fc_dlog( logger, "Skipping connect due to go_away reason ${r}",("r", reason_str( no_retry )));
-            return false;
+            return;
       }
-
-      connection_ptr c = shared_from_this();
-
       if( consecutive_immediate_connection_close > def_max_consecutive_immediate_connection_close || no_retry == benign_other ) {
          fc::microseconds connector_period = my_impl->connections.get_connector_period();
          fc::lock_guard g( conn_mtx );
          if( last_close == fc::time_point() || last_close > fc::time_point::now() - connector_period ) {
-            return true; // true so doesn't remove from valid connections
+            return;
          }
       }
-
-      strand.post([c]() {
-         c->set_connection_type( c->peer_address() );
-
-         connection_wptr weak_conn = c;
-         resolver->async_resolve(host, port, boost::asio::bind_executor( c->strand,
-            [resolver, weak_conn, host = host, port = port]( const boost::system::error_code& err, const tcp::resolver::results_type& endpoints ) {
-               auto c = weak_conn.lock();
-               if( !c ) return;
-               if( !err ) {
-                  c->connect( endpoints );
-               } else {
-                  fc_elog( logger, "Unable to resolve ${host}:${port} ${error}",
-                           ("host", host)("port", port)( "error", err.message() ) );
-                  c->set_state(connection_state::closed);
-                  ++c->consecutive_immediate_connection_close;
-               }
-         } ) );
-      } );
-      return true;
-   }
-#endif
-   // called from connection strand
-   void connection::connect( const tcp::resolver::results_type& endpoints, 
-                             connections_manager::connection_details_index& connections,
-                             connections_manager::connection_details_index::const_iterator conn_details ) {
       set_state(connection_state::connecting);
       pending_message_buffer.reset();
       buffer_queue.clear_out_queue();
       boost::asio::async_connect( *socket, endpoints,
          boost::asio::bind_executor( strand,
-               [c = shared_from_this(), socket=socket, connections, conn_details]( const boost::system::error_code& err, const tcp::endpoint& endpoint ) {
+               [c = shared_from_this(), socket=socket, &connections, conn_details]( const boost::system::error_code& err, const tcp::endpoint& endpoint ) {
             if( !err && socket->is_open() && socket == c->socket ) {
                auto& index = connections.get<by_active_ip>();
                index.modify_key(connections.project<by_active_ip>(conn_details), [endpoint](tcp::endpoint& e) {
@@ -4421,7 +4390,7 @@ namespace eosio {
 
    size_t connections_manager::number_connections() const {
       std::lock_guard g(connections_mtx);
-      return peer_ips.size();
+      return connections.size();
    }
 
    void connections_manager::add_supplied_peers(const vector<string>& peers ) {
@@ -4457,7 +4426,12 @@ namespace eosio {
 
    void connections_manager::add( connection_ptr c ) {
       std::lock_guard g( connections_mtx );
-      add_i( std::move(c) );
+      boost::system::error_code ec;
+      auto endpoint = c->socket->remote_endpoint(ec);
+      connections.insert( connection_details{
+         .host = c->peer_address(), 
+         .c = std::move(c),
+         .active_ip = endpoint} );
    }
 
    // called by API
@@ -4483,18 +4457,18 @@ namespace eosio {
             c->set_heartbeat_timeout( heartbeat_timeout );
             vector<tcp::endpoint> eps{results.begin(), results.end()};
             std::lock_guard g( connections_mtx );
-            auto [it, inserted] = peer_ips.insert( connection_details{
+            auto [it, inserted] = connections.insert( connection_details{
                .host = peer_address,
                .c = std::move(c),
                .ips = std::move(eps)
             });
             if( !err ) {
-               c->connect( results, peer_ips, it );
+               it->c->connect( results, connections, it );
             } else {
                fc_elog( logger, "Unable to resolve ${host}:${port} ${error}",
                         ("host", host)("port", port)( "error", err.message() ) );
-               c->set_state(connection::connection_state::closed);
-               ++c->consecutive_immediate_connection_close;
+               it->c->set_state(connection::connection_state::closed);
+               ++(it->c->consecutive_immediate_connection_close);
             }
       } );
 
@@ -4504,11 +4478,11 @@ namespace eosio {
    // called by API
    string connections_manager::disconnect( const string& host ) {
       std::lock_guard g( connections_mtx );
-      auto& index = peer_ips.get<by_host>();
+      auto& index = connections.get<by_host>();
       if( auto i = index.find( host ); i != index.end() ) {
          fc_ilog( logger, "disconnecting: ${cid}", ("cid", i->c->connection_id) );
          i->c->close();
-         peer_ips.erase(i);
+         connections.erase(i);
          supplied_peers.erase(host);
          return "connection removed";
       }
@@ -4516,14 +4490,14 @@ namespace eosio {
    }
 
    void connections_manager::close_all() {
-      auto& index = peer_ips.get<by_host>();
+      auto& index = connections.get<by_host>();
       fc_ilog( logger, "close all ${s} connections", ("s", index.size()) );
       std::lock_guard g( connections_mtx );
       for( const connection_ptr& c : index ) {
          fc_dlog( logger, "close: ${cid}", ("cid", c->connection_id) );
          c->close( false, true );
       }
-      peer_ips.clear();
+      connections.clear();
    }
 
    std::optional<connection_status> connections_manager::status( const string& host )const {
@@ -4538,7 +4512,7 @@ namespace eosio {
    vector<connection_status> connections_manager::connection_statuses()const {
       vector<connection_status> result;
       std::shared_lock g( connections_mtx );
-      auto& index = peer_ips.get<by_host>();
+      auto& index = connections.get<by_host>();
       result.reserve( index.size() );
       for( const connection_ptr& c : index ) {
          result.push_back( c->get_status() );
@@ -4548,7 +4522,7 @@ namespace eosio {
 
    // call with connections_mtx
    connection_ptr connections_manager::find_connection_i( const string& host )const {
-      auto& index = peer_ips.get<by_host>();
+      auto& index = connections.get<by_host>();
       auto iter = index.find(host);
       if(iter != index.end())
          return iter->c;
@@ -4586,7 +4560,7 @@ namespace eosio {
       auto max_time = fc::time_point::now().safe_add(max_cleanup_time);
       auto from = from_connection.lock();
       std::unique_lock g( connections_mtx );
-      auto& index = peer_ips.get<by_connection>();
+      auto& index = connections.get<by_connection>();
       auto it = (from ? index.find(from) : index.begin());
       if (it == index.end()) it = index.begin();
       size_t num_rm = 0, num_clients = 0, num_peers = 0, num_bp_peers = 0;

From b24f8e3b847e3e819e6657b0f326f56ed3d1022b Mon Sep 17 00:00:00 2001
From: Jonathan Giszczak <jonrg@hypercubepc.com>
Date: Mon, 11 Sep 2023 16:25:33 -0500
Subject: [PATCH 12/61] Fix bare numeric value for peer throttle.
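
A limit given without a unit suffix is now accepted as plain bytes per
second instead of tripping the unit-suffix check. A standalone sketch of
the intended behavior (not the plugin's parser, just an illustration):

    #include <cassert>
    #include <cstddef>
    #include <sstream>
    #include <stdexcept>
    #include <string>

    std::size_t parse_rate(const std::string& spec) {
       std::istringstream in(spec);
       double value = 0;
       in >> value;
       std::string units;
       in >> units;
       if (units.empty())
          return static_cast<std::size_t>(value);   // bare number: bytes/s, no unit check
       if (units == "KB/s")  return static_cast<std::size_t>(value * 1000);
       if (units == "MiB/s") return static_cast<std::size_t>(value * 1024 * 1024);
       throw std::invalid_argument("invalid rate limit: " + spec);
    }

    int main() {
       assert(parse_rate("500000") == 500000);    // previously rejected, now accepted
       assert(parse_rate("640KB/s") == 640000);
       return 0;
    }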

---
 plugins/net_plugin/net_plugin.cpp | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp
index 7f3159046b..87b120e0be 100644
--- a/plugins/net_plugin/net_plugin.cpp
+++ b/plugins/net_plugin/net_plugin.cpp
@@ -4054,7 +4054,9 @@ namespace eosio {
          std::smatch units_match;
          std::regex_match(units, units_match, units_regex);
          try {
-            EOS_ASSERT(units_match.size() == 2, plugin_config_exception, "invalid block sync rate limit specification: ${limit}", ("limit", units));
+            if( units.length() > 0 ) {
+               EOS_ASSERT(units_match.size() == 2, plugin_config_exception, "invalid block sync rate limit specification: ${limit}", ("limit", units));
+            }
             block_sync_rate_limit = boost::numeric_cast<size_t>(limit * prefix_multipliers.at(units_match[1].str()));
             fc_dlog( logger, "setting block_sync_rate_limit to ${limit} bytes per second", ("limit", block_sync_rate_limit));
          } catch (boost::numeric::bad_numeric_cast&) {

From 3f67034a596615c49a6fea41d6e29a55978d87e2 Mon Sep 17 00:00:00 2001
From: Jonathan Giszczak <jonrg@hypercubepc.com>
Date: Mon, 11 Sep 2023 19:08:09 -0500
Subject: [PATCH 13/61] Update supplied_peers only once per configured peer and
 once per API call.
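
The API entry point now owns the supplied_peers bookkeeping and
resolve_and_connect() no longer inserts, so retry paths do not re-add the
peer on every attempt. The shape of the split, reduced to a sketch with
stand-in types:

    #include <set>
    #include <string>

    struct manager {
       std::set<std::string> supplied_peers;

       // called once per API request or configured peer
       std::string connect(const std::string& host) {
          supplied_peers.insert(host);               // bookkeeping happens only here
          return resolve_and_connect(host);
       }

       // called on every (re)connect attempt; never touches supplied_peers
       std::string resolve_and_connect(const std::string&) {
          return "added connection";                 // stands in for resolve + async connect
       }
    };

    int main() {
       manager m;
       m.connect("localhost:9876");                  // inserted once
       m.resolve_and_connect("localhost:9876");      // retry leaves supplied_peers untouched
       return m.supplied_peers.size() == 1 ? 0 : 1;
    }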

---
 plugins/net_plugin/net_plugin.cpp | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp
index 87b120e0be..bd6687f9a3 100644
--- a/plugins/net_plugin/net_plugin.cpp
+++ b/plugins/net_plugin/net_plugin.cpp
@@ -407,6 +407,7 @@ namespace eosio {
       void stop_conn_timer();
 
       void add(connection_ptr c);
+      string connect(const string& host, const string& p2p_address);
       string resolve_and_connect(const string& host, const string& p2p_address);
       string disconnect(const string& host);
       void close_all();
@@ -4346,7 +4347,7 @@ namespace eosio {
 
    /// RPC API
    string net_plugin::connect( const string& host ) {
-      return my->connections.resolve_and_connect( host, *my->p2p_addresses.begin() );
+      return my->connections.connect( host, *my->p2p_addresses.begin() );
    }
 
    /// RPC API
@@ -4437,6 +4438,11 @@ namespace eosio {
    }
 
    // called by API
+   string connections_manager::connect( const string& host, const string& p2p_address ) {
+      supplied_peers.insert(host);
+      return resolve_and_connect( host, p2p_address );
+   }
+
    string connections_manager::resolve_and_connect( const string& peer_address, const string& listen_address ) {
       string::size_type colon = peer_address.find(':');
       if (colon == std::string::npos || colon == 0) {
@@ -4448,7 +4454,6 @@ namespace eosio {
       if( find_connection_i( peer_address ) )
          return "already connected";
 
-      supplied_peers.insert(peer_address);
       auto [host, port, type] = split_host_port_type(peer_address);
 
       auto resolver = std::make_shared<tcp::resolver>( my_impl->thread_pool.get_executor() );

From e59451dbfb315a9475ec0e262e28b777e07fb469 Mon Sep 17 00:00:00 2001
From: Jonathan Giszczak <jonrg@hypercubepc.com>
Date: Fri, 15 Sep 2023 11:00:53 -0500
Subject: [PATCH 14/61] Remove encapsulation violation.

Address peer review comments.
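
connection::connect() no longer receives the manager's multi-index
container by reference; the connection calls back into the manager, which
takes its own mutex before touching the index. The shape of the change,
reduced to a standalone sketch (types are illustrative):

    #include <mutex>
    #include <string>

    struct endpoint { std::string ip; };

    class manager {
       std::mutex mtx;
       endpoint   active_ip;                     // stands in for the by_active_ip index
    public:
       // the only way callers may mutate the container
       void update_connection_endpoint(const endpoint& ep) {
          std::lock_guard<std::mutex> g(mtx);
          active_ip = ep;
       }
    };

    struct connection {
       manager* mgr;
       void on_connected(const endpoint& ep) {
          // previously: modified the manager's index directly, bypassing its lock
          mgr->update_connection_endpoint(ep);
       }
    };

    int main() {
       manager m;
       connection c{&m};
       c.on_connected(endpoint{"192.0.2.1"});
       return 0;
    }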
---
 .../libfc/include/fc/exception/exception.hpp  |  6 ++-
 plugins/net_plugin/net_plugin.cpp             | 48 ++++++++++++++-----
 2 files changed, 41 insertions(+), 13 deletions(-)

diff --git a/libraries/libfc/include/fc/exception/exception.hpp b/libraries/libfc/include/fc/exception/exception.hpp
index c3baa2c9d5..80fe7694e3 100644
--- a/libraries/libfc/include/fc/exception/exception.hpp
+++ b/libraries/libfc/include/fc/exception/exception.hpp
@@ -9,6 +9,9 @@
 #include <unordered_map>
 #include <boost/core/typeinfo.hpp>
 #include <boost/interprocess/exceptions.hpp>
+#include <boost/exception/error_info.hpp>
+#include <boost/stacktrace/stacktrace.hpp>
+#include <boost/exception/all.hpp>
 
 namespace fc
 {
@@ -394,7 +397,8 @@ namespace fc
  */
 #define FC_THROW_EXCEPTION( EXCEPTION, FORMAT, ... ) \
   FC_MULTILINE_MACRO_BEGIN \
-    throw EXCEPTION( FC_LOG_MESSAGE( error, FORMAT, __VA_ARGS__ ) ); \
+    using traced = boost::error_info<struct tag_stacktrace, boost::stacktrace::stacktrace>; \
+    throw boost::enable_error_info( EXCEPTION( FC_LOG_MESSAGE( error, FORMAT, __VA_ARGS__ ) ) ) << traced(boost::stacktrace::stacktrace()); \
   FC_MULTILINE_MACRO_END
 
 
diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp
index bd6687f9a3..af86d9f31d 100644
--- a/plugins/net_plugin/net_plugin.cpp
+++ b/plugins/net_plugin/net_plugin.cpp
@@ -25,6 +25,8 @@
 #include <boost/asio/ip/host_name.hpp>
 #include <boost/asio/steady_timer.hpp>
 #include <boost/multi_index/key.hpp>
+#include <boost/stacktrace/stacktrace.hpp>
+#include <boost/exception/all.hpp>
 
 #include <atomic>
 #include <cmath>
@@ -365,7 +367,7 @@ namespace eosio {
    private:
       alignas(hardware_destructive_interference_size)
       mutable std::shared_mutex        connections_mtx;
-      connection_details_index         connections GUARDED_BY(connections_mtx);
+      connection_details_index         connections;
       chain::flat_set<string>          supplied_peers;
 
       alignas(hardware_destructive_interference_size)
@@ -409,6 +411,7 @@ namespace eosio {
       void add(connection_ptr c);
       string connect(const string& host, const string& p2p_address);
       string resolve_and_connect(const string& host, const string& p2p_address);
+      void update_connection_endpoint(connection_details_index::const_iterator it, const tcp::endpoint& endpoint);
       string disconnect(const string& host);
       void close_all();
 
@@ -974,7 +977,6 @@ namespace eosio {
       bool populate_handshake( handshake_message& hello ) const;
 
       void connect( const tcp::resolver::results_type& endpoints,
-                    connections_manager::connection_details_index& connections,
                     connections_manager::connection_details_index::const_iterator conn_details );
       void start_read_message();
 
@@ -2708,8 +2710,7 @@ namespace eosio {
    //------------------------------------------------------------------------
 
    // called from connection strand
-   void connection::connect( const tcp::resolver::results_type& endpoints, 
-                             connections_manager::connection_details_index& connections,
+   void connection::connect( const tcp::resolver::results_type& endpoints,
                              connections_manager::connection_details_index::const_iterator conn_details ) {
       switch ( no_retry ) {
          case no_reason:
@@ -2733,12 +2734,9 @@ namespace eosio {
       buffer_queue.clear_out_queue();
       boost::asio::async_connect( *socket, endpoints,
          boost::asio::bind_executor( strand,
-               [c = shared_from_this(), socket=socket, &connections, conn_details]( const boost::system::error_code& err, const tcp::endpoint& endpoint ) {
+               [c = shared_from_this(), socket=socket, conn_details]( const boost::system::error_code& err, const tcp::endpoint& endpoint ) {
             if( !err && socket->is_open() && socket == c->socket ) {
-               auto& index = connections.get<by_active_ip>();
-               index.modify_key(connections.project<by_active_ip>(conn_details), [endpoint](tcp::endpoint& e) {
-                  e = endpoint;
-               });
+               my_impl->connections.update_connection_endpoint(conn_details, endpoint);
                c->update_endpoints(endpoint);
                if( c->start_session() ) {
                   c->send_handshake();
@@ -4059,7 +4057,7 @@ namespace eosio {
                EOS_ASSERT(units_match.size() == 2, plugin_config_exception, "invalid block sync rate limit specification: ${limit}", ("limit", units));
             }
             block_sync_rate_limit = boost::numeric_cast<size_t>(limit * prefix_multipliers.at(units_match[1].str()));
-            fc_dlog( logger, "setting block_sync_rate_limit to ${limit} bytes per second", ("limit", block_sync_rate_limit));
+            fc_ilog( logger, "setting block_sync_rate_limit to ${limit} megabytes per second", ("limit", double(block_sync_rate_limit)/1000000));
          } catch (boost::numeric::bad_numeric_cast&) {
             EOS_THROW(plugin_config_exception, "block sync rate limit specification overflowed: ${limit}", ("limit", limit_str));
          }
@@ -4208,7 +4206,18 @@ namespace eosio {
       set_producer_accounts(producer_plug->producer_accounts());
 
       thread_pool.start( thread_pool_size, []( const fc::exception& e ) {
-         fc_elog( logger, "Exception in net plugin thread pool, exiting: ${e}", ("e", e.to_detail_string()) );
+         using traced = boost::error_info<struct tag_stacktrace, boost::stacktrace::stacktrace>;
+         const boost::stacktrace::stacktrace* st = boost::get_error_info<traced>(e);
+         std::stringstream st_str;
+         if( st != nullptr ) {
+            st_str << *st;
+            fc_elog( logger, "stacktrace wasn't empty");
+         fc_elog( logger, "Exception in net plugin thread pool, exiting:\n${e}${trace}", ("e", e.to_detail_string())("trace", st_str.str()) );
+         }
+         else {
+            fc_elog( logger, "stacktrace was empty");
+         fc_elog( logger, "Exception in net plugin thread pool, exiting:\n${e}", ("e", e.to_detail_string()) );
+         }
          app().quit();
       } );
 
@@ -4439,7 +4448,9 @@ namespace eosio {
 
    // called by API
    string connections_manager::connect( const string& host, const string& p2p_address ) {
+      std::unique_lock g( connections_mtx );
       supplied_peers.insert(host);
+      g.unlock();
       return resolve_and_connect( host, p2p_address );
    }
 
@@ -4470,7 +4481,7 @@ namespace eosio {
                .ips = std::move(eps)
             });
             if( !err ) {
-               it->c->connect( results, connections, it );
+               it->c->connect( results, it );
             } else {
                fc_elog( logger, "Unable to resolve ${host}:${port} ${error}",
                         ("host", host)("port", port)( "error", err.message() ) );
@@ -4482,6 +4493,16 @@ namespace eosio {
       return "added connection";
    }
 
+   void connections_manager::update_connection_endpoint(connection_details_index::const_iterator it,
+                                                        const tcp::endpoint& endpoint) {
+      fc_dlog(logger, "updating connection endpoint");
+      std::unique_lock g( connections_mtx );
+      auto& index = connections.get<by_active_ip>();
+      index.modify_key(connections.project<by_active_ip>(it), [endpoint](tcp::endpoint& e) {
+         e = endpoint;
+      });
+   }
+
    // called by API
    string connections_manager::disconnect( const string& host ) {
       std::lock_guard g( connections_mtx );
@@ -4617,12 +4638,15 @@ namespace eosio {
 
          if (!c->socket_is_open() && c->state() != connection::connection_state::connecting) {
             if (!c->incoming()) {
+               g.unlock();
+               fc_dlog(logger, "attempting to connect in connection_monitor");
                if (!resolve_and_connect(c->peer_address(), c->listen_address)) {
                   it = index.erase(it);
                   --num_peers;
                   ++num_rm;
                   continue;
                }
+               g.lock();
             } else {
                --num_clients;
                ++num_rm;

From ec2d36dcbee917eda12d76601a0f13e69c909569 Mon Sep 17 00:00:00 2001
From: Jonathan Giszczak <jonrg@hypercubepc.com>
Date: Mon, 18 Sep 2023 16:15:49 -0500
Subject: [PATCH 15/61] Restore connection reconnect method.  WIP

---
 plugins/net_plugin/net_plugin.cpp | 34 +++++++++++++++++++++++--------
 1 file changed, 25 insertions(+), 9 deletions(-)

diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp
index af86d9f31d..75d3ada772 100644
--- a/plugins/net_plugin/net_plugin.cpp
+++ b/plugins/net_plugin/net_plugin.cpp
@@ -343,7 +343,7 @@ namespace eosio {
          std::string host;
          connection_ptr c;
          tcp::endpoint active_ip;
-         std::vector<tcp::endpoint> ips;
+         tcp::resolver::results_type ips;
          operator const connection_ptr&() const { return c; }
       };
 
@@ -412,6 +412,7 @@ namespace eosio {
       string connect(const string& host, const string& p2p_address);
       string resolve_and_connect(const string& host, const string& p2p_address);
       void update_connection_endpoint(connection_details_index::const_iterator it, const tcp::endpoint& endpoint);
+      const connection_details_index::index_const_iterator<by_connection>& get_connection_iterator(const connection_ptr& c);
       string disconnect(const string& host);
       void close_all();
 
@@ -976,6 +977,7 @@ namespace eosio {
 
       bool populate_handshake( handshake_message& hello ) const;
 
+      bool reconnect();
       void connect( const tcp::resolver::results_type& endpoints,
                     connections_manager::connection_details_index::const_iterator conn_details );
       void start_read_message();
@@ -2709,9 +2711,7 @@ namespace eosio {
 
    //------------------------------------------------------------------------
 
-   // called from connection strand
-   void connection::connect( const tcp::resolver::results_type& endpoints,
-                             connections_manager::connection_details_index::const_iterator conn_details ) {
+   bool connection::reconnect() {
       switch ( no_retry ) {
          case no_reason:
          case wrong_version:
@@ -2720,15 +2720,26 @@ namespace eosio {
             break;
          default:
             fc_dlog( logger, "Skipping connect due to go_away reason ${r}",("r", reason_str( no_retry )));
-            return;
+            return false;
       }
       if( consecutive_immediate_connection_close > def_max_consecutive_immediate_connection_close || no_retry == benign_other ) {
          fc::microseconds connector_period = my_impl->connections.get_connector_period();
          fc::lock_guard g( conn_mtx );
          if( last_close == fc::time_point() || last_close > fc::time_point::now() - connector_period ) {
-            return;
+            return true;
          }
       }
+      connection_ptr c = shared_from_this();
+      strand.post([c]() {
+         auto it = my_impl->connections.get_connection_iterator(c);
+         c->connect(it->ips, it);
+      });
+      return true;
+   }
+
+   // called from connection strand
+   void connection::connect( const tcp::resolver::results_type& endpoints,
+                             connections_manager::connection_details_index::const_iterator conn_details ) {
       set_state(connection_state::connecting);
       pending_message_buffer.reset();
       buffer_queue.clear_out_queue();
@@ -4473,12 +4484,11 @@ namespace eosio {
          [resolver, host = host, port = port, peer_address = peer_address, listen_address = listen_address, this]( const boost::system::error_code& err, const tcp::resolver::results_type& results ) {
             connection_ptr c = std::make_shared<connection>( peer_address, listen_address );
             c->set_heartbeat_timeout( heartbeat_timeout );
-            vector<tcp::endpoint> eps{results.begin(), results.end()};
             std::lock_guard g( connections_mtx );
-            auto [it, inserted] = connections.insert( connection_details{
+            auto [it, inserted] = connections.emplace( connection_details{
                .host = peer_address,
                .c = std::move(c),
-               .ips = std::move(eps)
+               .ips = results
             });
             if( !err ) {
                it->c->connect( results, it );
@@ -4503,6 +4513,12 @@ namespace eosio {
       });
    }
 
+   const connections_manager::connection_details_index::index_const_iterator<by_connection>& connections_manager::get_connection_iterator(const connection_ptr& c) {
+      std::lock_guard g( connections_mtx );
+      const auto& index = connections.get<by_connection>();
+      return index.find(c);
+   }
+
    // called by API
    string connections_manager::disconnect( const string& host ) {
       std::lock_guard g( connections_mtx );

From 777c8d89532f17703d69e045ddfd7d1b6e302f80 Mon Sep 17 00:00:00 2001
From: Kevin Heifner <heifnerk@objectcomputing.com>
Date: Mon, 18 Sep 2023 17:05:08 -0500
Subject: [PATCH 16/61] Remove stacktrace additions

---
 libraries/libfc/include/fc/exception/exception.hpp |  6 +-----
 plugins/net_plugin/net_plugin.cpp                  | 14 --------------
 2 files changed, 1 insertion(+), 19 deletions(-)

diff --git a/libraries/libfc/include/fc/exception/exception.hpp b/libraries/libfc/include/fc/exception/exception.hpp
index 80fe7694e3..c3baa2c9d5 100644
--- a/libraries/libfc/include/fc/exception/exception.hpp
+++ b/libraries/libfc/include/fc/exception/exception.hpp
@@ -9,9 +9,6 @@
 #include <unordered_map>
 #include <boost/core/typeinfo.hpp>
 #include <boost/interprocess/exceptions.hpp>
-#include <boost/exception/error_info.hpp>
-#include <boost/stacktrace/stacktrace.hpp>
-#include <boost/exception/all.hpp>
 
 namespace fc
 {
@@ -397,8 +394,7 @@ namespace fc
  */
 #define FC_THROW_EXCEPTION( EXCEPTION, FORMAT, ... ) \
   FC_MULTILINE_MACRO_BEGIN \
-    using traced = boost::error_info<struct tag_stacktrace, boost::stacktrace::stacktrace>; \
-    throw boost::enable_error_info( EXCEPTION( FC_LOG_MESSAGE( error, FORMAT, __VA_ARGS__ ) ) ) << traced(boost::stacktrace::stacktrace()); \
+    throw EXCEPTION( FC_LOG_MESSAGE( error, FORMAT, __VA_ARGS__ ) ); \
   FC_MULTILINE_MACRO_END
 
 
diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp
index af86d9f31d..68e40e1fea 100644
--- a/plugins/net_plugin/net_plugin.cpp
+++ b/plugins/net_plugin/net_plugin.cpp
@@ -25,8 +25,6 @@
 #include <boost/asio/ip/host_name.hpp>
 #include <boost/asio/steady_timer.hpp>
 #include <boost/multi_index/key.hpp>
-#include <boost/stacktrace/stacktrace.hpp>
-#include <boost/exception/all.hpp>
 
 #include <atomic>
 #include <cmath>
@@ -4206,18 +4204,6 @@ namespace eosio {
       set_producer_accounts(producer_plug->producer_accounts());
 
       thread_pool.start( thread_pool_size, []( const fc::exception& e ) {
-         using traced = boost::error_info<struct tag_stacktrace, boost::stacktrace::stacktrace>;
-         const boost::stacktrace::stacktrace* st = boost::get_error_info<traced>(e);
-         std::stringstream st_str;
-         if( st != nullptr ) {
-            st_str << *st;
-            fc_elog( logger, "stacktrace wasn't empty");
-         fc_elog( logger, "Exception in net plugin thread pool, exiting:\n${e}${trace}", ("e", e.to_detail_string())("trace", st_str.str()) );
-         }
-         else {
-            fc_elog( logger, "stacktrace was empty");
-         fc_elog( logger, "Exception in net plugin thread pool, exiting:\n${e}", ("e", e.to_detail_string()) );
-         }
          app().quit();
       } );
 

From 05be825d4662822d98960233be008f95f3c4deca Mon Sep 17 00:00:00 2001
From: Jonathan Giszczak <jonrg@hypercubepc.com>
Date: Tue, 19 Sep 2023 21:28:06 -0500
Subject: [PATCH 17/61] Update netApi connect test.

---
 tests/plugin_http_api_test.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/tests/plugin_http_api_test.py b/tests/plugin_http_api_test.py
index 0f49f458ee..c11a5cc21f 100755
--- a/tests/plugin_http_api_test.py
+++ b/tests/plugin_http_api_test.py
@@ -791,7 +791,12 @@ def test_NetApi(self) :
         ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
+        # connect with incomplete content parameter
         payload = "localhost"
+        ret_json = self.nodeos.processUrllibRequest(resource, command, payload, endpoint=endpoint)
+        self.assertEqual(ret_json["code"], 201)
+        self.assertEqual(ret_json["payload"], 'invalid peer address')
+        payload = "localhost:9877"
         ret_str = self.nodeos.processUrllibRequest(resource, command, payload, returnType=ReturnType.raw, endpoint=endpoint).decode('ascii')
         self.assertEqual("\"added connection\"", ret_str)
 

From 24f4aad3e1b54175b21ecb66bbae8d7d6378c992 Mon Sep 17 00:00:00 2001
From: Kevin Heifner <heifnerk@objectcomputing.com>
Date: Wed, 20 Sep 2023 11:39:01 -0500
Subject: [PATCH 18/61] GH-1295 Project index to default iterator
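
get_connection_iterator() now returns an iterator of the container's
default (first) index by value instead of a reference to a by_connection
iterator, which dangled because index.find() returns a temporary.
project<>() converts between index iterators; a minimal standalone
example of the idiom:

    #include <boost/multi_index_container.hpp>
    #include <boost/multi_index/ordered_index.hpp>
    #include <boost/multi_index/member.hpp>
    #include <cassert>
    #include <string>

    namespace bmi = boost::multi_index;

    struct row { std::string host; int id; };

    using table = bmi::multi_index_container<
       row,
       bmi::indexed_by<
          bmi::ordered_non_unique<bmi::tag<struct by_host>,
             bmi::member<row, std::string, &row::host>>,
          bmi::ordered_unique<bmi::tag<struct by_id>,
             bmi::member<row, int, &row::id>>
       >
    >;

    int main() {
       table t;
       t.insert(row{"localhost:9876", 1});
       auto it_id   = t.get<by_id>().find(1);     // iterator of the by_id index
       auto it_host = t.project<by_host>(it_id);  // same element, default-index iterator
       assert(it_host->host == "localhost:9876");
       return 0;
    }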

---
 plugins/net_plugin/net_plugin.cpp | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp
index 9878cf4fa6..cd42cdbfcb 100644
--- a/plugins/net_plugin/net_plugin.cpp
+++ b/plugins/net_plugin/net_plugin.cpp
@@ -416,7 +416,7 @@ namespace eosio {
       string connect(const string& host, const string& p2p_address);
       string resolve_and_connect(const string& host, const string& p2p_address);
       void update_connection_endpoint(connection_details_index::const_iterator it, const tcp::endpoint& endpoint);
-      const connection_details_index::index_const_iterator<by_connection>& get_connection_iterator(const connection_ptr& c);
+      connection_details_index::iterator get_connection_iterator(const connection_ptr& c);
       string disconnect(const string& host);
       void close_all();
 
@@ -4512,10 +4512,11 @@ namespace eosio {
       });
    }
 
-   const connections_manager::connection_details_index::index_const_iterator<by_connection>& connections_manager::get_connection_iterator(const connection_ptr& c) {
+   connections_manager::connection_details_index::iterator connections_manager::get_connection_iterator(const connection_ptr& c) {
       std::lock_guard g( connections_mtx );
       const auto& index = connections.get<by_connection>();
-      return index.find(c);
+      auto i = index.find(c);
+      return connections.project<by_host>(i);
    }
 
    // called by API

From 97591fe467defed6d96d7efe6ffcf76d48198c92 Mon Sep 17 00:00:00 2001
From: Jonathan Giszczak <jonrg@hypercubepc.com>
Date: Wed, 20 Sep 2023 15:49:25 -0500
Subject: [PATCH 19/61] Use reconnect method as intended, and avoid threading
 issue.

---
 plugins/net_plugin/net_plugin.cpp | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp
index cd42cdbfcb..e4d7b2dd53 100644
--- a/plugins/net_plugin/net_plugin.cpp
+++ b/plugins/net_plugin/net_plugin.cpp
@@ -4656,8 +4656,10 @@ namespace eosio {
             if (!c->incoming()) {
                g.unlock();
                fc_dlog(logger, "attempting to connect in connection_monitor");
-               if (!resolve_and_connect(c->peer_address(), c->listen_address)) {
+               if (!c->reconnect()) {
+                  g.lock();
                   it = index.erase(it);
+                  g.unlock();
                   --num_peers;
                   ++num_rm;
                   continue;

From ffee0df209a0c348faea4310363e963f5ab8f366 Mon Sep 17 00:00:00 2001
From: Jonathan Giszczak <jonrg@hypercubepc.com>
Date: Wed, 20 Sep 2023 18:00:49 -0500
Subject: [PATCH 20/61] Use std::any_of when finding supplied peers to unlimit
 a connection.
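
std::any_of stops at the first matching supplied peer, whereas the
previous for_each kept walking the whole set after the connection was
already unthrottled. A standalone illustration of the early exit:

    #include <algorithm>
    #include <cassert>
    #include <cstddef>
    #include <string>
    #include <vector>

    int main() {
       const std::vector<std::string> supplied_peers{"a:9876", "b:9876", "c:9876"};
       std::size_t limit   = 1000000;   // bytes/s; 0 means unthrottled
       int         visited = 0;

       bool matched = std::any_of(supplied_peers.begin(), supplied_peers.end(),
          [&](const std::string& peer) {
             ++visited;
             if (peer == "b:9876") {    // configured peer: lift the throttle
                limit = 0;
                return true;            // any_of stops here
             }
             return false;
          });

       assert(matched && limit == 0 && visited == 2);
       return 0;
    }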

---
 plugins/net_plugin/net_plugin.cpp | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp
index e4d7b2dd53..577c00db57 100644
--- a/plugins/net_plugin/net_plugin.cpp
+++ b/plugins/net_plugin/net_plugin.cpp
@@ -424,7 +424,7 @@ namespace eosio {
       vector<connection_status> connection_statuses() const;
 
       template <typename Function>
-      void for_each_supplied_peer(Function&& f) const;
+      bool for_any_supplied_peer(Function&& f) const;
 
       template <typename Function>
       void for_each_connection(Function&& f) const;
@@ -1184,9 +1184,9 @@ namespace eosio {
 
 
    template<typename Function>
-   void connections_manager::for_each_supplied_peer( Function&& f ) const {
+   bool connections_manager::for_any_supplied_peer( Function&& f ) const {
       std::shared_lock g( connections_mtx );
-      std::for_each(supplied_peers.begin(), supplied_peers.end(), std::forward<Function>(f));
+      return std::any_of(supplied_peers.begin(), supplied_peers.end(), std::forward<Function>(f));
    }
 
    template<typename Function>
@@ -2795,14 +2795,16 @@ namespace eosio {
                visitors < connections.get_max_client_count())) {
             fc_ilog(logger, "Accepted new connection: " + paddr_str);
 
-            connections.for_each_supplied_peer([&listen_address, &paddr_str, &limit](const string& peer_addr) {
+            connections.for_any_supplied_peer([&listen_address, &paddr_str, &limit](const string& peer_addr) {
                auto [host, port, type] = split_host_port_type(peer_addr);
                if (host == paddr_str) {
                   if (limit > 0) {
                      fc_dlog(logger, "Connection inbound to ${la} from ${a} is a configured p2p-peer-address and will not be throttled", ("la", listen_address)("a", paddr_str));
                   }
                   limit = 0;
+                  return true;
                }
+               return false;
             });
 
             connection_ptr new_connection = std::make_shared<connection>(std::move(socket), listen_address, limit);

From fb740ef57264587596c0844b0dd409809c3b3170 Mon Sep 17 00:00:00 2001
From: Jonathan Giszczak <jonrg@hypercubepc.com>
Date: Fri, 22 Sep 2023 20:10:38 -0500
Subject: [PATCH 21/61] Change language in p2p_multiple_listen_test for
 clarity.

---
 tests/p2p_multiple_listen_test.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/tests/p2p_multiple_listen_test.py b/tests/p2p_multiple_listen_test.py
index 62f1534c63..7f537e9d35 100755
--- a/tests/p2p_multiple_listen_test.py
+++ b/tests/p2p_multiple_listen_test.py
@@ -75,7 +75,7 @@
                 assert conn['last_handshake']['p2p_address'].split()[0] == 'localhost:9878', f"Connected node is listening on '{conn['last_handshake']['p2p_address'].split()[0]}' instead of port 9878"
             elif conn['last_handshake']['agent'] == 'node-04':
                 assert conn['last_handshake']['p2p_address'].split()[0] == 'localhost:9880', f"Connected node is listening on '{conn['last_handshake']['p2p_address'].split()[0]}' instead of port 9880"
-    assert open_socket_count == 2, 'Node 0 is expected to have only two open sockets'
+    assert open_socket_count == 2, 'Node 0 is expected to have exactly two open sockets'
 
     connections = cluster.nodes[2].processUrllibRequest('net', 'connections')
     open_socket_count = 0
@@ -84,7 +84,7 @@
             open_socket_count += 1
             assert conn['last_handshake']['agent'] == 'node-00', f"Connected node identifed as '{conn['last_handshake']['agent']}' instead of node-00"
             assert conn['last_handshake']['p2p_address'].split()[0] == 'ext-ip0:20000', f"Connected node is advertising '{conn['last_handshake']['p2p_address'].split()[0]}' instead of ext-ip0:20000"
-    assert open_socket_count == 1, 'Node 2 is expected to have only one open socket'
+    assert open_socket_count == 1, 'Node 2 is expected to have exactly one open socket'
 
     connections = cluster.nodes[4].processUrllibRequest('net', 'connections')
     open_socket_count = 0
@@ -93,7 +93,7 @@
             open_socket_count += 1
             assert conn['last_handshake']['agent'] == 'node-00', f"Connected node identifed as '{conn['last_handshake']['agent']}' instead of node-00"
             assert conn['last_handshake']['p2p_address'].split()[0] == 'ext-ip1:20001', f"Connected node is advertising '{conn['last_handshake']['p2p_address'].split()[0]} 'instead of ext-ip1:20001"
-    assert open_socket_count == 1, 'Node 4 is expected to have only one open socket'
+    assert open_socket_count == 1, 'Node 4 is expected to have exactly one open socket'
 
     testSuccessful=True
 finally:

From 453bfcfe2e91b7833013fd3779ac626e1b4f5d43 Mon Sep 17 00:00:00 2001
From: Jonathan Giszczak <jonrg@hypercubepc.com>
Date: Fri, 22 Sep 2023 20:11:31 -0500
Subject: [PATCH 22/61] Tolerate duplicate (empty) peer addresses in connection
 manager.
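
Inbound connections carry an empty peer address, so several of them
collide on the by_host key; making that index ordered_non_unique lets
them coexist. A minimal standalone example of the difference:

    #include <boost/multi_index_container.hpp>
    #include <boost/multi_index/ordered_index.hpp>
    #include <boost/multi_index/member.hpp>
    #include <cassert>
    #include <string>

    namespace bmi = boost::multi_index;

    struct detail { std::string host; int id; };

    using details_index = bmi::multi_index_container<
       detail,
       bmi::indexed_by<
          bmi::ordered_non_unique<bmi::tag<struct by_host>,   // was ordered_unique
             bmi::member<detail, std::string, &detail::host>>
       >
    >;

    int main() {
       details_index connections;
       connections.insert(detail{"", 1});   // first inbound connection, no peer address
       connections.insert(detail{"", 2});   // second inbound connection
       assert(connections.size() == 2);     // with ordered_unique the second insert is dropped
       return 0;
    }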

---
 plugins/net_plugin/net_plugin.cpp | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp
index 577c00db57..4351676af1 100644
--- a/plugins/net_plugin/net_plugin.cpp
+++ b/plugins/net_plugin/net_plugin.cpp
@@ -354,7 +354,7 @@ namespace eosio {
       using connection_details_index = multi_index_container<
          connection_details,
          indexed_by<
-            ordered_unique<
+            ordered_non_unique<
                tag<struct by_host>,
                key<&connection_details::host>
             >,
@@ -4558,10 +4558,10 @@ namespace eosio {
    vector<connection_status> connections_manager::connection_statuses()const {
       vector<connection_status> result;
       std::shared_lock g( connections_mtx );
-      auto& index = connections.get<by_host>();
+      auto& index = connections.get<by_connection>();
       result.reserve( index.size() );
       for( const connection_ptr& c : index ) {
-         result.push_back( c->get_status() );
+         result.emplace_back( c->get_status() );
       }
       return result;
    }

From 99f02c0641b0f776fa66f7fabc89f39c2a1d0e1d Mon Sep 17 00:00:00 2001
From: Jonathan Giszczak <jonrg@hypercubepc.com>
Date: Tue, 26 Sep 2023 19:27:34 -0500
Subject: [PATCH 23/61] Add rate limit parse unittest.
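
The expected values follow the usual decimal/binary prefixes: "KB" is
10^3 and "KiB" is 2^10, so "640KB/s" is 640,000 B/s and "20MiB/s" is
20 * 1024 * 1024 = 20,971,520 B/s. A small standalone check of the
multipliers the test assumes (illustrative; it mirrors, not reuses, the
plugin's prefix_multipliers map):

    #include <cassert>
    #include <cstddef>
    #include <map>
    #include <string>

    int main() {
       const std::map<std::string, std::size_t> multipliers{
          {"K", 1000},                {"Ki", 1024},
          {"M", 1000000},             {"Mi", 1024ull * 1024},
          {"G", 1000000000},          {"Gi", 1024ull * 1024 * 1024},
          {"T", 1000000000000ull},    {"Ti", 1024ull * 1024 * 1024 * 1024}
       };
       assert(640 * multipliers.at("K")  == 640000);     // "640KB/s"
       assert(20  * multipliers.at("Mi") == 20971520);   // "20MiB/s"
       assert(0.5 * multipliers.at("K")  == 500);        // "0.5KB/s"
       return 0;
    }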

---
 plugins/net_plugin/net_plugin.cpp             | 60 ++++++++++---------
 plugins/net_plugin/tests/CMakeLists.txt       | 10 +++-
 .../tests/rate_limit_parse_unittest.cpp       | 54 +++++++++++++++++
 3 files changed, 96 insertions(+), 28 deletions(-)
 create mode 100644 plugins/net_plugin/tests/rate_limit_parse_unittest.cpp

diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp
index 4351676af1..62ec05256a 100644
--- a/plugins/net_plugin/net_plugin.cpp
+++ b/plugins/net_plugin/net_plugin.cpp
@@ -573,7 +573,8 @@ namespace eosio {
 
       constexpr static uint16_t to_protocol_version(uint16_t v);
 
-      size_t parse_connection_rate_limit(const string& limit_str);
+      std::tuple<std::string, size_t> parse_listen_address(const std::string& peer) const;
+      size_t parse_connection_rate_limit(const string& limit_str) const;
       void plugin_initialize(const variables_map& options);
       void plugin_startup();
       void plugin_shutdown();
@@ -4056,13 +4057,34 @@ namespace eosio {
       return fc::json::from_string(s).as<T>();
    }
 
-   size_t net_plugin_impl::parse_connection_rate_limit( const std::string& limit_str) {
+   std::tuple<std::string, size_t> net_plugin_impl::parse_listen_address( const std::string& address ) const {
+      auto listen_addr = address;
+      auto limit = string("0");
+      auto last_colon_location = address.rfind(':');
+      if( auto right_bracket_location = address.find(']'); right_bracket_location != address.npos ) {
+         if( std::count(address.begin()+right_bracket_location, address.end(), ':') > 1 ) {
+            listen_addr = std::string(address, 0, last_colon_location);
+            limit = std::string(address, last_colon_location+1);
+         }
+      } else {
+         if( auto colon_count = std::count(address.begin(), address.end(), ':'); colon_count > 1 ) {
+            EOS_ASSERT( colon_count <= 2, plugin_config_exception, "Invalid address specification ${addr}; IPv6 addresses must be enclosed in square brackets.", ("addr", address));
+            listen_addr = std::string(address, 0, last_colon_location);
+            limit = std::string(address, last_colon_location+1);
+         }
+      }
+      auto block_sync_rate_limit = parse_connection_rate_limit(limit);
+
+      return {listen_addr, block_sync_rate_limit};
+   }
+
+   size_t net_plugin_impl::parse_connection_rate_limit( const std::string& limit_str) const {
       std::istringstream in(limit_str);
       fc_dlog( logger, "parsing connection endpoint limit ${limit} with locale ${l}", ("limit", limit_str)("l", std::locale("").name()));
       in.imbue(std::locale(""));
       double limit{0};
       in >> limit;
-      EOS_ASSERT(limit >= 0.0, plugin_config_exception, "block sync rate limit must be positive: ${limit}", ("limit", limit_str));
+      EOS_ASSERT(limit >= 0.0, plugin_config_exception, "block sync rate limit must not be negative: ${limit}", ("limit", limit_str));
       size_t block_sync_rate_limit = 0;
       if( limit > 0.0 ) {
          std::string units;
@@ -4070,14 +4092,14 @@ namespace eosio {
          std::regex units_regex{"([KMGT]?[i]?)B/s"};
          std::smatch units_match;
          std::regex_match(units, units_match, units_regex);
-         try {
-            if( units.length() > 0 ) {
-               EOS_ASSERT(units_match.size() == 2, plugin_config_exception, "invalid block sync rate limit specification: ${limit}", ("limit", units));
+         if( units.length() > 0 ) {
+            EOS_ASSERT(units_match.size() == 2, plugin_config_exception, "invalid block sync rate limit specification: ${limit}", ("limit", units));
+            try {
+               block_sync_rate_limit = boost::numeric_cast<size_t>(limit * prefix_multipliers.at(units_match[1].str()));
+               fc_ilog( logger, "setting block_sync_rate_limit to ${limit} megabytes per second", ("limit", double(block_sync_rate_limit)/1000000));
+            } catch (boost::numeric::bad_numeric_cast&) {
+               EOS_THROW(plugin_config_exception, "block sync rate limit specification overflowed: ${limit}", ("limit", limit_str));
             }
-            block_sync_rate_limit = boost::numeric_cast<size_t>(limit * prefix_multipliers.at(units_match[1].str()));
-            fc_ilog( logger, "setting block_sync_rate_limit to ${limit} megabytes per second", ("limit", double(block_sync_rate_limit)/1000000));
-         } catch (boost::numeric::bad_numeric_cast&) {
-            EOS_THROW(plugin_config_exception, "block sync rate limit specification overflowed: ${limit}", ("limit", limit_str));
          }
       }
       return block_sync_rate_limit;
@@ -4298,23 +4320,7 @@ namespace eosio {
                std::string extra_listening_log_info =
                      ", max clients is " + std::to_string(my->connections.get_max_client_count());
 
-               auto listen_addr = address;
-               auto limit = string("0");
-               auto last_colon_location = address.rfind(':');
-               if( auto right_bracket_location = address.find(']'); right_bracket_location != address.npos ) {
-                  if( std::count(address.begin()+right_bracket_location, address.end(), ':') > 1 ) {
-                     listen_addr = std::string(address, 0, last_colon_location);
-                     limit = std::string(address, last_colon_location+1);
-                  }
-               } else {
-                  if( auto colon_count = std::count(address.begin(), address.end(), ':'); colon_count > 1 ) {
-                     EOS_ASSERT( colon_count <= 2, plugin_config_exception, "Invalid address specification ${addr}; IPv6 addresses must be enclosed in square brackets.", ("addr", address));
-                     listen_addr = std::string(address, 0, last_colon_location);
-                     limit = std::string(address, last_colon_location+1);
-                  }
-               }
-
-               auto block_sync_rate_limit = my->parse_connection_rate_limit(limit);
+               auto [listen_addr, block_sync_rate_limit] = my->parse_listen_address(address);
 
                fc::create_listener<tcp>(
                      my->thread_pool.get_executor(), logger, accept_timeout, listen_addr, extra_listening_log_info,
diff --git a/plugins/net_plugin/tests/CMakeLists.txt b/plugins/net_plugin/tests/CMakeLists.txt
index bcabe6428f..210a748e07 100644
--- a/plugins/net_plugin/tests/CMakeLists.txt
+++ b/plugins/net_plugin/tests/CMakeLists.txt
@@ -5,4 +5,12 @@ target_link_libraries(auto_bp_peering_unittest eosio_chain)
 
 target_include_directories(auto_bp_peering_unittest PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/../include" )
 
-add_test(auto_bp_peering_unittest auto_bp_peering_unittest)
\ No newline at end of file
+add_test(auto_bp_peering_unittest auto_bp_peering_unittest)
+
+add_executable(rate_limit_parse_unittest rate_limit_parse_unittest.cpp)
+
+target_link_libraries(rate_limit_parse_unittest net_plugin)
+
+target_include_directories(rate_limit_parse_unittest PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/../include")
+
+add_test(rate_limit_parse_unittest rate_limit_parse_unittest)
diff --git a/plugins/net_plugin/tests/rate_limit_parse_unittest.cpp b/plugins/net_plugin/tests/rate_limit_parse_unittest.cpp
new file mode 100644
index 0000000000..01c84e8a67
--- /dev/null
+++ b/plugins/net_plugin/tests/rate_limit_parse_unittest.cpp
@@ -0,0 +1,54 @@
+#define BOOST_TEST_MODULE rate_limit_parsing
+#include <boost/test/included/unit_test.hpp>
+#include "../net_plugin.cpp"
+
+BOOST_AUTO_TEST_CASE(test_parse_rate_limit) {
+   eosio::net_plugin_impl plugin_impl;
+   std::vector<std::string> p2p_addresses = {
+        "0.0.0.0:9876"
+      , "0.0.0.0:9776:0"
+      , "0.0.0.0:9877:640KB/s"
+      , "192.168.0.1:9878:20MiB/s"
+      , "localhost:9879:0.5KB/s"
+      , "[2001:db8:85a3:8d3:1319:8a2e:370:7348]:9876:250KB/s"
+      , "[::1]:9876:250KB/s"
+      , "2001:db8:85a3:8d3:1319:8a2e:370:7348:9876:250KB/s"
+      , "[::1]:9876:-250KB/s"
+      , "0.0.0.0:9877:640Kb/s"
+      , "0.0.0.0:9877:999999999999999999999999999TiB/s"
+   };
+   size_t which = 0;
+   auto [listen_addr, block_sync_rate_limit] = plugin_impl.parse_listen_address(p2p_addresses[which++]);
+   BOOST_CHECK_EQUAL(listen_addr, "0.0.0.0:9876");
+   BOOST_CHECK_EQUAL(block_sync_rate_limit, 0);
+   std::tie(listen_addr, block_sync_rate_limit) = plugin_impl.parse_listen_address(p2p_addresses[which++]);
+   BOOST_CHECK_EQUAL(listen_addr, "0.0.0.0:9776");
+   BOOST_CHECK_EQUAL(block_sync_rate_limit, 0);
+   std::tie(listen_addr, block_sync_rate_limit) = plugin_impl.parse_listen_address(p2p_addresses[which++]);
+   BOOST_CHECK_EQUAL(listen_addr, "0.0.0.0:9877");
+   BOOST_CHECK_EQUAL(block_sync_rate_limit, 640000);
+   std::tie(listen_addr, block_sync_rate_limit) = plugin_impl.parse_listen_address(p2p_addresses[which++]);
+   BOOST_CHECK_EQUAL(listen_addr, "192.168.0.1:9878");
+   BOOST_CHECK_EQUAL(block_sync_rate_limit, 20971520);
+   std::tie(listen_addr, block_sync_rate_limit) = plugin_impl.parse_listen_address(p2p_addresses[which++]);
+   BOOST_CHECK_EQUAL(listen_addr, "localhost:9879");
+   BOOST_CHECK_EQUAL(block_sync_rate_limit, 500);
+   std::tie(listen_addr, block_sync_rate_limit) = plugin_impl.parse_listen_address(p2p_addresses[which++]);
+   BOOST_CHECK_EQUAL(listen_addr, "[2001:db8:85a3:8d3:1319:8a2e:370:7348]:9876");
+   BOOST_CHECK_EQUAL(block_sync_rate_limit, 250000);
+   std::tie(listen_addr, block_sync_rate_limit) = plugin_impl.parse_listen_address(p2p_addresses[which++]);
+   BOOST_CHECK_EQUAL(listen_addr, "[::1]:9876");
+   BOOST_CHECK_EQUAL(block_sync_rate_limit, 250000);
+   BOOST_CHECK_EXCEPTION(plugin_impl.parse_listen_address(p2p_addresses[which++]), eosio::chain::plugin_config_exception,
+                         [](const eosio::chain::plugin_config_exception& e)
+                         {return std::strstr(e.top_message().c_str(), "IPv6 addresses must be enclosed in square brackets");});
+   BOOST_CHECK_EXCEPTION(plugin_impl.parse_listen_address(p2p_addresses[which++]), eosio::chain::plugin_config_exception,
+                         [](const eosio::chain::plugin_config_exception& e)
+                         {return std::strstr(e.top_message().c_str(), "block sync rate limit must not be negative");});
+   BOOST_CHECK_EXCEPTION(plugin_impl.parse_listen_address(p2p_addresses[which++]), eosio::chain::plugin_config_exception,
+                         [](const eosio::chain::plugin_config_exception& e)
+                         {return std::strstr(e.top_message().c_str(), "invalid block sync rate limit specification");});
+   BOOST_CHECK_EXCEPTION(plugin_impl.parse_listen_address(p2p_addresses[which++]), eosio::chain::plugin_config_exception,
+                         [](const eosio::chain::plugin_config_exception& e)
+                         {return std::strstr(e.top_message().c_str(), "block sync rate limit specification overflowed");});
+}

From b16184aa25c5a2eafc9da0c7d8df7cba5490db30 Mon Sep 17 00:00:00 2001
From: Jonathan Giszczak <jonrg@hypercubepc.com>
Date: Wed, 27 Sep 2023 14:42:24 -0500
Subject: [PATCH 24/61] Tolerate node running with no listen endpoints.

Add mock_connection constructor required by clang14.
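
std::make_shared constructs the object with parentheses, and clang 14
does not implement C++20 parenthesized aggregate initialization (P0960),
so the aggregate needs an explicit constructor once the tests switch to
make_shared. A reduced standalone example of the failure mode:

    #include <memory>

    struct mock_connection {
       bool is_bp_connection   = false;
       bool is_open            = false;
       bool handshake_received = false;

       // Needed for std::make_shared<mock_connection>(...) on clang 14, which
       // rejects parenthesized aggregate initialization.
       mock_connection(bool bp_connection, bool open, bool received)
          : is_bp_connection(bp_connection), is_open(open), handshake_received(received) {}
    };

    int main() {
       auto c = std::make_shared<mock_connection>(true, true, false);
       return c->is_open ? 0 : 1;
    }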
---
 .../include/eosio/net_plugin/auto_bp_peering.hpp      |  2 +-
 plugins/net_plugin/net_plugin.cpp                     | 11 +++++++++--
 plugins/net_plugin/tests/auto_bp_peering_unittest.cpp |  6 ++++++
 3 files changed, 16 insertions(+), 3 deletions(-)

diff --git a/plugins/net_plugin/include/eosio/net_plugin/auto_bp_peering.hpp b/plugins/net_plugin/include/eosio/net_plugin/auto_bp_peering.hpp
index 8a4f736680..d88c47729e 100644
--- a/plugins/net_plugin/include/eosio/net_plugin/auto_bp_peering.hpp
+++ b/plugins/net_plugin/include/eosio/net_plugin/auto_bp_peering.hpp
@@ -182,7 +182,7 @@ class bp_connection_manager {
 
                fc_dlog(self()->get_logger(), "pending_downstream_neighbors: ${pending_downstream_neighbors}",
                        ("pending_downstream_neighbors", to_string(pending_downstream_neighbors)));
-               for (auto neighbor : pending_downstream_neighbors) { self()->connections.resolve_and_connect(config.bp_peer_addresses[neighbor], *self()->p2p_addresses.begin() ); }
+               for (auto neighbor : pending_downstream_neighbors) { self()->connections.resolve_and_connect(config.bp_peer_addresses[neighbor], self()->get_first_p2p_address() ); }
 
                pending_neighbors = std::move(pending_downstream_neighbors);
                finder.add_upstream_neighbors(pending_neighbors);
diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp
index 62ec05256a..f22636bd65 100644
--- a/plugins/net_plugin/net_plugin.cpp
+++ b/plugins/net_plugin/net_plugin.cpp
@@ -454,6 +454,7 @@ namespace eosio {
        */
       vector<string>                        p2p_addresses;
       vector<string>                        p2p_server_addresses;
+      const string&                         get_first_p2p_address() const;
 
       vector<chain::public_key_type>        allowed_peers; ///< peer keys allowed to connect
       std::map<chain::public_key_type,
@@ -582,6 +583,8 @@ namespace eosio {
       fc::logger& get_logger() { return logger; }
 
       void create_session(tcp::socket&& socket, const string listen_address, size_t limit);
+
+      std::string empty{};
    }; //net_plugin_impl
 
    // peer_[x]log must be called from thread in connection strand
@@ -2770,6 +2773,10 @@ namespace eosio {
    }
 
 
+   const string& net_plugin_impl::get_first_p2p_address() const {
+      return p2p_addresses.size() > 0 ? *p2p_addresses.begin() : empty;
+   }
+
    void net_plugin_impl::create_session(tcp::socket&& socket, const string listen_address, size_t limit) {
       uint32_t                  visitors  = 0;
       uint32_t                  from_addr = 0;
@@ -4337,7 +4344,7 @@ namespace eosio {
          my->ticker();
          my->start_monitors();
          my->update_chain_info();
-         my->connections.connect_supplied_peers(*my->p2p_addresses.begin()); // attribute every outbound connection to the first listen port
+         my->connections.connect_supplied_peers(my->get_first_p2p_address()); // attribute every outbound connection to the first listen port when one exists
       });
    }
 
@@ -4374,7 +4381,7 @@ namespace eosio {
 
    /// RPC API
    string net_plugin::connect( const string& host ) {
-      return my->connections.connect( host, *my->p2p_addresses.begin() );
+      return my->connections.connect( host, my->get_first_p2p_address() );
    }
 
    /// RPC API
diff --git a/plugins/net_plugin/tests/auto_bp_peering_unittest.cpp b/plugins/net_plugin/tests/auto_bp_peering_unittest.cpp
index d9e0594793..57c7a8f6a1 100644
--- a/plugins/net_plugin/tests/auto_bp_peering_unittest.cpp
+++ b/plugins/net_plugin/tests/auto_bp_peering_unittest.cpp
@@ -6,6 +6,11 @@ struct mock_connection {
    bool is_bp_connection   = false;
    bool is_open            = false;
    bool handshake_received = false;
+   mock_connection(bool bp_connection, bool open, bool received)
+     : is_bp_connection(bp_connection)
+     , is_open(open)
+     , handshake_received(received)
+   {}
 
    bool socket_is_open() const { return is_open; }
    bool incoming_and_handshake_received() const { return handshake_received; }
@@ -37,6 +42,7 @@ struct mock_net_plugin : eosio::auto_bp_peering::bp_connection_manager<mock_net_
    bool                         is_in_sync = false;
    mock_connections_manager     connections;
    std::vector<std::string>     p2p_addresses{"0.0.0.0:9876"};
+   const std::string&           get_first_p2p_address() const { return *p2p_addresses.begin(); }
 
    bool in_sync() { return is_in_sync; }
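
The fallback introduced in this patch boils down to one pattern: never dereference begin() of a possibly empty vector, and hand back a reference to a long-lived empty string instead. A minimal sketch, using hypothetical names (listen_addresses, first_or_empty) rather than the plugin's types:

   #include <string>
   #include <vector>

   // Illustrative owner type, not net_plugin_impl itself.
   struct listen_addresses {
      std::vector<std::string> p2p_addresses;
      std::string              empty{};     // outlives any returned reference

      const std::string& first_or_empty() const {
         return p2p_addresses.empty() ? empty : p2p_addresses.front();
      }
   };

Returning a reference to a member rather than to a function-local temporary keeps the reference valid for as long as the owner lives, which is why the patch adds an empty string member instead of constructing one on demand.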
 

From df6d948acb2a8c62d512ba1c02311a12e98eaa50 Mon Sep 17 00:00:00 2001
From: Jonathan Giszczak <jonrg@hypercubepc.com>
Date: Wed, 27 Sep 2023 14:46:34 -0500
Subject: [PATCH 25/61] Restore lock of connections mutex when connecting
 configured peers.

---
 plugins/net_plugin/net_plugin.cpp | 1 +
 1 file changed, 1 insertion(+)

diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp
index f22636bd65..bdc43ade77 100644
--- a/plugins/net_plugin/net_plugin.cpp
+++ b/plugins/net_plugin/net_plugin.cpp
@@ -4456,6 +4456,7 @@ namespace eosio {
    }
 
    void connections_manager::connect_supplied_peers(const string& p2p_address) {
+      std::unique_lock g(connections_mtx);
       for (const auto& peer : supplied_peers) {
          resolve_and_connect(peer, p2p_address);
       }

From 733849b51c3f4b354404d40ab32ffefbd029da78 Mon Sep 17 00:00:00 2001
From: Jonathan Giszczak <jonrg@hypercubepc.com>
Date: Wed, 27 Sep 2023 15:27:11 -0500
Subject: [PATCH 26/61] Don't pass around iterators that may be invalidated by
 an erase.

---
 plugins/net_plugin/net_plugin.cpp | 37 ++++++++++++++-----------------
 1 file changed, 17 insertions(+), 20 deletions(-)

diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp
index bdc43ade77..b4073814f0 100644
--- a/plugins/net_plugin/net_plugin.cpp
+++ b/plugins/net_plugin/net_plugin.cpp
@@ -361,10 +361,6 @@ namespace eosio {
             ordered_unique<
                tag<struct by_connection>,
                key<&connection_details::c>
-            >,
-            ordered_non_unique<
-               tag<struct by_active_ip>,
-               key<&connection_details::active_ip>
             >
          >
       >;
@@ -415,7 +411,7 @@ namespace eosio {
       void add(connection_ptr c);
       string connect(const string& host, const string& p2p_address);
       string resolve_and_connect(const string& host, const string& p2p_address);
-      void update_connection_endpoint(connection_details_index::const_iterator it, const tcp::endpoint& endpoint);
+      void update_connection_endpoint(connection_ptr c, const tcp::endpoint& endpoint);
       connection_details_index::iterator get_connection_iterator(const connection_ptr& c);
       string disconnect(const string& host);
       void close_all();
@@ -810,7 +806,8 @@ namespace eosio {
       /// assignment not allowed
       block_status_monitor& operator=( const block_status_monitor& ) = delete;
       block_status_monitor& operator=( block_status_monitor&& ) = delete;
-   };
+   }; // block_status_monitor
+
 
    class connection : public std::enable_shared_from_this<connection> {
    public:
@@ -986,8 +983,7 @@ namespace eosio {
       bool populate_handshake( handshake_message& hello ) const;
 
       bool reconnect();
-      void connect( const tcp::resolver::results_type& endpoints,
-                    connections_manager::connection_details_index::const_iterator conn_details );
+      void connect( const tcp::resolver::results_type& endpoints );
       void start_read_message();
 
       /** \brief Process the next message from the pending message buffer
@@ -2741,22 +2737,21 @@ namespace eosio {
       connection_ptr c = shared_from_this();
       strand.post([c]() {
          auto it = my_impl->connections.get_connection_iterator(c);
-         c->connect(it->ips, it);
+         c->connect(it->ips);
       });
       return true;
    }
 
    // called from connection strand
-   void connection::connect( const tcp::resolver::results_type& endpoints,
-                             connections_manager::connection_details_index::const_iterator conn_details ) {
+   void connection::connect( const tcp::resolver::results_type& endpoints ) {
       set_state(connection_state::connecting);
       pending_message_buffer.reset();
       buffer_queue.clear_out_queue();
       boost::asio::async_connect( *socket, endpoints,
          boost::asio::bind_executor( strand,
-               [c = shared_from_this(), socket=socket, conn_details]( const boost::system::error_code& err, const tcp::endpoint& endpoint ) {
+               [c = shared_from_this(), socket=socket]( const boost::system::error_code& err, const tcp::endpoint& endpoint ) {
             if( !err && socket->is_open() && socket == c->socket ) {
-               my_impl->connections.update_connection_endpoint(conn_details, endpoint);
+               my_impl->connections.update_connection_endpoint(c, endpoint);
                c->update_endpoints(endpoint);
                if( c->start_session() ) {
                   c->send_handshake();
@@ -4506,7 +4501,7 @@ namespace eosio {
                .ips = results
             });
             if( !err ) {
-               it->c->connect( results, it );
+               it->c->connect( results );
             } else {
                fc_elog( logger, "Unable to resolve ${host}:${port} ${error}",
                         ("host", host)("port", port)( "error", err.message() ) );
@@ -4518,14 +4513,16 @@ namespace eosio {
       return "added connection";
    }
 
-   void connections_manager::update_connection_endpoint(connection_details_index::const_iterator it,
+   void connections_manager::update_connection_endpoint(connection_ptr c,
                                                         const tcp::endpoint& endpoint) {
-      fc_dlog(logger, "updating connection endpoint");
       std::unique_lock g( connections_mtx );
-      auto& index = connections.get<by_active_ip>();
-      index.modify_key(connections.project<by_active_ip>(it), [endpoint](tcp::endpoint& e) {
-         e = endpoint;
-      });
+      auto& index = connections.get<by_connection>();
+      const auto& it = index.find(c);
+      if( it != index.end() ) {
+         index.modify(it, [endpoint](connection_details& d) {
+            d.active_ip = endpoint;
+         });
+      }
    }
 
    connections_manager::connection_details_index::iterator connections_manager::get_connection_iterator(const connection_ptr& c) {
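
The safer shape this patch moves toward is to re-find the entry by its connection pointer each time, under the container's lock, instead of caching an iterator that an intervening erase may invalidate. A self-contained sketch of that lookup-and-modify pattern with Boost.MultiIndex (types simplified; an int stands in for tcp::endpoint, and any external locking is assumed to be handled by the caller):

   #include <memory>
   #include <boost/multi_index_container.hpp>
   #include <boost/multi_index/key.hpp>
   #include <boost/multi_index/ordered_index.hpp>
   #include <boost/multi_index/tag.hpp>

   struct connection;                                  // opaque stand-in
   using connection_ptr = std::shared_ptr<connection>;

   struct connection_detail {
      connection_ptr c;
      int            active_ip = 0;                    // stand-in for tcp::endpoint
   };

   using details_index = boost::multi_index_container<
      connection_detail,
      boost::multi_index::indexed_by<
         boost::multi_index::ordered_unique<
            boost::multi_index::tag<struct by_connection>,
            boost::multi_index::key<&connection_detail::c>>>>;

   // Look the entry up fresh instead of carrying an iterator across async boundaries.
   inline void update_endpoint(details_index& details, const connection_ptr& c, int endpoint) {
      auto& idx = details.get<by_connection>();
      if (auto it = idx.find(c); it != idx.end())
         idx.modify(it, [endpoint](connection_detail& d) { d.active_ip = endpoint; });
   }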

From ed692388965c86e9409dc0d3972efa81530dfca0 Mon Sep 17 00:00:00 2001
From: Jonathan Giszczak <jonrg@hypercubepc.com>
Date: Wed, 27 Sep 2023 15:33:19 -0500
Subject: [PATCH 27/61] Renamed method.

---
 plugins/net_plugin/net_plugin.cpp | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp
index b4073814f0..7b5774c395 100644
--- a/plugins/net_plugin/net_plugin.cpp
+++ b/plugins/net_plugin/net_plugin.cpp
@@ -420,7 +420,7 @@ namespace eosio {
       vector<connection_status> connection_statuses() const;
 
       template <typename Function>
-      bool for_any_supplied_peer(Function&& f) const;
+      bool any_of_supplied_peers(Function&& f) const;
 
       template <typename Function>
       void for_each_connection(Function&& f) const;
@@ -1184,7 +1184,7 @@ namespace eosio {
 
 
    template<typename Function>
-   bool connections_manager::for_any_supplied_peer( Function&& f ) const {
+   bool connections_manager::any_of_supplied_peers( Function&& f ) const {
       std::shared_lock g( connections_mtx );
       return std::any_of(supplied_peers.begin(), supplied_peers.end(), std::forward<Function>(f));
    }
@@ -2798,7 +2798,7 @@ namespace eosio {
                visitors < connections.get_max_client_count())) {
             fc_ilog(logger, "Accepted new connection: " + paddr_str);
 
-            connections.for_any_supplied_peer([&listen_address, &paddr_str, &limit](const string& peer_addr) {
+            connections.any_of_supplied_peers([&listen_address, &paddr_str, &limit](const string& peer_addr) {
                auto [host, port, type] = split_host_port_type(peer_addr);
                if (host == paddr_str) {
                   if (limit > 0) {
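
The rename aligns the helper with the std::any_of-style semantics it already had: the manager hides the lock and the container, and the caller only supplies a predicate over peer address strings. A small usage sketch under simplified, hypothetical types (peers_view, is_configured_peer):

   #include <algorithm>
   #include <mutex>
   #include <set>
   #include <string>
   #include <utility>

   struct peers_view {
      mutable std::mutex    mtx;
      std::set<std::string> supplied_peers;

      template <typename Pred>
      bool any_of_supplied_peers(Pred&& p) const {
         std::lock_guard g(mtx);
         return std::any_of(supplied_peers.begin(), supplied_peers.end(), std::forward<Pred>(p));
      }
   };

   // e.g. decide whether an inbound address matches a configured peer (port handling elided).
   inline bool is_configured_peer(const peers_view& v, const std::string& host) {
      return v.any_of_supplied_peers([&](const std::string& peer) {
         return peer.rfind(host, 0) == 0;     // peer entry begins with the host string
      });
   }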

From 2f80663a2343b495fad30d95d0e3cfb96ac80206 Mon Sep 17 00:00:00 2001
From: Jonathan Giszczak <jonrg@hypercubepc.com>
Date: Wed, 27 Sep 2023 15:49:19 -0500
Subject: [PATCH 28/61] Break encapsulation less.

Delegate reconnecting back to connections_manager rather than have
connection try to do it itself.
---
 plugins/net_plugin/net_plugin.cpp | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)

diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp
index 7b5774c395..b00462715e 100644
--- a/plugins/net_plugin/net_plugin.cpp
+++ b/plugins/net_plugin/net_plugin.cpp
@@ -412,7 +412,7 @@ namespace eosio {
       string connect(const string& host, const string& p2p_address);
       string resolve_and_connect(const string& host, const string& p2p_address);
       void update_connection_endpoint(connection_ptr c, const tcp::endpoint& endpoint);
-      connection_details_index::iterator get_connection_iterator(const connection_ptr& c);
+      void connect(const connection_ptr& c);
       string disconnect(const string& host);
       void close_all();
 
@@ -2731,13 +2731,12 @@ namespace eosio {
          fc::microseconds connector_period = my_impl->connections.get_connector_period();
          fc::lock_guard g( conn_mtx );
          if( last_close == fc::time_point() || last_close > fc::time_point::now() - connector_period ) {
-            return true;
+            return true; // true so doesn't remove from valid connections
          }
       }
       connection_ptr c = shared_from_this();
       strand.post([c]() {
-         auto it = my_impl->connections.get_connection_iterator(c);
-         c->connect(it->ips);
+         my_impl->connections.connect(c);
       });
       return true;
    }
@@ -4525,11 +4524,13 @@ namespace eosio {
       }
    }
 
-   connections_manager::connection_details_index::iterator connections_manager::get_connection_iterator(const connection_ptr& c) {
+   void connections_manager::connect(const connection_ptr& c) {
       std::lock_guard g( connections_mtx );
       const auto& index = connections.get<by_connection>();
-      auto i = index.find(c);
-      return connections.project<by_host>(i);
+      const auto& it = index.find(c);
+      if( it != index.end() ) {
+         it->c->connect( it->ips );
+      }
    }
 
    // called by API
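
The encapsulation point here is that only connections_manager ever touches its container: the connection posts itself to its strand and asks the manager to do the lookup under the manager's own mutex. A simplified sketch of that delegation, with a plain callback standing in for the asio strand and a std::map standing in for the multi-index (all names here are illustrative):

   #include <functional>
   #include <map>
   #include <memory>
   #include <mutex>
   #include <string>
   #include <vector>

   struct conn;
   using conn_ptr = std::shared_ptr<conn>;

   // Simplified manager: owns the container and the mutex, so only it handles iterators.
   struct manager {
      std::mutex mtx;
      std::map<conn_ptr, std::vector<std::string>> resolved_ips;   // stand-in for the multi-index
      void connect(const conn_ptr& c);
   };

   struct conn : std::enable_shared_from_this<conn> {
      manager& mgr;
      explicit conn(manager& m) : mgr(m) {}

      void reconnect() {
         auto self = shared_from_this();
         // Stand-in for strand.post: the deferred work asks the manager to look the
         // connection up again rather than carrying an iterator across the deferral.
         std::function<void()> deferred = [self] { self->mgr.connect(self); };
         deferred();                                               // a strand would run this later
      }
      void connect_to(const std::vector<std::string>& /*ips*/) {}
   };

   void manager::connect(const conn_ptr& c) {
      std::lock_guard g(mtx);
      if (auto it = resolved_ips.find(c); it != resolved_ips.end())
         c->connect_to(it->second);                                // entry re-validated under the lock
   }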

From 7019b657a7cb5d887ee9e41a6859410c0aa26b08 Mon Sep 17 00:00:00 2001
From: Jonathan Giszczak <jonrg@hypercubepc.com>
Date: Wed, 27 Sep 2023 15:53:45 -0500
Subject: [PATCH 29/61] Thread safety.

---
 plugins/net_plugin/net_plugin.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp
index b00462715e..fde60d3255 100644
--- a/plugins/net_plugin/net_plugin.cpp
+++ b/plugins/net_plugin/net_plugin.cpp
@@ -4548,9 +4548,9 @@ namespace eosio {
    }
 
    void connections_manager::close_all() {
+      std::lock_guard g( connections_mtx );
       auto& index = connections.get<by_host>();
       fc_ilog( logger, "close all ${s} connections", ("s", index.size()) );
-      std::lock_guard g( connections_mtx );
       for( const connection_ptr& c : index ) {
          fc_dlog( logger, "close: ${cid}", ("cid", c->connection_id) );
          c->close( false, true );

From 4d136e33a2b1f80fae149427167f486fee1be7cc Mon Sep 17 00:00:00 2001
From: Jonathan Giszczak <jonrg@hypercubepc.com>
Date: Wed, 27 Sep 2023 16:00:48 -0500
Subject: [PATCH 30/61] Revert "Restore lock of connections mutex when
 connecting configured peers."

This reverts commit df6d948acb2a8c62d512ba1c02311a12e98eaa50.
---
 plugins/net_plugin/net_plugin.cpp | 1 -
 1 file changed, 1 deletion(-)

diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp
index fde60d3255..59fabe5913 100644
--- a/plugins/net_plugin/net_plugin.cpp
+++ b/plugins/net_plugin/net_plugin.cpp
@@ -4450,7 +4450,6 @@ namespace eosio {
    }
 
    void connections_manager::connect_supplied_peers(const string& p2p_address) {
-      std::unique_lock g(connections_mtx);
       for (const auto& peer : supplied_peers) {
          resolve_and_connect(peer, p2p_address);
       }

From 3708418ae178e8f9602441db4ae3267b018aa9f2 Mon Sep 17 00:00:00 2001
From: Jonathan Giszczak <jonrg@hypercubepc.com>
Date: Wed, 27 Sep 2023 16:19:35 -0500
Subject: [PATCH 31/61] Restore lock of connections mutex when connecting
 configured peers.

---
 plugins/net_plugin/net_plugin.cpp | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp
index 59fabe5913..57987e58a7 100644
--- a/plugins/net_plugin/net_plugin.cpp
+++ b/plugins/net_plugin/net_plugin.cpp
@@ -4450,7 +4450,10 @@ namespace eosio {
    }
 
    void connections_manager::connect_supplied_peers(const string& p2p_address) {
-      for (const auto& peer : supplied_peers) {
+      std::unique_lock g(connections_mtx);
+      chain::flat_set<string> peers = supplied_peers;
+      g.unlock();
+      for (const auto& peer : peers) {
          resolve_and_connect(peer, p2p_address);
       }
    }
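
The key detail of this second attempt is that resolve_and_connect acquires the same non-recursive connections mutex, so the caller must not still be holding it. The patch therefore snapshots the peer set under the lock, releases the lock, and only then connects. A condensed sketch of that copy-then-unlock pattern (connect_one is a simplified stand-in for resolve_and_connect):

   #include <mutex>
   #include <set>
   #include <string>

   struct peer_connector {
      std::mutex            mtx;
      std::set<std::string> supplied_peers;

      void connect_one(const std::string& peer) {
         std::lock_guard g(mtx);            // would deadlock if the caller still held mtx
         (void)peer;                        // ... lookup/insert work elided ...
      }

      void connect_all() {
         std::unique_lock g(mtx);
         auto snapshot = supplied_peers;    // copy while protected
         g.unlock();                        // release before the re-entrant calls
         for (const auto& peer : snapshot)
            connect_one(peer);
      }
   };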

From 4baec727ab8bd2d8366abba907f84c70e247ec0c Mon Sep 17 00:00:00 2001
From: Jonathan Giszczak <jonrg@hypercubepc.com>
Date: Wed, 27 Sep 2023 16:29:30 -0500
Subject: [PATCH 32/61] Accept suggested refactoring.

---
 plugins/net_plugin/net_plugin.cpp | 10 +++-------
 1 file changed, 3 insertions(+), 7 deletions(-)

diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp
index 57987e58a7..1a5b41cbf9 100644
--- a/plugins/net_plugin/net_plugin.cpp
+++ b/plugins/net_plugin/net_plugin.cpp
@@ -1725,16 +1725,12 @@ namespace eosio {
          if( block_sync_rate_limit > 0 ) {
             auto elapsed = std::chrono::duration_cast<std::chrono::seconds>(get_time() - connection_start_time);
             auto current_rate = double(block_sync_bytes_sent) / elapsed.count();
-            if( current_rate < block_sync_rate_limit ) {
-               block_sync_bytes_sent += enqueue_block( sb, true );
-               ++peer_requested->last;
-            } else {
+            if( current_rate >= block_sync_rate_limit ) {
                return false;
             }
-         } else {
-            block_sync_bytes_sent += enqueue_block( sb, true );
-            ++peer_requested->last;
          }
+         block_sync_bytes_sent += enqueue_block( sb, true );
+         ++peer_requested->last;
       } else {
          peer_ilog( this, "enqueue sync, unable to fetch block ${num}, sending benign_other go away", ("num", num) );
          peer_requested.reset(); // unable to provide requested blocks
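
After the refactor the throttle check reads as a single early return: if the average rate since the connection started already meets the limit, defer the block; otherwise enqueue it and account for the bytes. A condensed, self-contained restatement of that logic (sync_throttle and try_send are illustrative stand-ins, and the real code uses the connection's own clock and enqueue_block):

   #include <chrono>
   #include <cstddef>

   struct sync_throttle {
      std::size_t block_sync_rate_limit = 0;        // bytes per second; 0 disables throttling
      std::size_t block_sync_bytes_sent = 0;
      std::chrono::steady_clock::time_point start = std::chrono::steady_clock::now();

      // Returns false when the block should be deferred to stay under the limit.
      bool try_send(std::size_t block_size_bytes) {
         if (block_sync_rate_limit > 0) {
            auto elapsed = std::chrono::duration_cast<std::chrono::seconds>(
                              std::chrono::steady_clock::now() - start);
            double current_rate = elapsed.count() > 0
                                     ? double(block_sync_bytes_sent) / elapsed.count()
                                     : double(block_sync_bytes_sent);
            if (current_rate >= block_sync_rate_limit)
               return false;                        // throttled; retry on a later pass
         }
         block_sync_bytes_sent += block_size_bytes; // stand-in for enqueue_block(sb, true)
         return true;
      }
   };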

From 8d2c1c2426e5d395bce80770fa6e86c52eff5e7a Mon Sep 17 00:00:00 2001
From: Jonathan Giszczak <jonrg@hypercubepc.com>
Date: Wed, 27 Sep 2023 16:30:32 -0500
Subject: [PATCH 33/61] Remove some unused machine-generated variables from
 custom shape file.

---
 tests/p2p_sync_throttle_test_shape.json | 10 ----------
 1 file changed, 10 deletions(-)

diff --git a/tests/p2p_sync_throttle_test_shape.json b/tests/p2p_sync_throttle_test_shape.json
index 4252ab483a..8cfb5ce9a5 100644
--- a/tests/p2p_sync_throttle_test_shape.json
+++ b/tests/p2p_sync_throttle_test_shape.json
@@ -17,8 +17,6 @@
         "eosio"
       ], 
       "dont_start": false, 
-      "config_dir_name": "/home/giszczakj/Dev/leap/build/TestLogs/p2p_sync_throttle_test1192292/node_bios", 
-      "data_dir_name": "/home/giszczakj/Dev/leap/build/TestLogs/p2p_sync_throttle_test1192292/node_bios", 
       "p2p_port": 9776, 
       "http_port": 8788, 
       "host_name": "localhost", 
@@ -60,8 +58,6 @@
         "defproduceru"
       ], 
       "dont_start": false, 
-      "config_dir_name": "/home/giszczakj/Dev/leap/build/TestLogs/p2p_sync_throttle_test1192292/node_00", 
-      "data_dir_name": "/home/giszczakj/Dev/leap/build/TestLogs/p2p_sync_throttle_test1192292/node_00", 
       "p2p_port": 9876, 
       "http_port": 8888, 
       "host_name": "localhost", 
@@ -83,8 +79,6 @@
       ], 
       "producers": [], 
       "dont_start": false, 
-      "config_dir_name": "/home/giszczakj/Dev/leap/build/TestLogs/p2p_sync_throttle_test1192292/node_01", 
-      "data_dir_name": "/home/giszczakj/Dev/leap/build/TestLogs/p2p_sync_throttle_test1192292/node_01", 
       "p2p_port": 9877, 
       "http_port": 8889, 
       "host_name": "localhost", 
@@ -106,8 +100,6 @@
       ], 
       "producers": [], 
       "dont_start": true, 
-      "config_dir_name": "/home/giszczakj/Dev/leap/build/TestLogs/p2p_sync_throttle_test1192292/node_02", 
-      "data_dir_name": "/home/giszczakj/Dev/leap/build/TestLogs/p2p_sync_throttle_test1192292/node_02", 
       "p2p_port": 9878, 
       "http_port": 8890, 
       "host_name": "localhost", 
@@ -129,8 +121,6 @@
       ],
       "producers": [],
       "dont_start": true, 
-      "config_dir_name": "/home/giszczakj/Dev/leap/build/TestLogs/p2p_sync_throttle_test1192292/node_03", 
-      "data_dir_name": "/home/giszczakj/Dev/leap/build/TestLogs/p2p_sync_throttle_test1192292/node_03", 
       "p2p_port": 9879, 
       "http_port": 8891, 
       "host_name": "localhost", 

From 7e37de11f46854f2b82e34174717300ac64b0310 Mon Sep 17 00:00:00 2001
From: Jonathan Giszczak <jonrg@hypercubepc.com>
Date: Thu, 28 Sep 2023 17:53:08 -0500
Subject: [PATCH 34/61] Convert connections mutex to recursive_mutex and update
 locks.

Split Prometheus statistics out of connection_monitor into
connection_statistics_monitor.
---
 plugins/net_plugin/net_plugin.cpp | 195 ++++++++++++++++++------------
 1 file changed, 118 insertions(+), 77 deletions(-)

diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp
index 1a5b41cbf9..113c0bc94d 100644
--- a/plugins/net_plugin/net_plugin.cpp
+++ b/plugins/net_plugin/net_plugin.cpp
@@ -364,15 +364,18 @@ namespace eosio {
             >
          >
       >;
+      enum class timer_type { check, stats };
    private:
       alignas(hardware_destructive_interference_size)
-      mutable std::shared_mutex        connections_mtx;
+      mutable std::recursive_mutex     connections_mtx;
       connection_details_index         connections;
       chain::flat_set<string>          supplied_peers;
 
       alignas(hardware_destructive_interference_size)
       fc::mutex                             connector_check_timer_mtx;
       unique_ptr<boost::asio::steady_timer> connector_check_timer GUARDED_BY(connector_check_timer_mtx);
+      fc::mutex                             connection_stats_timer_mtx;
+      unique_ptr<boost::asio::steady_timer> connection_stats_timer GUARDED_BY(connection_stats_timer_mtx);
 
       /// thread safe, only modified on startup
       std::chrono::milliseconds                                heartbeat_timeout{def_keepalive_interval*2};
@@ -385,6 +388,7 @@ namespace eosio {
       connection_ptr find_connection_i(const string& host) const;
 
       void connection_monitor(const std::weak_ptr<connection>& from_connection);
+      void connection_statistics_monitor(const std::weak_ptr<connection>& from_connection);
 
    public:
       size_t number_connections() const;
@@ -404,9 +408,11 @@ namespace eosio {
 
       void connect_supplied_peers(const string& p2p_address);
 
-      void start_conn_timer();
-      void start_conn_timer(boost::asio::steady_timer::duration du, std::weak_ptr<connection> from_connection);
-      void stop_conn_timer();
+      void start_conn_timers();
+      void start_conn_timer(boost::asio::steady_timer::duration du,
+                            std::weak_ptr<connection> from_connection,
+                            timer_type which);
+      void stop_conn_timers();
 
       void add(connection_ptr c);
       string connect(const string& host, const string& p2p_address);
@@ -1185,20 +1191,20 @@ namespace eosio {
 
    template<typename Function>
    bool connections_manager::any_of_supplied_peers( Function&& f ) const {
-      std::shared_lock g( connections_mtx );
+      const std::lock_guard g( connections_mtx );
       return std::any_of(supplied_peers.begin(), supplied_peers.end(), std::forward<Function>(f));
    }
 
    template<typename Function>
    void connections_manager::for_each_connection( Function&& f ) const {
-      std::shared_lock g( connections_mtx );
+      const std::lock_guard g( connections_mtx );
       auto& index = connections.get<by_host>();
       std::for_each(index.begin(), index.end(), std::forward<Function>(f));
    }
 
    template<typename Function>
    void connections_manager::for_each_block_connection( Function&& f ) const {
-      std::shared_lock g( connections_mtx );
+      const std::lock_guard g( connections_mtx );
       auto& index = connections.get<by_host>();
       for( const connection_ptr& c : index ) {
          if (c->is_blocks_connection()) {
@@ -1209,14 +1215,14 @@ namespace eosio {
 
    template <typename UnaryPredicate>
    bool connections_manager::any_of_connections(UnaryPredicate&& p) const {
-      std::shared_lock g(connections_mtx);
+      const std::lock_guard g(connections_mtx);
       auto& index = connections.get<by_host>();
       return std::any_of(index.cbegin(), index.cend(), std::forward<UnaryPredicate>(p));
    }
 
    template <typename UnaryPredicate>
    bool connections_manager::any_of_block_connections(UnaryPredicate&& p) const {
-      std::shared_lock g( connections_mtx );
+      const std::lock_guard g( connections_mtx );
       auto& index = connections.get<by_host>();
       for( const connection_ptr& c : index ) {
          if( c->is_blocks_connection() ) {
@@ -1444,7 +1450,9 @@ namespace eosio {
       set_state(connection_state::closed);
 
       if( reconnect && !shutdown ) {
-         my_impl->connections.start_conn_timer( std::chrono::milliseconds( 100 ), connection_wptr() );
+         my_impl->connections.start_conn_timer( std::chrono::milliseconds( 100 ),
+                                                connection_wptr(),
+                                                connections_manager::timer_type::check );
       }
    }
 
@@ -3117,7 +3125,7 @@ namespace eosio {
    void net_plugin_impl::plugin_shutdown() {
          in_shutdown = true;
 
-         connections.stop_conn_timer();
+         connections.stop_conn_timers();
          {
             fc::lock_guard g( expire_timer_mtx );
             if( expire_timer )
@@ -3821,7 +3829,7 @@ namespace eosio {
          fc::lock_guard g( expire_timer_mtx );
          expire_timer = std::make_unique<boost::asio::steady_timer>( my_impl->thread_pool.get_executor() );
       }
-      connections.start_conn_timer();
+      connections.start_conn_timers();
       start_expire_timer();
    }
 
@@ -4416,12 +4424,12 @@ namespace eosio {
    //----------------------------------------------------------------------------
 
    size_t connections_manager::number_connections() const {
-      std::lock_guard g(connections_mtx);
+      const std::lock_guard g(connections_mtx);
       return connections.size();
    }
 
    void connections_manager::add_supplied_peers(const vector<string>& peers ) {
-      std::lock_guard g(connections_mtx);
+      const std::lock_guard g(connections_mtx);
       supplied_peers.insert( peers.begin(), peers.end() );
    }
 
@@ -4446,16 +4454,15 @@ namespace eosio {
    }
 
    void connections_manager::connect_supplied_peers(const string& p2p_address) {
-      std::unique_lock g(connections_mtx);
+      const std::lock_guard g(connections_mtx);
       chain::flat_set<string> peers = supplied_peers;
-      g.unlock();
       for (const auto& peer : peers) {
          resolve_and_connect(peer, p2p_address);
       }
    }
 
    void connections_manager::add( connection_ptr c ) {
-      std::lock_guard g( connections_mtx );
+      const std::lock_guard g( connections_mtx );
       boost::system::error_code ec;
       auto endpoint = c->socket->remote_endpoint(ec);
       connections.insert( connection_details{
@@ -4466,9 +4473,8 @@ namespace eosio {
 
    // called by API
    string connections_manager::connect( const string& host, const string& p2p_address ) {
-      std::unique_lock g( connections_mtx );
+      const std::lock_guard g( connections_mtx );
       supplied_peers.insert(host);
-      g.unlock();
       return resolve_and_connect( host, p2p_address );
    }
 
@@ -4479,7 +4485,7 @@ namespace eosio {
          return "invalid peer address";
       }
 
-      std::lock_guard g( connections_mtx );
+      const std::lock_guard g( connections_mtx );
       if( find_connection_i( peer_address ) )
          return "already connected";
 
@@ -4491,7 +4497,7 @@ namespace eosio {
          [resolver, host = host, port = port, peer_address = peer_address, listen_address = listen_address, this]( const boost::system::error_code& err, const tcp::resolver::results_type& results ) {
             connection_ptr c = std::make_shared<connection>( peer_address, listen_address );
             c->set_heartbeat_timeout( heartbeat_timeout );
-            std::lock_guard g( connections_mtx );
+            const std::lock_guard g( connections_mtx );
             auto [it, inserted] = connections.emplace( connection_details{
                .host = peer_address,
                .c = std::move(c),
@@ -4512,7 +4518,7 @@ namespace eosio {
 
    void connections_manager::update_connection_endpoint(connection_ptr c,
                                                         const tcp::endpoint& endpoint) {
-      std::unique_lock g( connections_mtx );
+      const std::lock_guard g( connections_mtx );
       auto& index = connections.get<by_connection>();
       const auto& it = index.find(c);
       if( it != index.end() ) {
@@ -4523,7 +4529,7 @@ namespace eosio {
    }
 
    void connections_manager::connect(const connection_ptr& c) {
-      std::lock_guard g( connections_mtx );
+      const std::lock_guard g( connections_mtx );
       const auto& index = connections.get<by_connection>();
       const auto& it = index.find(c);
       if( it != index.end() ) {
@@ -4533,7 +4539,7 @@ namespace eosio {
 
    // called by API
    string connections_manager::disconnect( const string& host ) {
-      std::lock_guard g( connections_mtx );
+      const std::lock_guard g( connections_mtx );
       auto& index = connections.get<by_host>();
       if( auto i = index.find( host ); i != index.end() ) {
          fc_ilog( logger, "disconnecting: ${cid}", ("cid", i->c->connection_id) );
@@ -4546,7 +4552,7 @@ namespace eosio {
    }
 
    void connections_manager::close_all() {
-      std::lock_guard g( connections_mtx );
+      const std::lock_guard g( connections_mtx );
       auto& index = connections.get<by_host>();
       fc_ilog( logger, "close all ${s} connections", ("s", index.size()) );
       for( const connection_ptr& c : index ) {
@@ -4557,7 +4563,7 @@ namespace eosio {
    }
 
    std::optional<connection_status> connections_manager::status( const string& host )const {
-      std::shared_lock g( connections_mtx );
+      const std::lock_guard g( connections_mtx );
       auto con = find_connection_i( host );
       if( con ) {
          return con->get_status();
@@ -4567,7 +4573,7 @@ namespace eosio {
 
    vector<connection_status> connections_manager::connection_statuses()const {
       vector<connection_status> result;
-      std::shared_lock g( connections_mtx );
+      const std::lock_guard g( connections_mtx );
       auto& index = connections.get<by_connection>();
       result.reserve( index.size() );
       for( const connection_ptr& c : index ) {
@@ -4586,28 +4592,42 @@ namespace eosio {
    }
 
    // called from any thread
-   void connections_manager::start_conn_timer() {
-      start_conn_timer(connector_period, {}); // this locks mutex
+   void connections_manager::start_conn_timers() {
+      start_conn_timer(connector_period, {}, timer_type::check); // this locks mutex
+      start_conn_timer(connector_period, {}, timer_type::stats); // this locks mutex
    }
 
    // called from any thread
-   void connections_manager::start_conn_timer(boost::asio::steady_timer::duration du, std::weak_ptr<connection> from_connection) {
-      fc::lock_guard g( connector_check_timer_mtx );
-      if (!connector_check_timer) {
-         connector_check_timer = std::make_unique<boost::asio::steady_timer>( my_impl->thread_pool.get_executor() );
-      }
-      connector_check_timer->expires_from_now( du );
-      connector_check_timer->async_wait( [this, from_connection{std::move(from_connection)}](boost::system::error_code ec) mutable {
+   void connections_manager::start_conn_timer(boost::asio::steady_timer::duration du, 
+                                              std::weak_ptr<connection> from_connection,
+                                              timer_type which) {
+      auto& mtx = which == timer_type::check ? connector_check_timer_mtx : connection_stats_timer_mtx;
+      auto& timer = which == timer_type::check ? connector_check_timer : connection_stats_timer;
+      const auto& func = which == timer_type::check ? &connections_manager::connection_monitor : &connections_manager::connection_statistics_monitor;
+      fc::lock_guard g( mtx );
+      if (!timer) {
+         timer = std::make_unique<boost::asio::steady_timer>( my_impl->thread_pool.get_executor() );
+      }
+      timer->expires_from_now( du );
+      timer->async_wait( [this, from_connection{std::move(from_connection)}, f = func](boost::system::error_code ec) mutable {
          if( !ec ) {
-            connection_monitor(from_connection);
+            (this->*f)(from_connection);
          }
       });
    }
 
-   void connections_manager::stop_conn_timer() {
-      fc::lock_guard g( connector_check_timer_mtx );
-      if (connector_check_timer) {
-         connector_check_timer->cancel();
+   void connections_manager::stop_conn_timers() {
+      {
+         fc::lock_guard g( connector_check_timer_mtx );
+         if (connector_check_timer) {
+            connector_check_timer->cancel();
+         }
+      }
+      {
+         fc::lock_guard g( connection_stats_timer_mtx );
+         if (connection_stats_timer) {
+            connection_stats_timer->cancel();
+         }
       }
    }
 
@@ -4615,20 +4635,18 @@ namespace eosio {
    void connections_manager::connection_monitor(const std::weak_ptr<connection>& from_connection) {
       auto max_time = fc::time_point::now().safe_add(max_cleanup_time);
       auto from = from_connection.lock();
-      std::unique_lock g( connections_mtx );
+      const std::lock_guard g( connections_mtx );
       auto& index = connections.get<by_connection>();
       auto it = (from ? index.find(from) : index.begin());
       if (it == index.end()) it = index.begin();
       size_t num_rm = 0, num_clients = 0, num_peers = 0, num_bp_peers = 0;
-      net_plugin::p2p_per_connection_metrics per_connection(index.size());
       while (it != index.end()) {
          if (fc::time_point::now() >= max_time) {
             connection_wptr wit = (*it).c;
-            g.unlock();
             fc_dlog( logger, "Exiting connection monitor early, ran out of time: ${t}", ("t", max_time - fc::time_point::now()) );
             fc_ilog( logger, "p2p client connections: ${num}/${max}, peer connections: ${pnum}/${pmax}",
                     ("num", num_clients)("max", max_client_count)("pnum", num_peers)("pmax", supplied_peers.size()) );
-            start_conn_timer( std::chrono::milliseconds( 1 ), wit ); // avoid exhausting
+            start_conn_timer( std::chrono::milliseconds( 1 ), wit, timer_type::check ); // avoid exhausting
             return;
          }
          const connection_ptr& c = it->c;
@@ -4639,6 +4657,59 @@ namespace eosio {
          } else {
             ++num_peers;
          }
+
+         if (!c->socket_is_open() && c->state() != connection::connection_state::connecting) {
+            if (!c->incoming()) {
+               fc_dlog(logger, "attempting to connect in connection_monitor");
+               if (!c->reconnect()) {
+                  it = index.erase(it);
+                  --num_peers;
+                  ++num_rm;
+                  continue;
+               }
+            } else {
+               --num_clients;
+               ++num_rm;
+               it = index.erase(it);
+               continue;
+            }
+         }
+         ++it;
+      }
+
+      if( num_clients > 0 || num_peers > 0 ) {
+         fc_ilog(logger, "p2p client connections: ${num}/${max}, peer connections: ${pnum}/${pmax}, block producer peers: ${num_bp_peers}",
+                 ("num", num_clients)("max", max_client_count)("pnum", num_peers)("pmax", supplied_peers.size())("num_bp_peers", num_bp_peers));
+      }
+      fc_dlog( logger, "connection monitor, removed ${n} connections", ("n", num_rm) );
+      start_conn_timer( connector_period, {}, timer_type::check );
+   }
+
+   // called from any thread
+   void connections_manager::connection_statistics_monitor(const std::weak_ptr<connection>& from_connection) {
+      auto max_time = fc::time_point::now().safe_add(max_cleanup_time);
+      auto from = from_connection.lock();
+      const std::lock_guard g(connections_mtx);
+      auto& index = connections.get<by_connection>();
+      auto it = (from ? index.find(from) : index.begin());
+      if( it == index.end()) it = index.begin();
+      size_t num_clients = 0, num_peers = 0, num_bp_peers = 0;
+      net_plugin::p2p_per_connection_metrics per_connection(index.size());
+      while(it != index.end()) {
+         if(fc::time_point::now() >= max_time) {
+            connection_wptr wit = (*it).c;
+            fc_dlog(logger, "connection statistics monitor ran out of time");
+            start_conn_timer(std::chrono::milliseconds(1), wit, timer_type::stats);
+            return;
+         }
+         const connection_ptr& c = it->c;
+         if(c->is_bp_connection) {
+            ++num_bp_peers;
+         } else if(c->incoming()) {
+            ++num_clients;
+         } else {
+            ++num_peers;
+         }
          if (update_p2p_connection_metrics) {
             fc::unique_lock g_conn(c->conn_mtx);
             boost::asio::ip::address_v6::bytes_type addr = c->remote_endpoint_ip_array;
@@ -4663,41 +4734,11 @@ namespace eosio {
             };
             per_connection.peers.push_back(metrics);
          }
-
-         if (!c->socket_is_open() && c->state() != connection::connection_state::connecting) {
-            if (!c->incoming()) {
-               g.unlock();
-               fc_dlog(logger, "attempting to connect in connection_monitor");
-               if (!c->reconnect()) {
-                  g.lock();
-                  it = index.erase(it);
-                  g.unlock();
-                  --num_peers;
-                  ++num_rm;
-                  continue;
-               }
-               g.lock();
-            } else {
-               --num_clients;
-               ++num_rm;
-               it = index.erase(it);
-               continue;
-            }
-         }
-         ++it;
       }
-      g.unlock();
 
-      if (update_p2p_connection_metrics) {
+      if(update_p2p_connection_metrics) {
          update_p2p_connection_metrics({num_peers, num_clients, std::move(per_connection)});
       }
-
-      if( num_clients > 0 || num_peers > 0 ) {
-         fc_ilog(logger, "p2p client connections: ${num}/${max}, peer connections: ${pnum}/${pmax}, block producer peers: ${num_bp_peers}",
-                 ("num", num_clients)("max", max_client_count)("pnum", num_peers)("pmax", supplied_peers.size())("num_bp_peers", num_bp_peers));
-      }
-      fc_dlog( logger, "connection monitor, removed ${n} connections", ("n", num_rm) );
-      start_conn_timer( connector_period, {});
+      start_conn_timer( connector_period, {}, timer_type::stats );
    }
-
 } // namespace eosio
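
The mechanical core of the timer split is that one arming routine serves both timers: the timer_type tag selects the mutex, the timer slot, and a pointer-to-member handler, and the async completion dispatches through that pointer. A stripped-down sketch of the dispatch (a plain stored callback replaces boost::asio so the example stays self-contained; the class and members are illustrative):

   #include <functional>
   #include <mutex>

   class timers {
   public:
      enum class timer_type { check, stats };

      void start(timer_type which) {
         auto& mtx  = which == timer_type::check ? check_mtx : stats_mtx;
         auto  func = which == timer_type::check ? &timers::connection_monitor
                                                 : &timers::connection_statistics_monitor;
         std::lock_guard g(mtx);
         // A real implementation arms a boost::asio::steady_timer here; the
         // pointer-to-member is what lets one routine drive both handlers.
         pending = [this, func] { (this->*func)(); };
      }

      void fire_pending() { if (pending) pending(); }

   private:
      void connection_monitor() {}
      void connection_statistics_monitor() {}

      std::mutex check_mtx, stats_mtx;
      std::function<void()> pending;
   };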

From 669ed0facc240fe243df09903337a6dab9ddcc9f Mon Sep 17 00:00:00 2001
From: Jonathan Giszczak <jonrg@hypercubepc.com>
Date: Fri, 29 Sep 2023 18:16:27 -0500
Subject: [PATCH 35/61] Revert mutex and lock type changes.

---
 plugins/net_plugin/net_plugin.cpp | 50 ++++++++++++++++++-------------
 1 file changed, 29 insertions(+), 21 deletions(-)

diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp
index 113c0bc94d..8730ab3b77 100644
--- a/plugins/net_plugin/net_plugin.cpp
+++ b/plugins/net_plugin/net_plugin.cpp
@@ -367,7 +367,7 @@ namespace eosio {
       enum class timer_type { check, stats };
    private:
       alignas(hardware_destructive_interference_size)
-      mutable std::recursive_mutex     connections_mtx;
+      mutable std::shared_mutex        connections_mtx;
       connection_details_index         connections;
       chain::flat_set<string>          supplied_peers;
 
@@ -1191,20 +1191,20 @@ namespace eosio {
 
    template<typename Function>
    bool connections_manager::any_of_supplied_peers( Function&& f ) const {
-      const std::lock_guard g( connections_mtx );
+      std::shared_lock g( connections_mtx );
       return std::any_of(supplied_peers.begin(), supplied_peers.end(), std::forward<Function>(f));
    }
 
    template<typename Function>
    void connections_manager::for_each_connection( Function&& f ) const {
-      const std::lock_guard g( connections_mtx );
+      std::shared_lock g( connections_mtx );
       auto& index = connections.get<by_host>();
       std::for_each(index.begin(), index.end(), std::forward<Function>(f));
    }
 
    template<typename Function>
    void connections_manager::for_each_block_connection( Function&& f ) const {
-      const std::lock_guard g( connections_mtx );
+      std::shared_lock g( connections_mtx );
       auto& index = connections.get<by_host>();
       for( const connection_ptr& c : index ) {
          if (c->is_blocks_connection()) {
@@ -1215,14 +1215,14 @@ namespace eosio {
 
    template <typename UnaryPredicate>
    bool connections_manager::any_of_connections(UnaryPredicate&& p) const {
-      const std::lock_guard g(connections_mtx);
+      std::shared_lock g(connections_mtx);
       auto& index = connections.get<by_host>();
       return std::any_of(index.cbegin(), index.cend(), std::forward<UnaryPredicate>(p));
    }
 
    template <typename UnaryPredicate>
    bool connections_manager::any_of_block_connections(UnaryPredicate&& p) const {
-      const std::lock_guard g( connections_mtx );
+      std::shared_lock g( connections_mtx );
       auto& index = connections.get<by_host>();
       for( const connection_ptr& c : index ) {
          if( c->is_blocks_connection() ) {
@@ -4424,12 +4424,12 @@ namespace eosio {
    //----------------------------------------------------------------------------
 
    size_t connections_manager::number_connections() const {
-      const std::lock_guard g(connections_mtx);
+      std::lock_guard g(connections_mtx);
       return connections.size();
    }
 
    void connections_manager::add_supplied_peers(const vector<string>& peers ) {
-      const std::lock_guard g(connections_mtx);
+      std::lock_guard g(connections_mtx);
       supplied_peers.insert( peers.begin(), peers.end() );
    }
 
@@ -4454,15 +4454,16 @@ namespace eosio {
    }
 
    void connections_manager::connect_supplied_peers(const string& p2p_address) {
-      const std::lock_guard g(connections_mtx);
+      std::unique_lock g(connections_mtx);
       chain::flat_set<string> peers = supplied_peers;
+      g.unlock();
       for (const auto& peer : peers) {
          resolve_and_connect(peer, p2p_address);
       }
    }
 
    void connections_manager::add( connection_ptr c ) {
-      const std::lock_guard g( connections_mtx );
+      std::lock_guard g( connections_mtx );
       boost::system::error_code ec;
       auto endpoint = c->socket->remote_endpoint(ec);
       connections.insert( connection_details{
@@ -4473,8 +4474,9 @@ namespace eosio {
 
    // called by API
    string connections_manager::connect( const string& host, const string& p2p_address ) {
-      const std::lock_guard g( connections_mtx );
+      std::unique_lock g( connections_mtx );
       supplied_peers.insert(host);
+      g.unlock();
       return resolve_and_connect( host, p2p_address );
    }
 
@@ -4485,7 +4487,7 @@ namespace eosio {
          return "invalid peer address";
       }
 
-      const std::lock_guard g( connections_mtx );
+      std::lock_guard g( connections_mtx );
       if( find_connection_i( peer_address ) )
          return "already connected";
 
@@ -4497,7 +4499,7 @@ namespace eosio {
          [resolver, host = host, port = port, peer_address = peer_address, listen_address = listen_address, this]( const boost::system::error_code& err, const tcp::resolver::results_type& results ) {
             connection_ptr c = std::make_shared<connection>( peer_address, listen_address );
             c->set_heartbeat_timeout( heartbeat_timeout );
-            const std::lock_guard g( connections_mtx );
+            std::lock_guard g( connections_mtx );
             auto [it, inserted] = connections.emplace( connection_details{
                .host = peer_address,
                .c = std::move(c),
@@ -4518,7 +4520,7 @@ namespace eosio {
 
    void connections_manager::update_connection_endpoint(connection_ptr c,
                                                         const tcp::endpoint& endpoint) {
-      const std::lock_guard g( connections_mtx );
+      std::unique_lock g( connections_mtx );
       auto& index = connections.get<by_connection>();
       const auto& it = index.find(c);
       if( it != index.end() ) {
@@ -4529,7 +4531,7 @@ namespace eosio {
    }
 
    void connections_manager::connect(const connection_ptr& c) {
-      const std::lock_guard g( connections_mtx );
+      std::lock_guard g( connections_mtx );
       const auto& index = connections.get<by_connection>();
       const auto& it = index.find(c);
       if( it != index.end() ) {
@@ -4539,7 +4541,7 @@ namespace eosio {
 
    // called by API
    string connections_manager::disconnect( const string& host ) {
-      const std::lock_guard g( connections_mtx );
+      std::lock_guard g( connections_mtx );
       auto& index = connections.get<by_host>();
       if( auto i = index.find( host ); i != index.end() ) {
          fc_ilog( logger, "disconnecting: ${cid}", ("cid", i->c->connection_id) );
@@ -4552,7 +4554,7 @@ namespace eosio {
    }
 
    void connections_manager::close_all() {
-      const std::lock_guard g( connections_mtx );
+      std::lock_guard g( connections_mtx );
       auto& index = connections.get<by_host>();
       fc_ilog( logger, "close all ${s} connections", ("s", index.size()) );
       for( const connection_ptr& c : index ) {
@@ -4563,7 +4565,7 @@ namespace eosio {
    }
 
    std::optional<connection_status> connections_manager::status( const string& host )const {
-      const std::lock_guard g( connections_mtx );
+      std::shared_lock g( connections_mtx );
       auto con = find_connection_i( host );
       if( con ) {
          return con->get_status();
@@ -4573,7 +4575,7 @@ namespace eosio {
 
    vector<connection_status> connections_manager::connection_statuses()const {
       vector<connection_status> result;
-      const std::lock_guard g( connections_mtx );
+      std::shared_lock g( connections_mtx );
       auto& index = connections.get<by_connection>();
       result.reserve( index.size() );
       for( const connection_ptr& c : index ) {
@@ -4635,7 +4637,7 @@ namespace eosio {
    void connections_manager::connection_monitor(const std::weak_ptr<connection>& from_connection) {
       auto max_time = fc::time_point::now().safe_add(max_cleanup_time);
       auto from = from_connection.lock();
-      const std::lock_guard g( connections_mtx );
+      std::unique_lock g( connections_mtx );
       auto& index = connections.get<by_connection>();
       auto it = (from ? index.find(from) : index.begin());
       if (it == index.end()) it = index.begin();
@@ -4643,6 +4645,7 @@ namespace eosio {
       while (it != index.end()) {
          if (fc::time_point::now() >= max_time) {
             connection_wptr wit = (*it).c;
+            g.unlock();
             fc_dlog( logger, "Exiting connection monitor early, ran out of time: ${t}", ("t", max_time - fc::time_point::now()) );
             fc_ilog( logger, "p2p client connections: ${num}/${max}, peer connections: ${pnum}/${pmax}",
                     ("num", num_clients)("max", max_client_count)("pnum", num_peers)("pmax", supplied_peers.size()) );
@@ -4660,13 +4663,17 @@ namespace eosio {
 
          if (!c->socket_is_open() && c->state() != connection::connection_state::connecting) {
             if (!c->incoming()) {
+               g.unlock();
                fc_dlog(logger, "attempting to connect in connection_monitor");
                if (!c->reconnect()) {
+                  g.lock();
                   it = index.erase(it);
+                  g.unlock();
                   --num_peers;
                   ++num_rm;
                   continue;
                }
+               g.lock();
             } else {
                --num_clients;
                ++num_rm;
@@ -4676,6 +4683,7 @@ namespace eosio {
          }
          ++it;
       }
+      g.unlock();
 
       if( num_clients > 0 || num_peers > 0 ) {
          fc_ilog(logger, "p2p client connections: ${num}/${max}, peer connections: ${pnum}/${pmax}, block producer peers: ${num_bp_peers}",
@@ -4741,4 +4749,4 @@ namespace eosio {
       }
       start_conn_timer( connector_period, {}, timer_type::stats );
    }
-} // namespace eosio
+} // namespace eosio
\ No newline at end of file
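
Reverting to std::shared_mutex restores the reader/writer discipline the manager had before: shared_lock for const traversals, lock_guard for plain mutation, and unique_lock where a writer needs to drop the lock partway through. A minimal sketch of those three lock choices on a simplified container (registry and its members are hypothetical):

   #include <cstddef>
   #include <mutex>
   #include <shared_mutex>
   #include <string>
   #include <vector>

   struct registry {
      mutable std::shared_mutex mtx;
      std::vector<std::string>  items;

      std::size_t size() const {
         std::shared_lock g(mtx);          // many readers may hold this concurrently
         return items.size();
      }

      void add(std::string s) {
         std::lock_guard g(mtx);           // exclusive access for writers
         items.push_back(std::move(s));
      }

      std::vector<std::string> snapshot_then_release() const {
         std::unique_lock g(mtx);          // exclusive, but releasable mid-function
         auto copy = items;
         g.unlock();                       // further (possibly re-entrant) work runs unlocked
         return copy;
      }
   };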

From a6f7761433f16a9885071894b5512044eee789ef Mon Sep 17 00:00:00 2001
From: Jonathan Giszczak <jonrg@hypercubepc.com>
Date: Sun, 1 Oct 2023 23:51:00 -0500
Subject: [PATCH 36/61] Revise connection_monitor for thread safety.

---
 plugins/net_plugin/net_plugin.cpp | 40 ++++++++++++++++++++-----------
 1 file changed, 26 insertions(+), 14 deletions(-)

diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp
index 8730ab3b77..68ed0333c5 100644
--- a/plugins/net_plugin/net_plugin.cpp
+++ b/plugins/net_plugin/net_plugin.cpp
@@ -4635,17 +4635,39 @@ namespace eosio {
 
    // called from any thread
    void connections_manager::connection_monitor(const std::weak_ptr<connection>& from_connection) {
+      size_t num_rm = 0, num_clients = 0, num_peers = 0, num_bp_peers = 0;
+      auto cleanup = [&num_peers, &num_rm, this](vector<connection_ptr>&& reconnecting, 
+                                                 vector<connection_ptr>&& removing) {
+         for( auto c : reconnecting ) {
+            if (!c->reconnect()) {
+               --num_peers;
+               ++num_rm;
+               removing.push_back(c);
+            }
+         }
+         std::unique_lock g( connections_mtx );
+         auto& index = connections.get<by_connection>();
+         for( auto c : removing ) {
+            auto rit = index.find(c);
+            if (rit != index.end()) {
+               index.erase(rit);
+            }
+         }
+         g.unlock();
+
+      };
       auto max_time = fc::time_point::now().safe_add(max_cleanup_time);
+      std::vector<connection_ptr> reconnecting, removing;
       auto from = from_connection.lock();
       std::unique_lock g( connections_mtx );
       auto& index = connections.get<by_connection>();
       auto it = (from ? index.find(from) : index.begin());
       if (it == index.end()) it = index.begin();
-      size_t num_rm = 0, num_clients = 0, num_peers = 0, num_bp_peers = 0;
       while (it != index.end()) {
          if (fc::time_point::now() >= max_time) {
             connection_wptr wit = (*it).c;
             g.unlock();
+            cleanup(std::move(reconnecting), std::move(removing));
             fc_dlog( logger, "Exiting connection monitor early, ran out of time: ${t}", ("t", max_time - fc::time_point::now()) );
             fc_ilog( logger, "p2p client connections: ${num}/${max}, peer connections: ${pnum}/${pmax}",
                     ("num", num_clients)("max", max_client_count)("pnum", num_peers)("pmax", supplied_peers.size()) );
@@ -4663,27 +4685,17 @@ namespace eosio {
 
          if (!c->socket_is_open() && c->state() != connection::connection_state::connecting) {
             if (!c->incoming()) {
-               g.unlock();
-               fc_dlog(logger, "attempting to connect in connection_monitor");
-               if (!c->reconnect()) {
-                  g.lock();
-                  it = index.erase(it);
-                  g.unlock();
-                  --num_peers;
-                  ++num_rm;
-                  continue;
-               }
-               g.lock();
+               reconnecting.push_back(c);
             } else {
                --num_clients;
                ++num_rm;
-               it = index.erase(it);
-               continue;
+               removing.push_back(c);
             }
          }
          ++it;
       }
       g.unlock();
+      cleanup(std::move(reconnecting), std::move(removing));
 
       if( num_clients > 0 || num_peers > 0 ) {
          fc_ilog(logger, "p2p client connections: ${num}/${max}, peer connections: ${pnum}/${pmax}, block producer peers: ${num_bp_peers}",
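
The thread-safety fix here is to stop calling reconnect() or erase() while iterating under the lock: the monitor classifies connections during a locked pass, releases the lock, attempts the reconnects unlocked, then re-locks once to erase the failures. A simplified sketch of that defer-then-act pattern (node, monitor, and sweep are illustrative stand-ins for the plugin's types):

   #include <algorithm>
   #include <memory>
   #include <mutex>
   #include <vector>

   struct node {
      bool open = false;
      bool incoming = false;
      bool reconnect() { return false; }               // stand-in for connection::reconnect
   };
   using node_ptr = std::shared_ptr<node>;

   struct monitor {
      std::mutex            mtx;
      std::vector<node_ptr> nodes;

      void sweep() {
         std::vector<node_ptr> reconnecting, removing;
         {
            std::lock_guard g(mtx);                    // classification pass only
            for (auto& n : nodes) {
               if (n->open) continue;
               (n->incoming ? removing : reconnecting).push_back(n);
            }
         }
         for (auto& n : reconnecting)                  // reconnect without the lock held
            if (!n->reconnect())
               removing.push_back(n);
         std::lock_guard g(mtx);                       // single locked pass to erase
         for (auto& n : removing)
            nodes.erase(std::remove(nodes.begin(), nodes.end(), n), nodes.end());
      }
   };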

From 7acac0c604a31a29f929a35c08151d744f689e26 Mon Sep 17 00:00:00 2001
From: Kevin Heifner <heifnerk@objectcomputing.com>
Date: Mon, 2 Oct 2023 12:52:40 -0500
Subject: [PATCH 37/61] Misc cleanups

---
 plugins/net_plugin/net_plugin.cpp | 131 ++++++++++++++----------------
 1 file changed, 61 insertions(+), 70 deletions(-)

diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp
index 68ed0333c5..2e6e5edba6 100644
--- a/plugins/net_plugin/net_plugin.cpp
+++ b/plugins/net_plugin/net_plugin.cpp
@@ -343,24 +343,23 @@ namespace eosio {
 
    class connections_manager {
    public:
-      struct connection_details {
+      struct connection_detail {
          std::string host;
          connection_ptr c;
          tcp::endpoint active_ip;
          tcp::resolver::results_type ips;
-         operator const connection_ptr&() const { return c; }
       };
 
       using connection_details_index = multi_index_container<
-         connection_details,
+         connection_detail,
          indexed_by<
             ordered_non_unique<
                tag<struct by_host>,
-               key<&connection_details::host>
+               key<&connection_detail::host>
             >,
             ordered_unique<
                tag<struct by_connection>,
-               key<&connection_details::c>
+               key<&connection_detail::c>
             >
          >
       >;
@@ -1199,16 +1198,18 @@ namespace eosio {
    void connections_manager::for_each_connection( Function&& f ) const {
       std::shared_lock g( connections_mtx );
       auto& index = connections.get<by_host>();
-      std::for_each(index.begin(), index.end(), std::forward<Function>(f));
+      for( const connection_detail& cd : index ) {
+         f(cd.c);
+      }
    }
 
    template<typename Function>
    void connections_manager::for_each_block_connection( Function&& f ) const {
       std::shared_lock g( connections_mtx );
       auto& index = connections.get<by_host>();
-      for( const connection_ptr& c : index ) {
-         if (c->is_blocks_connection()) {
-            f(c);
+      for( const connection_detail& cd : index ) {
+         if (cd.c->is_blocks_connection()) {
+            f(cd.c);
          }
       }
    }
@@ -1217,16 +1218,20 @@ namespace eosio {
    bool connections_manager::any_of_connections(UnaryPredicate&& p) const {
       std::shared_lock g(connections_mtx);
       auto& index = connections.get<by_host>();
-      return std::any_of(index.cbegin(), index.cend(), std::forward<UnaryPredicate>(p));
+      for( const connection_detail& cd : index ) {
+         if (p(cd.c))
+            return true;
+      }
+      return false;
    }
 
    template <typename UnaryPredicate>
    bool connections_manager::any_of_block_connections(UnaryPredicate&& p) const {
       std::shared_lock g( connections_mtx );
       auto& index = connections.get<by_host>();
-      for( const connection_ptr& c : index ) {
-         if( c->is_blocks_connection() ) {
-            if (p(c))
+      for( const connection_detail& cd : index ) {
+         if( cd.c->is_blocks_connection() ) {
+            if (p(cd.c))
               return true;
          }
       }
@@ -4466,7 +4471,7 @@ namespace eosio {
       std::lock_guard g( connections_mtx );
       boost::system::error_code ec;
       auto endpoint = c->socket->remote_endpoint(ec);
-      connections.insert( connection_details{
+      connections.insert( connection_detail{
          .host = c->peer_address(), 
          .c = std::move(c),
          .active_ip = endpoint} );
@@ -4500,7 +4505,7 @@ namespace eosio {
             connection_ptr c = std::make_shared<connection>( peer_address, listen_address );
             c->set_heartbeat_timeout( heartbeat_timeout );
             std::lock_guard g( connections_mtx );
-            auto [it, inserted] = connections.emplace( connection_details{
+            auto [it, inserted] = connections.emplace( connection_detail{
                .host = peer_address,
                .c = std::move(c),
                .ips = results
@@ -4524,8 +4529,8 @@ namespace eosio {
       auto& index = connections.get<by_connection>();
       const auto& it = index.find(c);
       if( it != index.end() ) {
-         index.modify(it, [endpoint](connection_details& d) {
-            d.active_ip = endpoint;
+         index.modify(it, [endpoint](connection_detail& cd) {
+            cd.active_ip = endpoint;
          });
       }
    }
@@ -4557,9 +4562,9 @@ namespace eosio {
       std::lock_guard g( connections_mtx );
       auto& index = connections.get<by_host>();
       fc_ilog( logger, "close all ${s} connections", ("s", index.size()) );
-      for( const connection_ptr& c : index ) {
-         fc_dlog( logger, "close: ${cid}", ("cid", c->connection_id) );
-         c->close( false, true );
+      for( const connection_detail& cd : index ) {
+         fc_dlog( logger, "close: ${cid}", ("cid", cd.c->connection_id) );
+         cd.c->close( false, true );
       }
       connections.clear();
    }
@@ -4578,8 +4583,8 @@ namespace eosio {
       std::shared_lock g( connections_mtx );
       auto& index = connections.get<by_connection>();
       result.reserve( index.size() );
-      for( const connection_ptr& c : index ) {
-         result.emplace_back( c->get_status() );
+      for( const connection_detail& cd : index ) {
+         result.emplace_back( cd.c->get_status() );
       }
       return result;
    }
@@ -4597,6 +4602,9 @@ namespace eosio {
    void connections_manager::start_conn_timers() {
       start_conn_timer(connector_period, {}, timer_type::check); // this locks mutex
       start_conn_timer(connector_period, {}, timer_type::stats); // this locks mutex
+      if (update_p2p_connection_metrics) {
+         start_conn_timer(connector_period + connector_period / 2, {}, timer_type::stats); // this locks mutex
+      }
    }
 
    // called from any thread
@@ -4638,23 +4646,18 @@ namespace eosio {
       size_t num_rm = 0, num_clients = 0, num_peers = 0, num_bp_peers = 0;
       auto cleanup = [&num_peers, &num_rm, this](vector<connection_ptr>&& reconnecting, 
                                                  vector<connection_ptr>&& removing) {
-         for( auto c : reconnecting ) {
+         for( auto& c : reconnecting ) {
             if (!c->reconnect()) {
                --num_peers;
                ++num_rm;
                removing.push_back(c);
             }
          }
-         std::unique_lock g( connections_mtx );
+         std::scoped_lock g( connections_mtx );
          auto& index = connections.get<by_connection>();
-         for( auto c : removing ) {
-            auto rit = index.find(c);
-            if (rit != index.end()) {
-               index.erase(rit);
-            }
+         for( auto& c : removing ) {
+            index.erase(c);
          }
-         g.unlock();
-
       };
       auto max_time = fc::time_point::now().safe_add(max_cleanup_time);
       std::vector<connection_ptr> reconnecting, removing;
@@ -4707,21 +4710,13 @@ namespace eosio {
 
    // called from any thread
    void connections_manager::connection_statistics_monitor(const std::weak_ptr<connection>& from_connection) {
-      auto max_time = fc::time_point::now().safe_add(max_cleanup_time);
+      assert(update_p2p_connection_metrics);
       auto from = from_connection.lock();
-      const std::lock_guard g(connections_mtx);
+      std::shared_lock g(connections_mtx);
       auto& index = connections.get<by_connection>();
-      auto it = (from ? index.find(from) : index.begin());
-      if( it == index.end()) it = index.begin();
       size_t num_clients = 0, num_peers = 0, num_bp_peers = 0;
       net_plugin::p2p_per_connection_metrics per_connection(index.size());
-      while(it != index.end()) {
-         if(fc::time_point::now() >= max_time) {
-            connection_wptr wit = (*it).c;
-            fc_dlog(logger, "connection statistics monitor ran out of time");
-            start_conn_timer(std::chrono::milliseconds(1), wit, timer_type::stats);
-            return;
-         }
+      for (auto it = index.begin(); it != index.end(); ++it) {
          const connection_ptr& c = it->c;
          if(c->is_bp_connection) {
             ++num_bp_peers;
@@ -4730,35 +4725,31 @@ namespace eosio {
          } else {
             ++num_peers;
          }
-         if (update_p2p_connection_metrics) {
-            fc::unique_lock g_conn(c->conn_mtx);
-            boost::asio::ip::address_v6::bytes_type addr = c->remote_endpoint_ip_array;
-            g_conn.unlock();
-            net_plugin::p2p_per_connection_metrics::connection_metric metrics{
-                 .connection_id = c->connection_id
-               , .address = addr
-               , .port = c->get_remote_endpoint_port()
-               , .accepting_blocks = c->is_blocks_connection()
-               , .last_received_block = c->get_last_received_block_num()
-               , .first_available_block = c->get_peer_start_block_num()
-               , .last_available_block = c->get_peer_head_block_num()
-               , .unique_first_block_count = c->get_unique_blocks_rcvd_count()
-               , .latency = c->get_peer_ping_time_ns()
-               , .bytes_received = c->get_bytes_received()
-               , .last_bytes_received = c->get_last_bytes_received()
-               , .bytes_sent = c->get_bytes_sent()
-               , .last_bytes_sent = c->get_last_bytes_sent()
-               , .block_sync_bytes_sent = c->get_block_sync_bytes_sent()
-               , .connection_start_time = c->connection_start_time
-               , .log_p2p_address = c->log_p2p_address
-            };
-            per_connection.peers.push_back(metrics);
-         }
-      }
-
-      if(update_p2p_connection_metrics) {
-         update_p2p_connection_metrics({num_peers, num_clients, std::move(per_connection)});
+         fc::unique_lock g_conn(c->conn_mtx);
+         boost::asio::ip::address_v6::bytes_type addr = c->remote_endpoint_ip_array;
+         g_conn.unlock();
+         net_plugin::p2p_per_connection_metrics::connection_metric metrics{
+              .connection_id = c->connection_id
+            , .address = addr
+            , .port = c->get_remote_endpoint_port()
+            , .accepting_blocks = c->is_blocks_connection()
+            , .last_received_block = c->get_last_received_block_num()
+            , .first_available_block = c->get_peer_start_block_num()
+            , .last_available_block = c->get_peer_head_block_num()
+            , .unique_first_block_count = c->get_unique_blocks_rcvd_count()
+            , .latency = c->get_peer_ping_time_ns()
+            , .bytes_received = c->get_bytes_received()
+            , .last_bytes_received = c->get_last_bytes_received()
+            , .bytes_sent = c->get_bytes_sent()
+            , .last_bytes_sent = c->get_last_bytes_sent()
+            , .block_sync_bytes_sent = c->get_block_sync_bytes_sent()
+            , .connection_start_time = c->connection_start_time
+            , .log_p2p_address = c->log_p2p_address
+         };
+         per_connection.peers.push_back(metrics);
       }
+      g.unlock();
+      update_p2p_connection_metrics({num_peers+num_bp_peers, num_clients, std::move(per_connection)});
       start_conn_timer( connector_period, {}, timer_type::stats );
    }
 } // namespace eosio
\ No newline at end of file

From ff7a8a1c1e0e04e092f5980a9bd4e9f3932c8ce0 Mon Sep 17 00:00:00 2001
From: Jonathan Giszczak <jonrg@hypercubepc.com>
Date: Wed, 4 Oct 2023 00:05:13 -0500
Subject: [PATCH 38/61] Add block sync bytes received metric and use it in sync
 throttle test.
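
The accounting behind the new metric is a single per-connection accumulator,
incremented on the block receive path when an incoming block is handled as
part of sync (condensed sketch of the change below):

    std::atomic<size_t> block_sync_bytes_received{0};   // new per-connection counter
    ...
    block_sync_bytes_received += message_length;        // before handing the block to sync_recv_block()

The test reads the counter via the Prometheus endpoint before and after the
sync and checks that the derived rate,
(endSyncThrottledBytesReceived - startSyncThrottledBytesReceived) / throttledElapsed,
stays below the configured 40000 B/s cap.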

---
 .../include/eosio/net_plugin/net_plugin.hpp   |  1 +
 plugins/net_plugin/net_plugin.cpp             | 12 ++-
 plugins/prometheus_plugin/metrics.hpp         |  1 +
 tests/p2p_sync_throttle_test.py               | 99 +++++++++++++++++--
 4 files changed, 100 insertions(+), 13 deletions(-)

diff --git a/plugins/net_plugin/include/eosio/net_plugin/net_plugin.hpp b/plugins/net_plugin/include/eosio/net_plugin/net_plugin.hpp
index 1db805ac4f..6a797bd18a 100644
--- a/plugins/net_plugin/include/eosio/net_plugin/net_plugin.hpp
+++ b/plugins/net_plugin/include/eosio/net_plugin/net_plugin.hpp
@@ -54,6 +54,7 @@ namespace eosio {
                std::chrono::nanoseconds last_bytes_received{0};
                size_t bytes_sent{0};
                std::chrono::nanoseconds last_bytes_sent{0};
+               size_t block_sync_bytes_received{0};
                size_t block_sync_bytes_sent{0};
                std::chrono::nanoseconds connection_start_time{0};
                std::string log_p2p_address;
diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp
index 2e6e5edba6..a4137d453d 100644
--- a/plugins/net_plugin/net_plugin.cpp
+++ b/plugins/net_plugin/net_plugin.cpp
@@ -851,6 +851,7 @@ namespace eosio {
       std::chrono::nanoseconds get_last_bytes_received() const { return last_bytes_received.load(); }
       size_t get_bytes_sent() const { return bytes_sent.load(); }
       std::chrono::nanoseconds get_last_bytes_sent() const { return last_bytes_sent.load(); }
+      size_t get_block_sync_bytes_received() const { return block_sync_bytes_received.load(); }
       size_t get_block_sync_bytes_sent() const { return block_sync_bytes_sent.load(); }
       boost::asio::ip::port_type get_remote_endpoint_port() const { return remote_endpoint_port.load(); }
       void set_heartbeat_timeout(std::chrono::milliseconds msec) {
@@ -888,6 +889,7 @@ namespace eosio {
       std::atomic<size_t>             bytes_received{0};
       std::atomic<std::chrono::nanoseconds>   last_bytes_received{0ns};
       std::atomic<size_t>             bytes_sent{0};
+      std::atomic<size_t>             block_sync_bytes_received{0};
       std::atomic<size_t>             block_sync_bytes_sent{0};
       std::atomic<std::chrono::nanoseconds>   last_bytes_sent{0ns};
       std::atomic<boost::asio::ip::port_type> remote_endpoint_port{0};
@@ -1739,6 +1741,7 @@ namespace eosio {
             auto elapsed = std::chrono::duration_cast<std::chrono::seconds>(get_time() - connection_start_time);
             auto current_rate = double(block_sync_bytes_sent) / elapsed.count();
             if( current_rate >= block_sync_rate_limit ) {
+               peer_dlog( this, "throttling block sync to peer ${host}:${port}", ("host", log_remote_endpoint_ip)("port", log_remote_endpoint_port));
                return false;
             }
          }
@@ -3019,7 +3022,6 @@ namespace eosio {
       fc::raw::unpack( peek_ds, which ); // throw away
       block_header bh;
       fc::raw::unpack( peek_ds, bh );
-
       const block_id_type blk_id = bh.calculate_id();
       const uint32_t blk_num = last_received_block_num = block_header::num_from_id(blk_id);
       // don't add_peer_block because we have not validated this block header yet
@@ -3053,6 +3055,7 @@ namespace eosio {
             return true;
          }
       } else {
+         block_sync_bytes_received += message_length;
          my_impl->sync_master->sync_recv_block(shared_from_this(), blk_id, blk_num, false);
       }
 
@@ -4728,7 +4731,8 @@ namespace eosio {
          fc::unique_lock g_conn(c->conn_mtx);
          boost::asio::ip::address_v6::bytes_type addr = c->remote_endpoint_ip_array;
          g_conn.unlock();
-         net_plugin::p2p_per_connection_metrics::connection_metric metrics{
+         per_connection.peers.emplace_back(
+            net_plugin::p2p_per_connection_metrics::connection_metric{
               .connection_id = c->connection_id
             , .address = addr
             , .port = c->get_remote_endpoint_port()
@@ -4742,11 +4746,11 @@ namespace eosio {
             , .last_bytes_received = c->get_last_bytes_received()
             , .bytes_sent = c->get_bytes_sent()
             , .last_bytes_sent = c->get_last_bytes_sent()
+            , .block_sync_bytes_received = c->get_block_sync_bytes_received()
             , .block_sync_bytes_sent = c->get_block_sync_bytes_sent()
             , .connection_start_time = c->connection_start_time
             , .log_p2p_address = c->log_p2p_address
-         };
-         per_connection.peers.push_back(metrics);
+         });
       }
       g.unlock();
       update_p2p_connection_metrics({num_peers+num_bp_peers, num_clients, std::move(per_connection)});
diff --git a/plugins/prometheus_plugin/metrics.hpp b/plugins/prometheus_plugin/metrics.hpp
index f67620317e..5562896284 100644
--- a/plugins/prometheus_plugin/metrics.hpp
+++ b/plugins/prometheus_plugin/metrics.hpp
@@ -187,6 +187,7 @@ struct catalog_type {
          add_and_set_gauge("last_bytes_received", peer.last_bytes_received.count());
          add_and_set_gauge("bytes_sent", peer.bytes_sent);
          add_and_set_gauge("last_bytes_sent", peer.last_bytes_sent.count());
+         add_and_set_gauge("block_sync_bytes_received", peer.block_sync_bytes_received);
          add_and_set_gauge("block_sync_bytes_sent", peer.block_sync_bytes_sent);
          add_and_set_gauge("connection_start_time", peer.connection_start_time.count());
          add_and_set_gauge(peer.log_p2p_address, 0); // Empty gauge; we only want the label
diff --git a/tests/p2p_sync_throttle_test.py b/tests/p2p_sync_throttle_test.py
index 0de560d44e..647ce2d3da 100755
--- a/tests/p2p_sync_throttle_test.py
+++ b/tests/p2p_sync_throttle_test.py
@@ -1,5 +1,8 @@
 #!/usr/bin/env python3
 
+import math
+import re
+import requests
 import signal
 import time
 
@@ -13,6 +16,7 @@
 #
 ###############################################################
 
+PROMETHEUS_URL = '/v1/prometheus/metrics'
 
 Print=Utils.Print
 errorExit=Utils.errorExit
@@ -38,6 +42,19 @@
 cluster=Cluster(unshared=args.unshared, keepRunning=args.leave_running, keepLogs=args.keep_logs)
 walletMgr=WalletMgr(True)
 
+def readMetrics(host: str, port: str):
+    response = requests.get(f'http://{host}:{port}{PROMETHEUS_URL}', timeout=10)
+    if response.status_code != 200:
+        errorExit(f'Prometheus metrics URL returned {response.status_code}: {response.url}')
+    return response
+
+def extractPrometheusMetric(connID: str, metric: str, text: str):
+    searchStr = f'nodeos_p2p_connections{{connid_{connID}="{metric}"}} '
+    begin = text.find(searchStr) + len(searchStr)
+    return int(text[begin:response.text.find('\n', begin)])
+
+prometheusHostPortPattern = re.compile(r'^nodeos_p2p_connections.connid_([0-9])="localhost:([0-9]*)', re.MULTILINE)
+
 try:
     TestHelper.printSystemInfo("BEGIN")
 
@@ -46,10 +63,7 @@
     Print(f'producing nodes: {pnodes}, delay between nodes launch: {delay} second{"s" if delay != 1 else ""}')
 
     Print("Stand up cluster")
-    if args.plugin:
-        extraNodeosArgs = ''.join([i+j for i,j in zip([' --plugin '] * len(args.plugin), args.plugin)])
-    else:
-        extraNodeosArgs = ''
+    extraNodeosArgs = '--plugin eosio::prometheus_plugin --connection-cleanup-period 3'
     # Custom topology is a line of singlely connected nodes from highest node number in sequence to lowest,
     # the reverse of the usual TestHarness line topology.
     if cluster.launch(pnodes=pnodes, unstartedNodes=2, totalNodes=total_nodes, prodCount=prod_count, 
@@ -112,19 +126,86 @@
     cluster.launchUnstarted(2)
 
     throttledNode = cluster.getNode(3)
-    time.sleep(15)
+    while True:
+        try:
+            response = readMetrics(throttlingNode.host, throttlingNode.port)
+        except (requests.ConnectionError, requests.ReadTimeout) as e:
+            # waiting for node to finish startup and respond
+            time.sleep(0.5)
+        else:
+            connPorts = prometheusHostPortPattern.findall(response.text)
+            if len(connPorts) < 3:
+                # wait for node to be connected
+                time.sleep(0.5)
+                continue
+            Print('Throttling Node Start State')
+            #Print(response.text)
+            throttlingNodePortMap = {port: id for id, port in connPorts}
+            startSyncThrottlingBytesSent = extractPrometheusMetric(throttlingNodePortMap['9879'],
+                                                                   'block_sync_bytes_sent',
+                                                                   response.text)
+            Print(f'Start sync throttling bytes sent: {startSyncThrottlingBytesSent}')
+            break
+    while True:
+        try:
+            response = readMetrics(throttledNode.host, throttledNode.port)
+        except (requests.ConnectionError, requests.ReadTimeout) as e:
+            # waiting for node to finish startup and respond
+            time.sleep(0.5)
+        else:
+            if 'nodeos_p2p_connections{connid_2' not in response.text:
+                # wait for sending node to be connected
+                continue
+            Print('Throttled Node Start State')
+            #Print(response.text)
+            connPorts = prometheusHostPortPattern.findall(response.text)
+            throttledNodePortMap = {port: id for id, port in connPorts}
+            startSyncThrottledBytesReceived = extractPrometheusMetric(throttledNodePortMap['9878'],
+                                                                      'block_sync_bytes_received',
+                                                                      response.text)
+            Print(f'Start sync throttled bytes received: {startSyncThrottledBytesReceived}')
+            break
+
     # Throttling node was offline during block generation and once online receives blocks as fast as possible while
     # transmitting blocks to the next node in line at the above throttle setting.
     assert throttlingNode.waitForBlock(endLargeBlocksHeadBlock), f'wait for block {endLargeBlocksHeadBlock}  on throttled node timed out'
     endThrottlingSync = time.time()
+    try:
+        response = readMetrics(throttlingNode.host, throttlingNode.port)
+    except (requests.ConnectionError, requests.ReadTimeout) as e:
+        errorExit(str(e))
+    else:
+        Print('Throttling Node End State')
+        #Print(response.text)
+        endSyncThrottlingBytesSent = extractPrometheusMetric(throttlingNodePortMap['9879'],
+                                                             'block_sync_bytes_sent',
+                                                             response.text)
+        Print(f'End sync throttling bytes sent: {endSyncThrottlingBytesSent}')
     # Throttled node is connecting to a listen port with a block sync throttle applied so it will receive
     # blocks more slowly during syncing than an unthrottled node.
     assert throttledNode.waitForBlock(endLargeBlocksHeadBlock, timeout=90), f'Wait for block {endLargeBlocksHeadBlock} on sync node timed out'
     endThrottledSync = time.time()
-    Print(f'Unthrottled sync time: {endThrottlingSync - clusterStart} seconds')
-    Print(f'Throttled sync time: {endThrottledSync - clusterStart} seconds')
-    # 15 seconds chosen as the minimum reasonable sync time differential given the throttle and the average block size.
-    assert endThrottledSync - clusterStart > endThrottlingSync - clusterStart + 15, 'Throttled sync time must be at least 15 seconds greater than unthrottled'
+    try:
+        response = readMetrics(throttledNode.host, throttledNode.port)
+    except (requests.ConnectionError, requests.ReadTimeout) as e:
+        errorExit(str(e))
+    else:
+        Print('Throttled Node End State')
+        #Print(response.text)
+        endSyncThrottledBytesReceived = extractPrometheusMetric(throttledNodePortMap['9878'],
+                                                                'block_sync_bytes_received',
+                                                                response.text)
+        Print(f'End sync throttled bytes received: {endSyncThrottledBytesReceived}')
+    throttlingElapsed = endThrottlingSync - clusterStart
+    throttledElapsed = endThrottledSync - clusterStart
+    Print(f'Unthrottled sync time: {throttlingElapsed} seconds')
+    Print(f'Throttled sync time: {throttledElapsed} seconds')
+    # Sanity check
+    assert throttledElapsed > throttlingElapsed + 15, 'Throttled sync time must be at least 15 seconds greater than unthrottled'
+    # Calculate block receive rate
+    calculatedRate = (endSyncThrottledBytesReceived - startSyncThrottledBytesReceived)/throttledElapsed
+    #assert math.isclose(calculatedRate, 40000, rel_tol=0.01), f'Throttled bytes receive rate must be near 40,000, was {calculatedRate}'
+    assert calculatedRate < 40000, f'Throttled bytes receive rate must be less than 40,000, was {calculatedRate}'
 
     testSuccessful=True
 finally:

From 6b2fe6396984023e761fb88673474ffa24cd8201 Mon Sep 17 00:00:00 2001
From: Peter Oschwald <oschwaldp@objectcomputing.com>
Date: Wed, 4 Oct 2023 08:03:20 -0500
Subject: [PATCH 39/61] Add requests module for test.

---
 .cicd/platforms/ubuntu20.Dockerfile | 1 +
 .cicd/platforms/ubuntu22.Dockerfile | 1 +
 2 files changed, 2 insertions(+)

diff --git a/.cicd/platforms/ubuntu20.Dockerfile b/.cicd/platforms/ubuntu20.Dockerfile
index fe7aaea80e..89783cb40e 100644
--- a/.cicd/platforms/ubuntu20.Dockerfile
+++ b/.cicd/platforms/ubuntu20.Dockerfile
@@ -12,6 +12,7 @@ RUN apt-get update && apt-get upgrade -y && \
                        llvm-11-dev          \
                        ninja-build          \
                        python3-numpy        \
+                       python3-requests     \
                        file                 \
                        zlib1g-dev           \
                        zstd &&              \
diff --git a/.cicd/platforms/ubuntu22.Dockerfile b/.cicd/platforms/ubuntu22.Dockerfile
index 275d52a4c7..8fcca67050 100644
--- a/.cicd/platforms/ubuntu22.Dockerfile
+++ b/.cicd/platforms/ubuntu22.Dockerfile
@@ -11,6 +11,7 @@ RUN apt-get update && apt-get upgrade -y && \
                        llvm-11-dev          \
                        ninja-build          \
                        python3-numpy        \
+                       python3-requests     \
                        file                 \
                        zlib1g-dev           \
                        zstd

From 03cfc26bb397e9b0348f88a14db1eea5159fc796 Mon Sep 17 00:00:00 2001
From: Kevin Heifner <heifnerk@objectcomputing.com>
Date: Fri, 6 Oct 2023 12:09:29 -0500
Subject: [PATCH 40/61] GH-1507 Use error log only for true error conditions that
 require node operator intervention
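
The intent, illustrated with lines taken from the diff below: keep elog for
conditions an operator must act on, and downgrade peer misbehaviour and
routine network events:

    // operator attention likely required (e.g. possibly corrupted block log)
    peer_elog( this, "caught assert on fetch_block_by_id, ${ex}, id ${id}", ("ex", ex.to_string())("id", blkid) );
    // peer misbehaved; the node recovers on its own
    peer_wlog( this, "bad handshake message");
    // expected, routine event
    fc_ilog( logger, "connection failed to ${a}, ${error}", ("a", c->peer_address())( "error", err.message()));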

---
 plugins/net_plugin/net_plugin.cpp | 74 ++++++++++++++++---------------
 1 file changed, 39 insertions(+), 35 deletions(-)

diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp
index 807660bd6f..afee66a612 100644
--- a/plugins/net_plugin/net_plugin.cpp
+++ b/plugins/net_plugin/net_plugin.cpp
@@ -1296,7 +1296,7 @@ namespace eosio {
       boost::system::error_code ec;
       socket->set_option( nodelay, ec );
       if( ec ) {
-         peer_elog( this, "connection failed (set_option): ${e1}", ( "e1", ec.message() ) );
+         peer_wlog( this, "connection failed (set_option): ${e1}", ( "e1", ec.message() ) );
          close();
          return false;
       } else {
@@ -1472,6 +1472,7 @@ namespace eosio {
             peer_ilog( this, "fetch block by id returned null, id ${id}", ("id", blkid) );
          }
       } catch( const assert_exception& ex ) {
+         // possible corrupted block log
          peer_elog( this, "caught assert on fetch_block_by_id, ${ex}, id ${id}", ("ex", ex.to_string())("id", blkid) );
       } catch( ... ) {
          peer_elog( this, "caught other exception fetching block id ${id}", ("id", blkid) );
@@ -1599,7 +1600,7 @@ namespace eosio {
 
                if( ec ) {
                   if( ec.value() != boost::asio::error::eof ) {
-                     peer_elog( c, "Error sending to peer: ${i}", ( "i", ec.message() ) );
+                     peer_wlog( c, "Error sending to peer: ${i}", ( "i", ec.message() ) );
                   } else {
                      peer_wlog( c, "connection closure detected on write" );
                   }
@@ -2042,7 +2043,7 @@ namespace eosio {
 
       // verify there is an available source
       if( !new_sync_source ) {
-         fc_elog( logger, "Unable to continue syncing at this time");
+         fc_wlog( logger, "Unable to continue syncing at this time");
          sync_source.reset();
          sync_known_lib_num = chain_info.lib_num;
          sync_last_requested_num = 0;
@@ -2289,7 +2290,7 @@ namespace eosio {
                   "sync_recv_notice only called on catch_up" );
       if (msg.known_blocks.mode == catch_up) {
          if (msg.known_blocks.ids.empty()) {
-            peer_elog( c, "got a catch up with ids size = 0" );
+            peer_wlog( c, "got a catch up with ids size = 0" );
          } else {
             const block_id_type& id = msg.known_blocks.ids.back();
             peer_ilog( c, "notice_message, pending ${p}, blk_num ${n}, id ${id}...",
@@ -2577,7 +2578,7 @@ namespace eosio {
    void dispatch_manager::recv_notice(const connection_ptr& c, const notice_message& msg, bool generated) {
       if (msg.known_trx.mode == normal) {
       } else if (msg.known_trx.mode != none) {
-         peer_elog( c, "passed a notice_message with something other than a normal on none known_trx" );
+         peer_wlog( c, "passed a notice_message with something other than a normal on none known_trx" );
          return;
       }
       if (msg.known_blocks.mode == normal) {
@@ -2588,7 +2589,7 @@ namespace eosio {
             }
          }
       } else if (msg.known_blocks.mode != none) {
-         peer_elog( c, "passed a notice_message with something other than a normal on none known_blocks" );
+         peer_wlog( c, "passed a notice_message with something other than a normal on none known_blocks" );
          return;
       }
    }
@@ -2691,7 +2692,7 @@ namespace eosio {
                if( !err ) {
                   c->connect( resolver, endpoints );
                } else {
-                  fc_elog( logger, "Unable to resolve ${host}:${port} ${error}",
+                  fc_wlog( logger, "Unable to resolve ${host}:${port} ${error}",
                            ("host", host)("port", port)( "error", err.message() ) );
                   c->set_state(connection_state::closed);
                   ++c->consecutive_immediate_connection_close;
@@ -2716,7 +2717,7 @@ namespace eosio {
                   c->send_time();
                }
             } else {
-               fc_elog( logger, "connection failed to ${a}, ${error}", ("a", c->peer_address())( "error", err.message()));
+               fc_ilog( logger, "connection failed to ${a}, ${error}", ("a", c->peer_address())( "error", err.message()));
                c->close( false );
                if (my_impl->increment_failed_p2p_connections) {
                   my_impl->increment_failed_p2p_connections();
@@ -2733,7 +2734,7 @@ namespace eosio {
       const auto&               paddr_add = socket.remote_endpoint(rec).address();
       string                    paddr_str;
       if (rec) {
-         fc_elog(logger, "Error getting remote endpoint: ${m}", ("m", rec.message()));
+         fc_ilog(logger, "Unable to get remote endpoint: ${m}", ("m", rec.message()));
       } else {
          paddr_str        = paddr_add.to_string();
          connections.for_each_connection([&visitors, &from_addr, &paddr_str](auto& conn) {
@@ -2939,7 +2940,7 @@ namespace eosio {
          }
 
       } catch( const fc::exception& e ) {
-         peer_elog( this, "Exception in handling message: ${s}", ("s", e.to_detail_string()) );
+         peer_wlog( this, "Exception in handling message: ${s}", ("s", e.to_detail_string()) );
          close();
          return false;
       }
@@ -3156,7 +3157,7 @@ namespace eosio {
    // called from connection strand
    void connection::handle_message( const handshake_message& msg ) {
       if( !is_valid( msg ) ) {
-         peer_elog( this, "bad handshake message");
+         peer_wlog( this, "bad handshake message");
          no_retry = go_away_reason::fatal_other;
          enqueue( go_away_message( fatal_other ) );
          return;
@@ -3174,7 +3175,7 @@ namespace eosio {
       set_state(connection_state::connected);
       if (msg.generation == 1) {
          if( msg.node_id == my_impl->node_id) {
-            peer_elog( this, "Self connection detected node_id ${id}. Closing connection", ("id", msg.node_id) );
+            peer_ilog( this, "Self connection detected node_id ${id}. Closing connection", ("id", msg.node_id) );
             no_retry = go_away_reason::self;
             enqueue( go_away_message( go_away_reason::self ) );
             return;
@@ -3249,7 +3250,7 @@ namespace eosio {
          }
 
          if( msg.chain_id != my_impl->chain_id ) {
-            peer_elog( this, "Peer on a different chain. Closing connection" );
+            peer_ilog( this, "Peer on a different chain. Closing connection" );
             no_retry = go_away_reason::wrong_chain;
             enqueue( go_away_message(go_away_reason::wrong_chain) );
             return;
@@ -3266,7 +3267,7 @@ namespace eosio {
          short_conn_node_id = conn_node_id.str().substr( 0, 7 );
 
          if( !my_impl->authenticate_peer( msg ) ) {
-            peer_elog( this, "Peer not authenticated.  Closing connection." );
+            peer_wlog( this, "Peer not authenticated.  Closing connection." );
             no_retry = go_away_reason::authentication;
             enqueue( go_away_message( go_away_reason::authentication ) );
             return;
@@ -3291,9 +3292,9 @@ namespace eosio {
                on_fork = true;
             }
             if( on_fork ) {
-                  peer_elog( this, "Peer chain is forked, sending: forked go away" );
-                  no_retry = go_away_reason::forked;
-                  enqueue( go_away_message( go_away_reason::forked ) );
+               peer_wlog( this, "Peer chain is forked, sending: forked go away" );
+               no_retry = go_away_reason::forked;
+               enqueue( go_away_message( go_away_reason::forked ) );
             }
          }
 
@@ -3428,7 +3429,7 @@ namespace eosio {
       //
       set_state(connection_state::connected);
       if( msg.known_blocks.ids.size() > 2 ) {
-         peer_elog( this, "Invalid notice_message, known_blocks.ids.size ${s}, closing connection",
+         peer_wlog( this, "Invalid notice_message, known_blocks.ids.size ${s}, closing connection",
                     ("s", msg.known_blocks.ids.size()) );
          close( false );
          return;
@@ -3480,7 +3481,7 @@ namespace eosio {
          break;
       }
       default: {
-         peer_elog( this, "bad notice_message : invalid known_blocks.mode ${m}",
+         peer_wlog( this, "bad notice_message : invalid known_blocks.mode ${m}",
                     ("m", static_cast<uint32_t>(msg.known_blocks.mode)) );
       }
       }
@@ -3488,7 +3489,7 @@ namespace eosio {
 
    void connection::handle_message( const request_message& msg ) {
       if( msg.req_blocks.ids.size() > 1 ) {
-         peer_elog( this, "Invalid request_message, req_blocks.ids.size ${s}, closing",
+         peer_wlog( this, "Invalid request_message, req_blocks.ids.size ${s}, closing",
                     ("s", msg.req_blocks.ids.size()) );
          close();
          return;
@@ -3519,7 +3520,7 @@ namespace eosio {
          // no break
       case normal :
          if( !msg.req_trx.ids.empty() ) {
-            peer_elog( this, "Invalid request_message, req_trx.ids.size ${s}", ("s", msg.req_trx.ids.size()) );
+            peer_wlog( this, "Invalid request_message, req_trx.ids.size ${s}", ("s", msg.req_trx.ids.size()) );
             close();
             return;
          }
@@ -3569,7 +3570,7 @@ namespace eosio {
             if( !trace->except ) {
                fc_dlog( logger, "chain accepted transaction, bcast ${id}", ("id", trace->id) );
             } else {
-               fc_elog( logger, "bad packed_transaction : ${m}", ("m", trace->except->what()));
+               fc_ilog( logger, "bad packed_transaction : ${m}", ("m", trace->except->what()));
             }
          }
          connection_ptr conn = weak.lock();
@@ -3601,11 +3602,11 @@ namespace eosio {
             bsp = cc.create_block_state( id, ptr );
          } catch( const fc::exception& ex ) {
             exception = true;
-            fc_elog( logger, "bad block exception connection ${cid}: #${n} ${id}...: ${m}",
+            fc_ilog( logger, "bad block exception connection ${cid}: #${n} ${id}...: ${m}",
                      ("cid", cid)("n", ptr->block_num())("id", id.str().substr(8,16))("m",ex.to_string()));
          } catch( ... ) {
             exception = true;
-            fc_elog( logger, "bad block connection ${cid}: #${n} ${id}...: unknown exception",
+            fc_wlog( logger, "bad block connection ${cid}: #${n} ${id}...: unknown exception",
                      ("cid", cid)("n", ptr->block_num())("id", id.str().substr(8,16)));
          }
          if( exception ) {
@@ -3653,8 +3654,11 @@ namespace eosio {
             });
             return;
          }
-      } catch(...) {
-         fc_elog( logger, "Caught an unknown exception trying to fetch block ${id}", ("id", blk_id) );
+      } catch( const assert_exception& ex ) {
+         // possible corrupted block log
+         fc_elog( logger, "caught assert on fetch_block_by_id, ${ex}, id ${id}, conn ${c}", ("ex", ex.to_string())("id", blk_id)("c", connection_id) );
+      } catch( ... ) {
+         fc_elog( logger, "caught an unknown exception trying to fetch block ${id}, conn ${c}", ("id", blk_id)("c", connection_id) );
       }
 
       fc::microseconds age( fc::time_point::now() - block->timestamp);
@@ -3667,23 +3671,23 @@ namespace eosio {
          accepted = my_impl->chain_plug->accept_block(block, blk_id, bsp);
          my_impl->update_chain_info();
       } catch( const unlinkable_block_exception &ex) {
-         fc_elog(logger, "unlinkable_block_exception connection ${cid}: #${n} ${id}...: ${m}",
+         fc_ilog(logger, "unlinkable_block_exception connection ${cid}: #${n} ${id}...: ${m}",
                  ("cid", c->connection_id)("n", blk_num)("id", blk_id.str().substr(8,16))("m",ex.to_string()));
          reason = unlinkable;
       } catch( const block_validate_exception &ex ) {
-         fc_elog(logger, "block_validate_exception connection ${cid}: #${n} ${id}...: ${m}",
+         fc_ilog(logger, "block_validate_exception connection ${cid}: #${n} ${id}...: ${m}",
                  ("cid", c->connection_id)("n", blk_num)("id", blk_id.str().substr(8,16))("m",ex.to_string()));
          reason = validation;
       } catch( const assert_exception &ex ) {
-         fc_elog(logger, "block assert_exception connection ${cid}: #${n} ${id}...: ${m}",
+         fc_wlog(logger, "block assert_exception connection ${cid}: #${n} ${id}...: ${m}",
                  ("cid", c->connection_id)("n", blk_num)("id", blk_id.str().substr(8,16))("m",ex.to_string()));
          reason = fatal_other;
       } catch( const fc::exception &ex ) {
-         fc_elog(logger, "bad block exception connection ${cid}: #${n} ${id}...: ${m}",
+         fc_ilog(logger, "bad block exception connection ${cid}: #${n} ${id}...: ${m}",
                  ("cid", c->connection_id)("n", blk_num)("id", blk_id.str().substr(8,16))("m",ex.to_string()));
          reason = fatal_other;
       } catch( ... ) {
-         fc_elog(logger, "bad block connection ${cid}: #${n} ${id}...: unknown exception",
+         fc_wlog(logger, "bad block connection ${cid}: #${n} ${id}...: unknown exception",
                  ("cid", c->connection_id)("n", blk_num)("id", blk_id.str().substr(8,16)));
          reason = fatal_other;
       }
@@ -3832,7 +3836,7 @@ namespace eosio {
          if(producer_plug != nullptr)
             found_producer_key = producer_plug->is_producer_key(msg.key);
          if( allowed_it == allowed_peers.end() && private_it == private_keys.end() && !found_producer_key) {
-            fc_elog( logger, "Peer ${peer} sent a handshake with an unauthorized key: ${key}.",
+            fc_wlog( logger, "Peer ${peer} sent a handshake with an unauthorized key: ${key}.",
                      ("peer", msg.p2p_address)("key", msg.key) );
             return false;
          }
@@ -3841,7 +3845,7 @@ namespace eosio {
       if(msg.sig != chain::signature_type() && msg.token != sha256()) {
          sha256 hash = fc::sha256::hash(msg.time);
          if(hash != msg.token) {
-            fc_elog( logger, "Peer ${peer} sent a handshake with an invalid token.", ("peer", msg.p2p_address) );
+            fc_wlog( logger, "Peer ${peer} sent a handshake with an invalid token.", ("peer", msg.p2p_address) );
             return false;
          }
          chain::public_key_type peer_key;
@@ -3849,11 +3853,11 @@ namespace eosio {
             peer_key = crypto::public_key(msg.sig, msg.token, true);
          }
          catch (const std::exception& /*e*/) {
-            fc_elog( logger, "Peer ${peer} sent a handshake with an unrecoverable key.", ("peer", msg.p2p_address) );
+            fc_wlog( logger, "Peer ${peer} sent a handshake with an unrecoverable key.", ("peer", msg.p2p_address) );
             return false;
          }
          if((allowed_connections & (Producers | Specified)) && peer_key != msg.key) {
-            fc_elog( logger, "Peer ${peer} sent a handshake with an unauthenticated key.", ("peer", msg.p2p_address) );
+            fc_wlog( logger, "Peer ${peer} sent a handshake with an unauthenticated key.", ("peer", msg.p2p_address) );
             return false;
          }
       }

From 1e5b4275d19fc760f9e7ff009f3cbc5a4d6c6c5c Mon Sep 17 00:00:00 2001
From: Jonathan Giszczak <jonrg@hypercubepc.com>
Date: Fri, 6 Oct 2023 14:59:21 -0500
Subject: [PATCH 41/61] Add throttling flag to Prometheus peer data and use it
 in sync test.

Remove dependency on python requests package.
Remove locale-aware parsing of sync throttle rate.
Prevent transmitting peer from throttling while not in sync mode.
Add timeouts to throttle sync test.
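
The send-side check now throttles only while the peer is actually syncing
from us, and records the throttling state for Prometheus (condensed sketch
of the change below, names as in net_plugin.cpp):

    if( block_sync_rate_limit > 0 && peer_syncing_from_us ) {
       auto elapsed = std::chrono::duration_cast<std::chrono::seconds>(get_time() - connection_start_time);
       auto current_rate = double(block_sync_bytes_sent) / elapsed.count();
       if( current_rate >= block_sync_rate_limit ) {
          block_sync_throttling = true;    // exported as the block_sync_throttling gauge
          peer_dlog( this, "throttling block sync to peer ${host}:${port}", ("host", log_remote_endpoint_ip)("port", log_remote_endpoint_port));
          return false;                    // defer sending the next sync block
       }
    }
    block_sync_throttling = false;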
---
 .cicd/platforms/ubuntu20.Dockerfile           |   1 -
 .cicd/platforms/ubuntu22.Dockerfile           |   1 -
 .../include/eosio/net_plugin/net_plugin.hpp   |   1 +
 plugins/net_plugin/net_plugin.cpp             |  14 +-
 plugins/prometheus_plugin/metrics.hpp         |   1 +
 tests/p2p_sync_throttle_test.py               | 126 +++++++++---------
 6 files changed, 76 insertions(+), 68 deletions(-)

diff --git a/.cicd/platforms/ubuntu20.Dockerfile b/.cicd/platforms/ubuntu20.Dockerfile
index 89783cb40e..fe7aaea80e 100644
--- a/.cicd/platforms/ubuntu20.Dockerfile
+++ b/.cicd/platforms/ubuntu20.Dockerfile
@@ -12,7 +12,6 @@ RUN apt-get update && apt-get upgrade -y && \
                        llvm-11-dev          \
                        ninja-build          \
                        python3-numpy        \
-                       python3-requests     \
                        file                 \
                        zlib1g-dev           \
                        zstd &&              \
diff --git a/.cicd/platforms/ubuntu22.Dockerfile b/.cicd/platforms/ubuntu22.Dockerfile
index 8fcca67050..275d52a4c7 100644
--- a/.cicd/platforms/ubuntu22.Dockerfile
+++ b/.cicd/platforms/ubuntu22.Dockerfile
@@ -11,7 +11,6 @@ RUN apt-get update && apt-get upgrade -y && \
                        llvm-11-dev          \
                        ninja-build          \
                        python3-numpy        \
-                       python3-requests     \
                        file                 \
                        zlib1g-dev           \
                        zstd
diff --git a/plugins/net_plugin/include/eosio/net_plugin/net_plugin.hpp b/plugins/net_plugin/include/eosio/net_plugin/net_plugin.hpp
index 6a797bd18a..1548006803 100644
--- a/plugins/net_plugin/include/eosio/net_plugin/net_plugin.hpp
+++ b/plugins/net_plugin/include/eosio/net_plugin/net_plugin.hpp
@@ -56,6 +56,7 @@ namespace eosio {
                std::chrono::nanoseconds last_bytes_sent{0};
                size_t block_sync_bytes_received{0};
                size_t block_sync_bytes_sent{0};
+               bool block_sync_throttling{false};
                std::chrono::nanoseconds connection_start_time{0};
                std::string log_p2p_address;
             };
diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp
index 8bec0ad043..565729e203 100644
--- a/plugins/net_plugin/net_plugin.cpp
+++ b/plugins/net_plugin/net_plugin.cpp
@@ -853,6 +853,7 @@ namespace eosio {
       std::chrono::nanoseconds get_last_bytes_sent() const { return last_bytes_sent.load(); }
       size_t get_block_sync_bytes_received() const { return block_sync_bytes_received.load(); }
       size_t get_block_sync_bytes_sent() const { return block_sync_bytes_sent.load(); }
+      bool get_block_sync_throttling() const { return block_sync_throttling.load(); }
       boost::asio::ip::port_type get_remote_endpoint_port() const { return remote_endpoint_port.load(); }
       void set_heartbeat_timeout(std::chrono::milliseconds msec) {
          hb_timeout = msec;
@@ -891,6 +892,7 @@ namespace eosio {
       std::atomic<size_t>             bytes_sent{0};
       std::atomic<size_t>             block_sync_bytes_received{0};
       std::atomic<size_t>             block_sync_bytes_sent{0};
+      std::atomic<bool>               block_sync_throttling{false};
       std::atomic<std::chrono::nanoseconds>   last_bytes_sent{0ns};
       std::atomic<boost::asio::ip::port_type> remote_endpoint_port{0};
 
@@ -1737,14 +1739,16 @@ namespace eosio {
          sb = cc.fetch_block_by_number( num ); // thread-safe
       } FC_LOG_AND_DROP();
       if( sb ) {
-         if( block_sync_rate_limit > 0 ) {
+         if( block_sync_rate_limit > 0 && peer_syncing_from_us ) {
             auto elapsed = std::chrono::duration_cast<std::chrono::seconds>(get_time() - connection_start_time);
             auto current_rate = double(block_sync_bytes_sent) / elapsed.count();
             if( current_rate >= block_sync_rate_limit ) {
+               block_sync_throttling = true;
                peer_dlog( this, "throttling block sync to peer ${host}:${port}", ("host", log_remote_endpoint_ip)("port", log_remote_endpoint_port));
                return false;
             }
          }
+         block_sync_throttling = false;
          block_sync_bytes_sent += enqueue_block( sb, true );
          ++peer_requested->last;
       } else {
@@ -4007,13 +4011,12 @@ namespace eosio {
          ( "p2p-listen-endpoint", bpo::value< vector<string> >()->default_value( vector<string>(1, string("0.0.0.0:9876:0")) ), "The actual host:port[:<rate-cap>] used to listen for incoming p2p connections. May be used multiple times. "
            "  The optional rate cap will limit per connection block sync bandwidth to the specified rate.  Total "
            "  allowed bandwidth is the rate-cap multiplied by the connection count limit.  A number alone will be "
-           "  interpreted as bytes per second.  The number is parsed locale-aware and may include thousands and "
-           "  decimal separators.  It may also be suffixed with units.  Supported units are: "
+           "  interpreted as bytes per second.  The number may be suffixed with units.  Supported units are: "
            "  'B/s', 'KB/s', 'MB/s, 'GB/s', 'TB/s', 'KiB/s', 'MiB/s', 'GiB/s', 'TiB/s'."
            "  Transactions and blocks outside of sync mode are not throttled."
            "  Examples:\n"
            "    192.168.0.100:9876:1MiB/s\n"
-           "    node.eos.io:9876:1,512KB/s\n"
+           "    node.eos.io:9876:1512KB/s\n"
            "    node.eos.io:9876:0.5GB/s\n"
            "    [2001:db8:85a3:8d3:1319:8a2e:370:7348]:9876:250KB/s")
          ( "p2p-server-address", bpo::value< vector<string> >(), "An externally accessible host:port for identifying this node. Defaults to p2p-listen-endpoint. May be used as many times as p2p-listen-endpoint. If provided, the first address will be used in handshakes with other nodes. Otherwise the default is used.")
@@ -4094,8 +4097,6 @@ namespace eosio {
 
    size_t net_plugin_impl::parse_connection_rate_limit( const std::string& limit_str) const {
       std::istringstream in(limit_str);
-      fc_dlog( logger, "parsing connection endpoint limit ${limit} with locale ${l}", ("limit", limit_str)("l", std::locale("").name()));
-      in.imbue(std::locale(""));
       double limit{0};
       in >> limit;
       EOS_ASSERT(limit >= 0.0, plugin_config_exception, "block sync rate limit must not be negative: ${limit}", ("limit", limit_str));
@@ -4749,6 +4750,7 @@ namespace eosio {
             , .last_bytes_sent = c->get_last_bytes_sent()
             , .block_sync_bytes_received = c->get_block_sync_bytes_received()
             , .block_sync_bytes_sent = c->get_block_sync_bytes_sent()
+            , .block_sync_throttling = c->get_block_sync_throttling()
             , .connection_start_time = c->connection_start_time
             , .log_p2p_address = c->log_p2p_address
          });
diff --git a/plugins/prometheus_plugin/metrics.hpp b/plugins/prometheus_plugin/metrics.hpp
index 5562896284..9c0fb3ac88 100644
--- a/plugins/prometheus_plugin/metrics.hpp
+++ b/plugins/prometheus_plugin/metrics.hpp
@@ -189,6 +189,7 @@ struct catalog_type {
          add_and_set_gauge("last_bytes_sent", peer.last_bytes_sent.count());
          add_and_set_gauge("block_sync_bytes_received", peer.block_sync_bytes_received);
          add_and_set_gauge("block_sync_bytes_sent", peer.block_sync_bytes_sent);
+         add_and_set_gauge("block_sync_throttling", peer.block_sync_throttling);
          add_and_set_gauge("connection_start_time", peer.connection_start_time.count());
          add_and_set_gauge(peer.log_p2p_address, 0); // Empty gauge; we only want the label
       }
diff --git a/tests/p2p_sync_throttle_test.py b/tests/p2p_sync_throttle_test.py
index 647ce2d3da..b8cec9bda9 100755
--- a/tests/p2p_sync_throttle_test.py
+++ b/tests/p2p_sync_throttle_test.py
@@ -2,11 +2,12 @@
 
 import math
 import re
-import requests
 import signal
+import sys
 import time
+import urllib
 
-from TestHarness import Cluster, TestHelper, Utils, WalletMgr, CORE_SYMBOL, createAccountKeys
+from TestHarness import Cluster, TestHelper, Utils, WalletMgr, CORE_SYMBOL, createAccountKeys, ReturnType
 from TestHarness.TestHelper import AppArgs
 
 ###############################################################
@@ -16,8 +17,6 @@
 #
 ###############################################################
 
-PROMETHEUS_URL = '/v1/prometheus/metrics'
-
 Print=Utils.Print
 errorExit=Utils.errorExit
 
@@ -42,16 +41,10 @@
 cluster=Cluster(unshared=args.unshared, keepRunning=args.leave_running, keepLogs=args.keep_logs)
 walletMgr=WalletMgr(True)
 
-def readMetrics(host: str, port: str):
-    response = requests.get(f'http://{host}:{port}{PROMETHEUS_URL}', timeout=10)
-    if response.status_code != 200:
-        errorExit(f'Prometheus metrics URL returned {response.status_code}: {response.url}')
-    return response
-
 def extractPrometheusMetric(connID: str, metric: str, text: str):
     searchStr = f'nodeos_p2p_connections{{connid_{connID}="{metric}"}} '
     begin = text.find(searchStr) + len(searchStr)
-    return int(text[begin:response.text.find('\n', begin)])
+    return int(text[begin:text.find('\n', begin)])
 
 prometheusHostPortPattern = re.compile(r'^nodeos_p2p_connections.connid_([0-9])="localhost:([0-9]*)', re.MULTILINE)
 
@@ -101,7 +94,7 @@ def extractPrometheusMetric(connID: str, metric: str, text: str):
     beginLargeBlocksHeadBlock = nonProdNode.getHeadBlockNum()
 
     Print("Configure and launch txn generators")
-    targetTpsPerGenerator = 100
+    targetTpsPerGenerator = 200
     testTrxGenDurationSec=60
     trxGeneratorCnt=1
     cluster.launchTrxGenerators(contractOwnerAcctName=cluster.eosioAccount.name, acctNamesList=[accounts[0].name,accounts[1].name],
@@ -113,10 +106,10 @@ def extractPrometheusMetric(connID: str, metric: str, text: str):
     throttlingNode = cluster.unstartedNodes[0]
     i = throttlingNode.cmd.index('--p2p-listen-endpoint')
     throttleListenAddr = throttlingNode.cmd[i+1]
-    # Using 40000 bytes per second to allow syncing of 10,000 byte blocks resulting from
-    # the trx generators in a reasonable amount of time, while still being reliably
-    # distinguishable from unthrottled throughput.
-    throttlingNode.cmd[i+1] = throttlingNode.cmd[i+1] + ':40000B/s'
+    # Using 5000 bytes per second to allow syncing of ~100 transaction blocks resulting from
+    # the trx generators in a reasonable amount of time, while still being able to capture
+    # throttling state within the Prometheus update window (3 seconds in this test).
+    throttlingNode.cmd[i+1] = throttlingNode.cmd[i+1] + ':5000B/s'
     throttleListenIP, throttleListenPort = throttleListenAddr.split(':')
     throttlingNode.cmd.append('--p2p-listen-endpoint')
     throttlingNode.cmd.append(f'{throttleListenIP}:{int(throttleListenPort)+100}:1TB/s')
@@ -126,86 +119,99 @@ def extractPrometheusMetric(connID: str, metric: str, text: str):
     cluster.launchUnstarted(2)
 
     throttledNode = cluster.getNode(3)
-    while True:
+    while time.time() < clusterStart + 30:
         try:
-            response = readMetrics(throttlingNode.host, throttlingNode.port)
-        except (requests.ConnectionError, requests.ReadTimeout) as e:
-            # waiting for node to finish startup and respond
+            response = throttlingNode.processUrllibRequest('prometheus', 'metrics', returnType=ReturnType.raw, printReturnLimit=16).decode()
+        except urllib.error.URLError:
+            # catch ConnectionRefusedError waiting for node to finish startup and respond
             time.sleep(0.5)
+            continue
         else:
-            connPorts = prometheusHostPortPattern.findall(response.text)
+            if len(response) < 100:
+                # tolerate HTTPError as well (method returns only the exception code)
+                continue
+            connPorts = prometheusHostPortPattern.findall(response)
             if len(connPorts) < 3:
                 # wait for node to be connected
                 time.sleep(0.5)
                 continue
             Print('Throttling Node Start State')
-            #Print(response.text)
             throttlingNodePortMap = {port: id for id, port in connPorts}
             startSyncThrottlingBytesSent = extractPrometheusMetric(throttlingNodePortMap['9879'],
-                                                                   'block_sync_bytes_sent',
-                                                                   response.text)
+                                                                    'block_sync_bytes_sent',
+                                                                    response)
+            startSyncThrottlingState = extractPrometheusMetric(throttlingNodePortMap['9879'],
+                                                               'block_sync_throttling',
+                                                               response)
             Print(f'Start sync throttling bytes sent: {startSyncThrottlingBytesSent}')
+            Print(f'Start sync throttling node throttling: {"True" if startSyncThrottlingState else "False"}')
             break
-    while True:
+    else:
+        errorExit('Timed out')
+
+    while time.time() < clusterStart + 30:
         try:
-            response = readMetrics(throttledNode.host, throttledNode.port)
-        except (requests.ConnectionError, requests.ReadTimeout) as e:
-            # waiting for node to finish startup and respond
+            response = throttledNode.processUrllibRequest('prometheus', 'metrics', returnType=ReturnType.raw, printReturnLimit=16).decode()
+        except urllib.error.URLError:
+            # catch ConnectionRefusedError waiting for node to finish startup and respond
             time.sleep(0.5)
+            continue
         else:
-            if 'nodeos_p2p_connections{connid_2' not in response.text:
+            if len(response) < 100:
+                # tolerate HTTPError as well (method returns only the exception code)
+                time.sleep(0.5)
+                continue
+            connPorts = prometheusHostPortPattern.findall(response)
+            if len(connPorts) < 2:
                 # wait for sending node to be connected
                 continue
             Print('Throttled Node Start State')
-            #Print(response.text)
-            connPorts = prometheusHostPortPattern.findall(response.text)
             throttledNodePortMap = {port: id for id, port in connPorts}
             startSyncThrottledBytesReceived = extractPrometheusMetric(throttledNodePortMap['9878'],
-                                                                      'block_sync_bytes_received',
-                                                                      response.text)
+                                                                        'block_sync_bytes_received',
+                                                                        response)
             Print(f'Start sync throttled bytes received: {startSyncThrottledBytesReceived}')
             break
+    else:
+        errorExit('Timed out')
 
     # Throttling node was offline during block generation and once online receives blocks as fast as possible while
     # transmitting blocks to the next node in line at the above throttle setting.
     assert throttlingNode.waitForBlock(endLargeBlocksHeadBlock), f'wait for block {endLargeBlocksHeadBlock}  on throttled node timed out'
     endThrottlingSync = time.time()
-    try:
-        response = readMetrics(throttlingNode.host, throttlingNode.port)
-    except (requests.ConnectionError, requests.ReadTimeout) as e:
-        errorExit(str(e))
-    else:
-        Print('Throttling Node End State')
-        #Print(response.text)
-        endSyncThrottlingBytesSent = extractPrometheusMetric(throttlingNodePortMap['9879'],
-                                                             'block_sync_bytes_sent',
-                                                             response.text)
-        Print(f'End sync throttling bytes sent: {endSyncThrottlingBytesSent}')
+    response = throttlingNode.processUrllibRequest('prometheus', 'metrics', exitOnError=True, returnType=ReturnType.raw, printReturnLimit=16).decode()
+    Print('Throttling Node End State')
+    endSyncThrottlingBytesSent = extractPrometheusMetric(throttlingNodePortMap['9879'],
+                                                            'block_sync_bytes_sent',
+                                                            response)
+    Print(f'End sync throttling bytes sent: {endSyncThrottlingBytesSent}')
     # Throttled node is connecting to a listen port with a block sync throttle applied so it will receive
     # blocks more slowly during syncing than an unthrottled node.
+    wasThrottled = False
+    while time.time() < endThrottlingSync + 30:
+        response = throttlingNode.processUrllibRequest('prometheus', 'metrics', exitOnError=True,
+                                                       returnType=ReturnType.raw, printReturnLimit=16).decode()
+        throttledState = extractPrometheusMetric(throttlingNodePortMap['9879'],
+                                                 'block_sync_throttling',
+                                                 response)
+        if throttledState:
+            wasThrottled = True
+            break
     assert throttledNode.waitForBlock(endLargeBlocksHeadBlock, timeout=90), f'Wait for block {endLargeBlocksHeadBlock} on sync node timed out'
     endThrottledSync = time.time()
-    try:
-        response = readMetrics(throttledNode.host, throttledNode.port)
-    except (requests.ConnectionError, requests.ReadTimeout) as e:
-        errorExit(str(e))
-    else:
-        Print('Throttled Node End State')
-        #Print(response.text)
-        endSyncThrottledBytesReceived = extractPrometheusMetric(throttledNodePortMap['9878'],
-                                                                'block_sync_bytes_received',
-                                                                response.text)
-        Print(f'End sync throttled bytes received: {endSyncThrottledBytesReceived}')
+    response = throttledNode.processUrllibRequest('prometheus', 'metrics', exitOnError=True, returnType=ReturnType.raw, printReturnLimit=16).decode()
+    Print('Throttled Node End State')
+    endSyncThrottledBytesReceived = extractPrometheusMetric(throttledNodePortMap['9878'],
+                                                            'block_sync_bytes_received',
+                                                            response)
+    Print(f'End sync throttled bytes received: {endSyncThrottledBytesReceived}')
     throttlingElapsed = endThrottlingSync - clusterStart
     throttledElapsed = endThrottledSync - clusterStart
     Print(f'Unthrottled sync time: {throttlingElapsed} seconds')
     Print(f'Throttled sync time: {throttledElapsed} seconds')
     # Sanity check
-    assert throttledElapsed > throttlingElapsed + 15, 'Throttled sync time must be at least 15 seconds greater than unthrottled'
-    # Calculate block receive rate
-    calculatedRate = (endSyncThrottledBytesReceived - startSyncThrottledBytesReceived)/throttledElapsed
-    #assert math.isclose(calculatedRate, 40000, rel_tol=0.01), f'Throttled bytes receive rate must be near 40,000, was {calculatedRate}'
-    assert calculatedRate < 40000, f'Throttled bytes receive rate must be less than 40,000, was {calculatedRate}'
+    assert throttledElapsed > throttlingElapsed + 10, 'Throttled sync time must be at least 10 seconds greater than unthrottled'
+    assert wasThrottled, 'Throttling node never reported throttling its transmission rate'
 
     testSuccessful=True
 finally:

From 962f30982a0dbf5bb31ec119e38e19351c0f9ddf Mon Sep 17 00:00:00 2001
From: Lin Huang <lin.huang@eosnetwork.com>
Date: Fri, 6 Oct 2023 17:00:53 -0400
Subject: [PATCH 42/61] restore api_tests' deferred_cfa_not_allowed and
 deferred_cfa_success

---
 unittests/api_tests.cpp | 57 ++++++++++++++++++++++++++++++++++++-----
 1 file changed, 50 insertions(+), 7 deletions(-)

diff --git a/unittests/api_tests.cpp b/unittests/api_tests.cpp
index 018e32d0ac..76b3457550 100644
--- a/unittests/api_tests.cpp
+++ b/unittests/api_tests.cpp
@@ -767,11 +767,11 @@ BOOST_FIXTURE_TEST_CASE(cfa_stateful_api, validating_tester)  try {
    BOOST_REQUIRE_EQUAL( validate(), true );
 } FC_LOG_AND_RETHROW()
 
-BOOST_FIXTURE_TEST_CASE(deferred_cfa_not_allowed, validating_tester)  try {
+BOOST_FIXTURE_TEST_CASE(deferred_cfa_failed, validating_tester)  try {
 
    create_account( "testapi"_n );
-   produce_blocks(1);
-   set_code( "testapi"_n, test_contracts::test_api_wasm() );
+	produce_blocks(1);
+	set_code( "testapi"_n, test_contracts::test_api_wasm() );
 
    account_name a = "testapi2"_n;
    account_name creator = config::system_account_name;
@@ -785,15 +785,58 @@ BOOST_FIXTURE_TEST_CASE(deferred_cfa_not_allowed, validating_tester)  try {
                                  .owner    = authority( get_public_key( a, "owner" ) ),
                                  .active   = authority( get_public_key( a, "active" ) )
                                  });
-   action act({}, test_api_action<TEST_METHOD("test_transaction", "context_free_api")>{});
+   action act({}, test_api_action<TEST_METHOD("test_transaction", "stateful_api")>{});
    trx.context_free_actions.push_back(act);
-   set_transaction_headers(trx, 10, 2); // set delay_sec to 2
+   set_transaction_headers(trx, 10, 2);
    trx.sign( get_private_key( creator, "active" ), control->get_chain_id()  );
+
    BOOST_CHECK_EXCEPTION(push_transaction( trx ), fc::exception,
       [&](const fc::exception &e) {
-         // any incoming trx is blocked
-         return expect_assert_message(e, "transaction cannot be delayed");
+         return expect_assert_message(e, "only context free api's can be used in this context");
+      });
+
+   produce_blocks(10);
+
+   // CFA failed, testapi2 not created
+   create_account( "testapi2"_n );
+
+   BOOST_REQUIRE_EQUAL( validate(), true );
+} FC_LOG_AND_RETHROW()
+
+BOOST_FIXTURE_TEST_CASE(deferred_cfa_success, validating_tester_no_disable_deferred_trx)  try {
+
+   create_account( "testapi"_n );
+	produce_blocks(1);
+	set_code( "testapi"_n, test_contracts::test_api_wasm() );
+
+   account_name a = "testapi2"_n;
+   account_name creator = config::system_account_name;
+   signed_transaction trx;
+   trx.actions.emplace_back( vector<permission_level>{{creator,config::active_name}},
+                                 newaccount{
+                                 .creator  = creator,
+                                 .name     = a,
+                                 .owner    = authority( get_public_key( a, "owner" ) ),
+                                 .active   = authority( get_public_key( a, "active" ) )
+                                 });
+   action act({}, test_api_action<TEST_METHOD("test_transaction", "context_free_api")>{});
+   trx.context_free_actions.push_back(act);
+   set_transaction_headers(trx, 10, 2);
+   trx.sign( get_private_key( creator, "active" ), control->get_chain_id()  );
+   auto trace = push_transaction( trx );
+   BOOST_REQUIRE(trace != nullptr);
+   if (trace) {
+      BOOST_REQUIRE_EQUAL(transaction_receipt_header::status_enum::delayed, trace->receipt->status);
+      BOOST_REQUIRE_EQUAL(1, trace->action_traces.size());
+   }
+   produce_blocks(10);
+
+   // CFA success, testapi2 created
+   BOOST_CHECK_EXCEPTION(create_account( "testapi2"_n ), fc::exception,
+      [&](const fc::exception &e) {
+         return expect_assert_message(e, "Cannot create account named testapi2, as that name is already taken");
       });
+   BOOST_REQUIRE_EQUAL( validate(), true );
 } FC_LOG_AND_RETHROW()
 
 BOOST_AUTO_TEST_CASE(light_validation_skip_cfa) try {

From d5045179cc75007c4949901c99447dd30590758b Mon Sep 17 00:00:00 2001
From: Lin Huang <lin.huang@eosnetwork.com>
Date: Fri, 6 Oct 2023 17:20:58 -0400
Subject: [PATCH 43/61] restore delayed trx processing in transaction_context

---
 libraries/chain/controller.cpp                |  8 ++-
 .../eosio/chain/transaction_context.hpp       |  2 +
 libraries/chain/transaction_context.cpp       | 57 ++++++++++++++++++-
 3 files changed, 62 insertions(+), 5 deletions(-)

diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp
index df5d2c8bde..ca586a3af6 100644
--- a/libraries/chain/controller.cpp
+++ b/libraries/chain/controller.cpp
@@ -1595,12 +1595,14 @@ struct controller_impl {
                                                trx->packed_trx()->get_prunable_size() );
             }
 
+            trx_context.delay = fc::seconds(trn.delay_sec);
+
             if( check_auth ) {
                authorization.check_authorization(
                        trn.actions,
                        trx->recovered_keys(),
                        {},
-                       fc::seconds(trn.delay_sec),
+                       trx_context.delay,
                        [&trx_context](){ trx_context.checktime(); },
                        false,
                        trx->is_dry_run()
@@ -1613,7 +1615,9 @@ struct controller_impl {
 
             trx->billed_cpu_time_us = trx_context.billed_cpu_time_us;
             if (!trx->implicit() && !trx->is_read_only()) {
-               transaction_receipt::status_enum s = transaction_receipt::executed;
+               transaction_receipt::status_enum s = (trx_context.delay == fc::seconds(0))
+                                                    ? transaction_receipt::executed
+                                                    : transaction_receipt::delayed;
                trace->receipt = push_receipt(*trx->packed_trx(), s, trx_context.billed_cpu_time_us, trace->net_usage);
                std::get<building_block>(pending->_block_stage)._pending_trx_metas.emplace_back(trx);
             } else {
diff --git a/libraries/chain/include/eosio/chain/transaction_context.hpp b/libraries/chain/include/eosio/chain/transaction_context.hpp
index 18d3c31e65..430defce27 100644
--- a/libraries/chain/include/eosio/chain/transaction_context.hpp
+++ b/libraries/chain/include/eosio/chain/transaction_context.hpp
@@ -112,6 +112,7 @@ namespace eosio { namespace chain {
 
          void execute_action( uint32_t action_ordinal, uint32_t recurse_depth );
 
+         void schedule_transaction();
          void record_transaction( const transaction_id_type& id, fc::time_point_sec expire );
 
          void validate_cpu_usage_to_bill( int64_t billed_us, int64_t account_cpu_limit, bool check_minimum, int64_t subjective_billed_us )const;
@@ -142,6 +143,7 @@ namespace eosio { namespace chain {
          /// the maximum number of virtual CPU instructions of the transaction that can be safely billed to the billable accounts
          uint64_t                      initial_max_billable_cpu = 0;
 
+         fc::microseconds              delay;
          bool                          is_input           = false;
          bool                          apply_context_free = true;
          bool                          enforce_whiteblacklist = true;
diff --git a/libraries/chain/transaction_context.cpp b/libraries/chain/transaction_context.cpp
index cd6b872955..d40f474258 100644
--- a/libraries/chain/transaction_context.cpp
+++ b/libraries/chain/transaction_context.cpp
@@ -247,7 +247,9 @@ namespace eosio { namespace chain {
                                                  uint64_t packed_trx_prunable_size )
    {
       const transaction& trx = packed_trx.get_transaction();
-      EOS_ASSERT( trx.delay_sec.value == 0, transaction_exception, "transaction cannot be delayed" );
+      if ( is_transient() ) {
+         EOS_ASSERT( trx.delay_sec.value == 0, transaction_exception, "transaction cannot be delayed" );
+      }
       if( trx.transaction_extensions.size() > 0 ) {
          disallow_transaction_extensions( "no transaction extensions supported yet for input transactions" );
       }
@@ -266,6 +268,13 @@ namespace eosio { namespace chain {
       uint64_t initial_net_usage = static_cast<uint64_t>(cfg.base_per_transaction_net_usage)
                                     + packed_trx_unprunable_size + discounted_size_for_pruned_data;
 
+      if( trx.delay_sec.value > 0 ) {
+          // If delayed, also charge ahead of time for the additional net usage needed to retire the delayed transaction
+          // whether that be by successfully executing, soft failure, hard failure, or expiration.
+         initial_net_usage += static_cast<uint64_t>(cfg.base_per_transaction_net_usage)
+                               + static_cast<uint64_t>(config::transaction_id_net_usage);
+      }
+
       published = control.pending_block_time();
       is_input = true;
       if (!control.skip_trx_checks()) {
@@ -309,8 +318,10 @@ namespace eosio { namespace chain {
          }
       }
 
-      for( const auto& act : trx.actions ) {
-         schedule_action( act, act.account, false, 0, 0 );
+      if( delay == fc::microseconds() ) {
+         for( const auto& act : trx.actions ) {
+            schedule_action( act, act.account, false, 0, 0 );
+         }
       }
 
       auto& action_traces = trace->action_traces;
@@ -318,6 +329,10 @@ namespace eosio { namespace chain {
       for( uint32_t i = 1; i <= num_original_actions_to_execute; ++i ) {
          execute_action( i, 0 );
       }
+
+      if( delay != fc::microseconds() ) {
+         schedule_transaction();
+      }
    }
 
    void transaction_context::finalize() {
@@ -715,6 +730,42 @@ namespace eosio { namespace chain {
       acontext.exec();
    }
 
+   void transaction_context::schedule_transaction() {
+      // Charge ahead of time for the additional net usage needed to retire the delayed transaction
+      // whether that be by successfully executing, soft failure, hard failure, or expiration.
+      const transaction& trx = packed_trx.get_transaction();
+      if( trx.delay_sec.value == 0 ) { // Do not double bill. Only charge if we have not already charged for the delay.
+         const auto& cfg = control.get_global_properties().configuration;
+         add_net_usage( static_cast<uint64_t>(cfg.base_per_transaction_net_usage)
+                         + static_cast<uint64_t>(config::transaction_id_net_usage) ); // Will exit early if net usage cannot be paid.
+      }
+
+      auto first_auth = trx.first_authorizer();
+
+      uint32_t trx_size = 0;
+      const auto& cgto = control.mutable_db().create<generated_transaction_object>( [&]( auto& gto ) {
+        gto.trx_id      = id;
+        gto.payer       = first_auth;
+        gto.sender      = account_name(); /// delayed transactions have no sender
+        gto.sender_id   = transaction_id_to_sender_id( gto.trx_id );
+        gto.published   = control.pending_block_time();
+        gto.delay_until = gto.published + delay;
+        gto.expiration  = gto.delay_until + fc::seconds(control.get_global_properties().configuration.deferred_trx_expiration_window);
+        trx_size = gto.set( trx );
+
+        if (auto dm_logger = control.get_deep_mind_logger(is_transient())) {
+           std::string event_id = RAM_EVENT_ID("${id}", ("id", gto.id));
+
+           dm_logger->on_create_deferred(deep_mind_handler::operation_qualifier::push, gto, packed_trx);
+           dm_logger->on_ram_trace(std::move(event_id), "deferred_trx", "push", "deferred_trx_pushed");
+        }
+      });
+
+      int64_t ram_delta = (config::billable_size_v<generated_transaction_object> + trx_size);
+      add_ram_usage( cgto.payer, ram_delta );
+      trace->account_ram_delta = account_delta( cgto.payer, ram_delta );
+   }
+
    void transaction_context::record_transaction( const transaction_id_type& id, fc::time_point_sec expire ) {
       try {
           control.mutable_db().create<transaction_object>([&](transaction_object& transaction) {

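The ahead-of-time billing restored in this patch reduces to a small amount of arithmetic: a delayed input transaction pre-pays one extra base-per-transaction net allowance plus the transaction-id allowance, so that retiring the generated deferred copy later (execution, soft failure, hard failure, or expiration) is already covered. A minimal standalone sketch of that calculation, using hypothetical constants in place of the chain's on-chain configuration values:

#include <cstdint>
#include <iostream>

// Hypothetical stand-ins; the real values come from the chain's
// global_property_object configuration and eosio::chain::config.
constexpr uint64_t base_per_transaction_net_usage = 12;
constexpr uint64_t transaction_id_net_usage       = 32;

// Mirrors the shape of the initial net usage charge for an input transaction:
// a delayed transaction is additionally charged up front for its eventual retirement.
uint64_t initial_net_usage(uint64_t unprunable_size, uint64_t discounted_prunable_size,
                           uint32_t delay_sec) {
   uint64_t usage = base_per_transaction_net_usage + unprunable_size + discounted_prunable_size;
   if (delay_sec > 0)
      usage += base_per_transaction_net_usage + transaction_id_net_usage;
   return usage;
}

int main() {
   std::cout << initial_net_usage(200, 80, 0)  << "\n"; // immediate transaction: 292
   std::cout << initial_net_usage(200, 80, 10) << "\n"; // delayed transaction:   292 + 12 + 32 = 336
}

This also explains the guard in schedule_transaction() above: the same two allowances are added there only when delay_sec is zero, so a delayed input transaction that was already charged during initialization is not billed twice.
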
From 13369e263bac3fa235d49245b2373337e0bb7027 Mon Sep 17 00:00:00 2001
From: Lin Huang <lin.huang@eosnetwork.com>
Date: Fri, 6 Oct 2023 17:21:59 -0400
Subject: [PATCH 44/61] block incoming delayed trxs in producer_plugin

---
 plugins/producer_plugin/producer_plugin.cpp | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp
index b53090a7a6..d574254c73 100644
--- a/plugins/producer_plugin/producer_plugin.cpp
+++ b/plugins/producer_plugin/producer_plugin.cpp
@@ -773,6 +773,10 @@ class producer_plugin_impl : public std::enable_shared_from_this<producer_plugin
                                       transaction_metadata::trx_type       trx_type,
                                       bool                                 return_failure_traces,
                                       next_function<transaction_trace_ptr> next) {
+
+      const transaction& t = trx->get_transaction();
+      EOS_ASSERT( t.delay_sec.value == 0, transaction_exception, "transaction cannot be delayed" );
+
       if (trx_type == transaction_metadata::trx_type::read_only) {
          assert(_ro_thread_pool_size > 0); // enforced by chain_plugin
          assert(app().executor().get_main_thread_id() != std::this_thread::get_id()); // should only be called from read only threads

From 170c1a55d9cafe3753aa6cca6b1b2020683c4321 Mon Sep 17 00:00:00 2001
From: Lin Huang <lin.huang@eosnetwork.com>
Date: Fri, 6 Oct 2023 17:22:45 -0400
Subject: [PATCH 45/61] update delay_tests

---
 unittests/delay_tests.cpp | 51 ++++++++++++---------------------------
 1 file changed, 15 insertions(+), 36 deletions(-)

diff --git a/unittests/delay_tests.cpp b/unittests/delay_tests.cpp
index f62e9873a2..e5a993e1a7 100644
--- a/unittests/delay_tests.cpp
+++ b/unittests/delay_tests.cpp
@@ -190,18 +190,18 @@ static asset get_currency_balance(const validating_tester& chain, account_name a
 
 BOOST_AUTO_TEST_SUITE(delay_tests)
 
-// Delayed trxs are blocked.
-BOOST_FIXTURE_TEST_CASE( delayed_trx_blocked, validating_tester ) { try {
+BOOST_FIXTURE_TEST_CASE( delay_error_create_account, validating_tester_no_disable_deferred_trx) { try {
+
    produce_blocks(2);
    signed_transaction trx;
 
    account_name a = "newco"_n;
    account_name creator = config::system_account_name;
 
-   auto owner_auth = authority( get_public_key( a, "owner" ) );
+   auto owner_auth =  authority( get_public_key( a, "owner" ) );
    trx.actions.emplace_back( vector<permission_level>{{creator,config::active_name}},
                              newaccount{
-                                .creator  = creator,
+                                .creator  = "bad"_n, /// a does not exist, this should error when execute
                                 .name     = a,
                                 .owner    = owner_auth,
                                 .active   = authority( get_public_key( a, "active" ) )
@@ -210,42 +210,21 @@ BOOST_FIXTURE_TEST_CASE( delayed_trx_blocked, validating_tester ) { try {
    trx.delay_sec = 3;
    trx.sign( get_private_key( creator, "active" ), control->get_chain_id()  );
 
-   // delayed trx is blocked
-   BOOST_CHECK_EXCEPTION(push_transaction( trx ), fc::exception,
-      [&](const fc::exception &e) {
-         return expect_assert_message(e, "transaction cannot be delayed");
-      });
+   ilog( fc::json::to_pretty_string(trx) );
+   auto trace = push_transaction( trx );
+   edump((*trace));
 
-   // no deferred trx was generated
-   auto gen_size = control->db().get_index<generated_transaction_multi_index,by_trx_id>().size();
-   BOOST_REQUIRE_EQUAL(0u, gen_size);
-} FC_LOG_AND_RETHROW() }/// delayed_trx_blocked
+   produce_blocks(6);
 
-// Delayed actions are blocked.
-BOOST_AUTO_TEST_CASE( delayed_action_blocked ) { try {
-   validating_tester chain;
-   const auto& tester_account = "tester"_n;
-
-   chain.create_account("tester"_n);
-   chain.produce_blocks();
+   auto scheduled_trxs = get_scheduled_transactions();
+   BOOST_REQUIRE_EQUAL(scheduled_trxs.size(), 1u);
 
-   // delayed action is blocked
-   BOOST_CHECK_EXCEPTION(
-      chain.push_action(config::system_account_name, updateauth::get_name(), tester_account, fc::mutable_variant_object()
-           ("account", "tester")
-           ("permission", "first")
-           ("parent", "active")
-           ("auth",  authority(chain.get_public_key(tester_account, "first"))),
-           20, 10),
-      fc::exception,
-      [&](const fc::exception &e) {
-         return expect_assert_message(e, "transaction cannot be delayed");
-      });
+   auto billed_cpu_time_us = control->get_global_properties().configuration.min_transaction_cpu_usage;
+   auto dtrace = control->push_scheduled_transaction(scheduled_trxs.front(), fc::time_point::maximum(), fc::microseconds::maximum(), billed_cpu_time_us, true);
+   BOOST_REQUIRE_EQUAL(dtrace->except.has_value(), true);
+   BOOST_REQUIRE_EQUAL(dtrace->except->code(), missing_auth_exception::code_value);
 
-   // no deferred trx was generated
-   auto gen_size = chain.control->db().get_index<generated_transaction_multi_index,by_trx_id>().size();
-   BOOST_REQUIRE_EQUAL(0u, gen_size);
-} FC_LOG_AND_RETHROW() }/// delayed_action_blocked
+} FC_LOG_AND_RETHROW() }
 
 // test link to permission with delay directly on it
 BOOST_AUTO_TEST_CASE( link_delay_direct_test ) { try {

From db34bbf35fce488a53cf9740bb04c7dce0a4d2bc Mon Sep 17 00:00:00 2001
From: Jonathan Giszczak <jonrg@hypercubepc.com>
Date: Fri, 6 Oct 2023 16:38:05 -0500
Subject: [PATCH 46/61] Revise for better repeatability.

---
 tests/p2p_sync_throttle_test.py | 27 +++++++++++----------------
 1 file changed, 11 insertions(+), 16 deletions(-)

diff --git a/tests/p2p_sync_throttle_test.py b/tests/p2p_sync_throttle_test.py
index b8cec9bda9..da101eb9b8 100755
--- a/tests/p2p_sync_throttle_test.py
+++ b/tests/p2p_sync_throttle_test.py
@@ -94,7 +94,7 @@ def extractPrometheusMetric(connID: str, metric: str, text: str):
     beginLargeBlocksHeadBlock = nonProdNode.getHeadBlockNum()
 
     Print("Configure and launch txn generators")
-    targetTpsPerGenerator = 200
+    targetTpsPerGenerator = 500
     testTrxGenDurationSec=60
     trxGeneratorCnt=1
     cluster.launchTrxGenerators(contractOwnerAcctName=cluster.eosioAccount.name, acctNamesList=[accounts[0].name,accounts[1].name],
@@ -106,10 +106,10 @@ def extractPrometheusMetric(connID: str, metric: str, text: str):
     throttlingNode = cluster.unstartedNodes[0]
     i = throttlingNode.cmd.index('--p2p-listen-endpoint')
     throttleListenAddr = throttlingNode.cmd[i+1]
-    # Using 5000 bytes per second to allow syncing of ~100 transaction blocks resulting from
+    # Using 4000 bytes per second to allow syncing of ~250 transaction blocks resulting from
     # the trx generators in a reasonable amount of time, while still being able to capture
     # throttling state within the Prometheus update window (3 seconds in this test).
-    throttlingNode.cmd[i+1] = throttlingNode.cmd[i+1] + ':5000B/s'
+    throttlingNode.cmd[i+1] = throttlingNode.cmd[i+1] + ':4000B/s'
     throttleListenIP, throttleListenPort = throttleListenAddr.split(':')
     throttlingNode.cmd.append('--p2p-listen-endpoint')
     throttlingNode.cmd.append(f'{throttleListenIP}:{int(throttleListenPort)+100}:1TB/s')
@@ -119,7 +119,7 @@ def extractPrometheusMetric(connID: str, metric: str, text: str):
     cluster.launchUnstarted(2)
 
     throttledNode = cluster.getNode(3)
-    while time.time() < clusterStart + 30:
+    while True:
         try:
             response = throttlingNode.processUrllibRequest('prometheus', 'metrics', returnType=ReturnType.raw, printReturnLimit=16).decode()
         except urllib.error.URLError:
@@ -145,11 +145,10 @@ def extractPrometheusMetric(connID: str, metric: str, text: str):
                                                                response)
             Print(f'Start sync throttling bytes sent: {startSyncThrottlingBytesSent}')
             Print(f'Start sync throttling node throttling: {"True" if startSyncThrottlingState else "False"}')
+            if time.time() > clusterStart + 30: errorExit('Timed out')
             break
-    else:
-        errorExit('Timed out')
 
-    while time.time() < clusterStart + 30:
+    while True:
         try:
             response = throttledNode.processUrllibRequest('prometheus', 'metrics', returnType=ReturnType.raw, printReturnLimit=16).decode()
         except urllib.error.URLError:
@@ -168,12 +167,10 @@ def extractPrometheusMetric(connID: str, metric: str, text: str):
             Print('Throttled Node Start State')
             throttledNodePortMap = {port: id for id, port in connPorts}
             startSyncThrottledBytesReceived = extractPrometheusMetric(throttledNodePortMap['9878'],
-                                                                        'block_sync_bytes_received',
-                                                                        response)
+                                                                      'block_sync_bytes_received',
+                                                                      response)
             Print(f'Start sync throttled bytes received: {startSyncThrottledBytesReceived}')
             break
-    else:
-        errorExit('Timed out')
 
     # Throttling node was offline during block generation and once online receives blocks as fast as possible while
     # transmitting blocks to the next node in line at the above throttle setting.
@@ -182,8 +179,8 @@ def extractPrometheusMetric(connID: str, metric: str, text: str):
     response = throttlingNode.processUrllibRequest('prometheus', 'metrics', exitOnError=True, returnType=ReturnType.raw, printReturnLimit=16).decode()
     Print('Throttling Node End State')
     endSyncThrottlingBytesSent = extractPrometheusMetric(throttlingNodePortMap['9879'],
-                                                            'block_sync_bytes_sent',
-                                                            response)
+                                                         'block_sync_bytes_sent',
+                                                         response)
     Print(f'End sync throttling bytes sent: {endSyncThrottlingBytesSent}')
     # Throttled node is connecting to a listen port with a block sync throttle applied so it will receive
     # blocks more slowly during syncing than an unthrottled node.
@@ -197,7 +194,7 @@ def extractPrometheusMetric(connID: str, metric: str, text: str):
         if throttledState:
             wasThrottled = True
             break
-    assert throttledNode.waitForBlock(endLargeBlocksHeadBlock, timeout=90), f'Wait for block {endLargeBlocksHeadBlock} on sync node timed out'
+    assert throttledNode.waitForBlock(endLargeBlocksHeadBlock, timeout=30), f'Wait for block {endLargeBlocksHeadBlock} on sync node timed out'
     endThrottledSync = time.time()
     response = throttledNode.processUrllibRequest('prometheus', 'metrics', exitOnError=True, returnType=ReturnType.raw, printReturnLimit=16).decode()
     Print('Throttled Node End State')
@@ -209,8 +206,6 @@ def extractPrometheusMetric(connID: str, metric: str, text: str):
     throttledElapsed = endThrottledSync - clusterStart
     Print(f'Unthrottled sync time: {throttlingElapsed} seconds')
     Print(f'Throttled sync time: {throttledElapsed} seconds')
-    # Sanity check
-    assert throttledElapsed > throttlingElapsed + 10, 'Throttled sync time must be at least 10 seconds greater than unthrottled'
     assert wasThrottled, 'Throttling node never reported throttling its transmission rate'
 
     testSuccessful=True

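The throttle in this test is expressed as a rate suffix on the listen endpoint ('4000B/s' for the throttled listener, '1TB/s' for the effectively unthrottled one). As an illustration only, a parser for that kind of rate string could look like the sketch below; the accepted unit prefixes and the exact grammar are assumptions here, not the plugin's actual implementation:

#include <cstdint>
#include <iostream>
#include <regex>
#include <stdexcept>
#include <string>

// Illustrative only: turn a rate string such as "4000B/s" or "1TB/s" into
// bytes per second. The decimal unit prefixes accepted below are an assumption.
uint64_t parse_rate_limit(const std::string& s) {
   static const std::regex re(R"(^(\d+)\s*([KMGT]?)B/s$)");
   std::smatch m;
   if (!std::regex_match(s, m, re))
      throw std::invalid_argument("unrecognized rate limit: " + s);
   uint64_t value      = std::stoull(m[1].str());
   uint64_t multiplier = 1;
   const std::string prefix = m[2].str();
   if      (prefix == "K") multiplier = 1000ull;
   else if (prefix == "M") multiplier = 1000ull * 1000;
   else if (prefix == "G") multiplier = 1000ull * 1000 * 1000;
   else if (prefix == "T") multiplier = 1000ull * 1000 * 1000 * 1000;
   return value * multiplier;
}

int main() {
   std::cout << parse_rate_limit("4000B/s") << "\n"; // 4000
   std::cout << parse_rate_limit("1TB/s")   << "\n"; // 1000000000000
}
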
From 222e2d2aa3ee6f6142be5902947ec076b65e671b Mon Sep 17 00:00:00 2001
From: Lin Huang <lin.huang@eosnetwork.com>
Date: Fri, 6 Oct 2023 18:18:43 -0400
Subject: [PATCH 47/61] do not trigger generated_transaction_object (for
 delayed trxs) in nodeos_chainbase_allocation_test.py

---
 tests/nodeos_chainbase_allocation_test.py | 7 -------
 1 file changed, 7 deletions(-)

diff --git a/tests/nodeos_chainbase_allocation_test.py b/tests/nodeos_chainbase_allocation_test.py
index 5771428b80..4c2ec8ee21 100755
--- a/tests/nodeos_chainbase_allocation_test.py
+++ b/tests/nodeos_chainbase_allocation_test.py
@@ -31,7 +31,6 @@
     # The following is the list of chainbase objects that need to be verified:
     # - account_object (bootstrap)
     # - code_object (bootstrap)
-    # - generated_transaction_object
     # - global_property_object
     # - key_value_object (bootstrap)
     # - protocol_state_object (bootstrap)
@@ -55,12 +54,6 @@
     irrNode = cluster.getNode(irrNodeId)
     nonProdNode = cluster.getNode(nonProdNodeId)
 
-    # Create delayed transaction to create "generated_transaction_object"
-    cmd = "create account -j eosio sample EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV\
-         EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV --delay-sec 600 -p eosio"
-    trans = producerNode.processCleosCmd(cmd, cmd, silentErrors=False)
-    assert trans
-
     # Schedule a new producer to trigger new producer schedule for "global_property_object"
     newProducerAcc = Account("newprod")
     newProducerAcc.ownerPublicKey = "EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV"

From 6db4ad8aa535d0d4aef970ed4b74ef485be537d5 Mon Sep 17 00:00:00 2001
From: Jonathan Giszczak <jonrg@hypercubepc.com>
Date: Fri, 6 Oct 2023 18:33:30 -0500
Subject: [PATCH 48/61] Customize plugin_config_exception handling in
 net_plugin.

---
 plugins/net_plugin/net_plugin.cpp | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp
index 565729e203..c33d5d2e80 100644
--- a/plugins/net_plugin/net_plugin.cpp
+++ b/plugins/net_plugin/net_plugin.cpp
@@ -4340,6 +4340,10 @@ namespace eosio {
                fc::create_listener<tcp>(
                      my->thread_pool.get_executor(), logger, accept_timeout, listen_addr, extra_listening_log_info,
                      [my = my, addr = p2p_addr, block_sync_rate_limit = block_sync_rate_limit](tcp::socket&& socket) { fc_dlog( logger, "start listening on ${addr} with peer sync throttle ${limit}", ("addr", addr)("limit", block_sync_rate_limit)); my->create_session(std::move(socket), addr, block_sync_rate_limit); });
+            } catch (const plugin_config_exception& e) {
+               fc_elog( logger, "${msg}", ("msg", e.top_message()));
+               app().quit();
+               return;
             } catch (const std::exception& e) {
                fc_elog( logger, "net_plugin::plugin_startup failed to listen on ${addr}, ${what}",
                      ("addr", address)("what", e.what()) );

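The handler added in this patch separates fatal configuration problems from ordinary listen failures: a plugin_config_exception is logged and the application is asked to quit, while any other exception falls through to the existing per-address error path. A minimal sketch of that layered catch pattern, with a hypothetical config_error type and start_listener() helper standing in for the real plugin types:

#include <exception>
#include <iostream>
#include <stdexcept>
#include <string>
#include <vector>

// Hypothetical stand-in for a fatal configuration exception.
struct config_error : std::runtime_error {
   using std::runtime_error::runtime_error;
};

// Hypothetical listener setup: a bad rate limit is a configuration error,
// an occupied address is an ordinary runtime failure.
void start_listener(const std::string& addr) {
   if (addr.find(":bad-rate") != std::string::npos)
      throw config_error("invalid block sync rate limit on " + addr);
   if (addr.find(":in-use") != std::string::npos)
      throw std::runtime_error("address already in use");
}

int main() {
   const std::vector<std::string> addrs = {"127.0.0.1:9876", "127.0.0.1:9976:in-use", "127.0.0.1:9877:bad-rate"};
   bool quit = false;
   for (const auto& addr : addrs) {
      try {
         start_listener(addr);
         std::cout << "listening on " << addr << "\n";
      } catch (const config_error& e) {
         std::cout << "fatal configuration error: " << e.what() << "\n";
         quit = true; // mirrors calling app().quit(): stop the whole application
         break;
      } catch (const std::exception& e) {
         std::cout << "failed to listen on " << addr << ": " << e.what() << "\n";
         // non-fatal: keep going with the remaining listen addresses
      }
   }
   return quit ? 1 : 0;
}
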
From d85e86fb106d507965b17a01f879ff54c07f371b Mon Sep 17 00:00:00 2001
From: Lin Huang <lin.huang@eosnetwork.com>
Date: Mon, 9 Oct 2023 15:19:29 -0400
Subject: [PATCH 49/61] do not validate a block containing delayed transactions
 after DISABLE_DEFERRED_TRXS_STAGE_2 is activated

---
 libraries/chain/transaction_context.cpp | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/libraries/chain/transaction_context.cpp b/libraries/chain/transaction_context.cpp
index d40f474258..541cc3d6fb 100644
--- a/libraries/chain/transaction_context.cpp
+++ b/libraries/chain/transaction_context.cpp
@@ -247,7 +247,10 @@ namespace eosio { namespace chain {
                                                  uint64_t packed_trx_prunable_size )
    {
       const transaction& trx = packed_trx.get_transaction();
-      if ( is_transient() ) {
+      // delayed transactions are not allowed after protocol feature
+      // DISABLE_DEFERRED_TRXS_STAGE_2 is activated;
+      // read-only and dry-run transactions are not allowed to be delayed at any time
+      if( control.is_builtin_activated(builtin_protocol_feature_t::disable_deferred_trxs_stage_2) || is_transient() ) {
          EOS_ASSERT( trx.delay_sec.value == 0, transaction_exception, "transaction cannot be delayed" );
       }
       if( trx.transaction_extensions.size() > 0 ) {

From 6f9b0b7e0e5328a3c03521a9d64038c790cd161e Mon Sep 17 00:00:00 2001
From: Lin Huang <lin.huang@eosnetwork.com>
Date: Mon, 9 Oct 2023 15:58:10 -0400
Subject: [PATCH 50/61] changed to not validate a block containing deferred
 trxs after DISABLE_DEFERRED_TRXS_STAGE_1 instead of
 DISABLE_DEFERRED_TRXS_STAGE_2

---
 libraries/chain/transaction_context.cpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/libraries/chain/transaction_context.cpp b/libraries/chain/transaction_context.cpp
index 541cc3d6fb..262d7995a7 100644
--- a/libraries/chain/transaction_context.cpp
+++ b/libraries/chain/transaction_context.cpp
@@ -248,9 +248,9 @@ namespace eosio { namespace chain {
    {
       const transaction& trx = packed_trx.get_transaction();
       // delayed transactions are not allowed after protocol feature
-      // DISABLE_DEFERRED_TRXS_STAGE_2 is activated;
+      // DISABLE_DEFERRED_TRXS_STAGE_1 is activated;
       // read-only and dry-run transactions are not allowed to be delayed at any time
-      if( control.is_builtin_activated(builtin_protocol_feature_t::disable_deferred_trxs_stage_2) || is_transient() ) {
+      if( control.is_builtin_activated(builtin_protocol_feature_t::disable_deferred_trxs_stage_1) || is_transient() ) {
          EOS_ASSERT( trx.delay_sec.value == 0, transaction_exception, "transaction cannot be delayed" );
       }
       if( trx.transaction_extensions.size() > 0 ) {

From e8e0b436b433704bca65d953a426824f187b1659 Mon Sep 17 00:00:00 2001
From: Lin Huang <lin.huang@eosnetwork.com>
Date: Mon, 9 Oct 2023 16:31:15 -0400
Subject: [PATCH 51/61] update api_tests' deferred_cfa_failed

---
 unittests/api_tests.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/unittests/api_tests.cpp b/unittests/api_tests.cpp
index 76b3457550..20eced06f6 100644
--- a/unittests/api_tests.cpp
+++ b/unittests/api_tests.cpp
@@ -767,7 +767,7 @@ BOOST_FIXTURE_TEST_CASE(cfa_stateful_api, validating_tester)  try {
    BOOST_REQUIRE_EQUAL( validate(), true );
 } FC_LOG_AND_RETHROW()
 
-BOOST_FIXTURE_TEST_CASE(deferred_cfa_failed, validating_tester)  try {
+BOOST_FIXTURE_TEST_CASE(deferred_cfa_failed, validating_tester_no_disable_deferred_trx)  try {
 
    create_account( "testapi"_n );
 	produce_blocks(1);

From 7d8445fee5cd8e870847a7c2d2cf97f5fc9a8d98 Mon Sep 17 00:00:00 2001
From: Lin Huang <lin.huang@eosnetwork.com>
Date: Mon, 9 Oct 2023 16:47:52 -0400
Subject: [PATCH 52/61] add a test to invalidate blocks containing deferred trxs
 after disable_deferred_trxs_stage_1 is activated

---
 unittests/protocol_feature_tests.cpp | 63 ++++++++++++++++++++++++++++
 1 file changed, 63 insertions(+)

diff --git a/unittests/protocol_feature_tests.cpp b/unittests/protocol_feature_tests.cpp
index 86cf9ad719..6019b53da1 100644
--- a/unittests/protocol_feature_tests.cpp
+++ b/unittests/protocol_feature_tests.cpp
@@ -2183,4 +2183,67 @@ BOOST_AUTO_TEST_CASE( disable_deferred_trxs_stage_2_dependency_test ) { try {
       fc_exception_message_starts_with("not all dependencies of protocol feature with digest"));
 } FC_LOG_AND_RETHROW() } /// disable_deferred_trxs_stage_2_dependency_test
 
+// Verify a block containing delayed transactions is not validated
+// after DISABLE_DEFERRED_TRXS_STAGE_1 is activated
+BOOST_AUTO_TEST_CASE( disable_deferred_trxs_stage_1_block_validate_test ) { try {
+   tester_no_disable_deferred_trx tester1;
+
+   // Activate DISABLE_DEFERRED_TRXS_STAGE_1 such that tester1
+   // matches tester2 below
+   const auto& pfm1 = tester1.control->get_protocol_feature_manager();
+   auto d1 = pfm1.get_builtin_digest( builtin_protocol_feature_t::disable_deferred_trxs_stage_1 );
+   BOOST_REQUIRE( d1 );
+   tester1.preactivate_protocol_features( {*d1} );
+   tester1.produce_block();
+
+   // Create a block with valid transaction
+   tester1.create_account("newacc"_n);
+   auto b = tester1.produce_block();
+
+   // Make a copy of the block
+   auto copy_b = std::make_shared<signed_block>(std::move(*b));
+   // Retrieve the last transaction
+   auto signed_tx = std::get<packed_transaction>(copy_b->transactions.back().trx).get_signed_transaction();
+   // Make a delayed transaction by forcing delay_sec greater than 0
+   signed_tx.delay_sec = 120;
+   // Re-sign the transaction
+   signed_tx.signatures.clear();
+   signed_tx.sign(tester1.get_private_key(config::system_account_name, "active"), tester1.control->get_chain_id());
+   // Replace the original transaction with the delayed transaction
+   auto delayed_tx = packed_transaction(signed_tx);
+   copy_b->transactions.back().trx = std::move(delayed_tx);
+
+   // Re-calculate the transaction merkle
+   deque<digest_type> trx_digests;
+   const auto& trxs = copy_b->transactions;
+   for( const auto& a : trxs )
+      trx_digests.emplace_back( a.digest() );
+   copy_b->transaction_mroot = merkle( std::move(trx_digests) );
+
+   // Re-sign the block
+   auto header_bmroot = digest_type::hash( std::make_pair( copy_b->digest(), tester1.control->head_block_state()->blockroot_merkle.get_root() ) );
+   auto sig_digest = digest_type::hash( std::make_pair(header_bmroot, tester1.control->head_block_state()->pending_schedule.schedule_hash) );
+   copy_b->producer_signature = tester1.get_private_key(config::system_account_name, "active").sign(sig_digest);
+
+   // Create the second chain
+   tester_no_disable_deferred_trx tester2;
+   // Activate DISABLE_DEFERRED_TRXS_STAGE_1 on the second chain
+   const auto& pfm2 = tester2.control->get_protocol_feature_manager();
+   auto d2 = pfm2.get_builtin_digest( builtin_protocol_feature_t::disable_deferred_trxs_stage_1 );
+   BOOST_REQUIRE( d2 );
+   tester2.preactivate_protocol_features( {*d2} );
+   tester2.produce_block();
+
+   // Push the block with delayed transaction to the second chain
+   auto bsf = tester2.control->create_block_state_future( copy_b->calculate_id(), copy_b );
+   tester2.control->abort_block();
+   controller::block_report br;
+
+   // The block is invalidated
+   BOOST_REQUIRE_EXCEPTION(tester2.control->push_block( br, bsf.get(), forked_branch_callback{}, trx_meta_cache_lookup{} ),
+      fc::exception,
+      fc_exception_message_starts_with("transaction cannot be delayed")
+   );
+} FC_LOG_AND_RETHROW() } /// disable_deferred_trxs_stage_1_block_validate_test
+
 BOOST_AUTO_TEST_SUITE_END()

From c87d87f36957b8b3faba72bb96821904f1aa0248 Mon Sep 17 00:00:00 2001
From: Lin Huang <lin.huang@eosnetwork.com>
Date: Tue, 10 Oct 2023 13:18:21 -0400
Subject: [PATCH 53/61] add a block validation test before
 DISABLE_DEFERRED_TRXS_STAGE_1 is activated

---
 unittests/protocol_feature_tests.cpp | 23 +++++++++++++++++++++--
 1 file changed, 21 insertions(+), 2 deletions(-)

diff --git a/unittests/protocol_feature_tests.cpp b/unittests/protocol_feature_tests.cpp
index 6019b53da1..022e973901 100644
--- a/unittests/protocol_feature_tests.cpp
+++ b/unittests/protocol_feature_tests.cpp
@@ -2183,9 +2183,28 @@ BOOST_AUTO_TEST_CASE( disable_deferred_trxs_stage_2_dependency_test ) { try {
       fc_exception_message_starts_with("not all dependencies of protocol feature with digest"));
 } FC_LOG_AND_RETHROW() } /// disable_deferred_trxs_stage_2_dependency_test
 
+// Verify a block containing delayed transactions is validated
+// before DISABLE_DEFERRED_TRXS_STAGE_1 is activated
+BOOST_AUTO_TEST_CASE( block_validation_before_stage_1_test ) { try {
+   tester_no_disable_deferred_trx tester1;
+   tester_no_disable_deferred_trx tester2;
+
+   tester1.create_accounts( {"payloadless"_n} );
+   tester1.set_code( "payloadless"_n, test_contracts::payloadless_wasm() );
+   tester1.set_abi( "payloadless"_n, test_contracts::payloadless_abi().data() );
+
+   // Produce a block containing a delayed trx
+   constexpr uint32_t delay_sec = 10;
+   tester1.push_action("payloadless"_n, "doit"_n, "payloadless"_n, mutable_variant_object(), tester1.DEFAULT_EXPIRATION_DELTA, delay_sec);
+   auto b = tester1.produce_block();
+
+   // Push the block to another chain. The block should be validated
+   BOOST_REQUIRE_NO_THROW(tester2.push_block(b));
+} FC_LOG_AND_RETHROW() } /// block_validation_before_stage_1_test
+
 // Verify a block containing delayed transactions is not validated
 // after DISABLE_DEFERRED_TRXS_STAGE_1 is activated
-BOOST_AUTO_TEST_CASE( disable_deferred_trxs_stage_1_block_validate_test ) { try {
+BOOST_AUTO_TEST_CASE( block_validation_after_stage_1_test ) { try {
    tester_no_disable_deferred_trx tester1;
 
    // Activate DISABLE_DEFERRED_TRXS_STAGE_1 such that tester1
@@ -2244,6 +2263,6 @@ BOOST_AUTO_TEST_CASE( disable_deferred_trxs_stage_1_block_validate_test ) { try
       fc::exception,
       fc_exception_message_starts_with("transaction cannot be delayed")
    );
-} FC_LOG_AND_RETHROW() } /// disable_deferred_trxs_stage_1_block_validate_test
+} FC_LOG_AND_RETHROW() } /// block_validation_after_stage_1_test
 
 BOOST_AUTO_TEST_SUITE_END()

From caa703d32ac2b90f7b48cdf3b4285c28a6bf93a8 Mon Sep 17 00:00:00 2001
From: Jonathan Giszczak <jonrg@hypercubepc.com>
Date: Tue, 10 Oct 2023 14:04:17 -0500
Subject: [PATCH 54/61] Add comment.

---
 plugins/net_plugin/net_plugin.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp
index c33d5d2e80..34147c0204 100644
--- a/plugins/net_plugin/net_plugin.cpp
+++ b/plugins/net_plugin/net_plugin.cpp
@@ -1739,7 +1739,7 @@ namespace eosio {
          sb = cc.fetch_block_by_number( num ); // thread-safe
       } FC_LOG_AND_DROP();
       if( sb ) {
-         if( block_sync_rate_limit > 0 && peer_syncing_from_us ) {
+         if( block_sync_rate_limit > 0 && peer_syncing_from_us ) { // only throttle peers in sync mode even if a limit is set
             auto elapsed = std::chrono::duration_cast<std::chrono::seconds>(get_time() - connection_start_time);
             auto current_rate = double(block_sync_bytes_sent) / elapsed.count();
             if( current_rate >= block_sync_rate_limit ) {

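The check annotated in this patch is a running-average comparison: block-sync bytes sent on the connection so far, divided by whole seconds since the connection started, tested against the configured limit. A minimal standalone sketch of that decision, with std::chrono::steady_clock standing in for the plugin's clock and an extra guard (not in the original) against the first, zero-second interval:

#include <chrono>
#include <cstdint>
#include <iostream>

// Returns true if the next sync block may be sent, false if sending should be
// skipped on this pass because the average rate has reached the limit.
bool may_send_sync_block(uint64_t block_sync_bytes_sent,
                         std::chrono::steady_clock::time_point connection_start,
                         uint64_t block_sync_rate_limit /* bytes per second, 0 = unlimited */) {
   if (block_sync_rate_limit == 0)
      return true;
   auto elapsed = std::chrono::duration_cast<std::chrono::seconds>(
         std::chrono::steady_clock::now() - connection_start);
   if (elapsed.count() <= 0)
      return true; // sketch-only guard to avoid dividing by zero right after connecting
   double current_rate = double(block_sync_bytes_sent) / elapsed.count();
   return current_rate < block_sync_rate_limit;
}

int main() {
   auto start = std::chrono::steady_clock::now() - std::chrono::seconds(10);
   std::cout << may_send_sync_block(30'000, start, 4'000) << "\n"; // 3000 B/s < limit  -> 1
   std::cout << may_send_sync_block(50'000, start, 4'000) << "\n"; // 5000 B/s >= limit -> 0
}
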
From 8a5dfeb2b15267d04df073813c523649ceee220a Mon Sep 17 00:00:00 2001
From: Jonathan Giszczak <jonrg@hypercubepc.com>
Date: Tue, 10 Oct 2023 14:35:23 -0500
Subject: [PATCH 55/61] Revert "Add comment."

This reverts commit caa703d32ac2b90f7b48cdf3b4285c28a6bf93a8.
---
 plugins/net_plugin/net_plugin.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp
index 34147c0204..c33d5d2e80 100644
--- a/plugins/net_plugin/net_plugin.cpp
+++ b/plugins/net_plugin/net_plugin.cpp
@@ -1739,7 +1739,7 @@ namespace eosio {
          sb = cc.fetch_block_by_number( num ); // thread-safe
       } FC_LOG_AND_DROP();
       if( sb ) {
-         if( block_sync_rate_limit > 0 && peer_syncing_from_us ) { // only throttle peers in sync mode even if a limit is set
+         if( block_sync_rate_limit > 0 && peer_syncing_from_us ) {
             auto elapsed = std::chrono::duration_cast<std::chrono::seconds>(get_time() - connection_start_time);
             auto current_rate = double(block_sync_bytes_sent) / elapsed.count();
             if( current_rate >= block_sync_rate_limit ) {

From e3d4870ac48394817e2195d3df958359e6b9afc8 Mon Sep 17 00:00:00 2001
From: Jonathan Giszczak <jonrg@hypercubepc.com>
Date: Tue, 10 Oct 2023 14:40:09 -0500
Subject: [PATCH 56/61] Address peer review comments.

---
 plugins/net_plugin/net_plugin.cpp |  1 +
 tests/p2p_sync_throttle_test.py   | 14 ++++++++++++--
 2 files changed, 13 insertions(+), 2 deletions(-)

diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp
index c33d5d2e80..449edfd68a 100644
--- a/plugins/net_plugin/net_plugin.cpp
+++ b/plugins/net_plugin/net_plugin.cpp
@@ -1739,6 +1739,7 @@ namespace eosio {
          sb = cc.fetch_block_by_number( num ); // thread-safe
       } FC_LOG_AND_DROP();
       if( sb ) {
+         // Skip transmitting block this loop if threshold exceeded
          if( block_sync_rate_limit > 0 && peer_syncing_from_us ) {
             auto elapsed = std::chrono::duration_cast<std::chrono::seconds>(get_time() - connection_start_time);
             auto current_rate = double(block_sync_bytes_sent) / elapsed.count();
diff --git a/tests/p2p_sync_throttle_test.py b/tests/p2p_sync_throttle_test.py
index da101eb9b8..fd5ec8aa4c 100755
--- a/tests/p2p_sync_throttle_test.py
+++ b/tests/p2p_sync_throttle_test.py
@@ -118,17 +118,20 @@ def extractPrometheusMetric(connID: str, metric: str, text: str):
     clusterStart = time.time()
     cluster.launchUnstarted(2)
 
+    errorLimit = 40  # Approximately 20 retries required
     throttledNode = cluster.getNode(3)
-    while True:
+    while errorLimit > 0:
         try:
             response = throttlingNode.processUrllibRequest('prometheus', 'metrics', returnType=ReturnType.raw, printReturnLimit=16).decode()
         except urllib.error.URLError:
             # catch ConnectionRefusedEror waiting for node to finish startup and respond
+            errorLimit -= 1
             time.sleep(0.5)
             continue
         else:
             if len(response) < 100:
                 # tolerate HTTPError as well (method returns only the exception code)
+                errorLimit -= 1
                 continue
             connPorts = prometheusHostPortPattern.findall(response)
             if len(connPorts) < 3:
@@ -147,17 +150,22 @@ def extractPrometheusMetric(connID: str, metric: str, text: str):
             Print(f'Start sync throttling node throttling: {"True" if startSyncThrottlingState else "False"}')
             if time.time() > clusterStart + 30: errorExit('Timed out')
             break
+    else:
+        errorExit('Exceeded error retry limit waiting for throttling node')
 
-    while True:
+    errorLimit = 40  # Few if any retries required but for consistency...
+    while errorLimit > 0:
         try:
             response = throttledNode.processUrllibRequest('prometheus', 'metrics', returnType=ReturnType.raw, printReturnLimit=16).decode()
         except urllib.error.URLError:
             # catch ConnectionRefusedError waiting for node to finish startup and respond
+            errorLimit -= 1
             time.sleep(0.5)
             continue
         else:
             if len(response) < 100:
                 # tolerate HTTPError as well (method returns only the exception code)
+                errorLimit -= 1
                 time.sleep(0.5)
                 continue
             connPorts = prometheusHostPortPattern.findall(response)
@@ -171,6 +179,8 @@ def extractPrometheusMetric(connID: str, metric: str, text: str):
                                                                       response)
             Print(f'Start sync throttled bytes received: {startSyncThrottledBytesReceived}')
             break
+    else:
+        errorExit('Exceeded error retry limit waiting for throttled node')
 
     # Throttling node was offline during block generation and once online receives blocks as fast as possible while
     # transmitting blocks to the next node in line at the above throttle setting.

From b067bca4e6779150330c291c9518fb555c0b674c Mon Sep 17 00:00:00 2001
From: Jonathan Giszczak <jonrg@hypercubepc.com>
Date: Tue, 10 Oct 2023 14:58:29 -0500
Subject: [PATCH 57/61] Address a couple more review comments.

---
 tests/p2p_sync_throttle_test.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/tests/p2p_sync_throttle_test.py b/tests/p2p_sync_throttle_test.py
index fd5ec8aa4c..4b15b8f49c 100755
--- a/tests/p2p_sync_throttle_test.py
+++ b/tests/p2p_sync_throttle_test.py
@@ -136,6 +136,7 @@ def extractPrometheusMetric(connID: str, metric: str, text: str):
             connPorts = prometheusHostPortPattern.findall(response)
             if len(connPorts) < 3:
                 # wait for node to be connected
+                errorLimit -= 1
                 time.sleep(0.5)
                 continue
             Print('Throttling Node Start State')
@@ -171,6 +172,7 @@ def extractPrometheusMetric(connID: str, metric: str, text: str):
             connPorts = prometheusHostPortPattern.findall(response)
             if len(connPorts) < 2:
                 # wait for sending node to be connected
+                errorLimit -= 1
                 continue
             Print('Throttled Node Start State')
             throttledNodePortMap = {port: id for id, port in connPorts}

From 7f50c71aa601dda0e5e5d8bcb24e672fadff52db Mon Sep 17 00:00:00 2001
From: Lin Huang <lin.huang@eosnetwork.com>
Date: Tue, 10 Oct 2023 17:18:00 -0400
Subject: [PATCH 58/61] remove modify_gto_for_canceldelay_test, restore
 canceldelay_test, and update disable_deferred_trxs_stage_1_no_op_test

---
 .../chain/include/eosio/chain/controller.hpp  |   1 -
 unittests/delay_tests.cpp                     | 529 +++++++++++++++---
 unittests/protocol_feature_tests.cpp          |  50 +-
 3 files changed, 474 insertions(+), 106 deletions(-)

diff --git a/libraries/chain/include/eosio/chain/controller.hpp b/libraries/chain/include/eosio/chain/controller.hpp
index cef4b94e34..f03d61a1f1 100644
--- a/libraries/chain/include/eosio/chain/controller.hpp
+++ b/libraries/chain/include/eosio/chain/controller.hpp
@@ -370,7 +370,6 @@ namespace eosio { namespace chain {
       private:
          friend class apply_context;
          friend class transaction_context;
-         friend void modify_gto_for_canceldelay_test(controller& control, const transaction_id_type& trx_id); // canceldelay_test in delay_tests.cpp need access to mutable_db
 
          chainbase::database& mutable_db()const;
 
diff --git a/unittests/delay_tests.cpp b/unittests/delay_tests.cpp
index e5a993e1a7..2a98ba3db9 100644
--- a/unittests/delay_tests.cpp
+++ b/unittests/delay_tests.cpp
@@ -16,22 +16,6 @@ using mvo = fc::mutable_variant_object;
 
 const std::string eosio_token = name("eosio.token"_n).to_string();
 
-// Native action hardcodes sender empty and builds sender_id from trx id.
-// This method modifies those two fields for contract generated deferred
-// trxs so canceldelay can be tested by canceldelay_test.
-namespace eosio::chain {
-inline void modify_gto_for_canceldelay_test(controller& control, const transaction_id_type& trx_id) {
-   auto gto = control.mutable_db().find<generated_transaction_object, by_trx_id>(trx_id);
-   if (gto) {
-      control.mutable_db().modify<generated_transaction_object>(*gto, [&]( auto& gtx ) {
-         gtx.sender = account_name();
-
-         fc::uint128 _id(trx_id._hash[3], trx_id._hash[2]);
-         gtx.sender_id = (unsigned __int128)_id;
-      });
-   }
-}} /// namespace eosio::chain
-
 static void create_accounts(validating_tester& chain) {
    chain.produce_blocks();
    chain.create_accounts({"eosio.msig"_n, "eosio.token"_n});
@@ -1370,50 +1354,241 @@ BOOST_AUTO_TEST_CASE( canceldelay_test ) { try {
    validating_tester_no_disable_deferred_trx chain;
    chain.produce_block();
 
-   const auto& contract_account = account_name("defcontract");
-   const auto& test_account = account_name("tester");
+   const auto& tester_account = "tester"_n;
+   std::vector<transaction_id_type> ids;
 
    chain.produce_blocks();
-   chain.create_accounts({contract_account, test_account});
+   chain.create_account("eosio.token"_n);
+   chain.produce_blocks(10);
+
+   chain.set_code("eosio.token"_n, test_contracts::eosio_token_wasm());
+   chain.set_abi("eosio.token"_n, test_contracts::eosio_token_abi());
+
    chain.produce_blocks();
-   chain.set_code(contract_account, test_contracts::deferred_test_wasm());
-   chain.set_abi(contract_account, test_contracts::deferred_test_abi());
+   chain.create_account("tester"_n);
+   chain.create_account("tester2"_n);
+   chain.produce_blocks(10);
+
+   chain.push_action(config::system_account_name, updateauth::get_name(), tester_account, fc::mutable_variant_object()
+           ("account", "tester")
+           ("permission", "first")
+           ("parent", "active")
+           ("auth",  authority(chain.get_public_key(tester_account, "first"), 10))
+   );
+   chain.push_action(config::system_account_name, linkauth::get_name(), tester_account, fc::mutable_variant_object()
+           ("account", "tester")
+           ("code", eosio_token)
+           ("type", "transfer")
+           ("requirement", "first"));
+
    chain.produce_blocks();
+   chain.push_action("eosio.token"_n, "create"_n, "eosio.token"_n, mutable_variant_object()
+           ("issuer", eosio_token)
+           ("maximum_supply", "9000000.0000 CUR")
+   );
+
+   chain.push_action("eosio.token"_n, name("issue"), "eosio.token"_n, fc::mutable_variant_object()
+           ("to",       eosio_token)
+           ("quantity", "1000000.0000 CUR")
+           ("memo", "for stuff")
+   );
 
+   auto trace = chain.push_action("eosio.token"_n, name("transfer"), "eosio.token"_n, fc::mutable_variant_object()
+       ("from", eosio_token)
+       ("to", "tester")
+       ("quantity", "100.0000 CUR")
+       ("memo", "hi" )
+   );
+   BOOST_REQUIRE_EQUAL(transaction_receipt::executed, trace->receipt->status);
    auto gen_size = chain.control->db().get_index<generated_transaction_multi_index,by_trx_id>().size();
-   BOOST_CHECK_EQUAL(0u, gen_size);
+   BOOST_REQUIRE_EQUAL(0u, gen_size);
 
-   chain.push_action( contract_account, "delayedcall"_n, test_account, fc::mutable_variant_object()
-      ("payer",     test_account)
-      ("sender_id", 1)
-      ("contract",  contract_account)
-      ("payload",   42)
-      ("delay_sec", 1000)
-      ("replace_existing", false)
+   chain.produce_blocks();
+   auto liquid_balance = get_currency_balance(chain, "eosio.token"_n);
+   BOOST_REQUIRE_EQUAL(asset::from_string("999900.0000 CUR"), liquid_balance);
+   liquid_balance = get_currency_balance(chain, "tester"_n);
+   BOOST_REQUIRE_EQUAL(asset::from_string("100.0000 CUR"), liquid_balance);
+
+   // this transaction will be delayed 20 blocks
+   trace = chain.push_action("eosio.token"_n, name("transfer"), "tester"_n, fc::mutable_variant_object()
+       ("from", "tester")
+       ("to", "tester2")
+       ("quantity", "1.0000 CUR")
+       ("memo", "hi" ),
+       30, 10
    );
+   //wdump((fc::json::to_pretty_string(trace)));
+   ids.push_back(trace->id);
+   BOOST_REQUIRE_EQUAL(transaction_receipt::delayed, trace->receipt->status);
+   gen_size = chain.control->db().get_index<generated_transaction_multi_index,by_trx_id>().size();
+   BOOST_CHECK_EQUAL(1u, gen_size);
+   BOOST_CHECK_EQUAL(0u, trace->action_traces.size());
 
    const auto& idx = chain.control->db().get_index<generated_transaction_multi_index,by_trx_id>();
-   gen_size = idx.size();
-   BOOST_CHECK_EQUAL(1u, gen_size);
-   auto deferred_id = idx.begin()->trx_id;
+   auto itr = idx.find( trace->id );
+   BOOST_CHECK_EQUAL( (itr != idx.end()), true );
 
-   // canceldelay assumes sender and sender_id to be a specific
-   // format. hardcode them for testing purpose only
-   modify_gto_for_canceldelay_test(*(chain.control.get()), deferred_id);
+   chain.produce_blocks();
 
-   // send canceldelay for the delayed transaction
-   signed_transaction trx;
-   trx.actions.emplace_back(
-      vector<permission_level>{{contract_account, config::active_name}},
-      chain::canceldelay{{contract_account, config::active_name}, deferred_id}
+   liquid_balance = get_currency_balance(chain, "eosio.token"_n);
+   BOOST_REQUIRE_EQUAL(asset::from_string("999900.0000 CUR"), liquid_balance);
+   liquid_balance = get_currency_balance(chain, "tester"_n);
+   BOOST_REQUIRE_EQUAL(asset::from_string("100.0000 CUR"), liquid_balance);
+   liquid_balance = get_currency_balance(chain, "tester2"_n);
+   BOOST_REQUIRE_EQUAL(asset::from_string("0.0000 CUR"), liquid_balance);
+
+   BOOST_REQUIRE_EXCEPTION(
+      chain.push_action( config::system_account_name,
+                         updateauth::get_name(),
+                         vector<permission_level>{{tester_account, "first"_n}},
+                         fc::mutable_variant_object()
+            ("account", "tester")
+            ("permission", "first")
+            ("parent", "active")
+            ("auth",  authority(chain.get_public_key(tester_account, "first"))),
+            30, 7
+      ),
+      unsatisfied_authorization,
+      fc_exception_message_starts_with("transaction declares authority")
+   );
+
+   // this transaction will be delayed 20 blocks
+   trace = chain.push_action(config::system_account_name, updateauth::get_name(), tester_account, fc::mutable_variant_object()
+           ("account", "tester")
+           ("permission", "first")
+           ("parent", "active")
+           ("auth",  authority(chain.get_public_key(tester_account, "first"))),
+           30, 10
+   );
+   //wdump((fc::json::to_pretty_string(trace)));
+   ids.push_back(trace->id);
+   BOOST_REQUIRE_EQUAL(transaction_receipt::delayed, trace->receipt->status);
+   gen_size = chain.control->db().get_index<generated_transaction_multi_index,by_trx_id>().size();
+   BOOST_CHECK_EQUAL(2u, gen_size);
+   BOOST_CHECK_EQUAL(0u, trace->action_traces.size());
+
+   chain.produce_blocks();
+
+   liquid_balance = get_currency_balance(chain, "tester"_n);
+   BOOST_REQUIRE_EQUAL(asset::from_string("100.0000 CUR"), liquid_balance);
+   liquid_balance = get_currency_balance(chain, "tester2"_n);
+   BOOST_REQUIRE_EQUAL(asset::from_string("0.0000 CUR"), liquid_balance);
+
+   chain.produce_blocks(16);
+
+   liquid_balance = get_currency_balance(chain, "tester"_n);
+   BOOST_REQUIRE_EQUAL(asset::from_string("100.0000 CUR"), liquid_balance);
+   liquid_balance = get_currency_balance(chain, "tester2"_n);
+   BOOST_REQUIRE_EQUAL(asset::from_string("0.0000 CUR"), liquid_balance);
+
+   // this transaction will be delayed 20 blocks
+   trace = chain.push_action("eosio.token"_n, name("transfer"), "tester"_n, fc::mutable_variant_object()
+       ("from", "tester")
+       ("to", "tester2")
+       ("quantity", "5.0000 CUR")
+       ("memo", "hi" ),
+       30, 10
    );
+   //wdump((fc::json::to_pretty_string(trace)));
+   ids.push_back(trace->id);
+   BOOST_REQUIRE_EQUAL(transaction_receipt::delayed, trace->receipt->status);
+   gen_size = chain.control->db().get_index<generated_transaction_multi_index,by_trx_id>().size();
+   BOOST_CHECK_EQUAL(3u, gen_size);
+   BOOST_CHECK_EQUAL(0u, trace->action_traces.size());
+
+   chain.produce_blocks();
+
+   liquid_balance = get_currency_balance(chain, "tester"_n);
+   BOOST_REQUIRE_EQUAL(asset::from_string("100.0000 CUR"), liquid_balance);
+   liquid_balance = get_currency_balance(chain, "tester2"_n);
+   BOOST_REQUIRE_EQUAL(asset::from_string("0.0000 CUR"), liquid_balance);
+
+   // send canceldelay for first delayed transaction
+   signed_transaction trx;
+   trx.actions.emplace_back(vector<permission_level>{{"tester"_n, config::active_name}},
+                            chain::canceldelay{{"tester"_n, config::active_name}, ids[0]});
+
    chain.set_transaction_headers(trx);
-   trx.sign(chain.get_private_key(contract_account, "active"), chain.control->get_chain_id());
+   trx.sign(chain.get_private_key("tester"_n, "active"), chain.control->get_chain_id());
+   // first push as a dry_run trx
+   trace = chain.push_transaction(trx, fc::time_point::maximum(), base_tester::DEFAULT_BILLED_CPU_TIME_US, false, transaction_metadata::trx_type::dry_run);
+   BOOST_REQUIRE_EQUAL(transaction_receipt::executed, trace->receipt->status);
+   // now push for real
+   trace = chain.push_transaction(trx);
+   BOOST_REQUIRE_EQUAL(transaction_receipt::executed, trace->receipt->status);
+   gen_size = chain.control->db().get_index<generated_transaction_multi_index,by_trx_id>().size();
+   BOOST_CHECK_EQUAL(2u, gen_size);
+
+   const auto& cidx = chain.control->db().get_index<generated_transaction_multi_index,by_trx_id>();
+   auto citr = cidx.find( ids[0] );
+   BOOST_CHECK_EQUAL( (citr == cidx.end()), true );
+
+   chain.produce_blocks();
 
-   chain.push_transaction(trx);
+   liquid_balance = get_currency_balance(chain, "tester"_n);
+   BOOST_REQUIRE_EQUAL(asset::from_string("100.0000 CUR"), liquid_balance);
+   liquid_balance = get_currency_balance(chain, "tester2"_n);
+   BOOST_REQUIRE_EQUAL(asset::from_string("0.0000 CUR"), liquid_balance);
+
+   gen_size = chain.control->db().get_index<generated_transaction_multi_index,by_trx_id>().size();
+   BOOST_CHECK_EQUAL(2u, gen_size);
+
+   chain.produce_blocks();
+
+   gen_size = chain.control->db().get_index<generated_transaction_multi_index,by_trx_id>().size();
+   BOOST_CHECK_EQUAL(2u, gen_size);
+
+   chain.produce_blocks();
+   // update auth will finally be performed
+
+   gen_size = chain.control->db().get_index<generated_transaction_multi_index,by_trx_id>().size();
+   BOOST_CHECK_EQUAL(1u, gen_size);
+
+   liquid_balance = get_currency_balance(chain, "tester"_n);
+   BOOST_REQUIRE_EQUAL(asset::from_string("100.0000 CUR"), liquid_balance);
+   liquid_balance = get_currency_balance(chain, "tester2"_n);
+   BOOST_REQUIRE_EQUAL(asset::from_string("0.0000 CUR"), liquid_balance);
+
+   // this transfer is performed right away since delay is removed
+   trace = chain.push_action("eosio.token"_n, name("transfer"), "tester"_n, fc::mutable_variant_object()
+       ("from", "tester")
+       ("to", "tester2")
+       ("quantity", "10.0000 CUR")
+       ("memo", "hi" )
+   );
+   //wdump((fc::json::to_pretty_string(trace)));
+   BOOST_REQUIRE_EQUAL(transaction_receipt::executed, trace->receipt->status);
+
+   gen_size = chain.control->db().get_index<generated_transaction_multi_index,by_trx_id>().size();
+   BOOST_CHECK_EQUAL(1u, gen_size);
+
+   chain.produce_blocks();
+
+   liquid_balance = get_currency_balance(chain, "tester"_n);
+   BOOST_REQUIRE_EQUAL(asset::from_string("90.0000 CUR"), liquid_balance);
+   liquid_balance = get_currency_balance(chain, "tester2"_n);
+   BOOST_REQUIRE_EQUAL(asset::from_string("10.0000 CUR"), liquid_balance);
+
+   chain.produce_blocks(15);
+
+   gen_size = chain.control->db().get_index<generated_transaction_multi_index,by_trx_id>().size();
+   BOOST_CHECK_EQUAL(1u, gen_size);
+
+   liquid_balance = get_currency_balance(chain, "tester"_n);
+   BOOST_REQUIRE_EQUAL(asset::from_string("90.0000 CUR"), liquid_balance);
+   liquid_balance = get_currency_balance(chain, "tester2"_n);
+   BOOST_REQUIRE_EQUAL(asset::from_string("10.0000 CUR"), liquid_balance);
+
+   // the second transfer is finally performed
+   chain.produce_blocks();
 
    gen_size = chain.control->db().get_index<generated_transaction_multi_index,by_trx_id>().size();
    BOOST_CHECK_EQUAL(0u, gen_size);
+
+   liquid_balance = get_currency_balance(chain, "tester"_n);
+   BOOST_REQUIRE_EQUAL(asset::from_string("85.0000 CUR"), liquid_balance);
+   liquid_balance = get_currency_balance(chain, "tester2"_n);
+   BOOST_REQUIRE_EQUAL(asset::from_string("15.0000 CUR"), liquid_balance);
 } FC_LOG_AND_RETHROW() } /// canceldelay_test
 
 // test canceldelay action under different permission levels
@@ -1421,77 +1596,265 @@ BOOST_AUTO_TEST_CASE( canceldelay_test2 ) { try {
    validating_tester_no_disable_deferred_trx chain;
    chain.produce_block();
 
-   const auto& contract_account = account_name("defcontract");
-   const auto& tester_account = account_name("tester");
+   const auto& tester_account = "tester"_n;
 
    chain.produce_blocks();
-   chain.create_accounts({contract_account, tester_account});
+   chain.create_account("eosio.token"_n);
    chain.produce_blocks();
-   chain.set_code(contract_account, test_contracts::deferred_test_wasm());
-   chain.set_abi(contract_account, test_contracts::deferred_test_abi());
+
+   chain.set_code("eosio.token"_n, test_contracts::eosio_token_wasm());
+   chain.set_abi("eosio.token"_n, test_contracts::eosio_token_abi());
+
+   chain.produce_blocks();
+   chain.create_account("tester"_n);
+   chain.create_account("tester2"_n);
    chain.produce_blocks();
 
-   chain.push_action(config::system_account_name, updateauth::get_name(), contract_account, fc::mutable_variant_object()
-           ("account", "defcontract")
+   chain.push_action(config::system_account_name, updateauth::get_name(), tester_account, fc::mutable_variant_object()
+           ("account", "tester")
            ("permission", "first")
            ("parent", "active")
-           ("auth",  authority(chain.get_public_key(contract_account, "first"), 5))
+           ("auth",  authority(chain.get_public_key(tester_account, "first"), 5))
+   );
+   chain.push_action(config::system_account_name, updateauth::get_name(), tester_account, fc::mutable_variant_object()
+          ("account", "tester")
+          ("permission", "second")
+          ("parent", "first")
+          ("auth",  authority(chain.get_public_key(tester_account, "second")))
    );
+   chain.push_action(config::system_account_name, linkauth::get_name(), tester_account, fc::mutable_variant_object()
+           ("account", "tester")
+           ("code", eosio_token)
+           ("type", "transfer")
+           ("requirement", "first"));
+
    chain.produce_blocks();
+   chain.push_action("eosio.token"_n, "create"_n, "eosio.token"_n, mutable_variant_object()
+           ("issuer", eosio_token)
+           ("maximum_supply", "9000000.0000 CUR")
+   );
 
-   auto gen_size = chain.control->db().get_index<generated_transaction_multi_index,by_trx_id>().size();
-   BOOST_CHECK_EQUAL(0u, gen_size);
+   chain.push_action("eosio.token"_n, name("issue"), "eosio.token"_n, fc::mutable_variant_object()
+           ("to",       eosio_token)
+           ("quantity", "1000000.0000 CUR")
+           ("memo", "for stuff")
+   );
 
-   chain.push_action( contract_account, "delayedcall"_n, tester_account, fc::mutable_variant_object()
-      ("payer",     tester_account)
-      ("sender_id", 1)
-      ("contract",  contract_account)
-      ("payload",   42)
-      ("delay_sec", 1000)
-      ("replace_existing", false)
+   auto trace = chain.push_action("eosio.token"_n, name("transfer"), "eosio.token"_n, fc::mutable_variant_object()
+       ("from", eosio_token)
+       ("to", "tester")
+       ("quantity", "100.0000 CUR")
+       ("memo", "hi" )
    );
+   BOOST_REQUIRE_EQUAL(transaction_receipt::executed, trace->receipt->status);
+   auto gen_size = chain.control->db().get_index<generated_transaction_multi_index,by_trx_id>().size();
+   BOOST_REQUIRE_EQUAL(0u, gen_size);
 
-   const auto& idx = chain.control->db().get_index<generated_transaction_multi_index,by_trx_id>();
-   gen_size = idx.size();
-   BOOST_CHECK_EQUAL(1u, gen_size);
-   auto deferred_id = idx.begin()->trx_id;
+   chain.produce_blocks();
+   auto liquid_balance = get_currency_balance(chain, "eosio.token"_n);
+   BOOST_REQUIRE_EQUAL(asset::from_string("999900.0000 CUR"), liquid_balance);
+   liquid_balance = get_currency_balance(chain, "tester"_n);
+   BOOST_REQUIRE_EQUAL(asset::from_string("100.0000 CUR"), liquid_balance);
 
-   // canceldelay assumes sender and sender_id to be a specific
-   // format. hardcode them for testing purpose only
-   modify_gto_for_canceldelay_test(*(chain.control.get()), deferred_id);
+   ilog("attempting first delayed transfer");
 
-   // attempt canceldelay with wrong canceling_auth for delayed trx
    {
+      // this transaction will be delayed 10 blocks
+      trace = chain.push_action("eosio.token"_n, name("transfer"), vector<permission_level>{{"tester"_n, "first"_n}}, fc::mutable_variant_object()
+          ("from", "tester")
+          ("to", "tester2")
+          ("quantity", "1.0000 CUR")
+          ("memo", "hi" ),
+          30, 5
+      );
+      auto trx_id = trace->id;
+      BOOST_REQUIRE_EQUAL(transaction_receipt::delayed, trace->receipt->status);
+      gen_size = chain.control->db().get_index<generated_transaction_multi_index,by_trx_id>().size();
+      BOOST_REQUIRE_EQUAL(1u, gen_size);
+      BOOST_REQUIRE_EQUAL(0u, trace->action_traces.size());
+
+      const auto& idx = chain.control->db().get_index<generated_transaction_multi_index,by_trx_id>();
+      auto itr = idx.find( trx_id );
+      BOOST_CHECK_EQUAL( (itr != idx.end()), true );
+
+      chain.produce_blocks();
+
+      liquid_balance = get_currency_balance(chain, "tester"_n);
+      BOOST_REQUIRE_EQUAL(asset::from_string("100.0000 CUR"), liquid_balance);
+      liquid_balance = get_currency_balance(chain, "tester2"_n);
+      BOOST_REQUIRE_EQUAL(asset::from_string("0.0000 CUR"), liquid_balance);
+
+      // attempt canceldelay with wrong canceling_auth for delayed transfer of 1.0000 CUR
+      {
+         signed_transaction trx;
+         trx.actions.emplace_back(vector<permission_level>{{"tester"_n, config::active_name}},
+                                  chain::canceldelay{{"tester"_n, config::active_name}, trx_id});
+         chain.set_transaction_headers(trx);
+         trx.sign(chain.get_private_key("tester"_n, "active"), chain.control->get_chain_id());
+         BOOST_REQUIRE_EXCEPTION( chain.push_transaction(trx), action_validate_exception,
+                                  fc_exception_message_is("canceling_auth in canceldelay action was not found as authorization in the original delayed transaction") );
+      }
+
+      // attempt canceldelay with "second" permission for delayed transfer of 1.0000 CUR
+      {
+         signed_transaction trx;
+         trx.actions.emplace_back(vector<permission_level>{{"tester"_n, "second"_n}},
+                                  chain::canceldelay{{"tester"_n, "first"_n}, trx_id});
+         chain.set_transaction_headers(trx);
+         trx.sign(chain.get_private_key("tester"_n, "second"), chain.control->get_chain_id());
+         BOOST_REQUIRE_THROW( chain.push_transaction(trx), irrelevant_auth_exception );
+         BOOST_REQUIRE_EXCEPTION( chain.push_transaction(trx), irrelevant_auth_exception,
+                                  fc_exception_message_starts_with("canceldelay action declares irrelevant authority") );
+      }
+
+      // canceldelay with "active" permission for delayed transfer of 1.0000 CUR
       signed_transaction trx;
       trx.actions.emplace_back(vector<permission_level>{{"tester"_n, config::active_name}},
-                               chain::canceldelay{{"tester"_n, config::active_name}, deferred_id});
+                               chain::canceldelay{{"tester"_n, "first"_n}, trx_id});
       chain.set_transaction_headers(trx);
       trx.sign(chain.get_private_key("tester"_n, "active"), chain.control->get_chain_id());
-      BOOST_REQUIRE_EXCEPTION( chain.push_transaction(trx), action_validate_exception,
-                               fc_exception_message_is("canceling_auth in canceldelay action was not found as authorization in the original delayed transaction") );
+      trace = chain.push_transaction(trx);
+
+      BOOST_REQUIRE_EQUAL(transaction_receipt::executed, trace->receipt->status);
+      gen_size = chain.control->db().get_index<generated_transaction_multi_index,by_trx_id>().size();
+      BOOST_REQUIRE_EQUAL(0u, gen_size);
+
+      const auto& cidx = chain.control->db().get_index<generated_transaction_multi_index,by_trx_id>();
+      auto citr = cidx.find( trx_id );
+      BOOST_REQUIRE_EQUAL( (citr == cidx.end()), true );
+
+      chain.produce_blocks(10);
+
+      liquid_balance = get_currency_balance(chain, "tester"_n);
+      BOOST_REQUIRE_EQUAL(asset::from_string("100.0000 CUR"), liquid_balance);
+      liquid_balance = get_currency_balance(chain, "tester2"_n);
+      BOOST_REQUIRE_EQUAL(asset::from_string("0.0000 CUR"), liquid_balance);
    }
 
-   // attempt canceldelay with wrong permission for delayed trx
+   ilog("reset minimum permission of transfer to second permission");
+
+   chain.push_action(config::system_account_name, linkauth::get_name(), tester_account, fc::mutable_variant_object()
+           ("account", "tester")
+           ("code", eosio_token)
+           ("type", "transfer")
+           ("requirement", "second"),
+           30, 5
+   );
+
+   chain.produce_blocks(11);
+
+
+   ilog("attempting second delayed transfer");
    {
+      // this transaction will be delayed 10 blocks
+      trace = chain.push_action("eosio.token"_n, name("transfer"), vector<permission_level>{{"tester"_n, "second"_n}}, fc::mutable_variant_object()
+          ("from", "tester")
+          ("to", "tester2")
+          ("quantity", "5.0000 CUR")
+          ("memo", "hi" ),
+          30, 5
+      );
+      auto trx_id = trace->id;
+      BOOST_REQUIRE_EQUAL(transaction_receipt::delayed, trace->receipt->status);
+      auto gen_size = chain.control->db().get_index<generated_transaction_multi_index,by_trx_id>().size();
+      BOOST_CHECK_EQUAL(1u, gen_size);
+      BOOST_CHECK_EQUAL(0u, trace->action_traces.size());
+
+      const auto& idx = chain.control->db().get_index<generated_transaction_multi_index,by_trx_id>();
+      auto itr = idx.find( trx_id );
+      BOOST_CHECK_EQUAL( (itr != idx.end()), true );
+
+      chain.produce_blocks();
+
+      liquid_balance = get_currency_balance(chain, "tester"_n);
+      BOOST_REQUIRE_EQUAL(asset::from_string("100.0000 CUR"), liquid_balance);
+      liquid_balance = get_currency_balance(chain, "tester2"_n);
+      BOOST_REQUIRE_EQUAL(asset::from_string("0.0000 CUR"), liquid_balance);
+
+      // canceldelay with "first" permission for delayed transfer of 5.0000 CUR
       signed_transaction trx;
-      trx.actions.emplace_back(vector<permission_level>{{contract_account, "first"_n}},
-                               chain::canceldelay{{contract_account, "first"_n}, deferred_id});
+      trx.actions.emplace_back(vector<permission_level>{{"tester"_n, "first"_n}},
+                               chain::canceldelay{{"tester"_n, "second"_n}, trx_id});
       chain.set_transaction_headers(trx);
-      trx.sign(chain.get_private_key(contract_account, "first"), chain.control->get_chain_id());
-      BOOST_REQUIRE_EXCEPTION( chain.push_transaction(trx), action_validate_exception,
-                               fc_exception_message_is("canceling_auth in canceldelay action was not found as authorization in the original delayed transaction") );
+      trx.sign(chain.get_private_key("tester"_n, "first"), chain.control->get_chain_id());
+      trace = chain.push_transaction(trx);
+
+      BOOST_REQUIRE_EQUAL(transaction_receipt::executed, trace->receipt->status);
+      gen_size = chain.control->db().get_index<generated_transaction_multi_index,by_trx_id>().size();
+      BOOST_REQUIRE_EQUAL(0u, gen_size);
+
+      const auto& cidx = chain.control->db().get_index<generated_transaction_multi_index,by_trx_id>();
+      auto citr = cidx.find( trx_id );
+      BOOST_REQUIRE_EQUAL( (citr == cidx.end()), true );
+
+      chain.produce_blocks(10);
+
+      liquid_balance = get_currency_balance(chain, "tester"_n);
+      BOOST_REQUIRE_EQUAL(asset::from_string("100.0000 CUR"), liquid_balance);
+      liquid_balance = get_currency_balance(chain, "tester2"_n);
+      BOOST_REQUIRE_EQUAL(asset::from_string("0.0000 CUR"), liquid_balance);
    }
 
-   // attempt canceldelay with wrong signature for delayed trx
+   ilog("attempting third delayed transfer");
+
    {
+      // this transaction will be delayed 10 blocks
+      trace = chain.push_action("eosio.token"_n, name("transfer"), vector<permission_level>{{"tester"_n, config::owner_name}}, fc::mutable_variant_object()
+          ("from", "tester")
+          ("to", "tester2")
+          ("quantity", "10.0000 CUR")
+          ("memo", "hi" ),
+          30, 5
+      );
+      auto trx_id = trace->id;
+      BOOST_REQUIRE_EQUAL(transaction_receipt::delayed, trace->receipt->status);
+      gen_size = chain.control->db().get_index<generated_transaction_multi_index,by_trx_id>().size();
+      BOOST_REQUIRE_EQUAL(1u, gen_size);
+      BOOST_REQUIRE_EQUAL(0u, trace->action_traces.size());
+
+      const auto& idx = chain.control->db().get_index<generated_transaction_multi_index,by_trx_id>();
+      auto itr = idx.find( trx_id );
+      BOOST_CHECK_EQUAL( (itr != idx.end()), true );
+
+      chain.produce_blocks();
+
+      liquid_balance = get_currency_balance(chain, "tester"_n);
+      BOOST_REQUIRE_EQUAL(asset::from_string("100.0000 CUR"), liquid_balance);
+      liquid_balance = get_currency_balance(chain, "tester2"_n);
+      BOOST_REQUIRE_EQUAL(asset::from_string("0.0000 CUR"), liquid_balance);
+
+      // attempt canceldelay with "active" permission for delayed transfer of 10.0000 CUR
+      {
+         signed_transaction trx;
+         trx.actions.emplace_back(vector<permission_level>{{"tester"_n, "active"_n}},
+                                  chain::canceldelay{{"tester"_n, config::owner_name}, trx_id});
+         chain.set_transaction_headers(trx);
+         trx.sign(chain.get_private_key("tester"_n, "active"), chain.control->get_chain_id());
+         BOOST_REQUIRE_THROW( chain.push_transaction(trx), irrelevant_auth_exception );
+      }
+
+      // canceldelay with "owner" permission for delayed transfer of 10.0000 CUR
       signed_transaction trx;
-      trx.actions.emplace_back(vector<permission_level>{{contract_account, config::active_name}},
-                               chain::canceldelay{{contract_account, config::active_name}, deferred_id});
+      trx.actions.emplace_back(vector<permission_level>{{"tester"_n, config::owner_name}},
+                               chain::canceldelay{{"tester"_n, config::owner_name}, trx_id});
       chain.set_transaction_headers(trx);
-      trx.sign(chain.get_private_key(contract_account, "first"), chain.control->get_chain_id());
-      BOOST_REQUIRE_THROW( chain.push_transaction(trx), unsatisfied_authorization );
-      BOOST_REQUIRE_EXCEPTION( chain.push_transaction(trx), unsatisfied_authorization,
-                               fc_exception_message_starts_with("transaction declares authority") );
+      trx.sign(chain.get_private_key("tester"_n, "owner"), chain.control->get_chain_id());
+      trace = chain.push_transaction(trx);
+
+      BOOST_REQUIRE_EQUAL(transaction_receipt::executed, trace->receipt->status);
+      gen_size = chain.control->db().get_index<generated_transaction_multi_index,by_trx_id>().size();
+      BOOST_REQUIRE_EQUAL(0u, gen_size);
+
+      const auto& cidx = chain.control->db().get_index<generated_transaction_multi_index,by_trx_id>();
+      auto citr = cidx.find( trx_id );
+      BOOST_REQUIRE_EQUAL( (citr == cidx.end()), true );
+
+      chain.produce_blocks(10);
+
+      liquid_balance = get_currency_balance(chain, "tester"_n);
+      BOOST_REQUIRE_EQUAL(asset::from_string("100.0000 CUR"), liquid_balance);
+      liquid_balance = get_currency_balance(chain, "tester2"_n);
+      BOOST_REQUIRE_EQUAL(asset::from_string("0.0000 CUR"), liquid_balance);
    }
 } FC_LOG_AND_RETHROW() } /// canceldelay_test2
 
diff --git a/unittests/protocol_feature_tests.cpp b/unittests/protocol_feature_tests.cpp
index 022e973901..ebef30e756 100644
--- a/unittests/protocol_feature_tests.cpp
+++ b/unittests/protocol_feature_tests.cpp
@@ -1921,18 +1921,15 @@ BOOST_AUTO_TEST_CASE( set_parameters_packed_test ) { try {
                        c.error("alice does not have permission to call this API"));
 } FC_LOG_AND_RETHROW() }
 
-// native action hardcodes sender empty and builds sender_id from trx id.
-// modify_gto_for_canceldelay_test modifies those two fields for contract
-// generated deferred trxs so canceldelay can be used. defined in delay_tests.cpp
-namespace eosio::chain { extern void modify_gto_for_canceldelay_test(controller& control, const transaction_id_type& trx_id) ; }
-
 BOOST_AUTO_TEST_CASE( disable_deferred_trxs_stage_1_no_op_test ) { try {
    tester_no_disable_deferred_trx c;
 
    c.produce_block();
-   c.create_accounts( {"alice"_n, "bob"_n, "test"_n} );
+   c.create_accounts( {"alice"_n, "bob"_n, "test"_n, "payloadless"_n} );
    c.set_code( "test"_n, test_contracts::deferred_test_wasm() );
    c.set_abi( "test"_n, test_contracts::deferred_test_abi() );
+   c.set_code( "payloadless"_n, test_contracts::payloadless_wasm() );
+   c.set_abi( "payloadless"_n, test_contracts::payloadless_abi().data() );
    c.produce_block();
 
    auto gen_size = c.control->db().get_index<generated_transaction_multi_index,by_trx_id>().size();
@@ -1959,7 +1956,7 @@ BOOST_AUTO_TEST_CASE( disable_deferred_trxs_stage_1_no_op_test ) { try {
    gen_size = c.control->db().get_index<generated_transaction_multi_index,by_trx_id>().size();
    BOOST_REQUIRE_EQUAL(0u, gen_size);
 
-   // generate a new deferred trx for the rest of the test
+   // generate a deferred trx from contract for cancel_deferred test
    c.push_action( "test"_n, "delayedcall"_n, "alice"_n, fc::mutable_variant_object()
       ("payer", "alice")
       ("sender_id", 1)
@@ -1968,12 +1965,25 @@ BOOST_AUTO_TEST_CASE( disable_deferred_trxs_stage_1_no_op_test ) { try {
       ("delay_sec", 120)
       ("replace_existing", false)
    );
+
+   // generate a delayed trx for canceldelay test
+   constexpr uint32_t delay_sec = 10;
+   c.push_action("payloadless"_n, "doit"_n, "payloadless"_n, mutable_variant_object(), c.DEFAULT_EXPIRATION_DELTA, delay_sec);
+
+   // make sure two trxs were generated
    c.produce_block();
    const auto& idx = c.control->db().get_index<generated_transaction_multi_index,by_trx_id>();
    gen_size = idx.size();
-   BOOST_REQUIRE_EQUAL(1u, gen_size);
-   BOOST_REQUIRE_EQUAL(idx.begin()->payer, "alice"_n);
-   auto alice_trx_id = idx.begin()->trx_id;
+   BOOST_REQUIRE_EQUAL(2u, gen_size);
+   transaction_id_type alice_trx_id;
+   transaction_id_type payloadless_trx_id;
+   for( auto itr = idx.begin(); itr != idx.end(); ++itr ) {
+      if( itr->payer == "alice"_n) {
+         alice_trx_id = itr->trx_id;
+      } else {
+         payloadless_trx_id = itr->trx_id;
+      }
+   }
 
    // activate disable_deferred_trxs_stage_1
    const auto& pfm = c.control->get_protocol_feature_manager();
@@ -1995,7 +2005,7 @@ BOOST_AUTO_TEST_CASE( disable_deferred_trxs_stage_1_no_op_test ) { try {
 
    // verify bob's deferred trx is not made to generated_transaction_multi_index
    gen_size = c.control->db().get_index<generated_transaction_multi_index,by_trx_id>().size();
-   BOOST_REQUIRE_EQUAL(1u, gen_size);
+   BOOST_REQUIRE_EQUAL(2u, gen_size);
    // verify alice's deferred trx is still in generated_transaction_multi_index
    auto gto = c.control->db().find<generated_transaction_object, by_trx_id>(alice_trx_id);
    BOOST_REQUIRE(gto != nullptr);
@@ -2007,31 +2017,27 @@ BOOST_AUTO_TEST_CASE( disable_deferred_trxs_stage_1_no_op_test ) { try {
       eosio_assert_message_exception,
       eosio_assert_message_is( "cancel_deferred failed" ) );
    gen_size = c.control->db().get_index<generated_transaction_multi_index,by_trx_id>().size();
-   BOOST_REQUIRE_EQUAL(1u, gen_size);
+   BOOST_REQUIRE_EQUAL(2u, gen_size);
    // verify alice's deferred trx is not removed
    gto = c.control->db().find<generated_transaction_object, by_trx_id>(alice_trx_id);
    BOOST_REQUIRE( gto );
 
-   // verify canceldelay native action is no-op
-
-   // canceldelay assumes sender and sender_id to be a specific format
-   modify_gto_for_canceldelay_test(*(c.control.get()), alice_trx_id);
    // call canceldelay native action
    signed_transaction trx;
    trx.actions.emplace_back(
-      vector<permission_level>{{"test"_n, config::active_name}},
-      canceldelay{{"test"_n, config::active_name}, alice_trx_id}
+      vector<permission_level>{{"payloadless"_n, config::active_name}},
+      canceldelay{{"payloadless"_n, config::active_name}, payloadless_trx_id}
    );
    c.set_transaction_headers(trx);
-   trx.sign(c.get_private_key("test"_n, "active"), c.control->get_chain_id());
+   trx.sign(c.get_private_key("payloadless"_n, "active"), c.control->get_chain_id());
    c.push_transaction(trx);
    c.produce_block();
 
    // verify canceldelay is no-op
    gen_size = c.control->db().get_index<generated_transaction_multi_index,by_trx_id>().size();
-   BOOST_REQUIRE_EQUAL(1u, gen_size);
-   // verify alice's deferred trx is not removed
-   gto = c.control->db().find<generated_transaction_object, by_trx_id>(alice_trx_id);
+   BOOST_REQUIRE_EQUAL(2u, gen_size);
+   // verify payloadless' delayed trx is not removed
+   gto = c.control->db().find<generated_transaction_object, by_trx_id>(payloadless_trx_id);
    BOOST_REQUIRE( gto );
 } FC_LOG_AND_RETHROW() } /// disable_deferred_trxs_stage_1_no_op_test
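
The canceldelay coverage in this patch follows a single pattern: push an action with a non-zero delay_sec so a deferred entry lands in generated_transaction_multi_index, then submit a canceldelay action whose canceling_auth matches an authorization of the original transaction, and finally confirm the entry is gone. A condensed sketch of that pattern, using only the tester calls that already appear in the diffs above (the `chain` fixture, account names, and the 30-second expiration / 5-second delay values are illustrative, not further changes to the patch):

   // Delay a transfer: expiration = 30 s, delay_sec = 5 (10 blocks at 500 ms per block).
   auto trace = chain.push_action("eosio.token"_n, "transfer"_n, "tester"_n,
      fc::mutable_variant_object()
         ("from", "tester")("to", "tester2")
         ("quantity", "1.0000 CUR")("memo", "hi"),
      30, 5);
   BOOST_REQUIRE_EQUAL(transaction_receipt::delayed, trace->receipt->status);
   auto trx_id = trace->id;

   // Cancel it with the same authorization that authorized the delayed transfer.
   signed_transaction cancel;
   cancel.actions.emplace_back(vector<permission_level>{{"tester"_n, config::active_name}},
                               chain::canceldelay{{"tester"_n, config::active_name}, trx_id});
   chain.set_transaction_headers(cancel);
   cancel.sign(chain.get_private_key("tester"_n, "active"), chain.control->get_chain_id());
   trace = chain.push_transaction(cancel);
   BOOST_REQUIRE_EQUAL(transaction_receipt::executed, trace->receipt->status);

   // The deferred entry is removed immediately; the transfer never executes.
   const auto& idx = chain.control->db().get_index<generated_transaction_multi_index, by_trx_id>();
   BOOST_REQUIRE(idx.find(trx_id) == idx.end());

The tests above additionally exercise custom permissions ("first", "second", owner) and the mismatched canceling_auth error paths, but the happy path reduces to the sequence shown here.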
 

From 5834fde500a0c3435cd99b6c082cf16e7742dbfa Mon Sep 17 00:00:00 2001
From: Lin Huang <lin.huang@eosnetwork.com>
Date: Tue, 10 Oct 2023 20:24:23 -0400
Subject: [PATCH 59/61] add a test for blocking incoming delayed trxs by
 producer_plugin

---
 plugins/producer_plugin/test/CMakeLists.txt   |   3 +-
 .../test/test_disallow_delayed_trx.cpp        | 101 ++++++++++++++++++
 2 files changed, 103 insertions(+), 1 deletion(-)
 create mode 100644 plugins/producer_plugin/test/test_disallow_delayed_trx.cpp

diff --git a/plugins/producer_plugin/test/CMakeLists.txt b/plugins/producer_plugin/test/CMakeLists.txt
index 42c42596f8..877ffd9f11 100644
--- a/plugins/producer_plugin/test/CMakeLists.txt
+++ b/plugins/producer_plugin/test/CMakeLists.txt
@@ -2,7 +2,8 @@ add_executable( test_producer_plugin
         test_trx_full.cpp
         test_options.cpp
         test_block_timing_util.cpp
+        test_disallow_delayed_trx.cpp
         main.cpp
         )
 target_link_libraries( test_producer_plugin producer_plugin eosio_testing eosio_chain_wrap )
-add_test(NAME test_producer_plugin COMMAND plugins/producer_plugin/test/test_producer_plugin WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
\ No newline at end of file
+add_test(NAME test_producer_plugin COMMAND plugins/producer_plugin/test/test_producer_plugin WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
diff --git a/plugins/producer_plugin/test/test_disallow_delayed_trx.cpp b/plugins/producer_plugin/test/test_disallow_delayed_trx.cpp
new file mode 100644
index 0000000000..7189fb4e1a
--- /dev/null
+++ b/plugins/producer_plugin/test/test_disallow_delayed_trx.cpp
@@ -0,0 +1,101 @@
+#include <eosio/producer_plugin/producer_plugin.hpp>
+#include <eosio/testing/tester.hpp>
+#include <boost/test/unit_test.hpp>
+
+namespace eosio::test::detail {
+using namespace eosio::chain::literals;
+struct testit {
+   uint64_t id;
+
+   testit( uint64_t id = 0 ) :id(id){}
+
+   static account_name get_account() {
+      return chain::config::system_account_name;
+   }
+
+   static action_name get_name() {
+      return "testit"_n;
+   }
+};
+}
+FC_REFLECT( eosio::test::detail::testit, (id) )
+
+namespace {
+
+using namespace eosio;
+using namespace eosio::chain;
+using namespace eosio::test::detail;
+
+auto make_delayed_trx( const chain_id_type& chain_id ) {
+   account_name creator = config::system_account_name;
+
+   signed_transaction trx;
+   trx.actions.emplace_back( vector<permission_level>{{creator, config::active_name}}, testit{0} );
+   trx.delay_sec = 10;
+   auto priv_key = private_key_type::regenerate<fc::ecc::private_key_shim>(fc::sha256::hash(std::string("nathan")));
+   trx.sign( priv_key, chain_id );
+
+   return std::make_shared<packed_transaction>( std::move(trx) );
+}
+}
+
+BOOST_AUTO_TEST_SUITE(disallow_delayed_trx_test)
+
+// Verifies that incoming delayed transactions are blocked.
+BOOST_AUTO_TEST_CASE(delayed_trx) {
+   using namespace std::chrono_literals;
+   fc::temp_directory temp;
+   appbase::scoped_app app;
+   auto temp_dir_str = temp.path().string();
+   
+   std::promise<std::tuple<producer_plugin*, chain_plugin*>> plugin_promise;
+   std::future<std::tuple<producer_plugin*, chain_plugin*>> plugin_fut = plugin_promise.get_future();
+   std::thread app_thread( [&]() {
+      try {
+         fc::logger::get(DEFAULT_LOGGER).set_log_level(fc::log_level::debug);
+         std::vector<const char*> argv =
+            {"test", "--data-dir", temp_dir_str.c_str(), "--config-dir", temp_dir_str.c_str(),
+               "-p", "eosio", "-e", "--disable-subjective-p2p-billing=true" };
+         app->initialize<chain_plugin, producer_plugin>( argv.size(), (char**) &argv[0] );
+         app->startup();
+         plugin_promise.set_value(
+            {app->find_plugin<producer_plugin>(), app->find_plugin<chain_plugin>()} );
+         app->exec();
+         return;
+      } FC_LOG_AND_DROP()
+      BOOST_CHECK(!"app threw exception see logged error");
+   } );
+
+   auto[prod_plug, chain_plug] = plugin_fut.get();
+   auto chain_id = chain_plug->get_chain_id();
+
+   // create a delayed trx
+   auto ptrx = make_delayed_trx( chain_id );
+
+   // send it as incoming trx
+   app->post( priority::low, [ptrx, &app]() {
+      bool return_failure_traces = true;
+
+      // the delayed trx is blocked
+      BOOST_REQUIRE_EXCEPTION(
+         app->get_method<plugin_interface::incoming::methods::transaction_async>()(ptrx,
+            false,
+            transaction_metadata::trx_type::input,
+            return_failure_traces,
+            [ptrx, return_failure_traces] (const next_function_variant<transaction_trace_ptr>& result) {
+               elog( "trace with except ${e}", ("e", fc::json::to_pretty_string( *std::get<chain::transaction_trace_ptr>( result ) )) );
+            }
+         ),
+         fc::exception,
+         eosio::testing::fc_exception_message_starts_with("transaction cannot be delayed")
+      );
+   });
+
+   // leave time for the posted transaction to be processed (it is expected to be rejected)
+   std::this_thread::sleep_for( 2000ms );
+
+   app->quit();
+   app_thread.join();
+}
+
+BOOST_AUTO_TEST_SUITE_END()
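
The test hinges on one property: an incoming transaction whose delay_sec is non-zero is refused by the producer with the error "transaction cannot be delayed", which is exactly what the BOOST_REQUIRE_EXCEPTION above asserts. As a rough, hypothetical sketch of that condition (the helper name reject_delayed and its placement are illustrative; the real check lives in the plugin/controller code paths, not in this test):

   #include <eosio/chain/exceptions.hpp>
   #include <eosio/chain/transaction.hpp>

   // Hypothetical illustration of the condition the test above relies on: an incoming
   // transaction carrying a non-zero delay_sec is rejected before it can be scheduled.
   void reject_delayed(const eosio::chain::packed_transaction& ptrx) {
      const auto& trx = ptrx.get_transaction();
      EOS_ASSERT(trx.delay_sec.value == 0u, eosio::chain::transaction_exception,
                 "transaction cannot be delayed");
   }

make_delayed_trx above trips this condition by setting trx.delay_sec = 10 before signing.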

From a49aaf686418fa607347f0a91b6e047be0b21ec9 Mon Sep 17 00:00:00 2001
From: Kevin Heifner <heifnerk@objectcomputing.com>
Date: Wed, 11 Oct 2023 09:39:15 -0500
Subject: [PATCH 60/61] GH-1501 Need a larger timeout; 5 seconds was too small,
 allowing set contract to work but not yet seeing the transaction in a block.

---
 tests/TestHarness/transactions.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/TestHarness/transactions.py b/tests/TestHarness/transactions.py
index 5950a8b42e..e755a10e98 100644
--- a/tests/TestHarness/transactions.py
+++ b/tests/TestHarness/transactions.py
@@ -203,7 +203,7 @@ def publishContract(self, account, contractDir, wasmFile, abiFile, waitForTransB
             if not waitForTransBlock:
                 return trans
             transId=NodeosQueries.getTransId(trans)
-            if self.waitForTransactionInBlock(transId, timeout=5, exitOnError=False):
+            if self.waitForTransactionInBlock(transId, timeout=30, exitOnError=False):
                 break
 
         return trans

From ef21be9e72c7e17ebfc35855ddc8c1bf90b833a5 Mon Sep 17 00:00:00 2001
From: Lin Huang <lin.huang@eosnetwork.com>
Date: Wed, 11 Oct 2023 11:29:29 -0400
Subject: [PATCH 61/61] bump Leap version to 5.0.0 rc2

---
 CMakeLists.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 3ac5693df9..ff2708fadb 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -16,7 +16,7 @@ set( CXX_STANDARD_REQUIRED ON)
 set(VERSION_MAJOR 5)
 set(VERSION_MINOR 0)
 set(VERSION_PATCH 0)
-set(VERSION_SUFFIX rc1)
+set(VERSION_SUFFIX rc2)
 
 if(VERSION_SUFFIX)
     set(VERSION_FULL "${VERSION_MAJOR}.${VERSION_MINOR}.${VERSION_PATCH}-${VERSION_SUFFIX}")