From d913a2ee5fd15db091b65aad725dbc81ee1feece Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 11 Oct 2023 11:01:40 -0500 Subject: [PATCH 1/4] GH-1683 Add prometheus plugin for easier manual testing of prometheus. Add mapped_private database-map-mode since none of the tests currently use it. --- plugins/prometheus_plugin/metrics.hpp | 101 +++++++++++++++++--------- tests/nodeos_run_test.py | 3 +- tools/net-util.py | 88 +++++++++++----------- 3 files changed, 114 insertions(+), 78 deletions(-) diff --git a/plugins/prometheus_plugin/metrics.hpp b/plugins/prometheus_plugin/metrics.hpp index b9de6b6435..c2d412e1ce 100644 --- a/plugins/prometheus_plugin/metrics.hpp +++ b/plugins/prometheus_plugin/metrics.hpp @@ -33,18 +33,34 @@ struct catalog_type { // http plugin prometheus::Family& http_request_counts; - // net plugin p2p-connections - prometheus::Family& p2p_connections; - - Gauge& num_peers; - Gauge& num_clients; - // net plugin failed p2p connection Counter& failed_p2p_connections; // net plugin dropped_trxs Counter& dropped_trxs_total; + struct p2p_connection_metrics { + Gauge& num_peers; + Gauge& num_clients; + + prometheus::Family& addr; // Empty gauge; ipv6 address can't be transmitted as a double + prometheus::Family& port; + prometheus::Family& connection_number; + prometheus::Family& accepting_blocks; + prometheus::Family& last_received_block; + prometheus::Family& first_available_block; + prometheus::Family& last_available_block; + prometheus::Family& unique_first_block_count; + prometheus::Family& latency; + prometheus::Family& bytes_received; + prometheus::Family& last_bytes_received; + prometheus::Family& bytes_sent; + prometheus::Family& last_bytes_sent; + prometheus::Family& connection_start_time; + prometheus::Family& peer_addr; // Empty gauge; we only want the label + }; + p2p_connection_metrics p2p_metrics; + // producer plugin prometheus::Family& cpu_usage_us; prometheus::Family& net_usage_us; @@ -97,12 +113,27 @@ struct catalog_type { catalog_type() : info(family("nodeos", "static information about the server")) , http_request_counts(family("nodeos_http_requests_total", "number of HTTP requests")) - , p2p_connections(family("nodeos_p2p_connections", "current number of connected p2p connections")) - , num_peers(p2p_connections.Add({{"direction", "out"}})) - , num_clients(p2p_connections.Add({{"direction", "in"}})) - , failed_p2p_connections( - build("nodeos_failed_p2p_connections", "total number of failed out-going p2p connections")) - , dropped_trxs_total(build("nodeos_dropped_trxs_total", "total number of dropped transactions by net plugin")) + , failed_p2p_connections(build("nodeos_p2p_failed_connections", "total number of failed out-going p2p connections")) + , dropped_trxs_total(build("nodeos_p2p_dropped_trxs_total", "total number of dropped transactions by net plugin")) + , p2p_metrics{ + .num_peers{build("nodeos_p2p_peers", "current number of connected outgoing peers")} + , .num_clients{build("nodeos_p2p_clients", "current number of connected incoming clients")} + , .addr{family("nodeos_p2p_addr", "ipv6 address")} + , .port{family("nodeos_p2p_port", "port")} + , .connection_number{family("nodeos_p2p_connection_number", "monatomic increasing connection number")} + , .accepting_blocks{family("nodeos_p2p_accepting_blocks", "accepting blocks on connection")} + , .last_received_block{family("nodeos_p2p_last_received_block", "last received block on connection")} + , .first_available_block{family("nodeos_p2p_first_available_block", "first block available from 
connection")} + , .last_available_block{family("nodeos_p2p_last_available_block", "last block available from connection")} + , .unique_first_block_count{family("nodeos_p2p_unique_first_block_count", "number of blocks first received from any connection on this connection")} + , .latency{family("nodeos_p2p_latency", "last calculated latency with connection")} + , .bytes_received{family("nodeos_p2p_bytes_received", "total bytes received on connection")} + , .last_bytes_received{family("nodeos_p2p_last_bytes_received", "last time anything received from peer")} + , .bytes_sent{family("nodeos_p2p_bytes_sent", "total bytes sent to peer")} + , .last_bytes_sent{family("nodeos_p2p_last_bytes_sent", "last time anything sent to peer")} + , .connection_start_time{family("nodeos_p2p_connection_start_time", "time of last connection to peer")} + , .peer_addr{family("nodeos_p2p_peer_addr", "peer address")} + } , cpu_usage_us(family("nodeos_cpu_usage_us_total", "total cpu usage in microseconds for blocks")) , net_usage_us(family("nodeos_net_usage_us_total", "total net usage in microseconds for blocks")) , last_irreversible(build("nodeos_last_irreversible", "last irreversible block number")) @@ -164,31 +195,33 @@ struct catalog_type { } void update(const net_plugin::p2p_connections_metrics& metrics) { - num_peers.Set(metrics.num_peers); - num_clients.Set(metrics.num_clients); + p2p_metrics.num_peers.Set(metrics.num_peers); + p2p_metrics.num_clients.Set(metrics.num_clients); for(size_t i = 0; i < metrics.stats.peers.size(); ++i) { - std::string label{"connid_" + to_string(metrics.stats.peers[i].connection_id)}; - auto add_and_set_gauge = [&](const std::string& label_value, - const auto& value) { - auto& gauge = p2p_connections.Add({{label, label_value}}); + auto& peer = metrics.stats.peers[i]; + auto& conn_id = peer.unique_conn_node_id; + + auto addr = boost::asio::ip::make_address_v6(peer.address).to_string(); + p2p_metrics.addr.Add({{"connid", conn_id},{"ipv6", addr},{"address", peer.p2p_address}}); + + auto add_and_set_gauge = [&](auto& fam, const auto& value) { + auto& gauge = fam.Add({{"connid", conn_id}}); gauge.Set(value); }; - auto& peer = metrics.stats.peers[i]; - auto addr = std::string("addr_") + boost::asio::ip::make_address_v6(peer.address).to_string(); - add_and_set_gauge(addr, 0); // Empty gauge; ipv6 address can't be transmitted as a double - add_and_set_gauge("port", peer.port); - add_and_set_gauge("accepting_blocks", peer.accepting_blocks); - add_and_set_gauge("last_received_block", peer.last_received_block); - add_and_set_gauge("first_available_block", peer.first_available_block); - add_and_set_gauge("last_available_block", peer.last_available_block); - add_and_set_gauge("unique_first_block_count", peer.unique_first_block_count); - add_and_set_gauge("latency", peer.latency); - add_and_set_gauge("bytes_received", peer.bytes_received); - add_and_set_gauge("last_bytes_received", peer.last_bytes_received.count()); - add_and_set_gauge("bytes_sent", peer.bytes_sent); - add_and_set_gauge("last_bytes_sent", peer.last_bytes_sent.count()); - add_and_set_gauge("connection_start_time", peer.connection_start_time.count()); - add_and_set_gauge(peer.log_p2p_address, 0); // Empty gauge; we only want the label + + add_and_set_gauge(p2p_metrics.connection_number, peer.connection_id); + add_and_set_gauge(p2p_metrics.port, peer.port); + add_and_set_gauge(p2p_metrics.accepting_blocks, peer.accepting_blocks); + add_and_set_gauge(p2p_metrics.last_received_block, peer.last_received_block); + 
add_and_set_gauge(p2p_metrics.first_available_block, peer.first_available_block); + add_and_set_gauge(p2p_metrics.last_available_block, peer.last_available_block); + add_and_set_gauge(p2p_metrics.unique_first_block_count, peer.unique_first_block_count); + add_and_set_gauge(p2p_metrics.latency, peer.latency); + add_and_set_gauge(p2p_metrics.bytes_received, peer.bytes_received); + add_and_set_gauge(p2p_metrics.last_bytes_received, peer.last_bytes_received.count()); + add_and_set_gauge(p2p_metrics.bytes_sent, peer.bytes_sent); + add_and_set_gauge(p2p_metrics.last_bytes_sent, peer.last_bytes_sent.count()); + add_and_set_gauge(p2p_metrics.connection_start_time, peer.connection_start_time.count()); } } diff --git a/tests/nodeos_run_test.py b/tests/nodeos_run_test.py index 3b31996a9b..59b6fa68d9 100755 --- a/tests/nodeos_run_test.py +++ b/tests/nodeos_run_test.py @@ -61,8 +61,9 @@ abs_path = os.path.abspath(os.getcwd() + '/unittests/contracts/eosio.token/eosio.token.abi') traceNodeosArgs=" --http-max-response-time-ms 990000 --trace-rpc-abi eosio.token=" + abs_path + extraNodeosArgs=traceNodeosArgs + " --plugin eosio::prometheus_plugin --database-map-mode mapped_private " specificNodeosInstances={0: "bin/nodeos"} - if cluster.launch(totalNodes=2, prodCount=prodCount, onlyBios=onlyBios, dontBootstrap=dontBootstrap, extraNodeosArgs=traceNodeosArgs, specificNodeosInstances=specificNodeosInstances) is False: + if cluster.launch(totalNodes=2, prodCount=prodCount, onlyBios=onlyBios, dontBootstrap=dontBootstrap, extraNodeosArgs=extraNodeosArgs, specificNodeosInstances=specificNodeosInstances) is False: cmdError("launcher") errorExit("Failed to stand up eos cluster.") else: diff --git a/tools/net-util.py b/tools/net-util.py index 7dff39a70f..be63176de5 100755 --- a/tools/net-util.py +++ b/tools/net-util.py @@ -96,16 +96,16 @@ def __init__(self): ('nodeos_info', 'earliest_available_block_num'): 'Earliest Available Block:', 'nodeos_head_block_num': 'Head Block Num:', 'nodeos_last_irreversible': 'LIB:', - ('nodeos_p2p_connections','in'): 'Inbound P2P Connections:', - ('nodeos_p2p_connections','out'): 'Outbound P2P Connections:', + 'nodeos_p2p_clients': 'Inbound P2P Connections:', + 'nodeos_p2p_peers': 'Outbound P2P Connections:', 'nodeos_blocks_incoming_total': 'Total Incoming Blocks:', 'nodeos_trxs_incoming_total': 'Total Incoming Trxs:', 'nodeos_blocks_produced_total': 'Blocks Produced:', 'nodeos_trxs_produced_total': 'Trxs Produced:', 'nodeos_scheduled_trxs_total': 'Scheduled Trxs:', 'nodeos_unapplied_transactions_total': 'Unapplied Trxs:', - 'nodeos_dropped_trxs_total': 'Dropped Trxs:', - 'nodeos_failed_p2p_connections_total': 'Failed P2P Connections:', + 'nodeos_p2p_dropped_trxs_total': 'Dropped Trxs:', + 'nodeos_p2p_failed_connections_total': 'Failed P2P Connections:', 'nodeos_http_requests_total': 'HTTP Requests:', } self.ignoredPrometheusMetrics = [ @@ -301,53 +301,55 @@ def __init__(self, bytesReceived=0, bytesSent=0, connectionStarted=0): for family in text_string_to_metric_families(response.text): bandwidths = {} for sample in family.samples: + listwalker = getattr(self, 'connectionIDLW') + if "connid" in sample.labels: + connID = sample.labels["connid"] + if connID not in listwalker: + startOffset = endOffset = len(listwalker) + listwalker.append(AttrMap(Text(connID), None, 'reversed')) + else: + startOffset = listwalker.index(connID) + endOffset = startOffset + 1 if sample.name in self.prometheusMetrics: fieldName = self.fields.get(self.prometheusMetrics[sample.name]) field = getattr(self, 
fieldName) field.set_text(str(int(sample.value))) + elif sample.name == 'nodeos_p2p_addr': + listwalker = getattr(self, 'ipAddressLW') + addr = ipaddress.ip_address(sample.labels["ipv6"]) + host = f'{str(addr.ipv4_mapped) if addr.ipv4_mapped else str(addr)}' + listwalker[startOffset:endOffset] = [AttrMap(Text(host), None, 'reversed')] + listwalker = getattr(self, 'hostnameLW') + addr = sample.labels["address"] + listwalker[startOffset:endOffset] = [AttrMap(Text(addr), None, 'reversed')] + elif sample.name == 'nodeos_p2p_bytes_sent': + bytesSent = int(sample.value) + stats = bandwidths.get(connID, bandwidthStats()) + stats.bytesSent = bytesSent + bandwidths[connID] = stats + elif sample.name == 'nodeos_p2p_bytes_received': + bytesReceived = int(sample.value) + stats = bandwidths.get(connID, bandwidthStats()) + stats.bytesReceived = bytesReceived + bandwidths[connID] = stats + elif sample.name == 'nodeos_p2p_connection_start_time': + connectionStarted = int(sample.value) + stats = bandwidths.get(connID, bandwidthStats()) + stats.connectionStarted = connectionStarted + bandwidths[connID] = stats + elif sample.name == 'nodeos_p2p_connection_number': + pass + elif sample.name.startswith('nodeos_p2p_'): + fieldName = sample.name[len('nodeos_p2p_'):] + attrname = fieldName[:1] + fieldName.replace('_', ' ').title().replace(' ', '')[1:] + 'LW' + if hasattr(self, attrname): + listwalker = getattr(self, attrname) + listwalker[startOffset:endOffset] = [AttrMap(Text(self.peerMetricConversions[fieldName](sample.value)), None, 'reversed')] elif sample.name == 'nodeos_p2p_connections': if 'direction' in sample.labels: fieldName = self.fields.get(self.prometheusMetrics[(sample.name, sample.labels['direction'])]) field = getattr(self, fieldName) field.set_text(str(int(sample.value))) - else: - connID = next(iter(sample.labels)) - fieldName = sample.labels[connID] - listwalker = getattr(self, 'connectionIDLW') - if connID not in listwalker: - startOffset = endOffset = len(listwalker) - listwalker.append(AttrMap(Text(connID), None, 'reversed')) - else: - startOffset = listwalker.index(connID) - endOffset = startOffset + 1 - if fieldName.startswith('addr_'): - listwalker = getattr(self, 'ipAddressLW') - addr = ipaddress.ip_address(fieldName[len('addr_'):]) - host = f'{str(addr.ipv4_mapped) if addr.ipv4_mapped else str(addr)}' - listwalker[startOffset:endOffset] = [AttrMap(Text(host), None, 'reversed')] - elif fieldName == 'bytes_received': - bytesReceived = int(sample.value) - stats = bandwidths.get(connID, bandwidthStats()) - stats.bytesReceived = bytesReceived - bandwidths[connID] = stats - elif fieldName == 'bytes_sent': - bytesSent = int(sample.value) - stats = bandwidths.get(connID, bandwidthStats()) - stats.bytesSent = bytesSent - bandwidths[connID] = stats - elif fieldName == 'connection_start_time': - connectionStarted = int(sample.value) - stats = bandwidths.get(connID, bandwidthStats()) - stats.connectionStarted = connectionStarted - bandwidths[connID] = stats - else: - attrname = fieldName[:1] + fieldName.replace('_', ' ').title().replace(' ', '')[1:] + 'LW' - if hasattr(self, attrname): - listwalker = getattr(self, attrname) - listwalker[startOffset:endOffset] = [AttrMap(Text(self.peerMetricConversions[fieldName](sample.value)), None, 'reversed')] - else: - listwalker = getattr(self, 'hostnameLW') - listwalker[startOffset:endOffset] = [AttrMap(Text(fieldName.replace('_', '.')), None, 'reversed')] elif sample.name == 'nodeos_info': for infoLabel, infoValue in sample.labels.items(): fieldName = 
self.fields.get(self.prometheusMetrics[(sample.name, infoLabel)]) @@ -360,7 +362,7 @@ def __init__(self, bytesReceived=0, bytesSent=0, connectionStarted=0): if sample.name not in self.ignoredPrometheusMetrics: logger.warning(f'Received unhandled Prometheus metric {sample.name}') else: - if sample.name == 'nodeos_p2p_connections': + if sample.name == 'nodeos_p2p_bytes_sent' or sample.name == 'nodeos_p2p_bytes_received': now = time.time_ns() connIDListwalker = getattr(self, 'connectionIDLW') for connID, stats in bandwidths.items(): From 50ca986c1e0b449f593e66063ab7858a3eca00a0 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 11 Oct 2023 11:02:11 -0500 Subject: [PATCH 2/4] GH-1683 Use stable id for connections --- .../include/eosio/net_plugin/net_plugin.hpp | 3 +- plugins/net_plugin/net_plugin.cpp | 53 ++++++++++++------- 2 files changed, 35 insertions(+), 21 deletions(-) diff --git a/plugins/net_plugin/include/eosio/net_plugin/net_plugin.hpp b/plugins/net_plugin/include/eosio/net_plugin/net_plugin.hpp index d0c482e5b1..26dbe7d8f4 100644 --- a/plugins/net_plugin/include/eosio/net_plugin/net_plugin.hpp +++ b/plugins/net_plugin/include/eosio/net_plugin/net_plugin.hpp @@ -55,7 +55,8 @@ namespace eosio { size_t bytes_sent{0}; std::chrono::nanoseconds last_bytes_sent{0}; std::chrono::nanoseconds connection_start_time{0}; - std::string log_p2p_address; + std::string p2p_address; + std::string unique_conn_node_id; }; explicit p2p_per_connection_metrics(size_t count) { peers.reserve(count); diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 807660bd6f..f28ca9c2f3 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -898,7 +898,9 @@ namespace eosio { block_id_type fork_head GUARDED_BY(conn_mtx); uint32_t fork_head_num GUARDED_BY(conn_mtx) {0}; fc::time_point last_close GUARDED_BY(conn_mtx); - string remote_endpoint_ip GUARDED_BY(conn_mtx); + std::string p2p_address GUARDED_BY(conn_mtx); + std::string unique_conn_node_id GUARDED_BY(conn_mtx); + std::string remote_endpoint_ip GUARDED_BY(conn_mtx); boost::asio::ip::address_v6::bytes_type remote_endpoint_ip_array GUARDED_BY(conn_mtx); std::chrono::nanoseconds connection_start_time{0}; @@ -1184,7 +1186,8 @@ namespace eosio { connection_id( ++my_impl->current_connection_id ), response_expected_timer( my_impl->thread_pool.get_executor() ), last_handshake_recv(), - last_handshake_sent() + last_handshake_sent(), + p2p_address( endpoint ) { my_impl->mark_bp_connection(this); update_endpoints(); @@ -3181,6 +3184,10 @@ namespace eosio { } log_p2p_address = msg.p2p_address; + fc::unique_lock g_conn( conn_mtx ); + p2p_address = msg.p2p_address; + unique_conn_node_id = msg.node_id.str().substr( 0, 7 ); + g_conn.unlock(); my_impl->mark_bp_connection(this); if (my_impl->exceeding_connection_limit(this)) { @@ -4478,25 +4485,31 @@ namespace eosio { if (update_p2p_connection_metrics) { fc::unique_lock g_conn((*it)->conn_mtx); boost::asio::ip::address_v6::bytes_type addr = (*it)->remote_endpoint_ip_array; + std::string p2p_addr = (*it)->p2p_address; + std::string conn_node_id = (*it)->unique_conn_node_id; g_conn.unlock(); - net_plugin::p2p_per_connection_metrics::connection_metric metrics{ - .connection_id = (*it)->connection_id - , .address = addr - , .port = (*it)->get_remote_endpoint_port() - , .accepting_blocks = (*it)->is_blocks_connection() - , .last_received_block = (*it)->get_last_received_block_num() - , .first_available_block = (*it)->get_peer_start_block_num() - , 
.last_available_block = (*it)->get_peer_head_block_num() - , .unique_first_block_count = (*it)->get_unique_blocks_rcvd_count() - , .latency = (*it)->get_peer_ping_time_ns() - , .bytes_received = (*it)->get_bytes_received() - , .last_bytes_received = (*it)->get_last_bytes_received() - , .bytes_sent = (*it)->get_bytes_sent() - , .last_bytes_sent = (*it)->get_last_bytes_sent() - , .connection_start_time = (*it)->connection_start_time - , .log_p2p_address = (*it)->log_p2p_address - }; - per_connection.peers.push_back(metrics); + if (!conn_node_id.empty()) { + net_plugin::p2p_per_connection_metrics::connection_metric metrics{ + .connection_id = (*it)->connection_id + , .address = addr + , .port = (*it)->get_remote_endpoint_port() + , .accepting_blocks = (*it)->is_blocks_connection() + , .last_received_block = (*it)->get_last_received_block_num() + , .first_available_block = (*it)->get_peer_start_block_num() + , .last_available_block = (*it)->get_peer_head_block_num() + , .unique_first_block_count = (*it)->get_unique_blocks_rcvd_count() + , .latency = (*it)->get_peer_ping_time_ns() + , .bytes_received = (*it)->get_bytes_received() + , .last_bytes_received = (*it)->get_last_bytes_received() + , .bytes_sent = (*it)->get_bytes_sent() + , .last_bytes_sent = (*it)->get_last_bytes_sent() + , .connection_start_time = (*it)->connection_start_time + , .p2p_address = p2p_addr + , .unique_conn_node_id = conn_node_id + }; + per_connection.peers.push_back(metrics); + } + } if (!(*it)->socket_is_open() && (*it)->state() != connection::connection_state::connecting) { From e66ff7427cb06392a7543ddd4f876599c2bef9fe Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 11 Oct 2023 14:40:59 -0500 Subject: [PATCH 3/4] GH-1683 Update throttle test for new prometheus format --- tests/p2p_sync_throttle_test.py | 27 +++++++++++++++++---------- 1 file changed, 17 insertions(+), 10 deletions(-) diff --git a/tests/p2p_sync_throttle_test.py b/tests/p2p_sync_throttle_test.py index 4b15b8f49c..9205b81086 100755 --- a/tests/p2p_sync_throttle_test.py +++ b/tests/p2p_sync_throttle_test.py @@ -42,11 +42,11 @@ walletMgr=WalletMgr(True) def extractPrometheusMetric(connID: str, metric: str, text: str): - searchStr = f'nodeos_p2p_connections{{connid_{connID}="{metric}"}} ' + searchStr = f'nodeos_p2p_{metric}{{connid="{connID}"}} ' begin = text.find(searchStr) + len(searchStr) return int(text[begin:text.find('\n', begin)]) -prometheusHostPortPattern = re.compile(r'^nodeos_p2p_connections.connid_([0-9])="localhost:([0-9]*)', re.MULTILINE) +prometheusHostPortPattern = re.compile(r'^nodeos_p2p_port.connid="([a-f0-9]*)". 
([0-9]*)', re.MULTILINE) try: TestHelper.printSystemInfo("BEGIN") @@ -120,6 +120,8 @@ def extractPrometheusMetric(connID: str, metric: str, text: str): errorLimit = 40 # Approximately 20 retries required throttledNode = cluster.getNode(3) + throttledNodeConnId = None + throttlingNodeConnId = None while errorLimit > 0: try: response = throttlingNode.processUrllibRequest('prometheus', 'metrics', returnType=ReturnType.raw, printReturnLimit=16).decode() @@ -134,17 +136,19 @@ def extractPrometheusMetric(connID: str, metric: str, text: str): errorLimit -= 1 continue connPorts = prometheusHostPortPattern.findall(response) + Print(connPorts) if len(connPorts) < 3: # wait for node to be connected errorLimit -= 1 time.sleep(0.5) continue Print('Throttling Node Start State') - throttlingNodePortMap = {port: id for id, port in connPorts} - startSyncThrottlingBytesSent = extractPrometheusMetric(throttlingNodePortMap['9879'], + throttlingNodePortMap = {port: id for id, port in connPorts if id != '' and port != '9877'} + throttlingNodeConnId = next(iter(throttlingNodePortMap.values())) # 9879 + startSyncThrottlingBytesSent = extractPrometheusMetric(throttlingNodeConnId, 'block_sync_bytes_sent', response) - startSyncThrottlingState = extractPrometheusMetric(throttlingNodePortMap['9879'], + startSyncThrottlingState = extractPrometheusMetric(throttlingNodeConnId, 'block_sync_throttling', response) Print(f'Start sync throttling bytes sent: {startSyncThrottlingBytesSent}') @@ -170,13 +174,16 @@ def extractPrometheusMetric(connID: str, metric: str, text: str): time.sleep(0.5) continue connPorts = prometheusHostPortPattern.findall(response) + Print(connPorts) if len(connPorts) < 2: # wait for sending node to be connected errorLimit -= 1 continue Print('Throttled Node Start State') - throttledNodePortMap = {port: id for id, port in connPorts} - startSyncThrottledBytesReceived = extractPrometheusMetric(throttledNodePortMap['9878'], + throttledNodePortMap = {port: id for id, port in connPorts if id != ''} + throttledNodeConnId = next(iter(throttledNodePortMap.values())) # 9878 + Print(throttledNodeConnId) + startSyncThrottledBytesReceived = extractPrometheusMetric(throttledNodeConnId, 'block_sync_bytes_received', response) Print(f'Start sync throttled bytes received: {startSyncThrottledBytesReceived}') @@ -190,7 +197,7 @@ def extractPrometheusMetric(connID: str, metric: str, text: str): endThrottlingSync = time.time() response = throttlingNode.processUrllibRequest('prometheus', 'metrics', exitOnError=True, returnType=ReturnType.raw, printReturnLimit=16).decode() Print('Throttling Node End State') - endSyncThrottlingBytesSent = extractPrometheusMetric(throttlingNodePortMap['9879'], + endSyncThrottlingBytesSent = extractPrometheusMetric(throttlingNodeConnId, 'block_sync_bytes_sent', response) Print(f'End sync throttling bytes sent: {endSyncThrottlingBytesSent}') @@ -200,7 +207,7 @@ def extractPrometheusMetric(connID: str, metric: str, text: str): while time.time() < endThrottlingSync + 30: response = throttlingNode.processUrllibRequest('prometheus', 'metrics', exitOnError=True, returnType=ReturnType.raw, printReturnLimit=16).decode() - throttledState = extractPrometheusMetric(throttlingNodePortMap['9879'], + throttledState = extractPrometheusMetric(throttlingNodeConnId, 'block_sync_throttling', response) if throttledState: @@ -210,7 +217,7 @@ def extractPrometheusMetric(connID: str, metric: str, text: str): endThrottledSync = time.time() response = throttledNode.processUrllibRequest('prometheus', 'metrics', 
exitOnError=True, returnType=ReturnType.raw, printReturnLimit=16).decode() Print('Throttled Node End State') - endSyncThrottledBytesReceived = extractPrometheusMetric(throttledNodePortMap['9878'], + endSyncThrottledBytesReceived = extractPrometheusMetric(throttledNodeConnId, 'block_sync_bytes_received', response) Print(f'End sync throttled bytes received: {endSyncThrottledBytesReceived}') From 292df502ae4190ba85981ac2204f3cefac63860b Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 11 Oct 2023 14:42:33 -0500 Subject: [PATCH 4/4] GH-1683 Add const --- plugins/prometheus_plugin/metrics.hpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/plugins/prometheus_plugin/metrics.hpp b/plugins/prometheus_plugin/metrics.hpp index c0e8f77e3c..656d27a408 100644 --- a/plugins/prometheus_plugin/metrics.hpp +++ b/plugins/prometheus_plugin/metrics.hpp @@ -204,10 +204,10 @@ struct catalog_type { p2p_metrics.num_peers.Set(metrics.num_peers); p2p_metrics.num_clients.Set(metrics.num_clients); for(size_t i = 0; i < metrics.stats.peers.size(); ++i) { - auto& peer = metrics.stats.peers[i]; - auto& conn_id = peer.unique_conn_node_id; + const auto& peer = metrics.stats.peers[i]; + const auto& conn_id = peer.unique_conn_node_id; - auto addr = boost::asio::ip::make_address_v6(peer.address).to_string(); + const auto addr = boost::asio::ip::make_address_v6(peer.address).to_string(); p2p_metrics.addr.Add({{"connid", conn_id},{"ipv6", addr},{"address", peer.p2p_address}}); auto add_and_set_gauge = [&](auto& fam, const auto& value) {
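
Note for reviewers: the sketch below is not part of the patch series; it is a minimal, runnable illustration of how the reworked helpers in tests/p2p_sync_throttle_test.py are expected to parse the new per-connection metric format, where every p2p gauge carries a stable "connid" label (the first seven hex characters of the peer node id) instead of encoding values in label names. The connid value, port, and byte count in sampleResponse are invented for illustration only; extractPrometheusMetric and prometheusHostPortPattern mirror the definitions in the patched test above.

import re

# Example Prometheus exposition text in the new format.
# All connid/port/byte values below are made up for illustration.
sampleResponse = '''\
nodeos_p2p_peers 1
nodeos_p2p_clients 2
nodeos_p2p_port{connid="abc1234"} 9879
nodeos_p2p_block_sync_bytes_sent{connid="abc1234"} 131072
'''

# Mirrors the helper in tests/p2p_sync_throttle_test.py: look up one metric
# for one connection id and return its integer value.
def extractPrometheusMetric(connID: str, metric: str, text: str):
    searchStr = f'nodeos_p2p_{metric}{{connid="{connID}"}} '
    begin = text.find(searchStr) + len(searchStr)
    return int(text[begin:text.find('\n', begin)])

# Mirrors the pattern in the patched test: capture (connid, port) pairs from
# the nodeos_p2p_port samples.
prometheusHostPortPattern = re.compile(r'^nodeos_p2p_port.connid="([a-f0-9]*)". ([0-9]*)', re.MULTILINE)

print(prometheusHostPortPattern.findall(sampleResponse))                             # [('abc1234', '9879')]
print(extractPrometheusMetric('abc1234', 'block_sync_bytes_sent', sampleResponse))   # 131072

The (connid, port) pairs are what the throttle test uses to pick the peer on the throttled port, and the per-connid counters are what it compares before and after syncing to measure block-sync throughput.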