Skip to content

Commit

Permalink
Merge #2914: [Test] Speed up functional tests
Browse files Browse the repository at this point in the history
26ed4d4 Do not arbitrarily sleep when waiting for all messages (Alessandro Rezzi)
82a36fd remove many other useless time.sleep (Alessandro Rezzi)
310ebd1 Do not sleep in is_node_stopped (Alessandro Rezzi)
998233e Connect all nodes in parallel (Alessandro Rezzi)
ecc5120 test: Do not sleep inside wait_for_rpc_connection (Alessandro Rezzi)

Pull request description:

  The aim of this PR is to make the functional tests run faster.

  First commit:
  Sleeping inside `wait_for_rpc_connection` is useless since, in case of failure, it will try again rather than throw an error.

  Second commit:
  profiling showed that waiting for all nodes to connect to each other was very slow (`8` nodes took about ~`32` seconds on my PC). If we connect them in parallel, the total time is much shorter (roughly `4` seconds on my PC).

  Third commit:
  sleeping inside `is_node_stopped` is useless since it is always used in operations like `wait_until_stopped`, and in case a node is not stopped it will wait and try again rather than throwing error.

  Fourth commit:
  removed many other big `time.sleep()` calls that made everything run much slower overall. This is the only commit I'm not 100% sure about, so if we see some tests fail at the beginning (i.e. before `run_test()` is called), this one can be reverted.

  Fifth commit:
  Make faster and solve the failure of `p2p_invalid_messages.py`:
  When receiving lots of messages use a timeout system, instead of sleeping for an arbitrary amount of time (30 seconds, which sometimes was not enough)

ACKs for top commit: 26ed4d4
  Liquid369:
    tACK 26ed4d4
  Duddino:
    utACK 26ed4d4
  Fuzzbawls:
    ACK 26ed4d4

Tree-SHA512: e0da682fa0c5f53e7c4179e00f4afe23455353326383ff36ee0459ccc8b175f12dc9da330086faf3ce596487927bc7fba30aa5f6bef9b5c8ff130da3d8228eaa
  • Loading branch information
Fuzzbawls committed Mar 23, 2024
2 parents 4c2ed24 + 26ed4d4 commit efaa521
Show file tree
Hide file tree
Showing 4 changed files with 22 additions and 17 deletions.
13 changes: 7 additions & 6 deletions test/functional/p2p_invalid_messages.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@
from test_framework.util import (
assert_equal,
hex_str_to_bytes,
wait_until,
)
from random import getrandbits

Expand Down Expand Up @@ -55,6 +56,10 @@ def on_getdata(self, message):
self.send_message(self.vec_mnp[inv.hash])
self.getdata_count+=1

def wait_for_p2p_messages(self, n_messages, timeout=60):
    """Block until the node has requested exactly `n_messages` items from us.

    Polls `self.getdata_count` (incremented in on_getdata) instead of
    sleeping for a fixed period, so the test proceeds as soon as every
    expected getdata has arrived.

    Args:
        n_messages: expected final value of self.getdata_count.
        timeout: seconds before wait_until gives up and fails (default 60,
            matching the previous hard-coded value).
    """
    wait_until(lambda: self.getdata_count == n_messages, timeout=timeout)


class InvalidMessagesTest(PivxTestFramework):
def set_test_params(self):
self.num_nodes = 1
Expand Down Expand Up @@ -217,18 +222,14 @@ def test_fill_askfor(self):
assert_equal(len(invs), 50000)
msg = messages.msg_inv(invs)
conn.send_message(msg)

time.sleep(30) # wait a bit
assert_equal(conn.getdata_count, 50000)
conn.wait_for_p2p_messages(50000)

# Prior #2611 the node was blocking any follow-up request.
mnp = msg_mnping(CTxIn(COutPoint(getrandbits(256))), getrandbits(256), int(time.time()))
conn.vec_mnp[mnp.get_hash()] = mnp
msg = messages.msg_inv([messages.CInv(15, mnp.get_hash())])
conn.send_and_ping(msg)
time.sleep(3)

assert_equal(conn.getdata_count, 50001)
conn.wait_for_p2p_messages(50001)
self.nodes[0].disconnect_p2ps()

def test_resource_exhaustion(self):
Expand Down
6 changes: 0 additions & 6 deletions test/functional/test_framework/test_framework.py
Original file line number Diff line number Diff line change
Expand Up @@ -166,7 +166,6 @@ def main(self):
raise SkipTest("--usecli specified but test does not support using CLI")
self.setup_chain()
self.setup_network()
time.sleep(5)
self.run_test()
success = TestStatus.PASSED
except JSONRPCException:
Expand Down Expand Up @@ -298,8 +297,6 @@ def start_node(self, i, *args, **kwargs):
node.start(*args, **kwargs)
node.wait_for_rpc_connection()

time.sleep(10)

if self.options.coveragedir is not None:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)

Expand All @@ -319,8 +316,6 @@ def start_nodes(self, extra_args=None, *args, **kwargs):
self.stop_nodes()
raise

time.sleep(10)

if self.options.coveragedir is not None:
for node in self.nodes:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
Expand All @@ -338,7 +333,6 @@ def stop_nodes(self):

for node in self.nodes:
# Wait for nodes to stop
time.sleep(5)
node.wait_until_stopped()

def restart_node(self, i, extra_args=None):
Expand Down
2 changes: 0 additions & 2 deletions test/functional/test_framework/test_node.py
Original file line number Diff line number Diff line change
Expand Up @@ -131,7 +131,6 @@ def wait_for_rpc_connection(self):
"""Sets up an RPC connection to the pivxd process. Returns False if unable to connect."""
# Poll at a rate of four times per second
poll_per_s = 4
time.sleep(5)
for _ in range(poll_per_s * self.rpc_timeout):
assert self.process.poll() is None, "pivxd exited with status %i during initialization" % self.process.returncode
try:
Expand Down Expand Up @@ -203,7 +202,6 @@ def is_node_stopped(self):
Returns True if the node has stopped. False otherwise.
This method is responsible for freeing resources (self.process)."""
time.sleep(20)
if not self.running:
return True
return_code = self.process.poll()
Expand Down
18 changes: 15 additions & 3 deletions test/functional/test_framework/util.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@
import os
import random
import re
from concurrent.futures import ThreadPoolExecutor
from subprocess import CalledProcessError
import time

Expand Down Expand Up @@ -384,11 +385,22 @@ def connect_nodes(from_connection, node_num):
wait_until(lambda: all(peer['version'] != 0 for peer in from_connection.getpeerinfo()))

def connect_nodes_clique(nodes):
    """Connect every node to every other node (a full clique), in parallel.

    Sequentially connecting n nodes is O(n^2) round-trips and was measured to
    be very slow; instead one worker thread per node issues that node's
    connections concurrently.

    Args:
        nodes: list of TestNode objects to interconnect.
    """
    num_nodes = len(nodes)

    def connect_one(a):
        # Connect node a with every node b in both directions. A pair being
        # handled by both of its workers is a harmless redundant attempt.
        for b in range(num_nodes):
            connect_nodes(nodes[a], b)
            connect_nodes(nodes[b], a)

    # One worker per node; the max(1, ...) only keeps ThreadPoolExecutor from
    # rejecting max_workers=0 when the node list is empty. The with-statement
    # guarantees the pool is shut down even if a connection fails.
    with ThreadPoolExecutor(max_workers=max(1, num_nodes)) as parallel_exec:
        jobs = [parallel_exec.submit(connect_one, a) for a in range(num_nodes)]
        for job in jobs:
            job.result()  # re-raise any exception raised inside a worker

# Transaction/Block functions
#############################
Expand Down

0 comments on commit efaa521

Please sign in to comment.