Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[Test] Speed up functional tests #2914

Merged
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
13 changes: 7 additions & 6 deletions test/functional/p2p_invalid_messages.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@
from test_framework.util import (
assert_equal,
hex_str_to_bytes,
wait_until,
)
from random import getrandbits

Expand Down Expand Up @@ -55,6 +56,10 @@ def on_getdata(self, message):
self.send_message(self.vec_mnp[inv.hash])
self.getdata_count+=1

def wait_for_p2p_messages(self, n_messages):
    """Block until this connection has served exactly `n_messages` getdata
    requests (tracked via self.getdata_count), or time out after 60s."""
    def _reached_target():
        return self.getdata_count == n_messages
    wait_until(_reached_target, timeout=60)


class InvalidMessagesTest(PivxTestFramework):
def set_test_params(self):
self.num_nodes = 1
Expand Down Expand Up @@ -217,18 +222,14 @@ def test_fill_askfor(self):
assert_equal(len(invs), 50000)
msg = messages.msg_inv(invs)
conn.send_message(msg)

time.sleep(30) # wait a bit
assert_equal(conn.getdata_count, 50000)
conn.wait_for_p2p_messages(50000)

# Prior #2611 the node was blocking any follow-up request.
mnp = msg_mnping(CTxIn(COutPoint(getrandbits(256))), getrandbits(256), int(time.time()))
conn.vec_mnp[mnp.get_hash()] = mnp
msg = messages.msg_inv([messages.CInv(15, mnp.get_hash())])
conn.send_and_ping(msg)
time.sleep(3)

assert_equal(conn.getdata_count, 50001)
conn.wait_for_p2p_messages(50001)
self.nodes[0].disconnect_p2ps()

def test_resource_exhaustion(self):
Expand Down
6 changes: 0 additions & 6 deletions test/functional/test_framework/test_framework.py
Original file line number Diff line number Diff line change
Expand Up @@ -166,7 +166,6 @@ def main(self):
raise SkipTest("--usecli specified but test does not support using CLI")
self.setup_chain()
self.setup_network()
time.sleep(5)
self.run_test()
success = TestStatus.PASSED
except JSONRPCException:
Expand Down Expand Up @@ -298,8 +297,6 @@ def start_node(self, i, *args, **kwargs):
node.start(*args, **kwargs)
node.wait_for_rpc_connection()

time.sleep(10)

if self.options.coveragedir is not None:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)

Expand All @@ -319,8 +316,6 @@ def start_nodes(self, extra_args=None, *args, **kwargs):
self.stop_nodes()
raise

time.sleep(10)

if self.options.coveragedir is not None:
for node in self.nodes:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
Expand All @@ -338,7 +333,6 @@ def stop_nodes(self):

for node in self.nodes:
# Wait for nodes to stop
time.sleep(5)
node.wait_until_stopped()

def restart_node(self, i, extra_args=None):
Expand Down
2 changes: 0 additions & 2 deletions test/functional/test_framework/test_node.py
Original file line number Diff line number Diff line change
Expand Up @@ -131,7 +131,6 @@ def wait_for_rpc_connection(self):
"""Sets up an RPC connection to the pivxd process. Returns False if unable to connect."""
# Poll at a rate of four times per second
poll_per_s = 4
time.sleep(5)
for _ in range(poll_per_s * self.rpc_timeout):
assert self.process.poll() is None, "pivxd exited with status %i during initialization" % self.process.returncode
try:
Expand Down Expand Up @@ -203,7 +202,6 @@ def is_node_stopped(self):
Returns True if the node has stopped. False otherwise.
This method is responsible for freeing resources (self.process)."""
time.sleep(20)
if not self.running:
return True
return_code = self.process.poll()
Expand Down
18 changes: 15 additions & 3 deletions test/functional/test_framework/util.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@
import os
import random
import re
from concurrent.futures import ThreadPoolExecutor
from subprocess import CalledProcessError
import time

Expand Down Expand Up @@ -384,11 +385,22 @@ def connect_nodes(from_connection, node_num):
wait_until(lambda: all(peer['version'] != 0 for peer in from_connection.getpeerinfo()))

def connect_nodes_clique(nodes):
    """Connect every node to every other node, forming a full clique.

    Connections are opened in parallel (one worker per node) to speed up
    test setup. Each worker connects its node to all peers in both
    directions (a->b and b->a), so the clique forms regardless of which
    side accepts first.

    Raises: re-raises the first exception thrown inside any worker.
    """
    num_nodes = len(nodes)

    def _connect_from(a):
        # Connect node `a` bidirectionally to every node index.
        for b in range(num_nodes):
            connect_nodes(nodes[a], b)
            connect_nodes(nodes[b], a)

    # max_workers should be the maximum number of nodes that we have in the
    # same functional test; 15 seems to be a good upper bound.
    # The context manager guarantees the pool is shut down even when a
    # worker raises (the original leaked the pool on the error path).
    with ThreadPoolExecutor(max_workers=15) as parallel_exec:
        jobs = [parallel_exec.submit(_connect_from, a) for a in range(num_nodes)]
        # .result() propagates any exception raised inside the worker.
        for job in jobs:
            job.result()

# Transaction/Block functions
#############################
Expand Down