Merge pull request #2217 from AntelopeIO/GH-2189-trx-finality-fork-test
IF: Test: Simplify and fix trx_finality_status_forked_test.py
heifner authored Feb 7, 2024
2 parents 47cbaf2 + 63e8b62 commit 3ffeb9d
Showing 2 changed files with 49 additions and 68 deletions.
5 changes: 2 additions & 3 deletions tests/CMakeLists.txt
@@ -223,9 +223,8 @@ set_property(TEST trx_finality_status_if_test PROPERTY LABELS nonparallelizable_

add_test(NAME trx_finality_status_forked_test COMMAND tests/trx_finality_status_forked_test.py -v ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
set_property(TEST trx_finality_status_forked_test PROPERTY LABELS nonparallelizable_tests)
-# requires https://github.com/AntelopeIO/leap/issues/2189
-#add_test(NAME trx_finality_status_forked_if_test COMMAND tests/trx_finality_status_forked_test.py -v --activate-if ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
-#set_property(TEST trx_finality_status_forked_if_test PROPERTY LABELS nonparallelizable_tests)
+add_test(NAME trx_finality_status_forked_if_test COMMAND tests/trx_finality_status_forked_test.py -v --activate-if ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
+set_property(TEST trx_finality_status_forked_if_test PROPERTY LABELS nonparallelizable_tests)

add_test(NAME db_modes_test COMMAND tests/db_modes_test.sh -v WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
set_tests_properties(db_modes_test PROPERTIES COST 6000)
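With AntelopeIO/leap#2189 resolved, the previously commented-out instant-finality variant of the test is re-enabled, so the same script now registers twice with ctest: once under legacy consensus and once with --activate-if. For context, a minimal sketch of how that flag plausibly flows through the harness (TestHarness/Cluster are the leap Python test harness used throughout this diff; the Cluster constructor arguments and the args.activate_if attribute name are assumptions):

# Sketch only, assumed harness details: how "--activate-if" reaches launch().
from TestHarness import Cluster, TestHelper

args = TestHelper.parse_args({"--activate-if", "-v", "--unshared"})
cluster = Cluster(unshared=args.unshared)  # constructor arguments are an assumption
# activateIF mirrors the ctest variant: False for the base test, True for *_if_test
cluster.launch(pnodes=4, totalNodes=5, totalProducers=4, activateIF=args.activate_if)

Both registrations can then be selected together with a name filter such as `ctest -R trx_finality_status_forked`.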
112 changes: 47 additions & 65 deletions tests/trx_finality_status_forked_test.py
@@ -14,17 +14,20 @@
# trx_finality_status_forked_test
#
# Test to verify that transaction finality status feature is
-# working appropriately when forks occur
+# working appropriately when forks occur.
+# Note this test does not use transaction retry as forked out
+# transactions should always make it into a block unless they
+# expire.
#
###############################################################
Print=Utils.Print
errorExit=Utils.errorExit


args = TestHelper.parse_args({"--prod-count","--activate-if","--dump-error-details","--keep-logs","-v","--leave-running",
args = TestHelper.parse_args({"--activate-if","--dump-error-details","--keep-logs","-v","--leave-running",
"--wallet-port","--unshared"})
Utils.Debug=args.v
-totalProducerNodes=2
+totalProducerNodes=4
totalNonProducerNodes=1
totalNodes=totalProducerNodes+totalNonProducerNodes
maxActiveProducers=3
@@ -37,12 +37,6 @@
walletMgr=WalletMgr(True, port=walletPort)
testSuccessful=False

-WalletdName=Utils.EosWalletName
-ClientName="cleos"
-
-EOSIO_ACCT_PRIVATE_DEFAULT_KEY = "5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3"
-EOSIO_ACCT_PUBLIC_DEFAULT_KEY = "EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV"
-
try:
TestHelper.printSystemInfo("BEGIN")

@@ -62,10 +59,11 @@

# *** setup topography ***

# "bridge" shape connects defprocera through defproducerb (in node0) to each other and defproducerc is alone (in node01)
# and the only connection between those 2 groups is through the bridge node
if cluster.launch(prodCount=2, topo="bridge", pnodes=totalProducerNodes,
totalNodes=totalNodes, totalProducers=totalProducers, activateIF=activateIF,
# "bridge" shape connects defproducera (node0) defproducerb (node1) defproducerc (node2) to each other and defproducerd (node3)
# and the only connection between those 2 groups is through the bridge (node4)
if cluster.launch(topo="./tests/bridge_for_fork_test_shape.json", pnodes=totalProducerNodes,
totalNodes=totalNodes, totalProducers=totalProducerNodes, loadSystemContract=False,
activateIF=activateIF, biosFinalizer=False,
specificExtraNodeosArgs=specificExtraNodeosArgs,
extraNodeosArgs=extraNodeosArgs) is False:
Utils.cmdError("launcher")
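The launch now reads an explicit shape file rather than the built-in "bridge" topology. That file is not part of this diff; a hypothetical sketch of the structure the comment above implies, written as a Python dict purely for illustration (every field name here is an assumption, not quoted from tests/bridge_for_fork_test_shape.json):

# Hypothetical 5-node bridge topology implied by the comment above:
# node0-node2 (defproducera..c) are meshed, node3 (defproducerd) stands alone,
# and node4 is the non-producing "bridge" whose death partitions the two groups.
bridge_shape = {
    "node0": {"producers": ["defproducera"], "peers": ["node1", "node2", "node4"]},
    "node1": {"producers": ["defproducerb"], "peers": ["node0", "node2", "node4"]},
    "node2": {"producers": ["defproducerc"], "peers": ["node0", "node1", "node4"]},
    "node3": {"producers": ["defproducerd"], "peers": ["node4"]},
    "node4": {"producers": [], "peers": []},  # the bridge node itself
}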
@@ -75,40 +73,23 @@

# *** identify each node (producers and non-producing node) ***

-nonProdNode=None
-prodNodes=[]
-producers=[]
-for i, node in enumerate(cluster.getNodes()):
-    node.producers=Cluster.parseProducers(node.nodeId)
-    numProducers=len(node.producers)
-    Print(f"node {i} has producers={node.producers}")
-    if numProducers==0:
-        if nonProdNode is None:
-            nonProdNode=node
-        else:
-            Utils.errorExit("More than one non-producing nodes")
-    else:
-        prodNodes.append(node)
-        producers.extend(node.producers)
-
-prodAB=prodNodes[0] # defproducera, defproducerb
-prodC=prodNodes[1] # defproducerc
+prodNode0 = cluster.getNode(0)
+prodNode1 = cluster.getNode(1)
+prodNode2 = cluster.getNode(2)
+prodNode3 = cluster.getNode(3) # other side of bridge
+nonProdNode = cluster.getNode(4)
+
+prodNodes=[ prodNode0, prodNode1, prodNode2, prodNode3 ]
+
+prodA=prodNode0 # defproducera
+prodD=prodNode3 # defproducerd

# *** Identify a block where production is stable ***

#verify nodes are in sync and advancing
cluster.biosNode.kill(signal.SIGTERM)
cluster.waitOnClusterSync(blockAdvancing=5)

Print("Creating account1")
account1 = Account('account1')
account1.ownerPublicKey = EOSIO_ACCT_PUBLIC_DEFAULT_KEY
account1.activePublicKey = EOSIO_ACCT_PUBLIC_DEFAULT_KEY
cluster.createAccountAndVerify(account1, cluster.eosioAccount, stakedDeposit=1000)

Print("Validating accounts after bootstrap")
cluster.validateAccounts([account1])

# *** Killing the "bridge" node ***
Print('Sending command to kill "bridge" node to separate the 2 producer groups.')
# kill at the beginning of the production window for defproducera, so there is time for the fork for
@@ -125,12 +106,12 @@
while nonProdNode.verifyAlive() and count > 0:
# wait on prodNode 0 since it will continue to advance, since defproducera and defproducerb are its producers
Print("Wait for next block")
-assert prodAB.waitForNextBlock(timeout=6), "Production node AB should continue to advance, even after bridge node is killed"
+assert prodA.waitForNextBlock(timeout=6), "Production node A should continue to advance, even after bridge node is killed"
count -= 1

assert not nonProdNode.verifyAlive(), "Bridge node should have been killed if test was functioning correctly."

-assert prodC.waitForNextBlock(), "Prod node C should continue to advance, even after bridge node is killed"
+assert prodD.waitForNextBlock(), "Prod node D should continue to advance, even after bridge node is killed"

def getState(status):
assert status is not None, "ERROR: getTransactionStatus failed to return any status"
@@ -139,10 +120,11 @@ def getState(status):
return status["state"]

transferAmount = 10
-transfer = prodC.transferFunds(cluster.eosioAccount, account1, f"{transferAmount}.0000 {CORE_SYMBOL}", "fund account")
+# Does not use transaction retry (not needed)
+transfer = prodD.transferFunds(cluster.eosioAccount, cluster.defproduceraAccount, f"{transferAmount}.0000 {CORE_SYMBOL}", "fund account")
transBlockNum = transfer['processed']['block_num']
-transId = prodC.getLastTrackedTransactionId()
-retStatus = prodC.getTransactionStatus(transId)
+transId = prodD.getLastTrackedTransactionId()
+retStatus = prodD.getTransactionStatus(transId)
state = getState(retStatus)

localState = "LOCALLY_APPLIED"
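Only the first status constant is visible here; the rest sit in the collapsed region. Inferred from how they are used below, they would read roughly as follows (the string values are assumptions based on this test's assertions, not quoted from the file):

# Assumed finality states, inferred from their usage later in this test:
localState        = "LOCALLY_APPLIED"   # accepted locally, not yet in a block
inBlockState      = "IN_BLOCK"          # included in a still-reversible block
forkedOutState    = "FORKED_OUT"        # its block was abandoned in a fork switch
irreversibleState = "IRREVERSIBLE"      # its block is at or below LIB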
@@ -154,18 +136,18 @@ def getState(status):
assert state == localState or state == inBlockState, \
f"ERROR: getTransactionStatus didn't return \"{localState}\" or \"{inBlockState}\" state.\n\nstatus: {json.dumps(retStatus, indent=1)}"

assert prodC.waitForNextBlock(), "Production node C should continue to advance, even after bridge node is killed"
assert prodD.waitForNextBlock(), "Production node D should continue to advance, even after bridge node is killed"

# since the Bridge node is killed when this producer is producing its last block in its window, there is plenty of time for the transfer to be
# sent before the first block is created, but adding this to ensure it is in one of these blocks
numTries = 2
while numTries > 0:
-retStatus = prodC.getTransactionStatus(transId)
+retStatus = prodD.getTransactionStatus(transId)
state = getState(retStatus)
if state == inBlockState:
break
numTries -= 1
-assert prodC.waitForNextBlock(), "Production node C should continue to advance, even after bridge node is killed"
+assert prodD.waitForNextBlock(), "Production node D should continue to advance, even after bridge node is killed"

Print(f"getTransactionStatus returned status: {json.dumps(retStatus, indent=1)}")
assert state == inBlockState, \
@@ -177,57 +159,57 @@ def getState(status):
if not nonProdNode.relaunch():
errorExit(f"Failure - (non-production) node {nonProdNode.nodeNum} should have restarted")

-while prodC.getInfo()['last_irreversible_block_num'] < transBlockNum:
-Print("Wait for LIB to move, which indicates prodC may have forked out the branch")
-assert prodC.waitForLibToAdvance(60), \
+while prodD.getInfo()['last_irreversible_block_num'] < transBlockNum:
+Print("Wait for LIB to move, which indicates prodD may have forked out the branch")
+assert prodD.waitForLibToAdvance(60), \
"ERROR: Network did not reach consensus after bridge node was restarted."
-if prodC.getTransactionStatus(transId)['state'] == forkedOutState:
+if prodD.getTransactionStatus(transId)['state'] == forkedOutState:
break

-retStatus = prodC.getTransactionStatus(transId)
+retStatus = prodD.getTransactionStatus(transId)
state = getState(retStatus)

assert state == forkedOutState, \
f"ERROR: getTransactionStatus didn't return \"{forkedOutState}\" state.\n\nstatus: {json.dumps(retStatus, indent=1)}" + \
f"\n\nprod AB info: {json.dumps(prodAB.getInfo(), indent=1)}\n\nprod C info: {json.dumps(prodC.getInfo(), indent=1)}"
f"\n\nprod A info: {json.dumps(prodA.getInfo(), indent=1)}\n\nprod D info: {json.dumps(prodD.getInfo(), indent=1)}"

for prodNode in prodNodes:
info=prodNode.getInfo()
Print(f"node info: {json.dumps(info, indent=1)}")

-assert prodC.waitForProducer("defproducerc"), \
+assert prodD.waitForProducer("defproducerd"), \
f"Waiting for prodC to produce, but it never happened" + \
f"\n\nprod AB info: {json.dumps(prodAB.getInfo(), indent=1)}\n\nprod C info: {json.dumps(prodC.getInfo(), indent=1)}"
f"\n\nprod A info: {json.dumps(prodA.getInfo(), indent=1)}\n\nprod D info: {json.dumps(prodD.getInfo(), indent=1)}"

-retStatus = prodC.getTransactionStatus(transId)
+retStatus = prodD.getTransactionStatus(transId)
state = getState(retStatus)

assert state == inBlockState, \
f"ERROR: getTransactionStatus didn't return \"{inBlockState}\" state.\n\nstatus: {json.dumps(retStatus, indent=1)}" + \
f"\n\nprod AB info: {json.dumps(prodAB.getInfo(), indent=1)}\n\nprod C info: {json.dumps(prodC.getInfo(), indent=1)}"
f"\n\nprod A info: {json.dumps(prodA.getInfo(), indent=1)}\n\nprod D info: {json.dumps(prodD.getInfo(), indent=1)}"

afterForkInBlockState = retStatus
afterForkBlockId = retStatus["block_id"]
assert afterForkInBlockState["block_number"] > originalInBlockState["block_number"], \
"ERROR: The way the test is designed, the transaction should be added to a block that has a higher number than it was in originally before it was forked out." + \
f"\n\noriginal in block state: {json.dumps(originalInBlockState, indent=1)}\n\nafter fork in block state: {json.dumps(afterForkInBlockState, indent=1)}"

-assert prodC.waitForBlock(afterForkInBlockState["block_number"], timeout=120, blockType=BlockType.lib), \
-f"ERROR: Block never finalized.\n\nprod AB info: {json.dumps(prodAB.getInfo(), indent=1)}\n\nprod C info: {json.dumps(prodC.getInfo(), indent=1)}" + \
+assert prodD.waitForBlock(afterForkInBlockState["block_number"], timeout=120, blockType=BlockType.lib), \
+f"ERROR: Block never finalized.\n\nprod A info: {json.dumps(prodA.getInfo(), indent=1)}\n\nprod D info: {json.dumps(prodD.getInfo(), indent=1)}" + \
f"\n\nafter fork in block state: {json.dumps(afterForkInBlockState, indent=1)}"

-retStatus = prodC.getTransactionStatus(transId)
+retStatus = prodD.getTransactionStatus(transId)
if afterForkBlockId != retStatus["block_id"]: # might have been forked out, if so wait for new block to become LIB
-assert prodC.waitForBlock(retStatus["block_number"], timeout=120, blockType=BlockType.lib), \
-f"ERROR: Block never finalized.\n\nprod AB info: {json.dumps(prodAB.getInfo(), indent=1)}\n\nprod C info: {json.dumps(prodC.getInfo(), indent=1)}" + \
+assert prodD.waitForBlock(retStatus["block_number"], timeout=120, blockType=BlockType.lib), \
+f"ERROR: Block never finalized.\n\nprod A info: {json.dumps(prodA.getInfo(), indent=1)}\n\nprod D info: {json.dumps(prodD.getInfo(), indent=1)}" + \
f"\n\nafter fork in block state: {json.dumps(afterForkInBlockState, indent=1)}"

-retStatus = prodC.getTransactionStatus(transId)
+retStatus = prodD.getTransactionStatus(transId)
state = getState(retStatus)

assert state == irreversibleState, \
f"ERROR: getTransactionStatus didn't return \"{irreversibleState}\" state.\n\nstatus: {json.dumps(retStatus, indent=1)}" + \
f"\n\nprod AB info: {json.dumps(prodAB.getInfo(), indent=1)}\n\nprod C info: {json.dumps(prodC.getInfo(), indent=1)}"
f"\n\nprod A info: {json.dumps(prodA.getInfo(), indent=1)}\n\nprod D info: {json.dumps(prodD.getInfo(), indent=1)}"

testSuccessful=True
finally:
