diff --git a/agreement/gossip/networkFull_test.go b/agreement/gossip/networkFull_test.go
index 6507a6cb82..e64811d6a9 100644
--- a/agreement/gossip/networkFull_test.go
+++ b/agreement/gossip/networkFull_test.go
@@ -103,7 +103,7 @@ func spinNetwork(t *testing.T, nodesCount int, cfg config.Local) ([]*networkImpl
break
}
}
- log.Infof("network established, %d nodes connected in %s", nodesCount, time.Now().Sub(start).String())
+ log.Infof("network established, %d nodes connected in %s", nodesCount, time.Since(start).String())
return networkImpls, msgCounters
}
diff --git a/catchup/universalFetcher.go b/catchup/universalFetcher.go
index c7a8a9a4cf..fd99bcc612 100644
--- a/catchup/universalFetcher.go
+++ b/catchup/universalFetcher.go
@@ -88,7 +88,7 @@ func (uf *universalBlockFetcher) fetchBlock(ctx context.Context, round basics.Ro
} else {
return nil, nil, time.Duration(0), fmt.Errorf("fetchBlock: UniversalFetcher only supports HTTPPeer and UnicastPeer")
}
- downloadDuration = time.Now().Sub(blockDownloadStartTime)
+ downloadDuration = time.Since(blockDownloadStartTime)
block, cert, err := processBlockBytes(fetchedBuf, round, address)
if err != nil {
return nil, nil, time.Duration(0), err
diff --git a/cmd/goal/clerk.go b/cmd/goal/clerk.go
index a69ed5be98..1a2495007d 100644
--- a/cmd/goal/clerk.go
+++ b/cmd/goal/clerk.go
@@ -221,8 +221,7 @@ func waitForCommit(client libgoal.Client, txid string, transactionLastValidRound
}
reportInfof(infoTxPending, txid, stat.LastRound)
- // WaitForRound waits until round "stat.LastRound+1" is committed
- stat, err = client.WaitForRound(stat.LastRound)
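+	// libgoal's WaitForRound now waits for the exact round requested, so ask
+	// for the next round explicitly.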
+ stat, err = client.WaitForRound(stat.LastRound + 1)
if err != nil {
return model.PendingTransactionResponse{}, fmt.Errorf(errorRequestFail, err)
}
diff --git a/cmd/loadgenerator/main.go b/cmd/loadgenerator/main.go
index 6b82887695..df142de4ce 100644
--- a/cmd/loadgenerator/main.go
+++ b/cmd/loadgenerator/main.go
@@ -200,22 +200,23 @@ func waitForRound(restClient client.RestClient, cfg config, spendingRound bool)
time.Sleep(1 * time.Second)
continue
}
- if isSpendRound(cfg, nodeStatus.LastRound) == spendingRound {
+ lastRound := nodeStatus.LastRound
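+	// Track the round locally: WaitForRoundWithTimeout below returns only an
+	// error, not a fresh node status to read the round from.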
+ if isSpendRound(cfg, lastRound) == spendingRound {
// time to send transactions.
return
}
if spendingRound {
- fmt.Printf("Last round %d, waiting for spending round %d\n", nodeStatus.LastRound, nextSpendRound(cfg, nodeStatus.LastRound))
+ fmt.Printf("Last round %d, waiting for spending round %d\n", lastRound, nextSpendRound(cfg, nodeStatus.LastRound))
}
for {
// wait for the next round.
- nodeStatus, err = restClient.WaitForBlock(basics.Round(nodeStatus.LastRound))
+ err = restClient.WaitForRoundWithTimeout(lastRound + 1)
if err != nil {
fmt.Fprintf(os.Stderr, "unable to wait for next round node status : %v", err)
- time.Sleep(1 * time.Second)
break
}
- if isSpendRound(cfg, nodeStatus.LastRound) == spendingRound {
+ lastRound++
+ if isSpendRound(cfg, lastRound) == spendingRound {
// time to send transactions.
return
}
diff --git a/daemon/algod/api/client/restClient.go b/daemon/algod/api/client/restClient.go
index c349d3ecbf..e60ab1d36d 100644
--- a/daemon/algod/api/client/restClient.go
+++ b/daemon/algod/api/client/restClient.go
@@ -26,6 +26,7 @@ import (
"net/http"
"net/url"
"strings"
+ "time"
"github.com/google/go-querystring/query"
@@ -39,6 +40,8 @@ import (
"github.com/algorand/go-algorand/ledger/eval"
"github.com/algorand/go-algorand/ledger/ledgercore"
"github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/rpcs"
+ "github.com/algorand/go-algorand/test/e2e-go/globals"
)
const (
@@ -283,12 +286,77 @@ func (client RestClient) Status() (response model.NodeStatusResponse, err error)
return
}
-// WaitForBlock returns the node status after waiting for the given round.
-func (client RestClient) WaitForBlock(round basics.Round) (response model.NodeStatusResponse, err error) {
+// WaitForBlockAfter returns the node status after trying to wait for the
+// given round+1. This REST API has the documented misfeature of returning
+// after at most one minute, whether or not the given block has been reached.
+func (client RestClient) WaitForBlockAfter(round basics.Round) (response model.NodeStatusResponse, err error) {
err = client.get(&response, fmt.Sprintf("/v2/status/wait-for-block-after/%d/", round), nil)
return
}
+// WaitForRound polls until the node reaches the given round and returns its
+// status, or errors out once waitTime elapses.
+func (client RestClient) WaitForRound(round uint64, waitTime time.Duration) (status model.NodeStatusResponse, err error) {
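+	// Poll the node status every 200ms until LastRound reaches the target,
+	// failing once waitTime elapses.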
+ timeout := time.NewTimer(waitTime)
+ for {
+ status, err = client.Status()
+ if err != nil {
+ return
+ }
+
+ if status.LastRound >= round {
+ return
+ }
+ select {
+ case <-timeout.C:
+ return model.NodeStatusResponse{}, fmt.Errorf("timeout waiting for round %v with last round = %v", round, status.LastRound)
+ case <-time.After(200 * time.Millisecond):
+ }
+ }
+}
+
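+// singleRoundMaxTime caps how long WaitForRoundWithTimeout waits on any single
+// round; 40x the expected per-round time presumably leaves generous slack for
+// slow test machines.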
+const singleRoundMaxTime = globals.MaxTimePerRound * 40
+
+// WaitForRoundWithTimeout waits for a given round to be reached. As it
+// waits, it returns early with an error if the wait time for any single round
+// exceeds singleRoundMaxTime so we can alert when we're getting "hung" waiting.
+func (client RestClient) WaitForRoundWithTimeout(roundToWaitFor uint64) error {
+ status, err := client.Status()
+ if err != nil {
+ return err
+ }
+ lastRound := status.LastRound
+
+ // If node is already at or past target round, we're done
+ if lastRound >= roundToWaitFor {
+ return nil
+ }
+
+ roundComplete := make(chan error, 2)
+
+ for nextRound := lastRound + 1; lastRound < roundToWaitFor; nextRound++ {
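+		// Advance one round at a time so that each round gets its own
+		// singleRoundMaxTime budget.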
+ roundStarted := time.Now()
+
+ go func(done chan error) {
+ stat, err := client.WaitForRound(nextRound, singleRoundMaxTime)
+ lastRound = stat.LastRound
+ done <- err
+ }(roundComplete)
+
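+		// Race the waiter against a local watchdog timer so a hang is reported
+		// along with the rounds it occurred between.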
+ select {
+ case lastError := <-roundComplete:
+ if lastError != nil {
+ close(roundComplete)
+ return lastError
+ }
+ case <-time.After(singleRoundMaxTime):
+ // we've timed out.
+ elapsed := time.Since(roundStarted)
+ return fmt.Errorf("WaitForRoundWithTimeout took %3.2f seconds between round %d and %d", elapsed.Seconds(), lastRound, nextRound)
+ }
+ }
+ return nil
+}
+
// HealthCheck does a health check on the potentially running node,
// returning an error if the API is down
func (client RestClient) HealthCheck() error {
@@ -301,14 +369,6 @@ func (client RestClient) ReadyCheck() error {
return client.get(nil, "/ready", nil)
}
-// StatusAfterBlock waits for a block to occur then returns the StatusResponse after that block
-// blocks on the node end
-// Not supported
-func (client RestClient) StatusAfterBlock(blockNum uint64) (response model.NodeStatusResponse, err error) {
- err = client.get(&response, fmt.Sprintf("/v2/status/wait-for-block-after/%d", blockNum), nil)
- return
-}
-
type pendingTransactionsParams struct {
Max uint64 `url:"max"`
Format string `url:"format"`
@@ -557,6 +617,16 @@ func (client RestClient) RawBlock(round uint64) (response []byte, err error) {
return
}
+// EncodedBlockCert takes a round and returns its parsed block and certificate
+func (client RestClient) EncodedBlockCert(round uint64) (blockCert rpcs.EncodedBlockCert, err error) {
+ resp, err := client.RawBlock(round)
+ if err != nil {
+ return
+ }
+ err = protocol.Decode(resp, &blockCert)
+ return
+}
+
// Shutdown requests the node to shut itself down
func (client RestClient) Shutdown() (err error) {
response := 1
diff --git a/data/transactions/verify/txn.go b/data/transactions/verify/txn.go
index 893f29450e..518528a3bc 100644
--- a/data/transactions/verify/txn.go
+++ b/data/transactions/verify/txn.go
@@ -224,8 +224,8 @@ func txnGroupBatchPrep(stxs []transactions.SignedTxn, contextHdr *bookkeeping.Bl
if stxn.Txn.Type == protocol.StateProofTx {
continue
}
- if stxn.Txn.Type == protocol.HeartbeatTx && len(stxs) == 1 {
- // TODO: Only allow free HB if the HbAddress is challenged
+ if stxn.Txn.Type == protocol.HeartbeatTx && stxn.Txn.Group.IsZero() {
+ // in apply.Heartbeat, we further confirm that the heartbeat is for a challenged node
continue
}
minFeeCount++
diff --git a/data/transactions/verify/txn_test.go b/data/transactions/verify/txn_test.go
index 282a031097..5946399d0b 100644
--- a/data/transactions/verify/txn_test.go
+++ b/data/transactions/verify/txn_test.go
@@ -575,7 +575,7 @@ func TestPaysetGroups(t *testing.T) {
startPaysetGroupsTime := time.Now()
err := PaysetGroups(context.Background(), txnGroups, blkHdr, verificationPool, MakeVerifiedTransactionCache(50000), nil)
require.NoError(t, err)
- paysetGroupDuration := time.Now().Sub(startPaysetGroupsTime)
+ paysetGroupDuration := time.Since(startPaysetGroupsTime)
// break the signature and see if it fails.
txnGroups[0][0].Sig[0] = txnGroups[0][0].Sig[0] + 1
@@ -609,7 +609,7 @@ func TestPaysetGroups(t *testing.T) {
// channel is closed without a return
require.Failf(t, "Channel got closed ?!", "")
} else {
- actualDuration := time.Now().Sub(startPaysetGroupsTime)
+ actualDuration := time.Since(startPaysetGroupsTime)
if err == nil {
if actualDuration > 4*time.Second {
// it took at least 2.5 seconds more than it should have had!
diff --git a/data/transactions/verify/verifiedTxnCache_test.go b/data/transactions/verify/verifiedTxnCache_test.go
index d27510fe6a..03f5cac288 100644
--- a/data/transactions/verify/verifiedTxnCache_test.go
+++ b/data/transactions/verify/verifiedTxnCache_test.go
@@ -127,7 +127,7 @@ func BenchmarkGetUnverifiedTransactionGroups50(b *testing.B) {
for i := 0; i < measuringMultipler; i++ {
impl.GetUnverifiedTransactionGroups(queryTxnGroups, spec, protocol.ConsensusCurrentVersion)
}
- duration := time.Now().Sub(startTime)
+ duration := time.Since(startTime)
// calculate time per 10K verified entries:
t := int(duration*10000) / (measuringMultipler * b.N)
b.ReportMetric(float64(t)/float64(time.Millisecond), "ms/10K_cache_compares")
diff --git a/heartbeat/service.go b/heartbeat/service.go
index f13280b4e0..d43fd196b1 100644
--- a/heartbeat/service.go
+++ b/heartbeat/service.go
@@ -26,7 +26,7 @@ import (
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/data/transactions/logic"
- "github.com/algorand/go-algorand/ledger/eval"
+ "github.com/algorand/go-algorand/ledger/apply"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
)
@@ -80,7 +80,7 @@ func (s *Service) Stop() {
func (s *Service) findChallenged(rules config.ProposerPayoutRules) []account.ParticipationRecordForRound {
current := s.ledger.LastRound()
- ch := eval.FindChallenge(rules, current, s.ledger, eval.ChRisky)
+ ch := apply.FindChallenge(rules, current, s.ledger, apply.ChRisky)
if ch.IsZero() {
return nil
}
@@ -93,8 +93,7 @@ func (s *Service) findChallenged(rules config.ProposerPayoutRules) []account.Par
continue
}
if acct.Status == basics.Online {
- lastSeen := max(acct.LastProposed, acct.LastHeartbeat)
- if ch.Failed(pr.Account, lastSeen) {
+ if ch.Failed(pr.Account, acct.LastSeen()) {
s.log.Infof(" %v needs a heartbeat\n", pr.Account)
found = append(found, pr)
}
@@ -135,6 +134,7 @@ func (s *Service) loop() {
for _, pr := range s.findChallenged(proto.Payouts) {
stxn := s.prepareHeartbeat(pr, lastHdr)
+ s.log.Infof("sending heartbeat %v for %v\n", stxn.Txn.HeartbeatTxnFields, pr.Account)
err = s.bcast.BroadcastInternalSignedTxGroup([]transactions.SignedTxn{stxn})
if err != nil {
s.log.Errorf("error broadcasting heartbeat %v for %v: %v", stxn, pr.Account, err)
diff --git a/heartbeat/service_test.go b/heartbeat/service_test.go
index 566645a5c4..3422ffdea4 100644
--- a/heartbeat/service_test.go
+++ b/heartbeat/service_test.go
@@ -210,7 +210,7 @@ func makeBlock(r basics.Round) bookkeeping.Block {
}
}
-func TestHeartBeatOnlyWhenChallenged(t *testing.T) {
+func TestHeartbeatOnlyWhenChallenged(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
@@ -234,9 +234,9 @@ func TestHeartBeatOnlyWhenChallenged(t *testing.T) {
// now they are online, but not challenged, so no heartbeat
acct.Status = basics.Online
acct.VoteKeyDilution = 100
- otss := crypto.GenerateOneTimeSignatureSecrets(
- basics.OneTimeIDForRound(ledger.LastRound(), acct.VoteKeyDilution).Batch,
- 5)
+ startBatch := basics.OneTimeIDForRound(ledger.LastRound(), acct.VoteKeyDilution).Batch
+ const batches = 50 // gives 50 * kd rounds = 5000
+ otss := crypto.GenerateOneTimeSignatureSecrets(startBatch, batches)
acct.VoteID = otss.OneTimeSignatureVerifier
ledger.addParticipant(joe, otss)
ledger.addParticipant(mary, otss)
diff --git a/ledger/apply/challenge.go b/ledger/apply/challenge.go
new file mode 100644
index 0000000000..fa060879e6
--- /dev/null
+++ b/ledger/apply/challenge.go
@@ -0,0 +1,116 @@
+// Copyright (C) 2019-2024 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package apply
+
+import (
+ "math/bits"
+
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/data/committee"
+)
+
+// ChallengePeriod indicates which part of the challenge period is under discussion.
+type ChallengePeriod int
+
+const (
+ // ChRisky indicates that a challenge is in effect, and the initial grace period is running out.
+ ChRisky ChallengePeriod = iota
+ // ChActive indicates that a challenge is in effect, and the grace period
+ // has run out, so accounts can be suspended
+ ChActive
+)
+
+type challenge struct {
+ // round is when the challenge occurred. 0 means this is not a challenge.
+ round basics.Round
+ // accounts that match the first `bits` of `seed` must propose or heartbeat to stay online
+ seed committee.Seed
+ bits int
+}
+
+type headerSource interface {
+ BlockHdr(round basics.Round) (bookkeeping.BlockHeader, error)
+}
+
+// FindChallenge returns the Challenge that was last issued if it's in the period requested.
+func FindChallenge(rules config.ProposerPayoutRules, current basics.Round, headers headerSource, period ChallengePeriod) challenge {
+ // are challenges active?
+ interval := basics.Round(rules.ChallengeInterval)
+ if rules.ChallengeInterval == 0 || current < interval {
+ return challenge{}
+ }
+ lastChallenge := current - (current % interval)
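+	// e.g. with interval=1000, any current round in [1000, 1999] maps to the
+	// challenge issued at round 1000.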
+ grace := basics.Round(rules.ChallengeGracePeriod)
+ // FindChallenge is structured this way, instead of returning the challenge
+ // and letting the caller determine the period it cares about, to avoid
+ // using BlockHdr unnecessarily.
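+	// For example, with grace=200 the ChRisky window is
+	// (lastChallenge+100, lastChallenge+200] and the ChActive window is
+	// (lastChallenge+200, lastChallenge+400].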
+ switch period {
+ case ChRisky:
+ if current <= lastChallenge+grace/2 || current > lastChallenge+grace {
+ return challenge{}
+ }
+ case ChActive:
+ if current <= lastChallenge+grace || current > lastChallenge+2*grace {
+ return challenge{}
+ }
+ }
+ challengeHdr, err := headers.BlockHdr(lastChallenge)
+ if err != nil {
+ panic(err)
+ }
+ challengeProto := config.Consensus[challengeHdr.CurrentProtocol]
+ // challenge is not considered if rules have changed since that round
+ if challengeProto.Payouts != rules {
+ return challenge{}
+ }
+ return challenge{lastChallenge, challengeHdr.Seed, rules.ChallengeBits}
+}
+
+// IsZero returns true if the challenge is empty (used to indicate no challenge)
+func (ch challenge) IsZero() bool {
+ return ch == challenge{}
+}
+
+// Failed returns true iff ch is in effect, matches address, and lastSeen is
+// before the challenge issue.
+func (ch challenge) Failed(address basics.Address, lastSeen basics.Round) bool {
+ return ch.round != 0 && bitsMatch(ch.seed[:], address[:], ch.bits) && lastSeen < ch.round
+}
+
+// bitsMatch checks if the first n bits of two byte slices match. Written to
+// work on arbitrary slices, but we expect that n is small. The only caller
+// today uses n=5.
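+// For example, bitsMatch([]byte{0xb0}, []byte{0xbf}, 4) is true: both bytes
+// begin with bits 1011.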
+func bitsMatch(a, b []byte, n int) bool {
+ // Ensure n is a valid number of bits to compare
+ if n < 0 || n > len(a)*8 || n > len(b)*8 {
+ return false
+ }
+
+ // Compare entire bytes when n is bigger than 8
+ for i := 0; i < n/8; i++ {
+ if a[i] != b[i] {
+ return false
+ }
+ }
+ remaining := n % 8
+ if remaining == 0 {
+ return true
+ }
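+	// XOR sets exactly the differing bits, so the first "remaining" bits agree
+	// iff the XOR has at least that many leading zeros.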
+ return bits.LeadingZeros8(a[n/8]^b[n/8]) >= remaining
+}
diff --git a/ledger/apply/challenge_test.go b/ledger/apply/challenge_test.go
new file mode 100644
index 0000000000..3114b6f935
--- /dev/null
+++ b/ledger/apply/challenge_test.go
@@ -0,0 +1,121 @@
+// Copyright (C) 2019-2024 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package apply
+
+import (
+ "testing"
+
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestBitsMatch(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ for b := 0; b <= 6; b++ {
+ require.True(t, bitsMatch([]byte{0x1}, []byte{0x2}, b), "%d", b)
+ }
+ require.False(t, bitsMatch([]byte{0x1}, []byte{0x2}, 7))
+ require.False(t, bitsMatch([]byte{0x1}, []byte{0x2}, 8))
+ require.False(t, bitsMatch([]byte{0x1}, []byte{0x2}, 9))
+
+ for b := 0; b <= 12; b++ {
+ require.True(t, bitsMatch([]byte{0x1, 0xff, 0xaa}, []byte{0x1, 0xf0}, b), "%d", b)
+ }
+ require.False(t, bitsMatch([]byte{0x1, 0xff, 0xaa}, []byte{0x1, 0xf0}, 13))
+
+ // on a byte boundary
+ require.True(t, bitsMatch([]byte{0x1}, []byte{0x1}, 8))
+ require.False(t, bitsMatch([]byte{0x1}, []byte{0x1}, 9))
+ require.True(t, bitsMatch([]byte{0x1, 0xff}, []byte{0x1, 0x00}, 8))
+ require.False(t, bitsMatch([]byte{0x1, 0xff}, []byte{0x1, 00}, 9))
+}
+
+func TestFailsChallenge(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+ a := assert.New(t)
+
+ // a valid challenge, with 4 matching bits, and an old last seen
+ a.True(challenge{round: 11, seed: [32]byte{0xb0, 0xb4}, bits: 4}.Failed(basics.Address{0xbf, 0x34}, 10))
+
+ // challenge isn't "on"
+ a.False(challenge{round: 0, seed: [32]byte{0xb0, 0xb4}, bits: 4}.Failed(basics.Address{0xbf, 0x34}, 10))
+ // node has appeared more recently
+ a.False(challenge{round: 11, seed: [32]byte{0xb0, 0xb4}, bits: 4}.Failed(basics.Address{0xbf, 0x34}, 12))
+ // bits don't match
+ a.False(challenge{round: 11, seed: [32]byte{0xb0, 0xb4}, bits: 4}.Failed(basics.Address{0xcf, 0x34}, 10))
+ // not enough bits match
+ a.False(challenge{round: 11, seed: [32]byte{0xb0, 0xb4}, bits: 5}.Failed(basics.Address{0xbf, 0x34}, 10))
+}
+
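+// singleSource serves the same header for every round; FindChallenge only
+// reads the seed and protocol version from it.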
+type singleSource bookkeeping.BlockHeader
+
+func (ss singleSource) BlockHdr(r basics.Round) (bookkeeping.BlockHeader, error) {
+ return bookkeeping.BlockHeader(ss), nil
+}
+
+func TestActiveChallenge(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+ a := assert.New(t)
+
+ nowHeader := bookkeeping.BlockHeader{
+ UpgradeState: bookkeeping.UpgradeState{
+ // Here the rules are on, so they certainly differ from rules in oldHeader's params
+ CurrentProtocol: protocol.ConsensusFuture,
+ },
+ }
+ rules := config.Consensus[nowHeader.CurrentProtocol].Payouts
+
+ // simplest test. when interval=X and grace=G, X+G+1 is a challenge
+ inChallenge := basics.Round(rules.ChallengeInterval + rules.ChallengeGracePeriod + 1)
+ ch := FindChallenge(rules, inChallenge, singleSource(nowHeader), ChActive)
+ a.NotZero(ch.round)
+
+ // all rounds before that have no challenge
+ for r := basics.Round(1); r < inChallenge; r++ {
+ ch := FindChallenge(rules, r, singleSource(nowHeader), ChActive)
+ a.Zero(ch.round, r)
+ }
+
+ // ChallengeGracePeriod rounds allow challenges starting with inChallenge
+ for r := inChallenge; r < inChallenge+basics.Round(rules.ChallengeGracePeriod); r++ {
+ ch := FindChallenge(rules, r, singleSource(nowHeader), ChActive)
+ a.EqualValues(ch.round, rules.ChallengeInterval)
+ }
+
+ // And the next round is again challenge-less
+ ch = FindChallenge(rules, inChallenge+basics.Round(rules.ChallengeGracePeriod), singleSource(nowHeader), ChActive)
+ a.Zero(ch.round)
+
+ // ignore challenge if upgrade happened
+ oldHeader := bookkeeping.BlockHeader{
+ UpgradeState: bookkeeping.UpgradeState{
+ // We need a version from before payouts got turned on
+ CurrentProtocol: protocol.ConsensusV39,
+ },
+ }
+ ch = FindChallenge(rules, inChallenge, singleSource(oldHeader), ChActive)
+ a.Zero(ch.round)
+}
diff --git a/ledger/apply/heartbeat.go b/ledger/apply/heartbeat.go
index 4e858f9990..afb9af257a 100644
--- a/ledger/apply/heartbeat.go
+++ b/ledger/apply/heartbeat.go
@@ -31,6 +31,32 @@ func Heartbeat(hb transactions.HeartbeatTxnFields, header transactions.Header, b
return err
}
+ // In txnGroupBatchPrep, we do not charge for singleton (Group.IsZero)
+ // heartbeats. But we only _want_ to allow free heartbeats if the account is
+ // under challenge. If this is an underpaid singleton heartbeat, reject it
+ // unless the account is under challenge.
+
+ proto := balances.ConsensusParams()
+ if header.Fee.Raw < proto.MinTxnFee && header.Group.IsZero() {
+ kind := "free"
+ if header.Fee.Raw > 0 {
+ kind = "cheap"
+ }
+ if account.Status != basics.Online {
+ return fmt.Errorf("%s heartbeat is not allowed for %s %+v", kind, account.Status, hb.HbAddress)
+ }
+ if !account.IncentiveEligible {
+ return fmt.Errorf("%s heartbeat is not allowed for ineligible %+v", kind, hb.HbAddress)
+ }
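+		// ChRisky: a discounted heartbeat is accepted only while the grace
+		// period is running out, before ChActive suspensions can begin.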
+ ch := FindChallenge(proto.Payouts, round, provider, ChRisky)
+ if ch.round == 0 {
+ return fmt.Errorf("%s heartbeat for %s is not allowed with no challenge", kind, hb.HbAddress)
+ }
+ if !ch.Failed(hb.HbAddress, account.LastSeen()) {
+ return fmt.Errorf("%s heartbeat for %s is not challenged by %+v", kind, hb.HbAddress, ch)
+ }
+ }
+
// Note the contrast with agreement. We are using the account's _current_
// partkey to verify the heartbeat. This is required because we can only
// look 320 rounds back for voting information. If a heartbeat was delayed a
@@ -53,7 +79,7 @@ func Heartbeat(hb transactions.HeartbeatTxnFields, header transactions.Header, b
if sv.IsEmpty() {
return fmt.Errorf("heartbeat address %s has no voting keys", hb.HbAddress)
}
- kd := oad.KeyDilution(balances.ConsensusParams())
+ kd := oad.KeyDilution(proto)
// heartbeats are expected to sign with the partkey for their last-valid round
id := basics.OneTimeIDForRound(header.LastValid, kd)
diff --git a/ledger/apply/heartbeat_test.go b/ledger/apply/heartbeat_test.go
index 42f9cd4732..f8607d915c 100644
--- a/ledger/apply/heartbeat_test.go
+++ b/ledger/apply/heartbeat_test.go
@@ -17,15 +17,17 @@
package apply
import (
+ "fmt"
"testing"
+ "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/committee"
- "github.com/algorand/go-algorand/data/transactions"
+ "github.com/algorand/go-algorand/data/txntest"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/partitiontest"
)
@@ -64,23 +66,33 @@ func TestHeartbeat(t *testing.T) {
Seed: seed,
})
- tx := transactions.Transaction{
- Type: protocol.HeartbeatTx,
- Header: transactions.Header{
- Sender: sender,
- Fee: basics.MicroAlgos{Raw: 1},
- FirstValid: fv,
- LastValid: lv,
- },
- HeartbeatTxnFields: transactions.HeartbeatTxnFields{
- HbAddress: voter,
- HbProof: otss.Sign(id, seed),
- HbSeed: seed,
- },
+ test := txntest.Txn{
+ Type: protocol.HeartbeatTx,
+ Sender: sender,
+ Fee: basics.MicroAlgos{Raw: 1},
+ FirstValid: fv,
+ LastValid: lv,
+ HbAddress: voter,
+ HbProof: otss.Sign(id, seed),
+ HbSeed: seed,
}
+ tx := test.Txn()
+
rnd := basics.Round(150)
+ // fee of 1 microalgo is far below the min fee
err := Heartbeat(tx.HeartbeatTxnFields, tx.Header, mockBal, mockHdr, rnd)
+ require.ErrorContains(t, err, "cheap heartbeat")
+
+ test.Fee = basics.MicroAlgos{Raw: 10}
+ tx = test.Txn()
+ // just as bad: cheap
+ err = Heartbeat(tx.HeartbeatTxnFields, tx.Header, mockBal, mockHdr, rnd)
+ require.ErrorContains(t, err, "cheap heartbeat")
+
+ test.Fee = 1000
+ tx = test.Txn()
+ err = Heartbeat(tx.HeartbeatTxnFields, tx.Header, mockBal, mockHdr, rnd)
require.NoError(t, err)
after, err := mockBal.Get(voter, false)
@@ -88,3 +100,82 @@ func TestHeartbeat(t *testing.T) {
require.Equal(t, rnd, after.LastHeartbeat)
require.Zero(t, after.LastProposed) // unchanged
}
+
+// TestCheapRules ensures a heartbeat can only have a low fee if the account
+// being heartbeat for is online, under risk of suspension by challenge, and
+// incentive eligible.
+func TestCheapRules(t *testing.T) {
+ type tcase struct {
+ rnd basics.Round
+ addrStart byte
+ status basics.Status
+ incentiveEligible bool
+ err string
+ }
+
+ // Grace period is 200. For the second half of the grace period (1101-1200),
+ // the heartbeat is free for online, incentive eligible, challenged accounts.
+ cases := []tcase{
+ // test of range
+ {1100, 0x01, basics.Online, true, "no challenge"},
+ {1101, 0x01, basics.Online, true, ""},
+ {1200, 0x01, basics.Online, true, ""},
+ {1201, 0x01, basics.Online, true, "no challenge"},
+
+ // test of the other requirements
+ {1101, 0xf1, basics.Online, true, "not challenged by"},
+ {1101, 0x01, basics.Offline, true, "not allowed for Offline"},
+ {1101, 0x01, basics.Online, false, "not allowed for ineligible"},
+ }
+ for _, tc := range cases {
+ const keyDilution = 777
+
+ lv := basics.Round(tc.rnd + 10)
+
+ id := basics.OneTimeIDForRound(lv, keyDilution)
+ otss := crypto.GenerateOneTimeSignatureSecrets(1, 10) // This will cover rounds 1-10*777
+
+ sender := basics.Address{0x01}
+ voter := basics.Address{tc.addrStart}
+ mockBal := makeMockBalancesWithAccounts(protocol.ConsensusFuture, map[basics.Address]basics.AccountData{
+ sender: {
+ MicroAlgos: basics.MicroAlgos{Raw: 10_000_000},
+ },
+ voter: {
+ Status: tc.status,
+ MicroAlgos: basics.MicroAlgos{Raw: 100_000_000},
+ VoteID: otss.OneTimeSignatureVerifier,
+ VoteKeyDilution: keyDilution,
+ IncentiveEligible: tc.incentiveEligible,
+ },
+ })
+
+ seed := committee.Seed{0x01, 0x02, 0x03}
+ mockHdr := makeMockHeaders()
+ mockHdr.setFallback(bookkeeping.BlockHeader{
+ UpgradeState: bookkeeping.UpgradeState{
+ CurrentProtocol: protocol.ConsensusFuture,
+ },
+ Seed: seed,
+ })
+ txn := txntest.Txn{
+ Type: protocol.HeartbeatTx,
+ Sender: sender,
+ Fee: basics.MicroAlgos{Raw: 1},
+ FirstValid: tc.rnd - 10,
+ LastValid: tc.rnd + 10,
+ HbAddress: voter,
+ HbProof: otss.Sign(id, seed),
+ HbSeed: seed,
+ }
+
+ tx := txn.Txn()
+ fmt.Printf("tc %+v\n", tc)
+ err := Heartbeat(tx.HeartbeatTxnFields, tx.Header, mockBal, mockHdr, tc.rnd)
+ if tc.err == "" {
+ assert.NoError(t, err)
+ } else {
+ assert.ErrorContains(t, err, tc.err, "%+v", tc)
+ }
+ }
+}
diff --git a/ledger/apply/mockBalances_test.go b/ledger/apply/mockBalances_test.go
index a5f636fc08..a18500341e 100644
--- a/ledger/apply/mockBalances_test.go
+++ b/ledger/apply/mockBalances_test.go
@@ -275,7 +275,8 @@ func (b *mockCreatableBalances) HasAssetParams(addr basics.Address, aidx basics.
}
type mockHeaders struct {
- b map[basics.Round]bookkeeping.BlockHeader
+ perRound map[basics.Round]bookkeeping.BlockHeader
+ fallback *bookkeeping.BlockHeader
}
// makeMockHeaders takes a bunch of BlockHeaders and returns a HdrProvider for them.
@@ -284,12 +285,21 @@ func makeMockHeaders(hdrs ...bookkeeping.BlockHeader) mockHeaders {
for _, hdr := range hdrs {
b[hdr.Round] = hdr
}
- return mockHeaders{b: b}
+ return mockHeaders{perRound: b}
}
func (m mockHeaders) BlockHdr(r basics.Round) (bookkeeping.BlockHeader, error) {
- if hdr, ok := m.b[r]; ok {
+ if hdr, ok := m.perRound[r]; ok {
return hdr, nil
}
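+	// No header recorded for r: serve the fallback, stamped with the
+	// requested round.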
+ if m.fallback != nil {
+ hdr := *m.fallback
+ hdr.Round = r
+ return hdr, nil
+ }
return bookkeeping.BlockHeader{}, fmt.Errorf("round %v is not present", r)
}
+
+func (m *mockHeaders) setFallback(hdr bookkeeping.BlockHeader) {
+ m.fallback = &hdr
+}
diff --git a/ledger/eval/eval.go b/ledger/eval/eval.go
index f98ed4cb5d..2e5c8f00a3 100644
--- a/ledger/eval/eval.go
+++ b/ledger/eval/eval.go
@@ -21,7 +21,6 @@ import (
"errors"
"fmt"
"math"
- "math/bits"
"sync"
"github.com/algorand/go-algorand/agreement"
@@ -29,7 +28,6 @@ import (
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
- "github.com/algorand/go-algorand/data/committee"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/data/transactions/logic"
"github.com/algorand/go-algorand/data/transactions/verify"
@@ -1609,14 +1607,6 @@ func (eval *BlockEvaluator) proposerPayout() (basics.MicroAlgos, error) {
return basics.MinA(total, available), nil
}
-type challenge struct {
- // round is when the challenge occurred. 0 means this is not a challenge.
- round basics.Round
- // accounts that match the first `bits` of `seed` must propose or heartbeat to stay online
- seed committee.Seed
- bits int
-}
-
// generateKnockOfflineAccountsList creates the lists of expired or absent
// participation accounts by traversing over the modified accounts in the state
// deltas and testing if any of them needs to be reset/suspended. Expiration
@@ -1641,7 +1631,7 @@ func (eval *BlockEvaluator) generateKnockOfflineAccountsList(participating []bas
updates := &eval.block.ParticipationUpdates
- ch := FindChallenge(eval.proto.Payouts, current, eval.state, ChActive)
+ ch := apply.FindChallenge(eval.proto.Payouts, current, eval.state, apply.ChActive)
onlineStake, err := eval.state.onlineStake()
if err != nil {
logging.Base().Errorf("unable to fetch online stake, no knockoffs: %v", err)
@@ -1754,28 +1744,6 @@ func (eval *BlockEvaluator) generateKnockOfflineAccountsList(participating []bas
}
}
-// bitsMatch checks if the first n bits of two byte slices match. Written to
-// work on arbitrary slices, but we expect that n is small. Only user today
-// calls with n=5.
-func bitsMatch(a, b []byte, n int) bool {
- // Ensure n is a valid number of bits to compare
- if n < 0 || n > len(a)*8 || n > len(b)*8 {
- return false
- }
-
- // Compare entire bytes when n is bigger than 8
- for i := 0; i < n/8; i++ {
- if a[i] != b[i] {
- return false
- }
- }
- remaining := n % 8
- if remaining == 0 {
- return true
- }
- return bits.LeadingZeros8(a[n/8]^b[n/8]) >= remaining
-}
-
func isAbsent(totalOnlineStake basics.MicroAlgos, acctStake basics.MicroAlgos, lastSeen basics.Round, current basics.Round) bool {
// Don't consider accounts that were online when payouts went into effect as
// absent. They get noticed the next time they propose or keyreg, which
@@ -1793,66 +1761,6 @@ func isAbsent(totalOnlineStake basics.MicroAlgos, acctStake basics.MicroAlgos, l
return lastSeen+basics.Round(allowableLag) < current
}
-type headerSource interface {
- BlockHdr(round basics.Round) (bookkeeping.BlockHeader, error)
-}
-
-// ChallengePeriod indicates which part of the challenge period is under discussion.
-type ChallengePeriod int
-
-const (
- // ChRisky indicates that a challenge is in effect, and the initial grace period is running out.
- ChRisky ChallengePeriod = iota
- // ChActive indicates that a challenege is in effect, and the grace period
- // has run out, so accounts can be suspended
- ChActive
-)
-
-// FindChallenge returns the Challenge that was last issued if it's in the period requested.
-func FindChallenge(rules config.ProposerPayoutRules, current basics.Round, headers headerSource, period ChallengePeriod) challenge {
- // are challenges active?
- interval := basics.Round(rules.ChallengeInterval)
- if rules.ChallengeInterval == 0 || current < interval {
- return challenge{}
- }
- lastChallenge := current - (current % interval)
- grace := basics.Round(rules.ChallengeGracePeriod)
- // FindChallenge is structured this way, instead of returning the challenge
- // and letting the caller determine the period it cares about, to avoid
- // using BlockHdr unnecessarily.
- switch period {
- case ChRisky:
- if current <= lastChallenge+grace/2 || current > lastChallenge+grace {
- return challenge{}
- }
- case ChActive:
- if current <= lastChallenge+grace || current > lastChallenge+2*grace {
- return challenge{}
- }
- }
- challengeHdr, err := headers.BlockHdr(lastChallenge)
- if err != nil {
- panic(err)
- }
- challengeProto := config.Consensus[challengeHdr.CurrentProtocol]
- // challenge is not considered if rules have changed since that round
- if challengeProto.Payouts != rules {
- return challenge{}
- }
- return challenge{lastChallenge, challengeHdr.Seed, rules.ChallengeBits}
-}
-
-// IsZero returns true if the challenge is empty (used to indicate no challenege)
-func (ch challenge) IsZero() bool {
- return ch == challenge{}
-}
-
-// Failed returns true iff ch is in effect, matches address, and lastSeen is
-// before the challenge issue.
-func (ch challenge) Failed(address basics.Address, lastSeen basics.Round) bool {
- return ch.round != 0 && bitsMatch(ch.seed[:], address[:], ch.bits) && lastSeen < ch.round
-}
-
// validateExpiredOnlineAccounts tests the expired online accounts specified in ExpiredParticipationAccounts, and verify
// that they have all expired and need to be reset.
func (eval *BlockEvaluator) validateExpiredOnlineAccounts() error {
@@ -1920,7 +1828,7 @@ func (eval *BlockEvaluator) validateAbsentOnlineAccounts() error {
// For consistency with expired account handling, we preclude duplicates
addressSet := make(map[basics.Address]bool, suspensionCount)
- ch := FindChallenge(eval.proto.Payouts, eval.Round(), eval.state, ChActive)
+ ch := apply.FindChallenge(eval.proto.Payouts, eval.Round(), eval.state, apply.ChActive)
totalOnlineStake, err := eval.state.onlineStake()
if err != nil {
logging.Base().Errorf("unable to fetch online stake, can't check knockoffs: %v", err)
@@ -1948,15 +1856,14 @@ func (eval *BlockEvaluator) validateAbsentOnlineAccounts() error {
return fmt.Errorf("proposed absent account %v with zero algos", accountAddr)
}
- lastSeen := max(acctData.LastProposed, acctData.LastHeartbeat)
oad, lErr := eval.state.lookupAgreement(accountAddr)
if lErr != nil {
return fmt.Errorf("unable to check absent account: %v", accountAddr)
}
- if isAbsent(totalOnlineStake, oad.VotingStake(), lastSeen, eval.Round()) {
+ if isAbsent(totalOnlineStake, oad.VotingStake(), acctData.LastSeen(), eval.Round()) {
continue // ok. it's "normal absent"
}
- if ch.Failed(accountAddr, lastSeen) {
+ if ch.Failed(accountAddr, acctData.LastSeen()) {
continue // ok. it's "challenge absent"
}
return fmt.Errorf("proposed absent account %v is not absent in %d, %d",
diff --git a/ledger/eval/eval_test.go b/ledger/eval/eval_test.go
index 8c8e2cf233..d33996b2d8 100644
--- a/ledger/eval/eval_test.go
+++ b/ledger/eval/eval_test.go
@@ -1605,29 +1605,6 @@ func TestExpiredAccountGeneration(t *testing.T) {
require.NotZero(t, propAcct.StateProofID)
}
-func TestBitsMatch(t *testing.T) {
- partitiontest.PartitionTest(t)
- t.Parallel()
-
- for b := 0; b <= 6; b++ {
- require.True(t, bitsMatch([]byte{0x1}, []byte{0x2}, b), "%d", b)
- }
- require.False(t, bitsMatch([]byte{0x1}, []byte{0x2}, 7))
- require.False(t, bitsMatch([]byte{0x1}, []byte{0x2}, 8))
- require.False(t, bitsMatch([]byte{0x1}, []byte{0x2}, 9))
-
- for b := 0; b <= 12; b++ {
- require.True(t, bitsMatch([]byte{0x1, 0xff, 0xaa}, []byte{0x1, 0xf0}, b), "%d", b)
- }
- require.False(t, bitsMatch([]byte{0x1, 0xff, 0xaa}, []byte{0x1, 0xf0}, 13))
-
- // on a byte boundary
- require.True(t, bitsMatch([]byte{0x1}, []byte{0x1}, 8))
- require.False(t, bitsMatch([]byte{0x1}, []byte{0x1}, 9))
- require.True(t, bitsMatch([]byte{0x1, 0xff}, []byte{0x1, 0x00}, 8))
- require.False(t, bitsMatch([]byte{0x1, 0xff}, []byte{0x1, 00}, 9))
-}
-
func TestIsAbsent(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
@@ -1646,72 +1623,3 @@ func TestIsAbsent(t *testing.T) {
a.False(absent(1000, 10, 0, 6000))
a.False(absent(1000, 10, 0, 6001))
}
-
-func TestFailsChallenge(t *testing.T) {
- partitiontest.PartitionTest(t)
- t.Parallel()
- a := assert.New(t)
-
- // a valid challenge, with 4 matching bits, and an old last seen
- a.True(challenge{round: 11, seed: [32]byte{0xb0, 0xb4}, bits: 4}.Failed(basics.Address{0xbf, 0x34}, 10))
-
- // challenge isn't "on"
- a.False(challenge{round: 0, seed: [32]byte{0xb0, 0xb4}, bits: 4}.Failed(basics.Address{0xbf, 0x34}, 10))
- // node has appeared more recently
- a.False(challenge{round: 11, seed: [32]byte{0xb0, 0xb4}, bits: 4}.Failed(basics.Address{0xbf, 0x34}, 12))
- // bits don't match
- a.False(challenge{round: 11, seed: [32]byte{0xb0, 0xb4}, bits: 4}.Failed(basics.Address{0xcf, 0x34}, 10))
- // no enough bits match
- a.False(challenge{round: 11, seed: [32]byte{0xb0, 0xb4}, bits: 5}.Failed(basics.Address{0xbf, 0x34}, 10))
-}
-
-type singleSource bookkeeping.BlockHeader
-
-func (ss singleSource) BlockHdr(r basics.Round) (bookkeeping.BlockHeader, error) {
- return bookkeeping.BlockHeader(ss), nil
-}
-
-func TestActiveChallenge(t *testing.T) {
- partitiontest.PartitionTest(t)
- t.Parallel()
- a := assert.New(t)
-
- nowHeader := bookkeeping.BlockHeader{
- UpgradeState: bookkeeping.UpgradeState{
- // Here the rules are on, so they certainly differ from rules in oldHeader's params
- CurrentProtocol: protocol.ConsensusFuture,
- },
- }
- rules := config.Consensus[nowHeader.CurrentProtocol].Payouts
-
- // simplest test. when interval=X and grace=G, X+G+1 is a challenge
- inChallenge := basics.Round(rules.ChallengeInterval + rules.ChallengeGracePeriod + 1)
- ch := FindChallenge(rules, inChallenge, singleSource(nowHeader), ChActive)
- a.NotZero(ch.round)
-
- // all rounds before that have no challenge
- for r := basics.Round(1); r < inChallenge; r++ {
- ch := FindChallenge(rules, r, singleSource(nowHeader), ChActive)
- a.Zero(ch.round, r)
- }
-
- // ChallengeGracePeriod rounds allow challenges starting with inChallenge
- for r := inChallenge; r < inChallenge+basics.Round(rules.ChallengeGracePeriod); r++ {
- ch := FindChallenge(rules, r, singleSource(nowHeader), ChActive)
- a.EqualValues(ch.round, rules.ChallengeInterval)
- }
-
- // And the next round is again challenge-less
- ch = FindChallenge(rules, inChallenge+basics.Round(rules.ChallengeGracePeriod), singleSource(nowHeader), ChActive)
- a.Zero(ch.round)
-
- // ignore challenge if upgrade happened
- oldHeader := bookkeeping.BlockHeader{
- UpgradeState: bookkeeping.UpgradeState{
- // We need a version from before payouts got turned on
- CurrentProtocol: protocol.ConsensusV39,
- },
- }
- ch = FindChallenge(rules, inChallenge, singleSource(oldHeader), ChActive)
- a.Zero(ch.round)
-}
diff --git a/ledger/ledgercore/accountdata.go b/ledger/ledgercore/accountdata.go
index 5b17730122..ea7b150a6e 100644
--- a/ledger/ledgercore/accountdata.go
+++ b/ledger/ledgercore/accountdata.go
@@ -135,10 +135,15 @@ func (u *AccountData) Suspend() {
}
// Suspended returns true if the account is suspended (offline with keys)
-func (u *AccountData) Suspended() bool {
+func (u AccountData) Suspended() bool {
return u.Status == basics.Offline && !u.VoteID.IsEmpty()
}
+// LastSeen returns the last round that the account was seen online
+func (u AccountData) LastSeen() basics.Round {
+ return max(u.LastProposed, u.LastHeartbeat)
+}
+
// MinBalance computes the minimum balance requirements for an account based on
// some consensus parameters. MinBalance should correspond roughly to how much
// storage the account is allowed to store on disk.
diff --git a/libgoal/libgoal.go b/libgoal/libgoal.go
index f3f1c67192..e7739e085c 100644
--- a/libgoal/libgoal.go
+++ b/libgoal/libgoal.go
@@ -28,7 +28,6 @@ import (
v2 "github.com/algorand/go-algorand/daemon/algod/api/server/v2"
kmdclient "github.com/algorand/go-algorand/daemon/kmd/client"
"github.com/algorand/go-algorand/ledger/ledgercore"
- "github.com/algorand/go-algorand/rpcs"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
@@ -831,53 +830,43 @@ func (c *Client) Block(round uint64) (resp v2.BlockResponseJSON, err error) {
// RawBlock takes a round and returns its block
func (c *Client) RawBlock(round uint64) (resp []byte, err error) {
algod, err := c.ensureAlgodClient()
- if err == nil {
- resp, err = algod.RawBlock(round)
- }
- return
-}
-
-// EncodedBlockCert takes a round and returns its parsed block and certificate
-func (c *Client) EncodedBlockCert(round uint64) (blockCert rpcs.EncodedBlockCert, err error) {
- algod, err := c.ensureAlgodClient()
- if err == nil {
- var resp []byte
- resp, err = algod.RawBlock(round)
- if err == nil {
- err = protocol.Decode(resp, &blockCert)
- if err != nil {
- return
- }
- }
+ if err != nil {
+ return
}
- return
+ return algod.RawBlock(round)
}
// BookkeepingBlock takes a round and returns its block
func (c *Client) BookkeepingBlock(round uint64) (block bookkeeping.Block, err error) {
- blockCert, err := c.EncodedBlockCert(round)
- if err == nil {
- return blockCert.Block, nil
+ algod, err := c.ensureAlgodClient()
+ if err != nil {
+ return
}
- return
+ blockCert, err := algod.EncodedBlockCert(round)
+ if err != nil {
+ return
+ }
+ return blockCert.Block, nil
}
// HealthCheck returns an error if something is wrong
func (c *Client) HealthCheck() error {
algod, err := c.ensureAlgodClient()
- if err == nil {
- err = algod.HealthCheck()
+ if err != nil {
+ return err
}
- return err
+ return algod.HealthCheck()
}
-// WaitForRound takes a round, waits until it appears and returns its status. This function blocks.
+// WaitForRound takes a round, waits up to one minute for it to appear, and
+// returns the node status. This function blocks, and fails if the round does
+// not appear within one minute.
func (c *Client) WaitForRound(round uint64) (resp model.NodeStatusResponse, err error) {
algod, err := c.ensureAlgodClient()
- if err == nil {
- resp, err = algod.StatusAfterBlock(round)
+ if err != nil {
+ return
}
- return
+ return algod.WaitForRound(round, time.Minute)
}
// GetBalance takes an address and returns its total balance; if the address doesn't exist, it returns 0.
diff --git a/network/connPerfMon_test.go b/network/connPerfMon_test.go
index 560be72a96..4c2bc5f034 100644
--- a/network/connPerfMon_test.go
+++ b/network/connPerfMon_test.go
@@ -103,14 +103,14 @@ func TestConnMonitorStageTiming(t *testing.T) {
startTestTime := time.Now().UnixNano()
perfMonitor := makeConnectionPerformanceMonitor([]Tag{protocol.AgreementVoteTag})
// measure measuring overhead.
- measuringOverhead := time.Now().Sub(time.Now())
+ measuringOverhead := time.Since(time.Now())
perfMonitor.Reset(peers)
for msgIdx, msg := range msgPool {
msg.Received += startTestTime
beforeNotify := time.Now()
beforeNotifyStage := perfMonitor.stage
perfMonitor.Notify(&msg)
- notifyTime := time.Now().Sub(beforeNotify)
+ notifyTime := time.Since(beforeNotify)
stageTimings[beforeNotifyStage] += notifyTime
stageNotifyCalls[beforeNotifyStage]++
if perfMonitor.GetPeersStatistics() != nil {
diff --git a/test/e2e-go/features/accountPerf/sixMillion_test.go b/test/e2e-go/features/accountPerf/sixMillion_test.go
index 946d1b24b6..94feb3e9eb 100644
--- a/test/e2e-go/features/accountPerf/sixMillion_test.go
+++ b/test/e2e-go/features/accountPerf/sixMillion_test.go
@@ -1024,13 +1024,10 @@ func checkPoint(counter, firstValid, tLife uint64, force bool, fixture *fixtures
if verbose {
fmt.Printf("Waiting for round %d...", int(lastRound))
}
- nodeStat, err := fixture.AlgodClient.WaitForBlock(basics.Round(lastRound - 1))
+ nodeStat, err := fixture.AlgodClient.WaitForRound(lastRound, time.Minute)
if err != nil {
return 0, 0, fmt.Errorf("failed to wait for block %d : %w", lastRound, err)
}
- if nodeStat.LastRound < lastRound {
- return 0, 0, fmt.Errorf("failed to wait for block %d : node is at round %d", lastRound, nodeStat.LastRound)
- }
return 0, nodeStat.LastRound + 1, nil
}
return counter, firstValid, nil
diff --git a/test/e2e-go/features/catchup/basicCatchup_test.go b/test/e2e-go/features/catchup/basicCatchup_test.go
index 2e3ac87943..adc8c43f18 100644
--- a/test/e2e-go/features/catchup/basicCatchup_test.go
+++ b/test/e2e-go/features/catchup/basicCatchup_test.go
@@ -56,9 +56,8 @@ func TestBasicCatchup(t *testing.T) {
a.NoError(err)
// Let the network make some progress
- a.NoError(err)
waitForRound := uint64(3)
- err = fixture.ClientWaitForRoundWithTimeout(fixture.GetAlgodClientForController(nc), waitForRound)
+ err = fixture.GetAlgodClientForController(nc).WaitForRoundWithTimeout(waitForRound)
a.NoError(err)
// Now spin up third node
@@ -71,7 +70,7 @@ func TestBasicCatchup(t *testing.T) {
defer shutdownClonedNode(cloneDataDir, &fixture, t)
// Now, catch up
- err = fixture.LibGoalFixture.ClientWaitForRoundWithTimeout(cloneClient, waitForRound)
+ _, err = cloneClient.WaitForRound(waitForRound)
a.NoError(err)
}
@@ -155,7 +154,7 @@ func runCatchupOverGossip(t fixtures.TestingTB,
// Let the secondary make progress up to round 3, while the primary was never started (hence, it's on round = 0)
waitForRound := uint64(3)
- err = fixture.ClientWaitForRoundWithTimeout(fixture.GetAlgodClientForController(nc), waitForRound)
+ err = fixture.GetAlgodClientForController(nc).WaitForRoundWithTimeout(waitForRound)
a.NoError(err)
// stop the secondary, which is on round 3 or more.
@@ -167,7 +166,7 @@ func runCatchupOverGossip(t fixtures.TestingTB,
a.NoError(err)
// Now, catch up
- err = fixture.LibGoalFixture.ClientWaitForRoundWithTimeout(lg, waitForRound)
+ _, err = lg.WaitForRound(waitForRound)
a.NoError(err)
waitStart := time.Now()
@@ -184,7 +183,7 @@ func runCatchupOverGossip(t fixtures.TestingTB,
break
}
- if time.Now().Sub(waitStart) > time.Minute {
+ if time.Since(waitStart) > time.Minute {
// it's taking too long.
a.FailNow("Waiting too long for catchup to complete")
}
@@ -258,7 +257,7 @@ func TestStoppedCatchupOnUnsupported(t *testing.T) {
// Let the network make some progress
a.NoError(err)
waitForRound := uint64(3) // UpgradeVoteRounds + DefaultUpgradeWaitRounds
- err = fixture.ClientWaitForRoundWithTimeout(fixture.GetAlgodClientForController(nc), waitForRound)
+ err = fixture.GetAlgodClientForController(nc).WaitForRoundWithTimeout(waitForRound)
a.NoError(err)
// Now spin up third node
@@ -274,7 +273,7 @@ func TestStoppedCatchupOnUnsupported(t *testing.T) {
defer shutdownClonedNode(cloneDataDir, &fixture, t)
// Now, catch up
- err = fixture.LibGoalFixture.ClientWaitForRoundWithTimeout(cloneClient, waitForRound)
+ _, err = cloneClient.WaitForRound(waitForRound)
a.NoError(err)
timeout := time.NewTimer(20 * time.Second)
@@ -374,7 +373,7 @@ func TestBasicCatchupCompletes(t *testing.T) {
a.NoError(err)
// Wait for the network to make some progress.
- err = fixture.ClientWaitForRoundWithTimeout(fixture.GetAlgodClientForController(nc), waitForRound)
+ err = fixture.GetAlgodClientForController(nc).WaitForRoundWithTimeout(waitForRound)
a.NoError(err)
// Start the third node to catchup.
@@ -384,7 +383,7 @@ func TestBasicCatchupCompletes(t *testing.T) {
defer shutdownClonedNode(cloneDataDir, &fixture, t)
// Wait for it to catchup
- err = fixture.LibGoalFixture.ClientWaitForRoundWithTimeout(cloneClient, waitForRound)
+ _, err = cloneClient.WaitForRound(waitForRound)
a.NoError(err)
// Calculate the catchup time
diff --git a/test/e2e-go/features/catchup/catchpointCatchup_test.go b/test/e2e-go/features/catchup/catchpointCatchup_test.go
index 3a1eefedc4..0a1d522cac 100644
--- a/test/e2e-go/features/catchup/catchpointCatchup_test.go
+++ b/test/e2e-go/features/catchup/catchpointCatchup_test.go
@@ -46,7 +46,7 @@ import (
const basicTestCatchpointInterval = 4
func waitForCatchpointGeneration(t *testing.T, fixture *fixtures.RestClientFixture, client client.RestClient, catchpointRound basics.Round) string {
- err := fixture.ClientWaitForRoundWithTimeout(client, uint64(catchpointRound+1))
+ err := client.WaitForRoundWithTimeout(uint64(catchpointRound + 1))
if err != nil {
return ""
}
@@ -212,7 +212,7 @@ func startCatchpointGeneratingNode(a *require.Assertions, fixture *fixtures.Rest
restClient := fixture.GetAlgodClientForController(nodeController)
// We don't want to start using the node without it being properly initialized.
- err = fixture.ClientWaitForRoundWithTimeout(restClient, 1)
+ err = restClient.WaitForRoundWithTimeout(1)
a.NoError(err)
return nodeController, restClient, &errorsCollector
@@ -239,7 +239,7 @@ func startCatchpointUsingNode(a *require.Assertions, fixture *fixtures.RestClien
restClient := fixture.GetAlgodClientForController(nodeController)
// We don't want to start using the node without it being properly initialized.
- err = fixture.ClientWaitForRoundWithTimeout(restClient, 1)
+ err = restClient.WaitForRoundWithTimeout(1)
a.NoError(err)
return nodeController, restClient, wp, &errorsCollector
@@ -263,7 +263,7 @@ func startCatchpointNormalNode(a *require.Assertions, fixture *fixtures.RestClie
restClient := fixture.GetAlgodClientForController(nodeController)
// We don't want to start using the node without it being properly initialized.
- err = fixture.ClientWaitForRoundWithTimeout(restClient, 1)
+ err = restClient.WaitForRoundWithTimeout(1)
a.NoError(err)
return nodeController, restClient, &errorsCollector
@@ -365,7 +365,7 @@ func TestBasicCatchpointCatchup(t *testing.T) {
_, err = usingNodeRestClient.Catchup(catchpointLabel, 0)
a.NoError(err)
- err = fixture.ClientWaitForRoundWithTimeout(usingNodeRestClient, uint64(targetCatchpointRound+1))
+ err = usingNodeRestClient.WaitForRoundWithTimeout(uint64(targetCatchpointRound + 1))
a.NoError(err)
// ensure the raw block can be downloaded (including cert)
@@ -438,7 +438,7 @@ func TestCatchpointLabelGeneration(t *testing.T) {
primaryNodeRestClient := fixture.GetAlgodClientForController(primaryNode)
log.Infof("Building ledger history..")
for {
- err = fixture.ClientWaitForRound(primaryNodeRestClient, currentRound, 45*time.Second)
+ _, err = primaryNodeRestClient.WaitForRound(currentRound+1, 45*time.Second)
a.NoError(err)
if targetRound <= currentRound {
break
@@ -553,8 +553,7 @@ func TestNodeTxHandlerRestart(t *testing.T) {
// Wait for the network to start making progress again
primaryNodeRestClient := fixture.GetAlgodClientForController(primaryNode)
- err = fixture.ClientWaitForRound(primaryNodeRestClient, targetRound,
- 10*catchpointCatchupProtocol.AgreementFilterTimeout)
+ _, err = primaryNodeRestClient.WaitForRound(targetRound, 10*catchpointCatchupProtocol.AgreementFilterTimeout)
a.NoError(err)
// let the 2nd client send a transaction
@@ -674,8 +673,7 @@ func TestReadyEndpoint(t *testing.T) {
// Wait for the network to start making progress again
primaryNodeRestClient := fixture.GetAlgodClientForController(primaryNode)
- err = fixture.ClientWaitForRound(primaryNodeRestClient, targetRound,
- 10*catchpointCatchupProtocol.AgreementFilterTimeout)
+ _, err = primaryNodeRestClient.WaitForRound(targetRound, 10*catchpointCatchupProtocol.AgreementFilterTimeout)
a.NoError(err)
// The primary node has reached the target round,
diff --git a/test/e2e-go/features/catchup/stateproofsCatchup_test.go b/test/e2e-go/features/catchup/stateproofsCatchup_test.go
index 5dcbc11452..f9639abeb1 100644
--- a/test/e2e-go/features/catchup/stateproofsCatchup_test.go
+++ b/test/e2e-go/features/catchup/stateproofsCatchup_test.go
@@ -115,7 +115,7 @@ func TestStateProofInReplayCatchpoint(t *testing.T) {
}
// wait for fastcatchup to complete and the node is synced
- err = fixture.ClientWaitForRoundWithTimeout(usingNodeRestClient, uint64(targetCatchpointRound+1))
+ err = usingNodeRestClient.WaitForRoundWithTimeout(uint64(targetCatchpointRound + 1))
a.NoError(err)
primaryLibGoal := fixture.GetLibGoalClientFromNodeController(primaryNode)
@@ -174,7 +174,7 @@ func TestStateProofAfterCatchpoint(t *testing.T) {
roundAfterSPGeneration := targetCatchpointRound.RoundUpToMultipleOf(basics.Round(consensusParams.StateProofInterval)) +
basics.Round(consensusParams.StateProofInterval/2)
- err = fixture.ClientWaitForRoundWithTimeout(usingNodeRestClient, uint64(roundAfterSPGeneration))
+ err = usingNodeRestClient.WaitForRoundWithTimeout(uint64(roundAfterSPGeneration))
a.NoError(err)
primaryLibGoal := fixture.GetLibGoalClientFromNodeController(primaryNode)
@@ -234,14 +234,14 @@ func TestSendSigsAfterCatchpointCatchup(t *testing.T) {
primaryNodeAddr, err := primaryNode.GetListeningAddress()
a.NoError(err)
- err = fixture.ClientWaitForRoundWithTimeout(primaryNodeRestClient, 3)
+ err = primaryNodeRestClient.WaitForRoundWithTimeout(3)
a.NoError(err)
normalNode, normalNodeRestClient, normalNodeEC := startCatchpointNormalNode(a, &fixture, "Node1", primaryNodeAddr)
defer normalNodeEC.Print()
defer normalNode.StopAlgod()
- err = fixture.ClientWaitForRoundWithTimeout(normalNodeRestClient, 3)
+ err = normalNodeRestClient.WaitForRoundWithTimeout(3)
a.NoError(err)
// at this point PrimaryNode and Node1 would pass round 3. Before running Node2 we remove block 2 from Primary database.
@@ -267,7 +267,7 @@ func TestSendSigsAfterCatchpointCatchup(t *testing.T) {
_, err = usingNodeRestClient.Catchup(catchpointLabel, 0)
a.NoError(err)
- err = fixture.ClientWaitForRoundWithTimeout(usingNodeRestClient, uint64(targetCatchpointRound)+1)
+ err = usingNodeRestClient.WaitForRoundWithTimeout(uint64(targetCatchpointRound) + 1)
a.NoError(err)
lastNormalRound, err := fixture.GetLibGoalClientFromNodeController(normalNode).CurrentRound()
@@ -280,7 +280,7 @@ func TestSendSigsAfterCatchpointCatchup(t *testing.T) {
lastNormalNodeSignedRound := basics.Round(lastNormalRound).RoundDownToMultipleOf(basics.Round(consensusParams.StateProofInterval))
lastNormalNextStateProofRound := lastNormalNodeSignedRound + basics.Round(consensusParams.StateProofInterval)
targetRound := lastNormalNextStateProofRound + basics.Round(consensusParams.StateProofInterval*2)
- err = fixture.ClientWaitForRoundWithTimeout(usingNodeRestClient, uint64(targetRound))
+ err = usingNodeRestClient.WaitForRoundWithTimeout(uint64(targetRound))
a.NoError(err)
primaryClient := fixture.GetLibGoalClientFromNodeController(primaryNode)
diff --git a/test/e2e-go/features/followernode/syncDeltas_test.go b/test/e2e-go/features/followernode/syncDeltas_test.go
index af27c7dda7..d1458b7451 100644
--- a/test/e2e-go/features/followernode/syncDeltas_test.go
+++ b/test/e2e-go/features/followernode/syncDeltas_test.go
@@ -74,7 +74,7 @@ func TestBasicSyncMode(t *testing.T) {
// Let the network make some progress
waitForRound := uint64(5)
- err = fixture.ClientWaitForRoundWithTimeout(fixture.GetAlgodClientForController(nc), waitForRound)
+ err = fixture.GetAlgodClientForController(nc).WaitForRoundWithTimeout(waitForRound)
a.NoError(err)
// Get the follower client, and exercise the sync/ledger functionality
@@ -88,7 +88,7 @@ func TestBasicSyncMode(t *testing.T) {
a.NoError(err)
a.Equal(round, rResp.Round)
// make some progress to round
- err = fixture.ClientWaitForRoundWithTimeout(followClient, round)
+ err = followClient.WaitForRoundWithTimeout(round)
a.NoError(err)
// retrieve state delta
gResp, err := followClient.GetLedgerStateDelta(round)
@@ -113,6 +113,6 @@ func TestBasicSyncMode(t *testing.T) {
err = followClient.SetSyncRound(round + 1)
a.NoError(err)
}
- err = fixture.LibGoalFixture.ClientWaitForRoundWithTimeout(fixture.LibGoalClient, waitForRound)
+ err = fixture.WaitForRoundWithTimeout(waitForRound)
a.NoError(err)
}
diff --git a/test/e2e-go/features/followernode/syncRestart_test.go b/test/e2e-go/features/followernode/syncRestart_test.go
index 589bb7b53c..1aa5b2560d 100644
--- a/test/e2e-go/features/followernode/syncRestart_test.go
+++ b/test/e2e-go/features/followernode/syncRestart_test.go
@@ -62,7 +62,7 @@ func TestSyncRestart(t *testing.T) {
waitTill := func(node string, round uint64) {
controller, err := fixture.GetNodeController(node)
a.NoError(err)
- err = fixture.ClientWaitForRoundWithTimeout(fixture.GetAlgodClientForController(controller), round)
+ err = fixture.GetAlgodClientForController(controller).WaitForRoundWithTimeout(round)
a.NoError(err)
}
diff --git a/test/e2e-go/features/incentives/challenge_test.go b/test/e2e-go/features/incentives/challenge_test.go
index f45a1b6e2b..51586eab76 100644
--- a/test/e2e-go/features/incentives/challenge_test.go
+++ b/test/e2e-go/features/incentives/challenge_test.go
@@ -27,6 +27,7 @@ import (
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated/model"
"github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/libgoal"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/framework/fixtures"
@@ -48,17 +49,19 @@ func TestChallenges(t *testing.T) {
// Start a three-node network. One relay, two nodes with 4 accounts each
- // At round 50, ~2 nodes will be challenged.
+ // At the first challenge round after the reregs, ~half of the accounts will be challenged.
+ const lookback = 32
const interval = 50
const grace = 10
+ const mask = 0x80
var fixture fixtures.RestClientFixture
// Speed up rounds, keep lookback > 2 * grace period
- fixture.FasterConsensus(protocol.ConsensusFuture, time.Second, 32)
+ fixture.FasterConsensus(protocol.ConsensusFuture, time.Second, lookback)
fixture.AlterConsensus(protocol.ConsensusFuture,
func(cp config.ConsensusParams) config.ConsensusParams {
cp.Payouts.ChallengeInterval = 50
cp.Payouts.ChallengeGracePeriod = 10
- cp.Payouts.ChallengeBits = 2
+ cp.Payouts.ChallengeBits = 1 // half of the accounts should get challenged
return cp
})
fixture.Setup(t, filepath.Join("nettemplates", "Challenges.json"))
@@ -78,6 +81,10 @@ func TestChallenges(t *testing.T) {
// By re-regging, we become eligible for suspension (normal + challenges)
// TODO: Confirm that rereg is required for challenge suspensions
+
+ err := fixture.WaitForRoundWithTimeout(interval - lookback) // Make all LastHeartbeats > interval, < 2*interval
+ a.NoError(err)
+
for _, account := range accounts1 {
rekeyreg(&fixture, a, c1, account.Address)
}
@@ -88,86 +95,94 @@ func TestChallenges(t *testing.T) {
// turn off node 1, so it can't heartbeat
a.NoError(c1.FullStop())
- // Advance to first challenge round, check the blockseed
- err := fixture.WaitForRoundWithTimeout(interval)
+ current, err := c2.CurrentRound()
a.NoError(err)
+ // Check that the reregs all completed early enough that their inflated
+ // LastHeartbeats come before the next challenge.
+ a.Less(current+lookback, 2*uint64(interval))
+
+ // We need to wait for the first challenge that happens after the keyreg
+ // LastHeartbeat has passed. Example: current is 40, so the lastPossible
+ // LastHeartbeat is 72. Interval is 50, so challengeRound is 100.
+ // 100 = 40 + 32 + (50-22) = 72 + 28
+ lastPossible := current + lookback
+ challengeRound := lastPossible + (interval - lastPossible%interval)
+
+ // Advance to challenge round, check the blockseed
+ err = fixture.WaitForRoundWithTimeout(challengeRound)
+ a.NoError(err)
- blk, err := c2.BookkeepingBlock(interval)
+ blk, err := c2.BookkeepingBlock(challengeRound)
a.NoError(err)
- challenge := blk.BlockHeader.Seed[0] & 0xA0 // high two bits
+ challenge := blk.BlockHeader.Seed[0] & mask // high bit
- challenged1 := util.MakeSet[model.Account]()
+ challenged1 := util.MakeSet[basics.Address]()
for _, account := range accounts1 {
- abytes, err := basics.UnmarshalChecksumAddress(account.Address)
+ address, err := basics.UnmarshalChecksumAddress(account.Address)
a.NoError(err)
- if abytes[0]&0xA0 == challenge {
- fmt.Printf("%v of node 1 was challenged %v by %v\n", account.Address, abytes[0], challenge)
- challenged1.Add(account)
+ if address[0]&mask == challenge {
+ fmt.Printf("%v of node 1 was challenged %v by %v\n", address, address[0], challenge)
+ challenged1.Add(address)
}
}
+ require.NotEmpty(t, challenged1, "rerun the test") // TODO: remove.
- challenged2 := util.MakeSet[model.Account]()
+ challenged2 := util.MakeSet[basics.Address]()
for _, account := range accounts2 {
- abytes, err := basics.UnmarshalChecksumAddress(account.Address)
+ address, err := basics.UnmarshalChecksumAddress(account.Address)
a.NoError(err)
- if abytes[0]&0xA0 == challenge {
- fmt.Printf("%v of node 2 was challenged %v by %v\n", account.Address, abytes[0], challenge)
- challenged2.Add(account)
+ if address[0]&mask == challenge {
+ fmt.Printf("%v of node 2 was challenged %v by %v\n", address, address[0], challenge)
+ challenged2.Add(address)
}
}
+ require.NotEmpty(t, challenged2, "rerun the test") // TODO: remove.
allChallenged := util.Union(challenged1, challenged2)
- // TODO: unroll this loop and notice the heartbeat transactions from node 2
- err = fixture.WaitForRoundWithTimeout(interval + grace)
- a.NoError(err)
-
- // challenged accounts are still online
- for account := range allChallenged {
- data, err := c2.AccountData(account.Address)
+ // All challenged nodes are still online
+ for address := range allChallenged {
+ data, err := c2.AccountData(address.String())
a.NoError(err)
- a.Equal(basics.Online, data.Status)
+ a.Equal(basics.Online, data.Status, "%v %d", address.String(), data.LastHeartbeat)
a.NotZero(data.VoteID)
a.True(data.IncentiveEligible)
}
- err = fixture.WaitForRoundWithTimeout(interval + grace + 1)
- a.NoError(err)
-
- // The challenged nodes need be "noticed" to be suspended. TODO: Remove this
- // section when we have prompt suspensions.
- source := accounts2[0] // always pay from operational account on node 2
- for account := range allChallenged {
- fmt.Printf("pay %v\n", account.Address)
- txn, err := c2.SendPaymentFromUnencryptedWallet(source.Address, account.Address, 1000, 0, nil)
- a.NoError(err)
- info, err := fixture.WaitForConfirmedTxn(uint64(txn.LastValid), txn.ID().String())
- a.NoError(err)
- blk, err = c2.BookkeepingBlock(*info.ConfirmedRound)
- a.NoError(err)
- a.Len(blk.AbsentParticipationAccounts, 1)
- a.Equal(blk.AbsentParticipationAccounts[0].String(), account.Address)
- }
+ // In the second half of the grace period, Node 2 should heartbeat for its accounts
+ beated := util.MakeSet[basics.Address]()
+ fixture.WithEveryBlock(challengeRound+grace/2, challengeRound+grace, func(block bookkeeping.Block) {
+ for _, txn := range block.Payset {
+ hb := txn.Txn.HeartbeatTxnFields
+ fmt.Printf("Heartbeat txn %v\n", hb)
+ a.True(challenged2.Contains(hb.HbAddress)) // only Node 2 is alive
+ a.False(beated.Contains(hb.HbAddress)) // beat only once
+ beated.Add(hb.HbAddress)
+ }
+ a.Empty(block.AbsentParticipationAccounts) // nobody suspended during grace
+ })
+ a.Equal(challenged2, beated)
+ blk, err = fixture.WaitForBlockWithTimeout(challengeRound + grace + 1)
+ a.NoError(err)
+ a.Equal(challenged1, util.MakeSet(blk.AbsentParticipationAccounts...))
// node 1 challenged accounts are suspended because node 1 is off
- for account := range challenged1 {
- fmt.Printf("check1 %v\n", account.Address)
- data, err := c2.AccountData(account.Address)
+ for address := range challenged1 {
+ data, err := c2.AccountData(address.String())
a.NoError(err)
- a.Equal(basics.Offline, data.Status, account.Address)
- a.NotZero(data.VoteID, account.Address)
- a.False(data.IncentiveEligible, account.Address) // suspension turns off flag
+ a.Equal(basics.Offline, data.Status, address)
+ a.NotZero(data.VoteID, address)
+ a.False(data.IncentiveEligible, address) // suspension turns off flag
}
// node 2 challenged accounts are not suspended (saved by heartbeat)
- for account := range challenged2 {
- fmt.Printf("check2 %v\n", account.Address)
- data, err := c2.AccountData(account.Address)
+ for address := range challenged2 {
+ data, err := c2.AccountData(address.String())
a.NoError(err)
- a.Equal(basics.Online, data.Status, account.Address)
- a.NotZero(data.VoteID, account.Address)
- a.True(data.IncentiveEligible, account.Address)
+ a.Equal(basics.Online, data.Status, address)
+ a.NotZero(data.VoteID, address)
+ a.True(data.IncentiveEligible, address)
}
}
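A note on the masks above: 0xA0 is 1010 0000, so the old code never actually tested the "high two bits" its comment claimed (that would be 0xC0); checking a single bit with 0x80 sidesteps the issue and doubles the expected number of challenged accounts. A sketch of the matching rule as the test applies it (hypothetical helper; the consensus implementation may compare more than the first byte):

    // isChallenged reports whether an account whose address begins with
    // addrByte is challenged by a block whose seed begins with seedByte,
    // comparing the top `bits` bits (i.e., ChallengeBits).
    func isChallenged(seedByte, addrByte byte, bits uint) bool {
        mask := byte(0xFF) << (8 - bits) // bits=1 -> 0x80, bits=2 -> 0xC0
        return addrByte&mask == seedByte&mask
    }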
diff --git a/test/e2e-go/features/participation/onlineOfflineParticipation_test.go b/test/e2e-go/features/participation/onlineOfflineParticipation_test.go
index 0b38fe76ff..21a701139a 100644
--- a/test/e2e-go/features/participation/onlineOfflineParticipation_test.go
+++ b/test/e2e-go/features/participation/onlineOfflineParticipation_test.go
@@ -216,7 +216,7 @@ func TestNewAccountCanGoOnlineAndParticipate(t *testing.T) {
// Need to wait for funding to take effect on selection, then we can see if we're participating
// Stop before the account should become eligible for selection so we can ensure it wasn't
- err = fixture.ClientWaitForRound(fixture.AlgodClient, uint64(accountProposesStarting-1),
+ err = fixture.WaitForRound(uint64(accountProposesStarting-1),
time.Duration(uint64(globals.MaxTimePerRound)*uint64(accountProposesStarting-1)))
a.NoError(err)
@@ -226,7 +226,7 @@ func TestNewAccountCanGoOnlineAndParticipate(t *testing.T) {
a.False(blockWasProposed, "account should not be selected until BalLookback (round %d) passes", int(accountProposesStarting-1))
// Now wait until the round where the funded account will be used.
- err = fixture.ClientWaitForRound(fixture.AlgodClient, uint64(accountProposesStarting), 10*globals.MaxTimePerRound)
+ err = fixture.WaitForRound(uint64(accountProposesStarting), 10*globals.MaxTimePerRound)
a.NoError(err)
blockWasProposedByNewAccountRecently := fixture.VerifyBlockProposedRange(newAccount, int(accountProposesStarting), 1)
diff --git a/test/e2e-go/features/partitionRecovery/partitionRecovery_test.go b/test/e2e-go/features/partitionRecovery/partitionRecovery_test.go
index 21ce3bdf0d..e3429490c4 100644
--- a/test/e2e-go/features/partitionRecovery/partitionRecovery_test.go
+++ b/test/e2e-go/features/partitionRecovery/partitionRecovery_test.go
@@ -57,7 +57,7 @@ func TestBasicPartitionRecovery(t *testing.T) {
// Let the network make some progress
waitForRound := uint64(3)
- err = fixture.ClientWaitForRoundWithTimeout(fixture.GetAlgodClientForController(nc), waitForRound)
+ err = fixture.GetAlgodClientForController(nc).WaitForRoundWithTimeout(waitForRound)
a.NoError(err)
// Now stop 2nd node
@@ -133,7 +133,7 @@ func runTestWithStaggeredStopStart(t *testing.T, fixture *fixtures.RestClientFix
// Let the network make some progress
waitForRound := uint64(3)
- err = fixture.ClientWaitForRoundWithTimeout(fixture.GetAlgodClientForController(nc1), waitForRound)
+ err = fixture.GetAlgodClientForController(nc1).WaitForRoundWithTimeout(waitForRound)
a.NoError(err)
// Stop Node1
@@ -196,7 +196,7 @@ func TestBasicPartitionRecoveryPartOffline(t *testing.T) {
// Let the network make some progress
waitForRound := uint64(3)
- err = fixture.ClientWaitForRoundWithTimeout(fixture.GetAlgodClientForController(nc1), waitForRound)
+ err = fixture.GetAlgodClientForController(nc1).WaitForRoundWithTimeout(waitForRound)
a.NoError(err)
// Stop Node1
@@ -264,8 +264,7 @@ func TestPartitionHalfOffline(t *testing.T) {
// Let the network make some progress
client := fixture.LibGoalClient
- waitForRound := uint64(3)
- err = fixture.ClientWaitForRoundWithTimeout(fixture.GetAlgodClientForController(nc1), waitForRound)
+ err = fixture.GetAlgodClientForController(nc1).WaitForRoundWithTimeout(3)
a.NoError(err)
// Stop nodes with 50% of stake
diff --git a/test/e2e-go/restAPI/other/misc_test.go b/test/e2e-go/restAPI/other/misc_test.go
index eeaff9fcd1..23e805dc25 100644
--- a/test/e2e-go/restAPI/other/misc_test.go
+++ b/test/e2e-go/restAPI/other/misc_test.go
@@ -62,7 +62,7 @@ func TestDisabledAPIConfig(t *testing.T) {
a.NoError(err)
testClient := client.MakeRestClient(url, "") // empty token
- _, err = testClient.WaitForBlock(1)
+ err = testClient.WaitForRoundWithTimeout(1)
assert.NoError(t, err)
_, err = testClient.Block(1)
assert.NoError(t, err)
diff --git a/test/e2e-go/restAPI/simulate/simulateRestAPI_test.go b/test/e2e-go/restAPI/simulate/simulateRestAPI_test.go
index 66601c1737..639894a671 100644
--- a/test/e2e-go/restAPI/simulate/simulateRestAPI_test.go
+++ b/test/e2e-go/restAPI/simulate/simulateRestAPI_test.go
@@ -288,11 +288,11 @@ int 1`
// Let the primary node make some progress
primaryClient := fixture.GetAlgodClientForController(nc)
- err = fixture.ClientWaitForRoundWithTimeout(primaryClient, followerSyncRound+uint64(cfg.MaxAcctLookback))
+ err = primaryClient.WaitForRoundWithTimeout(followerSyncRound + uint64(cfg.MaxAcctLookback))
a.NoError(err)
// Let follower node progress as far as it can
- err = fixture.ClientWaitForRoundWithTimeout(followClient, followerSyncRound+uint64(cfg.MaxAcctLookback)-1)
+ err = followClient.WaitForRoundWithTimeout(followerSyncRound + uint64(cfg.MaxAcctLookback) - 1)
a.NoError(err)
simulateRequest := v2.PreEncodedSimulateRequest{
diff --git a/test/e2e-go/upgrades/application_support_test.go b/test/e2e-go/upgrades/application_support_test.go
index 549a82c5ab..c41ad84166 100644
--- a/test/e2e-go/upgrades/application_support_test.go
+++ b/test/e2e-go/upgrades/application_support_test.go
@@ -180,7 +180,7 @@ int 1
curStatus, err = client.Status()
a.NoError(err)
- a.Less(int64(time.Now().Sub(startLoopTime)), int64(3*time.Minute))
+ a.Less(int64(time.Since(startLoopTime)), int64(3*time.Minute))
time.Sleep(time.Duration(smallLambdaMs) * time.Millisecond)
}
@@ -438,7 +438,7 @@ int 1
curStatus, err = client.Status()
a.NoError(err)
- a.Less(int64(time.Now().Sub(startLoopTime)), int64(3*time.Minute))
+ a.Less(int64(time.Since(startLoopTime)), int64(3*time.Minute))
time.Sleep(time.Duration(smallLambdaMs) * time.Millisecond)
round = curStatus.LastRound
}
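The time.Since substitutions in this file, rekey_support_test.go, and dbutil.go are behavior-preserving: the standard library defines time.Since(t) as shorthand for time.Now().Sub(t), so only readability changes. For example:

    // These two lines compute the same elapsed duration:
    elapsed := time.Now().Sub(startLoopTime)
    elapsed = time.Since(startLoopTime)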
diff --git a/test/e2e-go/upgrades/rekey_support_test.go b/test/e2e-go/upgrades/rekey_support_test.go
index 0dcec41545..cc3eca018c 100644
--- a/test/e2e-go/upgrades/rekey_support_test.go
+++ b/test/e2e-go/upgrades/rekey_support_test.go
@@ -150,7 +150,7 @@ func TestRekeyUpgrade(t *testing.T) {
curStatus, err = client.Status()
a.NoError(err)
- a.Less(int64(time.Now().Sub(startLoopTime)), int64(3*time.Minute))
+ a.Less(int64(time.Since(startLoopTime)), int64(3*time.Minute))
time.Sleep(time.Duration(smallLambdaMs) * time.Millisecond)
round = curStatus.LastRound
}
diff --git a/test/framework/fixtures/libgoalFixture.go b/test/framework/fixtures/libgoalFixture.go
index 0a88153989..c05a59ff1f 100644
--- a/test/framework/fixtures/libgoalFixture.go
+++ b/test/framework/fixtures/libgoalFixture.go
@@ -42,7 +42,6 @@ import (
"github.com/algorand/go-algorand/netdeploy"
"github.com/algorand/go-algorand/nodecontrol"
"github.com/algorand/go-algorand/protocol"
- "github.com/algorand/go-algorand/test/e2e-go/globals"
"github.com/algorand/go-algorand/util/db"
)
@@ -67,12 +66,12 @@ func (f *RestClientFixture) SetConsensus(consensus config.ConsensusProtocols) {
f.consensus = consensus
}
+// AlterConsensus allows the caller to modify the consensus settings for a given version.
func (f *RestClientFixture) AlterConsensus(ver protocol.ConsensusVersion, alter func(config.ConsensusParams) config.ConsensusParams) {
if f.consensus == nil {
f.consensus = make(config.ConsensusProtocols)
}
- consensus := config.Consensus[ver]
- f.consensus[ver] = alter(consensus)
+ f.consensus[ver] = alter(f.ConsensusParamsFromVer(ver))
}
// FasterConsensus speeds up the given consensus version in two ways. The seed
@@ -80,21 +79,19 @@ func (f *RestClientFixture) AlterConsensus(ver protocol.ConsensusVersion, alter
-// lookback becomes 32. And, if the architecture implies it can be handled,
+// lookback becomes the given lookback. And, if the architecture implies it can be handled,
// round times are shortened by lowering vote timeouts.
func (f *RestClientFixture) FasterConsensus(ver protocol.ConsensusVersion, timeout time.Duration, lookback basics.Round) {
- if f.consensus == nil {
- f.consensus = make(config.ConsensusProtocols)
- }
- fast := config.Consensus[ver]
- // balanceRound is 4 * SeedRefreshInterval
- if lookback%4 != 0 {
- panic(fmt.Sprintf("lookback must be a multiple of 4, got %d", lookback))
- }
- fast.SeedRefreshInterval = uint64(lookback) / 4
- // and speed up the rounds while we're at it
- if runtime.GOARCH == "amd64" || runtime.GOARCH == "arm64" {
- fast.AgreementFilterTimeoutPeriod0 = timeout
- fast.AgreementFilterTimeout = timeout
- }
- f.consensus[ver] = fast
+ f.AlterConsensus(ver, func(fast config.ConsensusParams) config.ConsensusParams {
+ // balanceRound is 4 * SeedRefreshInterval
+ if lookback%4 != 0 {
+ panic(fmt.Sprintf("lookback must be a multiple of 4, got %d", lookback))
+ }
+ fast.SeedRefreshInterval = uint64(lookback) / 4
+ // and speed up the rounds while we're at it
+ if runtime.GOARCH == "amd64" || runtime.GOARCH == "arm64" {
+ fast.AgreementFilterTimeoutPeriod0 = timeout
+ fast.AgreementFilterTimeout = timeout
+ }
+ return fast
+ })
}
// Setup is called to initialize the test fixture for the test(s)
@@ -460,75 +457,6 @@ func (f *LibGoalFixture) GetParticipationOnlyAccounts(lg libgoal.Client) []accou
return f.clientPartKeys[lg.DataDir()]
}
-// WaitForRoundWithTimeout waits for a given round to reach. The implementation also ensures to limit the wait time for each round to the
-// globals.MaxTimePerRound so we can alert when we're getting "hung" before waiting for all the expected rounds to reach.
-func (f *LibGoalFixture) WaitForRoundWithTimeout(roundToWaitFor uint64) error {
- return f.ClientWaitForRoundWithTimeout(f.LibGoalClient, roundToWaitFor)
-}
-
-// ClientWaitForRoundWithTimeout waits for a given round to be reached by the specific client/node. The implementation
-// also ensures to limit the wait time for each round to the globals.MaxTimePerRound so we can alert when we're
-// getting "hung" before waiting for all the expected rounds to reach.
-func (f *LibGoalFixture) ClientWaitForRoundWithTimeout(client libgoal.Client, roundToWaitFor uint64) error {
- status, err := client.Status()
- require.NoError(f.t, err)
- lastRound := status.LastRound
-
- // If node is already at or past target round, we're done
- if lastRound >= roundToWaitFor {
- return nil
- }
-
- roundTime := globals.MaxTimePerRound * 10 // For first block, we wait much longer
- roundComplete := make(chan error, 2)
-
- for nextRound := lastRound + 1; lastRound < roundToWaitFor; {
- roundStarted := time.Now()
-
- go func(done chan error) {
- err := f.ClientWaitForRound(client, nextRound, roundTime)
- done <- err
- }(roundComplete)
-
- select {
- case lastError := <-roundComplete:
- if lastError != nil {
- close(roundComplete)
- return lastError
- }
- case <-time.After(roundTime):
- // we've timed out.
- time := time.Now().Sub(roundStarted)
- return fmt.Errorf("fixture.WaitForRound took %3.2f seconds between round %d and %d", time.Seconds(), lastRound, nextRound)
- }
-
- roundTime = singleRoundMaxTime
- lastRound++
- nextRound++
- }
- return nil
-}
-
-// ClientWaitForRound waits up to the specified amount of time for
-// the network to reach or pass the specified round, on the specific client/node
-func (f *LibGoalFixture) ClientWaitForRound(client libgoal.Client, round uint64, waitTime time.Duration) error {
- timeout := time.NewTimer(waitTime)
- for {
- status, err := client.Status()
- if err != nil {
- return err
- }
- if status.LastRound >= round {
- return nil
- }
- select {
- case <-timeout.C:
- return fmt.Errorf("timeout waiting for round %v", round)
- case <-time.After(200 * time.Millisecond):
- }
- }
-}
-
// CurrentConsensusParams returns the consensus parameters for the currently active protocol
func (f *LibGoalFixture) CurrentConsensusParams() (consensus config.ConsensusParams, err error) {
status, err := f.LibGoalClient.Status()
@@ -540,20 +468,20 @@ func (f *LibGoalFixture) CurrentConsensusParams() (consensus config.ConsensusPar
}
// ConsensusParams returns the consensus parameters for the protocol from the specified round
-func (f *LibGoalFixture) ConsensusParams(round uint64) (consensus config.ConsensusParams, err error) {
+func (f *LibGoalFixture) ConsensusParams(round uint64) (config.ConsensusParams, error) {
block, err := f.LibGoalClient.BookkeepingBlock(round)
if err != nil {
- return
+ return config.ConsensusParams{}, err
}
- version := protocol.ConsensusVersion(block.CurrentProtocol)
- if f.consensus != nil {
- consensus, has := f.consensus[version]
- if has {
- return consensus, nil
- }
+ return f.ConsensusParamsFromVer(block.CurrentProtocol), nil
+}
+
+// ConsensusParamsFromVer looks up a consensus version, allowing for override
+func (f *LibGoalFixture) ConsensusParamsFromVer(cv protocol.ConsensusVersion) config.ConsensusParams {
+ if consensus, has := f.consensus[cv]; has {
+ return consensus
}
- consensus = config.Consensus[version]
- return
+ return config.Consensus[cv]
}
// CurrentMinFeeAndBalance returns the MinTxnFee and MinBalance for the currently active protocol
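One consequence of routing FasterConsensus through AlterConsensus, and AlterConsensus through ConsensusParamsFromVer, is that successive alterations now stack instead of resetting to the stock config.Consensus entry. A sketch of the intended call pattern (values borrowed from the challenge test above):

    // lookback must be a multiple of 4 because the protocol's balance
    // lookback is 4*SeedRefreshInterval; lookback=32 yields an interval of 8.
    fixture.FasterConsensus(protocol.ConsensusFuture, time.Second, 32)
    fixture.AlterConsensus(protocol.ConsensusFuture,
        func(cp config.ConsensusParams) config.ConsensusParams {
            cp.Payouts.ChallengeInterval = 50 // layered on the faster params
            return cp
        })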
diff --git a/test/framework/fixtures/restClientFixture.go b/test/framework/fixtures/restClientFixture.go
index 473df25d38..fb1a26d31b 100644
--- a/test/framework/fixtures/restClientFixture.go
+++ b/test/framework/fixtures/restClientFixture.go
@@ -25,6 +25,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/netdeploy"
"github.com/algorand/go-algorand/protocol"
@@ -34,7 +35,6 @@ import (
"github.com/algorand/go-algorand/libgoal"
"github.com/algorand/go-algorand/nodecontrol"
- "github.com/algorand/go-algorand/test/e2e-go/globals"
"github.com/algorand/go-algorand/util/tokens"
)
@@ -80,79 +80,37 @@ func (f *RestClientFixture) GetAlgodClientForController(nc nodecontrol.NodeContr
// WaitForRound waits up to the specified amount of time for
// the network to reach or pass the specified round
func (f *RestClientFixture) WaitForRound(round uint64, waitTime time.Duration) error {
- return f.ClientWaitForRound(f.AlgodClient, round, waitTime)
+ _, err := f.AlgodClient.WaitForRound(round, waitTime)
+ return err
}
-// ClientWaitForRound waits up to the specified amount of time for
-// the network to reach or pass the specified round, on the specific client/node
-func (f *RestClientFixture) ClientWaitForRound(client client.RestClient, round uint64, waitTime time.Duration) error {
- timeout := time.NewTimer(waitTime)
- for {
- status, err := client.Status()
- if err != nil {
- return err
- }
-
- if status.LastRound >= round {
- return nil
- }
- select {
- case <-timeout.C:
- return fmt.Errorf("timeout waiting for round %v with last round = %v", round, status.LastRound)
- case <-time.After(200 * time.Millisecond):
- }
+// WithEveryBlock calls the provided function for every block from first to last, inclusive.
+func (f *RestClientFixture) WithEveryBlock(first, last uint64, visit func(bookkeeping.Block)) {
+ for round := first; round <= last; round++ {
+ err := f.WaitForRoundWithTimeout(round)
+ require.NoError(f.t, err)
+ block, err := f.AlgodClient.Block(round)
+ require.NoError(f.t, err)
+ visit(block.Block)
}
}
-// WaitForRoundWithTimeout waits for a given round to reach. The implementation also ensures to limit the wait time for each round to the
-// globals.MaxTimePerRound so we can alert when we're getting "hung" before waiting for all the expected rounds to reach.
+// WaitForRoundWithTimeout waits for a given round to be reached. The implementation also limits the wait time for each
+// round to globals.MaxTimePerRound so we can alert when we're getting "hung" before all the expected rounds are reached.
func (f *RestClientFixture) WaitForRoundWithTimeout(roundToWaitFor uint64) error {
- return f.ClientWaitForRoundWithTimeout(f.AlgodClient, roundToWaitFor)
+ return f.AlgodClient.WaitForRoundWithTimeout(roundToWaitFor)
}
-const singleRoundMaxTime = globals.MaxTimePerRound * 40
-
-// ClientWaitForRoundWithTimeout waits for a given round to be reached by the specific client/node. The implementation
-// also ensures to limit the wait time for each round to the globals.MaxTimePerRound so we can alert when we're
-// getting "hung" before waiting for all the expected rounds to reach.
-func (f *RestClientFixture) ClientWaitForRoundWithTimeout(client client.RestClient, roundToWaitFor uint64) error {
- status, err := client.Status()
- require.NoError(f.t, err)
- lastRound := status.LastRound
-
- // If node is already at or past target round, we're done
- if lastRound >= roundToWaitFor {
- return nil
+// WaitForBlockWithTimeout waits for a given round and returns its block.
+func (f *RestClientFixture) WaitForBlockWithTimeout(roundToWaitFor uint64) (bookkeeping.Block, error) {
+ if err := f.AlgodClient.WaitForRoundWithTimeout(roundToWaitFor); err != nil {
+ return bookkeeping.Block{}, err
}
-
- roundTime := globals.MaxTimePerRound * 10 // For first block, we wait much longer
- roundComplete := make(chan error, 2)
-
- for nextRound := lastRound + 1; lastRound < roundToWaitFor; {
- roundStarted := time.Now()
-
- go func(done chan error) {
- err := f.ClientWaitForRound(client, nextRound, roundTime)
- done <- err
- }(roundComplete)
-
- select {
- case lastError := <-roundComplete:
- if lastError != nil {
- close(roundComplete)
- return lastError
- }
- case <-time.After(roundTime):
- // we've timed out.
- time := time.Now().Sub(roundStarted)
- return fmt.Errorf("fixture.WaitForRound took %3.2f seconds between round %d and %d", time.Seconds(), lastRound, nextRound)
- }
-
- roundTime = singleRoundMaxTime
- lastRound++
- nextRound++
+ both, err := f.AlgodClient.EncodedBlockCert(roundToWaitFor)
+ if err != nil {
+ return bookkeeping.Block{}, err
}
- return nil
+ return both.Block, nil
}
// GetFirstAccount returns the first account from listing local accounts
@@ -367,17 +325,15 @@ func (f *RestClientFixture) SendMoneyAndWaitFromWallet(walletHandle, walletPassw
-// VerifyBlockProposedRange checks the rounds starting at fromRounds and moving backwards checking countDownNumRounds rounds if any
-// blocks were proposed by address
+// VerifyBlockProposedRange checks countDownNumRounds rounds, starting at fromRound and moving backwards, reporting
+// whether any block was proposed by the given account address
-func (f *RestClientFixture) VerifyBlockProposedRange(account string, fromRound, countDownNumRounds int) (blockWasProposed bool) {
- c := f.LibGoalClient
+func (f *RestClientFixture) VerifyBlockProposedRange(account string, fromRound, countDownNumRounds int) bool {
for i := 0; i < countDownNumRounds; i++ {
- cert, err := c.EncodedBlockCert(uint64(fromRound - i))
+ cert, err := f.AlgodClient.EncodedBlockCert(uint64(fromRound - i))
require.NoError(f.t, err, "client failed to get block %d", fromRound-i)
if cert.Certificate.Proposal.OriginalProposer.GetUserAddress() == account {
- blockWasProposed = true
- break
+ return true
}
}
- return
+ return false
}
// VerifyBlockProposed checks the last searchRange blocks to see if any blocks were proposed by address
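A usage sketch for the two helpers added above (the bounds and assertion body are illustrative; the challenge test shows the real call sites):

    // Visit rounds 10 through 20 inclusive, asserting nobody was suspended,
    // then wait for round 21 and fetch its block in one call.
    fixture.WithEveryBlock(10, 20, func(b bookkeeping.Block) {
        require.Empty(t, b.AbsentParticipationAccounts)
    })
    blk, err := fixture.WaitForBlockWithTimeout(21)
    require.NoError(t, err)
    require.EqualValues(t, 21, blk.Round())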
diff --git a/util/db/dbutil.go b/util/db/dbutil.go
index a6e524464d..8a52862c54 100644
--- a/util/db/dbutil.go
+++ b/util/db/dbutil.go
@@ -311,7 +311,7 @@ func (db *Accessor) AtomicContext(ctx context.Context, fn idemFn, extras ...inte
}
if time.Now().After(atomicDeadline) {
- db.getDecoratedLogger(fn, extras).Warnf("dbatomic: tx surpassed expected deadline by %v", time.Now().Sub(atomicDeadline))
+ db.getDecoratedLogger(fn, extras).Warnf("dbatomic: tx surpassed expected deadline by %v", time.Since(atomicDeadline))
}
return
}