From 4cbca75b75629bfe3a31011faede28663788829e Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Thu, 27 Apr 2023 14:22:54 +0200 Subject: [PATCH 01/18] Fix `TestStateOversizedBlock` (backport #755) (#765) * Fix `TestStateOversizedBlock` (#755) * Fix TestStateOversizedBlock * Moved `findBlockSizeLimit` together with other aux functions (cherry picked from commit c58597d656d5c816334aff9ea8e600bdbc534817) # Conflicts: # consensus/state_test.go * Revert "Fix `TestStateOversizedBlock` (#755)" This reverts commit 50a4555280e7687856cc4feb1a32c2ee03409467. * Fix `TestStateOversizedBlock` (#755) * Fix TestStateOversizedBlock * Moved `findBlockSizeLimit` together with other aux functions --------- Co-authored-by: Sergio Mena --- consensus/state_test.go | 153 ++++++++++++++++++++++++++-------------- 1 file changed, 101 insertions(+), 52 deletions(-) diff --git a/consensus/state_test.go b/consensus/state_test.go index dea48b27123..298dd907705 100644 --- a/consensus/state_test.go +++ b/consensus/state_test.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "fmt" + "strings" "testing" "time" @@ -252,70 +253,88 @@ func TestStateBadProposal(t *testing.T) { } func TestStateOversizedBlock(t *testing.T) { - cs1, vss := randState(2) - cs1.state.ConsensusParams.Block.MaxBytes = 2000 - height, round := cs1.Height, cs1.Round - vs2 := vss[1] - - partSize := types.BlockPartSizeBytes - - timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) - voteCh := subscribe(cs1.eventBus, types.EventQueryVote) + const maxBytes = 2000 - propBlock, err := cs1.createProposalBlock() - require.NoError(t, err) - propBlock.Data.Txs = []types.Tx{cmtrand.Bytes(2001)} - propBlock.Header.DataHash = propBlock.Data.Hash() + for _, testCase := range []struct { + name string + oversized bool + }{ + { + name: "max size, correct block", + oversized: false, + }, + { + name: "off-by-1 max size, incorrect block", + oversized: true, + }, + } { + t.Run(testCase.name, func(t *testing.T) { + cs1, vss := randState(2) + cs1.state.ConsensusParams.Block.MaxBytes = maxBytes + height, round := cs1.Height, cs1.Round + vs2 := vss[1] - // make the second validator the proposer by incrementing round - round++ - incrementRound(vss[1:]...) + partSize := types.BlockPartSizeBytes - propBlockParts, err := propBlock.MakePartSet(partSize) - require.NoError(t, err) - blockID := types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()} - proposal := types.NewProposal(height, round, -1, blockID) - p := proposal.ToProto() - if err := vs2.SignProposal(config.ChainID(), p); err != nil { - t.Fatal("failed to sign bad proposal", err) - } - proposal.Signature = p.Signature + propBlock, propBlockParts := findBlockSizeLimit(t, height, maxBytes, cs1, partSize, testCase.oversized) - totalBytes := 0 - for i := 0; i < int(propBlockParts.Total()); i++ { - part := propBlockParts.GetPart(i) - totalBytes += len(part.Bytes) - } + timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) + voteCh := subscribe(cs1.eventBus, types.EventQueryVote) - if err := cs1.SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil { - t.Fatal(err) - } + // make the second validator the proposer by incrementing round + round++ + incrementRound(vss[1:]...) 
- // start the machine - startTestRound(cs1, height, round) + blockID := types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()} + proposal := types.NewProposal(height, round, -1, blockID) + p := proposal.ToProto() + if err := vs2.SignProposal(config.ChainID(), p); err != nil { + t.Fatal("failed to sign bad proposal", err) + } + proposal.Signature = p.Signature - t.Log("Block Sizes", "Limit", cs1.state.ConsensusParams.Block.MaxBytes, "Current", totalBytes) + totalBytes := 0 + for i := 0; i < int(propBlockParts.Total()); i++ { + part := propBlockParts.GetPart(i) + totalBytes += len(part.Bytes) + } - // c1 should log an error with the block part message as it exceeds the consensus params. The - // block is not added to cs.ProposalBlock so the node timeouts. - ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.Propose(round).Nanoseconds()) + if err := cs1.SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil { + t.Fatal(err) + } - // and then should send nil prevote and precommit regardless of whether other validators prevote and - // precommit on it - ensurePrevote(voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], nil) + // start the machine + startTestRound(cs1, height, round) + + t.Log("Block Sizes;", "Limit", cs1.state.ConsensusParams.Block.MaxBytes, "Current", totalBytes) + + validateHash := propBlock.Hash() + lockedRound := int32(1) + if testCase.oversized { + validateHash = nil + lockedRound = -1 + // if the block is oversized cs1 should log an error with the block part message as it exceeds + // the consensus params. The block is not added to cs.ProposalBlock so the node timeouts. + ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.Propose(round).Nanoseconds()) + // and then should send nil prevote and precommit regardless of whether other validators prevote and + // precommit on it + } + ensurePrevote(voteCh, height, round) + validatePrevote(t, cs1, round, vss[0], validateHash) - bps, err := propBlock.MakePartSet(partSize) - require.NoError(t, err) + bps, err := propBlock.MakePartSet(partSize) + require.NoError(t, err) - signAddVotes(cs1, cmtproto.PrevoteType, propBlock.Hash(), bps.Header(), vs2) - ensurePrevote(voteCh, height, round) - ensurePrecommit(voteCh, height, round) - validatePrecommit(t, cs1, round, -1, vss[0], nil, nil) + signAddVotes(cs1, cmtproto.PrevoteType, propBlock.Hash(), bps.Header(), vs2) + ensurePrevote(voteCh, height, round) + ensurePrecommit(voteCh, height, round) + validatePrecommit(t, cs1, round, lockedRound, vss[0], validateHash, validateHash) - bps2, err := propBlock.MakePartSet(partSize) - require.NoError(t, err) - signAddVotes(cs1, cmtproto.PrecommitType, propBlock.Hash(), bps2.Header(), vs2) + bps2, err := propBlock.MakePartSet(partSize) + require.NoError(t, err) + signAddVotes(cs1, cmtproto.PrecommitType, propBlock.Hash(), bps2.Header(), vs2) + }) + } } //---------------------------------------------------------------------------------------------------- @@ -2018,3 +2037,33 @@ func subscribeUnBuffered(eventBus *types.EventBus, q cmtpubsub.Query) <-chan cmt } return sub.Out() } + +func findBlockSizeLimit(t *testing.T, height, maxBytes int64, cs *State, partSize uint32, oversized bool) (*types.Block, *types.PartSet) { + var offset int64 + if !oversized { + offset = -2 + } + softMaxDataBytes := int(types.MaxDataBytes(maxBytes, 0, 0)) + for i := softMaxDataBytes; i < softMaxDataBytes*2; i++ { + propBlock := cs.state.MakeBlock( + height, + []types.Tx{[]byte("a=" + 
strings.Repeat("o", i-2))}, + &types.Commit{}, + nil, + cs.privValidatorPubKey.Address(), + ) + + propBlockParts, err := propBlock.MakePartSet(partSize) + require.NoError(t, err) + if propBlockParts.ByteSize() > maxBytes+offset { + s := "real max" + if oversized { + s = "off-by-1" + } + t.Log("Detected "+s+" data size for block;", "size", i, "softMaxDataBytes", softMaxDataBytes) + return propBlock, propBlockParts + } + } + require.Fail(t, "We shouldn't hit the end of the loop") + return nil, nil +} From 9267594e0a17c01cc4a97b399ada5eaa8a734db5 Mon Sep 17 00:00:00 2001 From: Jasmina Malicevic Date: Wed, 3 May 2023 10:38:12 +0200 Subject: [PATCH 02/18] v0.37 pubsub: Handle big ints (#771) Replaced int64 with big.int Co-authored-by: Lasaro Co-authored-by: Sergio Mena --- .../771-kvindexer-parsing-big-ints.md | 2 + .../bug-fixes/771-pubsub-parsing-big-ints.md | 3 + docs/app-dev/indexing-transactions.md | 7 + docs/core/subscription.md | 14 ++ libs/pubsub/query/query.go | 104 +++++++----- libs/pubsub/query/query_test.go | 76 ++++++++- state/indexer/block/kv/kv.go | 15 +- state/indexer/block/kv/kv_test.go | 152 +++++++++++++++--- state/indexer/block/kv/util.go | 5 +- state/indexer/query_range.go | 8 +- state/txindex/kv/kv.go | 15 +- state/txindex/kv/kv_test.go | 71 ++++++++ state/txindex/kv/utils.go | 5 +- 13 files changed, 394 insertions(+), 83 deletions(-) create mode 100644 .changelog/unreleased/bug-fixes/771-kvindexer-parsing-big-ints.md create mode 100644 .changelog/unreleased/bug-fixes/771-pubsub-parsing-big-ints.md diff --git a/.changelog/unreleased/bug-fixes/771-kvindexer-parsing-big-ints.md b/.changelog/unreleased/bug-fixes/771-kvindexer-parsing-big-ints.md new file mode 100644 index 00000000000..8114534c051 --- /dev/null +++ b/.changelog/unreleased/bug-fixes/771-kvindexer-parsing-big-ints.md @@ -0,0 +1,2 @@ +- `[state/kvindex]` Querying event attributes that are bigger than int64 is now enabled. We are not supporting reading floats from the db into the indexer nor parsing them into BigFloats to not introduce breaking changes in minor releases. + ([\#771](https://github.com/cometbft/cometbft/pull/771)) \ No newline at end of file diff --git a/.changelog/unreleased/bug-fixes/771-pubsub-parsing-big-ints.md b/.changelog/unreleased/bug-fixes/771-pubsub-parsing-big-ints.md new file mode 100644 index 00000000000..749b30d5b50 --- /dev/null +++ b/.changelog/unreleased/bug-fixes/771-pubsub-parsing-big-ints.md @@ -0,0 +1,3 @@ +- `[pubsub]` Pubsub queries are now able to parse big integers (larger than int64). Very big floats + are also properly parsed into very big integers instead of being truncated to int64. + ([\#771](https://github.com/cometbft/cometbft/pull/771)) \ No newline at end of file diff --git a/docs/app-dev/indexing-transactions.md b/docs/app-dev/indexing-transactions.md index 038403128bf..da191fb8393 100644 --- a/docs/app-dev/indexing-transactions.md +++ b/docs/app-dev/indexing-transactions.md @@ -245,3 +245,10 @@ This behavior was fixed with CometBFT 0.34.26+. However, if the data was indexed Tendermint Core and not re-indexed, that data will be queried as if all the attributes within a height occurred within the same event. +# Event attribute value types +Users can use anything as an event value. However, if the even attrbute value is a number, the following restrictions apply: +- Negative numbers will not be properly retrieved when querying the indexer +- When querying the events using `tx_search` and `block_search`, the value given as part of the condition cannot be a float. 
+- Any event value retrieved from the database will be represented as a `BigInt` (from `math/big`) +- Floating point values are not read from the database even with the introduction of `BigInt`. This was intentionally done +to keep the same beheaviour as was historically present and not introduce breaking changes. This will be fixed in the 0.38 series. \ No newline at end of file diff --git a/docs/core/subscription.md b/docs/core/subscription.md index bcb86119d63..bafb349bb17 100644 --- a/docs/core/subscription.md +++ b/docs/core/subscription.md @@ -40,6 +40,20 @@ You can also use tags, given you had included them into DeliverTx response, to query transaction results. See [Indexing transactions](../app-dev/indexing-transactions.md) for details. + +## Query parameter and event type restrictions + +While CometBFT imposes no restrictions on the application with regards to the type of +the event output, there are several restrictions when it comes to querying +events whose attribute values are numeric. + +- Queries cannot include negative numbers +- If floating points are compared to integers, they are converted to an integer +- Floating point to floating point comparison leads to a loss of precision for very big floating point numbers +(`10000000000000000000.0` is treated the same as `10000000000000000000.6`) +- When floating points do get converted to integers, they are not rounded, the decimal part is simply truncated. +This has been done to preserve the behaviour present before introducing the support for BigInts in the query parameters. + ## ValidatorSetUpdates When validator set changes, ValidatorSetUpdates event is published. The diff --git a/libs/pubsub/query/query.go b/libs/pubsub/query/query.go index 7495b11acaf..87426c24dd8 100644 --- a/libs/pubsub/query/query.go +++ b/libs/pubsub/query/query.go @@ -10,6 +10,7 @@ package query import ( "fmt" + "math/big" "reflect" "regexp" "strconv" @@ -152,16 +153,18 @@ func (q *Query) Conditions() ([]Condition, error) { conditions = append(conditions, Condition{eventAttr, op, value}) } else { - value, err := strconv.ParseInt(number, 10, 64) - if err != nil { - err = fmt.Errorf( - "got %v while trying to parse %s as int64 (should never happen if the grammar is correct)", - err, number, + valueBig := new(big.Int) + + _, ok := valueBig.SetString(number, 10) + if !ok { + err := fmt.Errorf( + "problem parsing %s as bigint (should never happen if the grammar is correct)", + number, ) return nil, err } + conditions = append(conditions, Condition{eventAttr, op, valueBig}) - conditions = append(conditions, Condition{eventAttr, op, value}) } case ruletime: @@ -299,11 +302,13 @@ func (q *Query) Matches(events map[string][]string) (bool, error) { return false, nil } } else { - value, err := strconv.ParseInt(number, 10, 64) - if err != nil { - err = fmt.Errorf( - "got %v while trying to parse %s as int64 (should never happen if the grammar is correct)", - err, number, + value := new(big.Int) + _, ok := value.SetString(number, 10) + + if !ok { + err := fmt.Errorf( + "problem parsing %s as bigInt (should never happen if the grammar is correct)", + number, ) return false, err } @@ -452,42 +457,59 @@ func matchValue(value string, op Operator, operand reflect.Value) (bool, error) return v == operandFloat64, nil } - case reflect.Int64: - var v int64 + case reflect.Pointer: - operandInt := operand.Interface().(int64) - filteredValue := numRegex.FindString(value) + switch operand.Interface().(type) { + case *big.Int: + filteredValue := numRegex.FindString(value) + 
operandVal := operand.Interface().(*big.Int) + v := new(big.Int) + if strings.ContainsAny(filteredValue, ".") { + // We do this just to check whether the string can be parsed as a float + _, err := strconv.ParseFloat(filteredValue, 64) + if err != nil { + err = fmt.Errorf( + "got %v while trying to parse %s as float64 (should never happen if the grammar is correct)", + err, filteredValue, + ) + return false, err + } - // if value looks like float, we try to parse it as float - if strings.ContainsAny(filteredValue, ".") { - v1, err := strconv.ParseFloat(filteredValue, 64) - if err != nil { - return false, fmt.Errorf("failed to convert value %v from event attribute to float64: %w", filteredValue, err) - } + // If yes, we get the int part of the string. + // We could simply cast the float to an int and use that to create a big int but + // if it is a number bigger than int64, it will not be parsed properly. + // If we use bigFloat and convert that to a string, the values will be rounded which + // is not what we want either. + // Here we are simulating the behavior that int64(floatValue). This was the default behavior + // before introducing BigInts and we do not want to break the logic in minor releases. + _, ok := v.SetString(strings.Split(filteredValue, ".")[0], 10) + if !ok { + return false, fmt.Errorf("failed to convert value %s from float to big int", filteredValue) + } + } else { + // try our best to convert value from tags to big int + _, ok := v.SetString(filteredValue, 10) + + if !ok { + return false, fmt.Errorf("failed to convert value %v from event attribute to big int", filteredValue) + } - v = int64(v1) - } else { - var err error - // try our best to convert value from tags to int64 - v, err = strconv.ParseInt(filteredValue, 10, 64) - if err != nil { - return false, fmt.Errorf("failed to convert value %v from event attribute to int64: %w", filteredValue, err) } - } + cmpRes := operandVal.Cmp(v) + switch op { + case OpLessEqual: + return cmpRes == 0 || cmpRes == 1, nil + case OpGreaterEqual: + return cmpRes == 0 || cmpRes == -1, nil + case OpLess: + return cmpRes == 1, nil + case OpGreater: + return cmpRes == -1, nil + case OpEqual: + return cmpRes == 0, nil + } - switch op { - case OpLessEqual: - return v <= operandInt, nil - case OpGreaterEqual: - return v >= operandInt, nil - case OpLess: - return v < operandInt, nil - case OpGreater: - return v > operandInt, nil - case OpEqual: - return v == operandInt, nil } - case reflect.String: switch op { case OpEqual: diff --git a/libs/pubsub/query/query_test.go b/libs/pubsub/query/query_test.go index 98343858855..fe586976bcd 100644 --- a/libs/pubsub/query/query_test.go +++ b/libs/pubsub/query/query_test.go @@ -2,6 +2,7 @@ package query_test import ( "fmt" + "math/big" "testing" "time" @@ -11,6 +12,57 @@ import ( "github.com/cometbft/cometbft/libs/pubsub/query" ) +func TestBigNumbers(t *testing.T) { + bigInt := "10000000000000000000" + bigIntAsFloat := "10000000000000000000.0" + bigFloat := "10000000000000000000.6" + bigFloatLowerRounding := "10000000000000000000.1" + doubleBigInt := "20000000000000000000" + + testCases := []struct { + s string + events map[string][]string + err bool + matches bool + matchErr bool + }{ + + {"account.balance <= " + bigInt, map[string][]string{"account.balance": {bigInt}}, false, true, false}, + {"account.balance <= " + bigInt, map[string][]string{"account.balance": {bigIntAsFloat}}, false, true, false}, + {"account.balance <= " + doubleBigInt, map[string][]string{"account.balance": {bigInt}}, false, 
true, false}, + {"account.balance <= " + bigInt, map[string][]string{"account.balance": {"10000000000000000001"}}, false, false, false}, + {"account.balance <= " + doubleBigInt, map[string][]string{"account.balance": {bigFloat}}, false, true, false}, + // To maintain compatibility with the old implementation which did a simple cast of float to int64, we do not round the float + // Thus both 10000000000000000000.6 and "10000000000000000000.1 are equal to 10000000000000000000 + // and the test does not find a match + {"account.balance > " + bigInt, map[string][]string{"account.balance": {bigFloat}}, false, false, false}, + {"account.balance > " + bigInt, map[string][]string{"account.balance": {bigFloatLowerRounding}}, true, false, false}, + // This test should also find a match, but floats that are too big cannot be properly converted, thus + // 10000000000000000000.6 gets rounded to 10000000000000000000 + {"account.balance > " + bigIntAsFloat, map[string][]string{"account.balance": {bigFloat}}, false, false, false}, + {"account.balance > 11234.0", map[string][]string{"account.balance": {"11234.6"}}, false, true, false}, + {"account.balance <= " + bigInt, map[string][]string{"account.balance": {"1000.45"}}, false, true, false}, + } + + for _, tc := range testCases { + q, err := query.New(tc.s) + if !tc.err { + require.Nil(t, err) + } + require.NotNil(t, q, "Query '%s' should not be nil", tc.s) + + if tc.matches { + match, err := q.Matches(tc.events) + assert.Nil(t, err, "Query '%s' should not error on match %v", tc.s, tc.events) + assert.True(t, match, "Query '%s' should match %v", tc.s, tc.events) + } else { + match, err := q.Matches(tc.events) + assert.Equal(t, tc.matchErr, err != nil, "Unexpected error for query '%s' match %v", tc.s, tc.events) + assert.False(t, match, "Query '%s' should not match %v", tc.s, tc.events) + } + } +} + func TestMatches(t *testing.T) { var ( txDate = "2017-01-01" @@ -180,6 +232,10 @@ func TestConditions(t *testing.T) { txTime, err := time.Parse(time.RFC3339, "2013-05-03T14:45:00Z") require.NoError(t, err) + bigInt := new(big.Int) + bigInt, ok := bigInt.SetString("10000000000000000000", 10) + require.True(t, ok) + testCases := []struct { s string conditions []query.Condition @@ -193,8 +249,24 @@ func TestConditions(t *testing.T) { { s: "tx.gas > 7 AND tx.gas < 9", conditions: []query.Condition{ - {CompositeKey: "tx.gas", Op: query.OpGreater, Operand: int64(7)}, - {CompositeKey: "tx.gas", Op: query.OpLess, Operand: int64(9)}, + {CompositeKey: "tx.gas", Op: query.OpGreater, Operand: big.NewInt(7)}, + {CompositeKey: "tx.gas", Op: query.OpLess, Operand: big.NewInt(9)}, + }, + }, + { + + s: "tx.gas > 7.5 AND tx.gas < 9", + conditions: []query.Condition{ + {CompositeKey: "tx.gas", Op: query.OpGreater, Operand: 7.5}, + {CompositeKey: "tx.gas", Op: query.OpLess, Operand: big.NewInt(9)}, + }, + }, + { + + s: "tx.gas > " + bigInt.String() + " AND tx.gas < 9", + conditions: []query.Condition{ + {CompositeKey: "tx.gas", Op: query.OpGreater, Operand: bigInt}, + {CompositeKey: "tx.gas", Op: query.OpLess, Operand: big.NewInt(9)}, }, }, { diff --git a/state/indexer/block/kv/kv.go b/state/indexer/block/kv/kv.go index 132f92cdccc..95a22b969c5 100644 --- a/state/indexer/block/kv/kv.go +++ b/state/indexer/block/kv/kv.go @@ -5,6 +5,7 @@ import ( "context" "errors" "fmt" + "math/big" "sort" "strconv" "strings" @@ -293,9 +294,10 @@ LOOP: continue } - if _, ok := qr.AnyBound().(int64); ok { - v, err := strconv.ParseInt(eventValue, 10, 64) - if err != nil { + if _, ok := 
qr.AnyBound().(*big.Int); ok { + v := new(big.Int) + v, ok := v.SetString(eventValue, 10) + if !ok { // If the number was not int it might be a float but this behavior is kept the same as before the patch continue LOOP } @@ -364,15 +366,16 @@ func (idx *BlockerIndexer) setTmpHeights(tmpHeights map[string][]byte, it dbm.It } -func checkBounds(ranges indexer.QueryRange, v int64) bool { +func checkBounds(ranges indexer.QueryRange, v *big.Int) bool { include := true lowerBound := ranges.LowerBoundValue() upperBound := ranges.UpperBoundValue() - if lowerBound != nil && v < lowerBound.(int64) { + + if lowerBound != nil && v.Cmp(lowerBound.(*big.Int)) == -1 { include = false } - if upperBound != nil && v > upperBound.(int64) { + if upperBound != nil && v.Cmp(upperBound.(*big.Int)) == 1 { include = false } diff --git a/state/indexer/block/kv/kv_test.go b/state/indexer/block/kv/kv_test.go index 395b4bcc4a6..7442855e0e5 100644 --- a/state/indexer/block/kv/kv_test.go +++ b/state/indexer/block/kv/kv_test.go @@ -249,26 +249,14 @@ func TestBlockIndexerMulti(t *testing.T) { q: query.MustParse("block.height = 1"), results: []int64{1}, }, - "query return all events from a height - exact - no match.events": { - q: query.MustParse("block.height = 1"), - results: []int64{1}, - }, "query return all events from a height - exact (deduplicate height)": { q: query.MustParse("block.height = 1 AND block.height = 2"), results: []int64{1}, }, - "query return all events from a height - exact (deduplicate height) - no match.events": { - q: query.MustParse("block.height = 1 AND block.height = 2"), - results: []int64{1}, - }, "query return all events from a height - range": { q: query.MustParse("block.height < 2 AND block.height > 0 AND block.height > 0"), results: []int64{1}, }, - "query return all events from a height - range - no match.events": { - q: query.MustParse("block.height < 2 AND block.height > 0 AND block.height > 0"), - results: []int64{1}, - }, "query return all events from a height - range 2": { q: query.MustParse("block.height < 3 AND block.height < 2 AND block.height > 0 AND block.height > 0"), results: []int64{1}, @@ -281,10 +269,6 @@ func TestBlockIndexerMulti(t *testing.T) { q: query.MustParse("end_event.bar < 300 AND end_event.foo = 100 AND block.height > 0 AND block.height <= 2"), results: []int64{1, 2}, }, - "query matches fields from same event - no match.events": { - q: query.MustParse("end_event.bar < 300 AND end_event.foo = 100 AND block.height > 0 AND block.height <= 2"), - results: []int64{1, 2}, - }, "query matches fields from multiple events": { q: query.MustParse("end_event.foo = 100 AND end_event.bar = 400 AND block.height = 2"), results: []int64{}, @@ -313,20 +297,142 @@ func TestBlockIndexerMulti(t *testing.T) { q: query.MustParse("end_event.bar > 100 AND end_event.bar <= 500"), results: []int64{1, 2}, }, - "query matches all fields from multiple events - no match.events": { - q: query.MustParse("end_event.bar > 100 AND end_event.bar <= 500"), - results: []int64{1, 2}, - }, "query with height range and height equality - should ignore equality": { q: query.MustParse("block.height = 2 AND end_event.foo >= 100 AND block.height < 2"), results: []int64{1}, }, "query with non-existent field": { - q: query.MustParse("end_event.baz = 100"), + q: query.MustParse("end_event.bar = 100"), + results: []int64{}, + }, + } + + for name, tc := range testCases { + tc := tc + t.Run(name, func(t *testing.T) { + results, err := indexer.Search(context.Background(), tc.q) + require.NoError(t, err) + 
require.Equal(t, tc.results, results) + }) + } +} + +func TestBigInt(t *testing.T) { + + bigInt := "10000000000000000000" + store := db.NewPrefixDB(db.NewMemDB(), []byte("block_events")) + indexer := blockidxkv.New(store) + + require.NoError(t, indexer.Index(types.EventDataNewBlockHeader{ + Header: types.Header{Height: 1}, + ResultBeginBlock: abci.ResponseBeginBlock{ + Events: []abci.Event{}, + }, + ResultEndBlock: abci.ResponseEndBlock{ + Events: []abci.Event{ + { + Type: "end_event", + Attributes: []abci.EventAttribute{ + { + Key: "foo", + Value: "100", + Index: true, + }, + { + Key: "bar", + Value: "10000000000000000000.76", + Index: true, + }, + { + Key: "bar_lower", + Value: "10000000000000000000.1", + Index: true, + }, + }, + }, + { + Type: "end_event", + Attributes: []abci.EventAttribute{ + { + Key: "foo", + Value: bigInt, + Index: true, + }, + { + Key: "bar", + Value: "500", + Index: true, + }, + { + Key: "bla", + Value: "500.5", + Index: true, + }, + }, + }, + }, + }, + })) + + testCases := map[string]struct { + q *query.Query + results []int64 + }{ + + "query return all events from a height - exact": { + q: query.MustParse("block.height = 1"), + results: []int64{1}, + }, + "query return all events from a height - exact (deduplicate height)": { + q: query.MustParse("block.height = 1 AND block.height = 2"), + results: []int64{1}, + }, + "query return all events from a height - range": { + q: query.MustParse("block.height < 2 AND block.height > 0 AND block.height > 0"), + results: []int64{1}, + }, + "query matches fields with big int and height - no match": { + q: query.MustParse("end_event.foo = " + bigInt + " AND end_event.bar = 500 AND block.height = 2"), + results: []int64{}, + }, + "query matches fields with big int with less and height - no match": { + q: query.MustParse("end_event.foo <= " + bigInt + " AND end_event.bar = 500 AND block.height = 2"), + results: []int64{}, + }, + "query matches fields with big int and height - match": { + q: query.MustParse("end_event.foo = " + bigInt + " AND end_event.bar = 500 AND block.height = 1"), + results: []int64{1}, + }, + "query matches big int in range": { + q: query.MustParse("end_event.foo = " + bigInt), + results: []int64{1}, + }, + "query matches big int in range with float - does not pass as float is not converted to int": { + q: query.MustParse("end_event.bar >= " + bigInt), + results: []int64{}, + }, + "query matches big int in range with float - fails because float is converted to int": { + q: query.MustParse("end_event.bar > " + bigInt), + results: []int64{}, + }, + "query matches big int in range with float lower dec point - fails because float is converted to int": { + q: query.MustParse("end_event.bar_lower > " + bigInt), + results: []int64{}, + }, + "query matches big int in range with float with less - found": { + q: query.MustParse("end_event.foo <= " + bigInt), + results: []int64{1}, + }, + "query matches big int in range with float with less with height range - found": { + q: query.MustParse("end_event.foo <= " + bigInt + " AND block.height > 0"), + results: []int64{1}, + }, + "query matches big int in range with float with less - not found": { + q: query.MustParse("end_event.foo < " + bigInt + " AND end_event.foo > 100"), results: []int64{}, }, - "query with non-existent field ": { - q: query.MustParse("end_event.baz = 100"), + "query does not parse float": { + q: query.MustParse("end_event.bla >= 500"), results: []int64{}, }, } diff --git a/state/indexer/block/kv/util.go b/state/indexer/block/kv/util.go index 
89a89a74b4c..62f97aa0ba3 100644 --- a/state/indexer/block/kv/util.go +++ b/state/indexer/block/kv/util.go @@ -3,6 +3,7 @@ package kv import ( "encoding/binary" "fmt" + "math/big" "strconv" "github.com/google/orderedcode" @@ -149,7 +150,7 @@ func dedupHeight(conditions []query.Condition) (dedupConditions []query.Conditio continue } else { heightCondition = append(heightCondition, c) - heightInfo.height = c.Operand.(int64) + heightInfo.height = c.Operand.(*big.Int).Int64() // As height is assumed to always be int64 found = true } } else { @@ -180,7 +181,7 @@ func dedupHeight(conditions []query.Condition) (dedupConditions []query.Conditio func checkHeightConditions(heightInfo HeightInfo, keyHeight int64) bool { if heightInfo.heightRange.Key != "" { - if !checkBounds(heightInfo.heightRange, keyHeight) { + if !checkBounds(heightInfo.heightRange, big.NewInt(keyHeight)) { return false } } else { diff --git a/state/indexer/query_range.go b/state/indexer/query_range.go index 9a072b58a4e..b4af7c71698 100644 --- a/state/indexer/query_range.go +++ b/state/indexer/query_range.go @@ -1,6 +1,7 @@ package indexer import ( + "math/big" "time" "github.com/cometbft/cometbft/libs/pubsub/query" @@ -44,6 +45,9 @@ func (qr QueryRange) LowerBoundValue() interface{} { switch t := qr.LowerBound.(type) { case int64: return t + 1 + case *big.Int: + tmp := new(big.Int) + return tmp.Add(t, big.NewInt(1)) case time.Time: return t.Unix() + 1 @@ -67,7 +71,9 @@ func (qr QueryRange) UpperBoundValue() interface{} { switch t := qr.UpperBound.(type) { case int64: return t - 1 - + case *big.Int: + tmp := new(big.Int) + return tmp.Sub(t, big.NewInt(1)) case time.Time: return t.Unix() - 1 diff --git a/state/txindex/kv/kv.go b/state/txindex/kv/kv.go index c2007bb29a2..308c6580fc6 100644 --- a/state/txindex/kv/kv.go +++ b/state/txindex/kv/kv.go @@ -5,6 +5,7 @@ import ( "context" "encoding/hex" "fmt" + "math/big" "strconv" "strings" @@ -516,9 +517,11 @@ LOOP: continue } - if _, ok := qr.AnyBound().(int64); ok { - v, err := strconv.ParseInt(extractValueFromKey(it.Key()), 10, 64) - if err != nil { + if _, ok := qr.AnyBound().(*big.Int); ok { + v := new(big.Int) + eventValue := extractValueFromKey(it.Key()) + v, ok := v.SetString(eventValue, 10) + if !ok { continue LOOP } if qr.Key != types.TxHeightKey { @@ -661,15 +664,15 @@ func startKey(fields ...interface{}) []byte { return b.Bytes() } -func checkBounds(ranges indexer.QueryRange, v int64) bool { +func checkBounds(ranges indexer.QueryRange, v *big.Int) bool { include := true lowerBound := ranges.LowerBoundValue() upperBound := ranges.UpperBoundValue() - if lowerBound != nil && v < lowerBound.(int64) { + if lowerBound != nil && v.Cmp(lowerBound.(*big.Int)) == -1 { include = false } - if upperBound != nil && v > upperBound.(int64) { + if upperBound != nil && v.Cmp(upperBound.(*big.Int)) == 1 { include = false } diff --git a/state/txindex/kv/kv_test.go b/state/txindex/kv/kv_test.go index e55f08eae19..34ae6b11c86 100644 --- a/state/txindex/kv/kv_test.go +++ b/state/txindex/kv/kv_test.go @@ -19,6 +19,77 @@ import ( "github.com/cometbft/cometbft/types" ) +func TestBigInt(t *testing.T) { + indexer := NewTxIndex(db.NewMemDB()) + + bigInt := "10000000000000000000" + bigIntPlus1 := "10000000000000000001" + bigFloat := bigInt + ".76" + bigFloatLower := bigInt + ".1" + + txResult := txResultWithEvents([]abci.Event{ + {Type: "account", Attributes: []abci.EventAttribute{{Key: "number", Value: bigInt, Index: true}}}, + {Type: "account", Attributes: []abci.EventAttribute{{Key: "number", Value: 
bigIntPlus1, Index: true}}}, + {Type: "account", Attributes: []abci.EventAttribute{{Key: "number", Value: bigFloatLower, Index: true}}}, + {Type: "account", Attributes: []abci.EventAttribute{{Key: "owner", Value: "/Ivan/", Index: true}}}, + {Type: "", Attributes: []abci.EventAttribute{{Key: "not_allowed", Value: "Vlad", Index: true}}}, + }) + hash := types.Tx(txResult.Tx).Hash() + + err := indexer.Index(txResult) + + require.NoError(t, err) + + txResult2 := txResultWithEvents([]abci.Event{ + {Type: "account", Attributes: []abci.EventAttribute{{Key: "number", Value: bigFloat, Index: true}}}, + {Type: "account", Attributes: []abci.EventAttribute{{Key: "number", Value: bigFloat, Index: true}, {Key: "amount", Value: "5", Index: true}}}, + }) + + txResult2.Tx = types.Tx("NEW TX") + txResult2.Height = 2 + txResult2.Index = 2 + + hash2 := types.Tx(txResult2.Tx).Hash() + + err = indexer.Index(txResult2) + require.NoError(t, err) + testCases := []struct { + q string + txRes *abci.TxResult + resultsLength int + }{ + // search by hash + {fmt.Sprintf("tx.hash = '%X'", hash), txResult, 1}, + // search by hash (lower) + {fmt.Sprintf("tx.hash = '%x'", hash), txResult, 1}, + {fmt.Sprintf("tx.hash = '%x'", hash2), txResult2, 1}, + // search by exact match (one key) - bigint + {"account.number >= " + bigInt, nil, 1}, + // search by exact match (one key) - bigint range + {"account.number >= " + bigInt + " AND tx.height > 0", nil, 1}, + {"account.number >= " + bigInt + " AND tx.height > 0 AND account.owner = '/Ivan/'", nil, 0}, + // Floats are not parsed + {"account.number >= " + bigInt + " AND tx.height > 0 AND account.amount > 4", txResult2, 0}, + {"account.number >= " + bigInt + " AND tx.height > 0 AND account.amount = 5", txResult2, 0}, + {"account.number >= " + bigInt + " AND account.amount <= 5", txResult2, 0}, + {"account.number < " + bigInt + " AND tx.height = 1", nil, 0}, + } + + ctx := context.Background() + + for _, tc := range testCases { + tc := tc + t.Run(tc.q, func(t *testing.T) { + results, err := indexer.Search(ctx, query.MustParse(tc.q)) + assert.NoError(t, err) + assert.Len(t, results, tc.resultsLength) + if tc.resultsLength > 0 && tc.txRes != nil { + assert.True(t, proto.Equal(results[0], tc.txRes)) + } + }) + } +} + func TestTxIndex(t *testing.T) { indexer := NewTxIndex(db.NewMemDB()) diff --git a/state/txindex/kv/utils.go b/state/txindex/kv/utils.go index 70d656485ac..4bc9dc0bcc3 100644 --- a/state/txindex/kv/utils.go +++ b/state/txindex/kv/utils.go @@ -2,6 +2,7 @@ package kv import ( "fmt" + "math/big" "github.com/cometbft/cometbft/libs/pubsub/query" "github.com/cometbft/cometbft/state/indexer" @@ -60,7 +61,7 @@ func dedupHeight(conditions []query.Condition) (dedupConditions []query.Conditio } else { found = true heightCondition = append(heightCondition, c) - heightInfo.height = c.Operand.(int64) + heightInfo.height = c.Operand.(*big.Int).Int64() //Height is always int64 } } else { heightInfo.onlyHeightEq = false @@ -89,7 +90,7 @@ func dedupHeight(conditions []query.Condition) (dedupConditions []query.Conditio func checkHeightConditions(heightInfo HeightInfo, keyHeight int64) bool { if heightInfo.heightRange.Key != "" { - if !checkBounds(heightInfo.heightRange, keyHeight) { + if !checkBounds(heightInfo.heightRange, big.NewInt(keyHeight)) { return false } } else { From 38ab766ceb838d83e63219be4c7cf2a3e7cc1b96 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Thu, 4 May 2023 10:45:56 +0200 Subject: [PATCH 03/18] Struct `Client` exposes 
sensitive data (#784) (#787) (cherry picked from commit ecd5ee112533cda28900cbd75afb349f67da3fa5) Co-authored-by: Sergio Mena --- rpc/jsonrpc/client/http_json_client.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/rpc/jsonrpc/client/http_json_client.go b/rpc/jsonrpc/client/http_json_client.go index c4ddbe1f173..ebe91e8a340 100644 --- a/rpc/jsonrpc/client/http_json_client.go +++ b/rpc/jsonrpc/client/http_json_client.go @@ -139,6 +139,8 @@ var _ HTTPClient = (*Client)(nil) var _ Caller = (*Client)(nil) var _ Caller = (*RequestBatch)(nil) +var _ fmt.Stringer = (*Client)(nil) + // New returns a Client pointed at the given address. // An error is returned on invalid remote. The function panics when remote is nil. func New(remote string) (*Client, error) { @@ -232,6 +234,10 @@ func getHTTPRespErrPrefix(resp *http.Response) string { return fmt.Sprintf("error in json rpc client, with http response metadata: (Status: %s, Protocol %s)", resp.Status, resp.Proto) } +func (c *Client) String() string { + return fmt.Sprintf("&Client{user=%v, addr=%v, client=%v, nextReqID=%v}", c.username, c.address, c.client, c.nextReqID) +} + // NewRequestBatch starts a batch of requests for this client. func (c *Client) NewRequestBatch() *RequestBatch { return &RequestBatch{ From 72fa5359ea027bd030b22b029f96b1d29127cf86 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Thu, 4 May 2023 19:59:09 +0200 Subject: [PATCH 04/18] Unsafe int cast in `kill` command (backport #783) (#793) * Unsafe int cast in `kill` command (#783) * Unsafe int cast in `kill` command * Revert "Unsafe int cast in `kill` command" This reverts commit bbd649bd372ca90f83dea7b424d67dafbd9eb541. * Changed strategy (cherry picked from commit 03c5e7727a03983b54623e731d5d3d8dd4ac75ec) # Conflicts: # cmd/cometbft/commands/debug/kill.go * Revert "Unsafe int cast in `kill` command (#783)" This reverts commit 5cf3226cee122f197ec928daaf67864306196d32. * Unsafe int cast in `kill` command (#783) * Unsafe int cast in `kill` command * Revert "Unsafe int cast in `kill` command" This reverts commit bbd649bd372ca90f83dea7b424d67dafbd9eb541. * Changed strategy --------- Co-authored-by: Sergio Mena --- cmd/cometbft/commands/debug/kill.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cmd/cometbft/commands/debug/kill.go b/cmd/cometbft/commands/debug/kill.go index 3a1c993bcdc..93edde582e9 100644 --- a/cmd/cometbft/commands/debug/kill.go +++ b/cmd/cometbft/commands/debug/kill.go @@ -33,7 +33,7 @@ $ cometbft debug 34255 /path/to/cmt-debug.zip`, } func killCmdHandler(cmd *cobra.Command, args []string) error { - pid, err := strconv.ParseUint(args[0], 10, 64) + pid, err := strconv.Atoi(args[0]) if err != nil { return err } @@ -100,7 +100,7 @@ func killCmdHandler(cmd *cobra.Command, args []string) error { // is tailed and piped to a file under the directory dir. An error is returned // if the output file cannot be created or the tail command cannot be started. // An error is not returned if any subsequent syscall fails. -func killProc(pid uint64, dir string) error { +func killProc(pid int, dir string) error { // pipe STDERR output from tailing the CometBFT process to a file // // NOTE: This will only work on UNIX systems. @@ -123,7 +123,7 @@ func killProc(pid uint64, dir string) error { go func() { // Killing the CometBFT process with the '-ABRT|-6' signal will result in // a goroutine stacktrace. 
- p, err := os.FindProcess(int(pid)) + p, err := os.FindProcess(pid) if err != nil { fmt.Fprintf(os.Stderr, "failed to find PID to kill CometBFT process: %s", err) } else if err = p.Signal(syscall.SIGABRT); err != nil { From 8d280444d689208142aae0e4c396018d6fcdd7bf Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 8 May 2023 09:05:40 -0400 Subject: [PATCH 05/18] build(deps): Bump bufbuild/buf-setup-action from 1.17.0 to 1.18.0 (#814) Bumps [bufbuild/buf-setup-action](https://github.com/bufbuild/buf-setup-action) from 1.17.0 to 1.18.0. - [Release notes](https://github.com/bufbuild/buf-setup-action/releases) - [Commits](https://github.com/bufbuild/buf-setup-action/compare/v1.17.0...v1.18.0) --- updated-dependencies: - dependency-name: bufbuild/buf-setup-action dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/proto-lint.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/proto-lint.yml b/.github/workflows/proto-lint.yml index 2b991702f55..e004811c641 100644 --- a/.github/workflows/proto-lint.yml +++ b/.github/workflows/proto-lint.yml @@ -15,7 +15,7 @@ jobs: timeout-minutes: 5 steps: - uses: actions/checkout@v3 - - uses: bufbuild/buf-setup-action@v1.17.0 + - uses: bufbuild/buf-setup-action@v1.18.0 - uses: bufbuild/buf-lint-action@v1 with: input: 'proto' From 2e13f73a71385e8ee5da33ed0121c4975a70f133 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Mon, 15 May 2023 20:26:10 -0400 Subject: [PATCH 06/18] rpc: Remove response data from response failure logs (backport #829) (#837) --- .../improvements/654-rpc-rm-response-data-logs.md | 3 +++ rpc/jsonrpc/server/http_json_handler.go | 6 +++--- rpc/jsonrpc/server/http_server.go | 4 ++-- rpc/jsonrpc/server/http_uri_handler.go | 8 ++++---- 4 files changed, 12 insertions(+), 9 deletions(-) create mode 100644 .changelog/unreleased/improvements/654-rpc-rm-response-data-logs.md diff --git a/.changelog/unreleased/improvements/654-rpc-rm-response-data-logs.md b/.changelog/unreleased/improvements/654-rpc-rm-response-data-logs.md new file mode 100644 index 00000000000..3fddfee8e71 --- /dev/null +++ b/.changelog/unreleased/improvements/654-rpc-rm-response-data-logs.md @@ -0,0 +1,3 @@ +- `[rpc]` Remove response data from response failure logs in order + to prevent large quantities of log data from being produced + ([\#654](https://github.com/cometbft/cometbft/issues/654)) \ No newline at end of file diff --git a/rpc/jsonrpc/server/http_json_handler.go b/rpc/jsonrpc/server/http_json_handler.go index 8d746c7a224..8e5b363f151 100644 --- a/rpc/jsonrpc/server/http_json_handler.go +++ b/rpc/jsonrpc/server/http_json_handler.go @@ -25,7 +25,7 @@ func makeJSONRPCHandler(funcMap map[string]*RPCFunc, logger log.Logger) http.Han fmt.Errorf("error reading request body: %w", err), ) if wErr := WriteRPCResponseHTTPError(w, http.StatusBadRequest, res); wErr != nil { - logger.Error("failed to write response", "res", res, "err", wErr) + logger.Error("failed to write response", "err", wErr) } return } @@ -48,7 +48,7 @@ func makeJSONRPCHandler(funcMap map[string]*RPCFunc, logger log.Logger) http.Han if err := json.Unmarshal(b, &request); err != nil { res := types.RPCParseError(fmt.Errorf("error unmarshaling request: %w", err)) if wErr := WriteRPCResponseHTTPError(w, 
http.StatusInternalServerError, res); wErr != nil { - logger.Error("failed to write response", "res", res, "err", wErr) + logger.Error("failed to write response", "err", wErr) } return } @@ -122,7 +122,7 @@ func makeJSONRPCHandler(funcMap map[string]*RPCFunc, logger log.Logger) http.Han wErr = WriteRPCResponseHTTP(w, responses...) } if wErr != nil { - logger.Error("failed to write responses", "res", responses, "err", wErr) + logger.Error("failed to write responses", "err", wErr) } } } diff --git a/rpc/jsonrpc/server/http_server.go b/rpc/jsonrpc/server/http_server.go index a8005f5a4d6..101e86d2601 100644 --- a/rpc/jsonrpc/server/http_server.go +++ b/rpc/jsonrpc/server/http_server.go @@ -188,7 +188,7 @@ func RecoverAndLogHandler(handler http.Handler, logger log.Logger) http.Handler // If RPCResponse if res, ok := e.(types.RPCResponse); ok { if wErr := WriteRPCResponseHTTP(rww, res); wErr != nil { - logger.Error("failed to write response", "res", res, "err", wErr) + logger.Error("failed to write response", "err", wErr) } } else { // Panics can contain anything, attempt to normalize it as an error. @@ -207,7 +207,7 @@ func RecoverAndLogHandler(handler http.Handler, logger log.Logger) http.Handler res := types.RPCInternalError(types.JSONRPCIntID(-1), err) if wErr := WriteRPCResponseHTTPError(rww, http.StatusInternalServerError, res); wErr != nil { - logger.Error("failed to write response", "res", res, "err", wErr) + logger.Error("failed to write response", "err", wErr) } } } diff --git a/rpc/jsonrpc/server/http_uri_handler.go b/rpc/jsonrpc/server/http_uri_handler.go index 134eff20f03..6381d91d70f 100644 --- a/rpc/jsonrpc/server/http_uri_handler.go +++ b/rpc/jsonrpc/server/http_uri_handler.go @@ -27,7 +27,7 @@ func makeHTTPHandler(rpcFunc *RPCFunc, logger log.Logger) func(http.ResponseWrit return func(w http.ResponseWriter, r *http.Request) { res := types.RPCMethodNotFoundError(dummyID) if wErr := WriteRPCResponseHTTPError(w, http.StatusNotFound, res); wErr != nil { - logger.Error("failed to write response", "res", res, "err", wErr) + logger.Error("failed to write response", "err", wErr) } } } @@ -45,7 +45,7 @@ func makeHTTPHandler(rpcFunc *RPCFunc, logger log.Logger) func(http.ResponseWrit fmt.Errorf("error converting http params to arguments: %w", err), ) if wErr := WriteRPCResponseHTTPError(w, http.StatusInternalServerError, res); wErr != nil { - logger.Error("failed to write response", "res", res, "err", wErr) + logger.Error("failed to write response", "err", wErr) } return } @@ -58,7 +58,7 @@ func makeHTTPHandler(rpcFunc *RPCFunc, logger log.Logger) func(http.ResponseWrit if err != nil { if err := WriteRPCResponseHTTPError(w, http.StatusInternalServerError, types.RPCInternalError(dummyID, err)); err != nil { - logger.Error("failed to write response", "res", result, "err", err) + logger.Error("failed to write response", "err", err) return } return @@ -71,7 +71,7 @@ func makeHTTPHandler(rpcFunc *RPCFunc, logger log.Logger) func(http.ResponseWrit err = WriteRPCResponseHTTP(w, resp) } if err != nil { - logger.Error("failed to write response", "res", result, "err", err) + logger.Error("failed to write response", "err", err) return } } From 7bcca5cb93605274a1fb8dfcdb673f0f39741286 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 25 May 2023 09:04:58 +0200 Subject: [PATCH 07/18] build(deps): Bump slackapi/slack-github-action from 1.23.0 to 1.24.0 (#866) Bumps 
[slackapi/slack-github-action](https://github.com/slackapi/slack-github-action) from 1.23.0 to 1.24.0. - [Release notes](https://github.com/slackapi/slack-github-action/releases) - [Commits](https://github.com/slackapi/slack-github-action/compare/v1.23.0...v1.24.0) --- updated-dependencies: - dependency-name: slackapi/slack-github-action dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/e2e-long-37x.yml | 2 +- .github/workflows/e2e-nightly-34x.yml | 4 ++-- .github/workflows/e2e-nightly-37x.yml | 2 +- .github/workflows/e2e-nightly-main.yml | 4 ++-- .github/workflows/fuzz-nightly.yml | 2 +- .github/workflows/pre-release.yml | 2 +- .github/workflows/release.yml | 2 +- 7 files changed, 9 insertions(+), 9 deletions(-) diff --git a/.github/workflows/e2e-long-37x.yml b/.github/workflows/e2e-long-37x.yml index e96c34d315e..51c38f1cda8 100644 --- a/.github/workflows/e2e-long-37x.yml +++ b/.github/workflows/e2e-long-37x.yml @@ -38,7 +38,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Notify Slack on failure - uses: slackapi/slack-github-action@v1.23.0 + uses: slackapi/slack-github-action@v1.24.0 env: SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK diff --git a/.github/workflows/e2e-nightly-34x.yml b/.github/workflows/e2e-nightly-34x.yml index a727e88a56f..d232806afe2 100644 --- a/.github/workflows/e2e-nightly-34x.yml +++ b/.github/workflows/e2e-nightly-34x.yml @@ -57,7 +57,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Notify Slack on failure - uses: slackapi/slack-github-action@v1.23.0 + uses: slackapi/slack-github-action@v1.24.0 env: SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK @@ -84,7 +84,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Notify Slack on success - uses: slackapi/slack-github-action@v1.23.0 + uses: slackapi/slack-github-action@v1.24.0 env: SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK diff --git a/.github/workflows/e2e-nightly-37x.yml b/.github/workflows/e2e-nightly-37x.yml index 26614fe4b42..0ac1f50d313 100644 --- a/.github/workflows/e2e-nightly-37x.yml +++ b/.github/workflows/e2e-nightly-37x.yml @@ -55,7 +55,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Notify Slack on failure - uses: slackapi/slack-github-action@v1.23.0 + uses: slackapi/slack-github-action@v1.24.0 env: SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK diff --git a/.github/workflows/e2e-nightly-main.yml b/.github/workflows/e2e-nightly-main.yml index f4f066e2294..6fa05d90b9d 100644 --- a/.github/workflows/e2e-nightly-main.yml +++ b/.github/workflows/e2e-nightly-main.yml @@ -46,7 +46,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Notify Slack on failure - uses: slackapi/slack-github-action@v1.23.0 + uses: slackapi/slack-github-action@v1.24.0 env: SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK @@ -73,7 +73,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Notify Slack on success - uses: slackapi/slack-github-action@v1.23.0 + uses: slackapi/slack-github-action@v1.24.0 env: SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK diff --git a/.github/workflows/fuzz-nightly.yml b/.github/workflows/fuzz-nightly.yml index d7f378aac34..4af09787419 100644 --- a/.github/workflows/fuzz-nightly.yml +++ 
b/.github/workflows/fuzz-nightly.yml @@ -76,7 +76,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Notify Slack on failure - uses: slackapi/slack-github-action@v1.23.0 + uses: slackapi/slack-github-action@v1.24.0 env: SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK diff --git a/.github/workflows/pre-release.yml b/.github/workflows/pre-release.yml index bbd28cbd067..4cf6a83cb87 100644 --- a/.github/workflows/pre-release.yml +++ b/.github/workflows/pre-release.yml @@ -57,7 +57,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Notify Slack upon pre-release - uses: slackapi/slack-github-action@v1.23.0 + uses: slackapi/slack-github-action@v1.24.0 env: SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index e1c959c3f32..343818356e2 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -56,7 +56,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Notify Slack upon release - uses: slackapi/slack-github-action@v1.23.0 + uses: slackapi/slack-github-action@v1.24.0 env: SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK From 92f900f90dfb39f9f9b584b31cdb3106bf4d1a9d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 25 May 2023 09:11:01 +0200 Subject: [PATCH 08/18] build(deps): Bump bufbuild/buf-setup-action from 1.18.0 to 1.19.0 (#867) Bumps [bufbuild/buf-setup-action](https://github.com/bufbuild/buf-setup-action) from 1.18.0 to 1.19.0. - [Release notes](https://github.com/bufbuild/buf-setup-action/releases) - [Commits](https://github.com/bufbuild/buf-setup-action/compare/v1.18.0...v1.19.0) --- updated-dependencies: - dependency-name: bufbuild/buf-setup-action dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Jasmina Malicevic --- .github/workflows/proto-lint.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/proto-lint.yml b/.github/workflows/proto-lint.yml index e004811c641..ec7fa8d42eb 100644 --- a/.github/workflows/proto-lint.yml +++ b/.github/workflows/proto-lint.yml @@ -15,7 +15,7 @@ jobs: timeout-minutes: 5 steps: - uses: actions/checkout@v3 - - uses: bufbuild/buf-setup-action@v1.18.0 + - uses: bufbuild/buf-setup-action@v1.19.0 - uses: bufbuild/buf-lint-action@v1 with: input: 'proto' From 6c9461777428c7650dc27020d9f06a9945f7d0e0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 5 Jun 2023 15:34:07 +0300 Subject: [PATCH 09/18] build(deps): Bump bufbuild/buf-setup-action from 1.19.0 to 1.20.0 (#911) Bumps [bufbuild/buf-setup-action](https://github.com/bufbuild/buf-setup-action) from 1.19.0 to 1.20.0. - [Release notes](https://github.com/bufbuild/buf-setup-action/releases) - [Commits](https://github.com/bufbuild/buf-setup-action/compare/v1.19.0...v1.20.0) --- updated-dependencies: - dependency-name: bufbuild/buf-setup-action dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/proto-lint.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/proto-lint.yml b/.github/workflows/proto-lint.yml index ec7fa8d42eb..c0dfaf05513 100644 --- a/.github/workflows/proto-lint.yml +++ b/.github/workflows/proto-lint.yml @@ -15,7 +15,7 @@ jobs: timeout-minutes: 5 steps: - uses: actions/checkout@v3 - - uses: bufbuild/buf-setup-action@v1.19.0 + - uses: bufbuild/buf-setup-action@v1.20.0 - uses: bufbuild/buf-lint-action@v1 with: input: 'proto' From 587522f5df2dba7acb3ecc0eff0b0e972613bda2 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Wed, 7 Jun 2023 20:16:49 +0200 Subject: [PATCH 10/18] v0.37.x: Prevent a transaction to appear twice in the mempool (backport #890) (#926) * add a test to trigger the issue * add a fix (in particular, we track the sender when receiving a tx twice) * add a changelog --------- Co-authored-by: Daniel (cherry picked from commit a09f5d33ecd8846369b93cae9063291eb8abc3a0) * update fix and test wrt. v0.37.x --------- Co-authored-by: Pierre Sutra <0track@gmail.com> --- .../bug-fixes/890-mempool-fix-cache.md | 1 + mempool/v0/clist_mempool.go | 14 ++++++ mempool/v0/clist_mempool_test.go | 46 +++++++++++++++++++ 3 files changed, 61 insertions(+) create mode 100644 .changelog/unreleased/bug-fixes/890-mempool-fix-cache.md diff --git a/.changelog/unreleased/bug-fixes/890-mempool-fix-cache.md b/.changelog/unreleased/bug-fixes/890-mempool-fix-cache.md new file mode 100644 index 00000000000..34dae0463a7 --- /dev/null +++ b/.changelog/unreleased/bug-fixes/890-mempool-fix-cache.md @@ -0,0 +1 @@ +- `[mempool/clist_mempool]` \#890 Prevent a transaction to appear twice in the mempool (@otrack) diff --git a/mempool/v0/clist_mempool.go b/mempool/v0/clist_mempool.go index 9500a3fb46d..2f4a0264bef 100644 --- a/mempool/v0/clist_mempool.go +++ b/mempool/v0/clist_mempool.go @@ -391,6 +391,20 @@ func (mem *CListMempool) resCbFirstTime( return } + // Check transaction not already in the mempool + if e, ok := mem.txsMap.Load(types.Tx(tx).Key()); ok { + memTx := e.(*clist.CElement).Value.(*mempoolTx) + memTx.senders.LoadOrStore(peerID, true) + mem.logger.Debug( + "transaction already there, not adding it again", + "tx", types.Tx(tx).Hash(), + "res", r, + "height", mem.height, + "total", mem.Size(), + ) + return + } + memTx := &mempoolTx{ height: mem.height, gasWanted: r.CheckTx.GasWanted, diff --git a/mempool/v0/clist_mempool_test.go b/mempool/v0/clist_mempool_test.go index 88b5c70521e..35fc0663171 100644 --- a/mempool/v0/clist_mempool_test.go +++ b/mempool/v0/clist_mempool_test.go @@ -6,6 +6,7 @@ import ( "fmt" mrand "math/rand" "os" + "strconv" "testing" "time" @@ -638,6 +639,51 @@ func TestMempoolTxsBytes(t *testing.T) { } +func TestMempoolNoCacheOverflow(t *testing.T) { + sockPath := fmt.Sprintf("unix:///tmp/echo_%v.sock", cmtrand.Str(6)) + app := kvstore.NewApplication() + _, server := newRemoteApp(t, sockPath, app) + t.Cleanup(func() { + if err := server.Stop(); err != nil { + t.Error(err) + } + }) + cfg := config.ResetTestRoot("mempool_test") + mp, cleanup := newMempoolWithAppAndConfig(proxy.NewRemoteClientCreator(sockPath, "socket", true), cfg) + defer cleanup() + + // add tx0 + var tx0 = types.Tx([]byte{0x01}) + err := mp.CheckTx(tx0, nil, mempool.TxInfo{}) + require.NoError(t, err) + err = mp.FlushAppConn() + require.NoError(t, err) + + // saturate the 
cache to remove tx0 + for i := 1; i <= mp.config.CacheSize; i++ { + err = mp.CheckTx(types.Tx([]byte(strconv.Itoa(i))), nil, mempool.TxInfo{}) + require.NoError(t, err) + } + err = mp.FlushAppConn() + require.NoError(t, err) + assert.False(t, mp.cache.Has(types.Tx([]byte{0x01}))) + + // add again tx0 + err = mp.CheckTx(tx0, nil, mempool.TxInfo{}) + require.NoError(t, err) + err = mp.FlushAppConn() + require.NoError(t, err) + + // tx0 should appear only once in mp.txs + found := 0 + for e := mp.txs.Front(); e != nil; e = e.Next() { + if types.Tx.Key(e.Value.(*mempoolTx).tx) == types.Tx.Key(tx0) { + found++ + } + } + assert.True(t, found == 1) +} + // This will non-deterministically catch some concurrency failures like // https://github.com/tendermint/tendermint/issues/3509 // TODO: all of the tests should probably also run using the remote proxy app From 9cbdef893c271700e95dac3f8d77882f9042385b Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Tue, 13 Jun 2023 11:16:25 +0200 Subject: [PATCH 11/18] e2e: Generate prometheus.yaml on setup (#954) (#957) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Generate prometheus yaml on setup * change file extension * move template to file * add templates dir (cherry picked from commit 3701c9f3c1a12a0ce35cef98788bdebb382bd74e) Co-authored-by: Hernán Vanzetto <15466498+hvanz@users.noreply.github.com> --- test/e2e/pkg/templates/prometheus-yaml.tmpl | 9 ++++++ test/e2e/pkg/testnet.go | 33 +++++++++++++++++++++ test/e2e/runner/setup.go | 6 ++++ 3 files changed, 48 insertions(+) create mode 100644 test/e2e/pkg/templates/prometheus-yaml.tmpl diff --git a/test/e2e/pkg/templates/prometheus-yaml.tmpl b/test/e2e/pkg/templates/prometheus-yaml.tmpl new file mode 100644 index 00000000000..3c7636e0ddc --- /dev/null +++ b/test/e2e/pkg/templates/prometheus-yaml.tmpl @@ -0,0 +1,9 @@ +global: + scrape_interval: 1s + +scrape_configs: +{{- range .Nodes }} + - job_name: '{{ .Name }}' + static_configs: + - targets: ['localhost:{{ .PrometheusProxyPort }}'] +{{end}} \ No newline at end of file diff --git a/test/e2e/pkg/testnet.go b/test/e2e/pkg/testnet.go index c630f570081..eb65c0e7500 100644 --- a/test/e2e/pkg/testnet.go +++ b/test/e2e/pkg/testnet.go @@ -1,21 +1,26 @@ package e2e import ( + "bytes" "errors" "fmt" "io" "math/rand" "net" + "os" "path/filepath" "sort" "strconv" "strings" + "text/template" "time" "github.com/cometbft/cometbft/crypto" "github.com/cometbft/cometbft/crypto/ed25519" "github.com/cometbft/cometbft/crypto/secp256k1" rpchttp "github.com/cometbft/cometbft/rpc/client/http" + + _ "embed" ) const ( @@ -470,6 +475,34 @@ func (t Testnet) HasPerturbations() bool { return false } +//go:embed templates/prometheus-yaml.tmpl +var prometheusYamlTemplate string + +func (t Testnet) prometheusConfigBytes() ([]byte, error) { + tmpl, err := template.New("prometheus-yaml").Parse(prometheusYamlTemplate) + if err != nil { + return nil, err + } + var buf bytes.Buffer + err = tmpl.Execute(&buf, t) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +func (t Testnet) WritePrometheusConfig() error { + bytes, err := t.prometheusConfigBytes() + if err != nil { + return err + } + err = os.WriteFile(filepath.Join(t.Dir, "prometheus.yaml"), bytes, 0o644) //nolint:gosec + if err != nil { + return err + } + return nil +} + // Address returns a P2P endpoint address for the node. 
func (n Node) AddressP2P(withID bool) string { ip := n.IP.String() diff --git a/test/e2e/runner/setup.go b/test/e2e/runner/setup.go index 8c3597bbcf6..0ec74fe8c80 100644 --- a/test/e2e/runner/setup.go +++ b/test/e2e/runner/setup.go @@ -116,6 +116,12 @@ func Setup(testnet *e2e.Testnet, infp infra.Provider) error { )).Save() } + if testnet.Prometheus { + if err := testnet.WritePrometheusConfig(); err != nil { + return err + } + } + return nil } From e45db5dd33f7a467117c89f8fbd9fd271bb133b5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 13 Jun 2023 19:38:59 -0400 Subject: [PATCH 12/18] build(deps): Bump bufbuild/buf-setup-action from 1.20.0 to 1.21.0 (#936) --- .github/workflows/proto-lint.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/proto-lint.yml b/.github/workflows/proto-lint.yml index c0dfaf05513..a3404dd73e0 100644 --- a/.github/workflows/proto-lint.yml +++ b/.github/workflows/proto-lint.yml @@ -15,7 +15,7 @@ jobs: timeout-minutes: 5 steps: - uses: actions/checkout@v3 - - uses: bufbuild/buf-setup-action@v1.20.0 + - uses: bufbuild/buf-setup-action@v1.21.0 - uses: bufbuild/buf-lint-action@v1 with: input: 'proto' From 3f62405567e14773ff3aed5e8f6443e5ebc6960e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 13 Jun 2023 19:42:12 -0400 Subject: [PATCH 13/18] build(deps): Bump docker/login-action from 2.1.0 to 2.2.0 (#937) --- .github/workflows/cometbft-docker.yml | 2 +- .github/workflows/testapp-docker.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/cometbft-docker.yml b/.github/workflows/cometbft-docker.yml index ebd3dbf4b04..123294627b2 100644 --- a/.github/workflows/cometbft-docker.yml +++ b/.github/workflows/cometbft-docker.yml @@ -45,7 +45,7 @@ jobs: - name: Login to DockerHub if: ${{ github.event_name != 'pull_request' }} - uses: docker/login-action@v2.1.0 + uses: docker/login-action@v2.2.0 with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} diff --git a/.github/workflows/testapp-docker.yml b/.github/workflows/testapp-docker.yml index 0a828deb1d2..bb61e5f92d6 100644 --- a/.github/workflows/testapp-docker.yml +++ b/.github/workflows/testapp-docker.yml @@ -45,7 +45,7 @@ jobs: - name: Login to DockerHub if: ${{ github.event_name != 'pull_request' }} - uses: docker/login-action@v2.1.0 + uses: docker/login-action@v2.2.0 with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} From 79f182b7d88f4b45abd6ebb33025a84623082b11 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 13 Jun 2023 19:47:13 -0400 Subject: [PATCH 14/18] build(deps): Bump docker/setup-buildx-action from 2.5.0 to 2.7.0 (#958) --- .github/workflows/cometbft-docker.yml | 2 +- .github/workflows/testapp-docker.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/cometbft-docker.yml b/.github/workflows/cometbft-docker.yml index 123294627b2..16886319c8a 100644 --- a/.github/workflows/cometbft-docker.yml +++ b/.github/workflows/cometbft-docker.yml @@ -41,7 +41,7 @@ jobs: platforms: all - name: Set up Docker Build - uses: docker/setup-buildx-action@v2.5.0 + uses: docker/setup-buildx-action@v2.7.0 - name: Login to DockerHub if: ${{ github.event_name != 'pull_request' }} diff --git a/.github/workflows/testapp-docker.yml b/.github/workflows/testapp-docker.yml 
index bb61e5f92d6..1eb945f1d3c 100644 --- a/.github/workflows/testapp-docker.yml +++ b/.github/workflows/testapp-docker.yml @@ -41,7 +41,7 @@ jobs: platforms: all - name: Set up Docker Build - uses: docker/setup-buildx-action@v2.5.0 + uses: docker/setup-buildx-action@v2.7.0 - name: Login to DockerHub if: ${{ github.event_name != 'pull_request' }} From 3e6b456d778b0e90afe4c115998f5250666b769a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 13 Jun 2023 19:52:07 -0400 Subject: [PATCH 15/18] build(deps): Bump docker/build-push-action from 4.0.0 to 4.1.1 (#959) --- .github/workflows/cometbft-docker.yml | 2 +- .github/workflows/testapp-docker.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/cometbft-docker.yml b/.github/workflows/cometbft-docker.yml index 16886319c8a..eb471088385 100644 --- a/.github/workflows/cometbft-docker.yml +++ b/.github/workflows/cometbft-docker.yml @@ -51,7 +51,7 @@ jobs: password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Publish to Docker Hub - uses: docker/build-push-action@v4.0.0 + uses: docker/build-push-action@v4.1.1 with: context: . file: ./DOCKER/Dockerfile diff --git a/.github/workflows/testapp-docker.yml b/.github/workflows/testapp-docker.yml index 1eb945f1d3c..d4b6126d74f 100644 --- a/.github/workflows/testapp-docker.yml +++ b/.github/workflows/testapp-docker.yml @@ -51,7 +51,7 @@ jobs: password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Publish to Docker Hub - uses: docker/build-push-action@v4.0.0 + uses: docker/build-push-action@v4.1.1 with: context: . file: ./test/e2e/docker/Dockerfile From 4f04b67978d808f069b260a28cd9d2893323e5f6 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Wed, 14 Jun 2023 09:56:36 +0200 Subject: [PATCH 16/18] Add requirement for `CheckTx` in ABCI spec (backport #928) (#965) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Add requirement for `CheckTx` in ABCI spec (#928) * Add requirement for `CheckTx` in ABCI spec * Apply suggestions from code review Co-authored-by: Hernán Vanzetto <15466498+hvanz@users.noreply.github.com> Co-authored-by: Lasaro Co-authored-by: Jasmina Malicevic * Added sentence from @jmalicevic's suggestion * Addressed review comments * Update spec/abci/abci++_app_requirements.md Co-authored-by: Daniel * Addressed comment * `CheckTx` requirements for the app, new version * New version of `CheckTx` requirement, after yesterday's discussion * Update spec/abci/abci++_app_requirements.md Co-authored-by: Hernán Vanzetto <15466498+hvanz@users.noreply.github.com> * Addressed @josef-widder's comment --------- Co-authored-by: Hernán Vanzetto <15466498+hvanz@users.noreply.github.com> Co-authored-by: Lasaro Co-authored-by: Jasmina Malicevic Co-authored-by: Daniel (cherry picked from commit 21e94a288c910c0e9346c9f0aac217f63cebd512) # Conflicts: # spec/abci/abci++_app_requirements.md * Revert "Add requirement for `CheckTx` in ABCI spec (#928)" * Add requirement for `CheckTx` in ABCI spec (#928) * Add requirement for `CheckTx` in ABCI spec * Apply suggestions from code review Co-authored-by: Hernán Vanzetto <15466498+hvanz@users.noreply.github.com> Co-authored-by: Lasaro Co-authored-by: Jasmina Malicevic * Added sentence from @jmalicevic's suggestion * Addressed review comments * Update spec/abci/abci++_app_requirements.md Co-authored-by: Daniel * Addressed comment * `CheckTx` requirements for the app, new version * New version of 
`CheckTx` requirement, after yesterday's discussion * Update spec/abci/abci++_app_requirements.md Co-authored-by: Hernán Vanzetto <15466498+hvanz@users.noreply.github.com> * Addressed @josef-widder's comment --------- Co-authored-by: Hernán Vanzetto <15466498+hvanz@users.noreply.github.com> Co-authored-by: Lasaro Co-authored-by: Jasmina Malicevic Co-authored-by: Daniel --------- Co-authored-by: Sergio Mena Co-authored-by: Hernán Vanzetto <15466498+hvanz@users.noreply.github.com> Co-authored-by: Lasaro Co-authored-by: Jasmina Malicevic Co-authored-by: Daniel --- spec/abci/abci++_app_requirements.md | 39 +++++++++++++++++++++++++++- 1 file changed, 38 insertions(+), 1 deletion(-) diff --git a/spec/abci/abci++_app_requirements.md b/spec/abci/abci++_app_requirements.md index 2fbe1993d9c..52c325d6c3e 100644 --- a/spec/abci/abci++_app_requirements.md +++ b/spec/abci/abci++_app_requirements.md @@ -7,6 +7,8 @@ title: Requirements for the Application ## Formal Requirements +### Consensus Connection Requirements + This section specifies what CometBFT expects from the Application. It is structured as a set of formal requirements that can be used for testing and verification of the Application's logic. @@ -155,7 +157,7 @@ Additionally, *p*'s `DeliverTx` on transactions creates a set of transaction res Note that Requirements 11 and 12, combined with the Agreement property of consensus ensure state machine replication, i.e., the Application state evolves consistently at all correct processes. -Finally, notice that `PrepareProposal` has determinism-related +Also, notice that `PrepareProposal` has determinism-related requirements associated. Indeed, `PrepareProposal` is not required to be deterministic: @@ -170,6 +172,41 @@ Likewise, `ExtendVote` can also be non-deterministic: * *w<sup>r</sup><sub>p</sub> = w<sup>r</sup><sub>q</sub> ⇏ e<sup>r</sup><sub>p</sub> = e<sup>r</sup><sub>q</sub>* --> + +### Mempool Connection Requirements + +Let *CheckTxCodes<sub>tx,p,h</sub>* denote the set of result codes returned by *p*'s Application, +via `ResponseCheckTx`, +to successive calls to `RequestCheckTx` occurring while the Application is at height *h* +and having transaction *tx* as parameter. +*CheckTxCodes<sub>tx,p,h</sub>* is a set since *p*'s Application may +return different result codes during height *h*. +If *CheckTxCodes<sub>tx,p,h</sub>* is a singleton set, i.e. the Application always returned +the same result code in `ResponseCheckTx` while at height *h*, +we define *CheckTxCode<sub>tx,p,h</sub>* as the singleton value of *CheckTxCodes<sub>tx,p,h</sub>*. +If *CheckTxCodes<sub>tx,p,h</sub>* is not a singleton set, *CheckTxCode<sub>tx,p,h</sub>* is undefined. +Let predicate *OK(CheckTxCode<sub>tx,p,h</sub>)* denote whether *CheckTxCode<sub>tx,p,h</sub>* is `SUCCESS`. + +* Requirement 13 [`CheckTx`, eventual non-oscillation]: For any transaction *tx*, + there exists a boolean value *b*, + and a height *h<sub>stable</sub>* such that, + for any correct process *p*, + *CheckTxCode<sub>tx,p,h</sub>* is defined, and + *OK(CheckTxCode<sub>tx,p,h</sub>) = b* + for any height *h ≥ h<sub>stable</sub>*. + +Requirement 13 ensures that +a transaction will eventually stop oscillating between `CheckTx` success and failure +if it stays in *p's* mempool for long enough. +This condition on the Application's behavior allows the mempool to ensure that +a transaction will leave the mempool of all full nodes, +either because it is expunged everywhere due to failing `CheckTx` calls, +or because it stays valid long enough to be gossipped, proposed and decided. +Although Requirement 13 defines a global *h<sub>stable</sub>*, application developers +can consider such stabilization height as local to process *p* (*h<sub>p,stable</sub>*), +without loss for generality.
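To make the eventual non-oscillation requirement concrete, here is a small illustrative sketch. It is not CometBFT code and does not use the real ABCI types: `toyTx`, its `ExpiryHeight` field, and both check functions are hypothetical stand-ins for an application's `CheckTx` logic. The first check settles on a single answer (b = false from h_stable = ExpiryHeight onward); the second flips with the height's parity, so no h_stable exists and it violates the requirement.

```go
package main

import "fmt"

// toyTx is a hypothetical transaction with a user-declared expiry height;
// it is not CometBFT's Tx type.
type toyTx struct {
	Payload      []byte
	ExpiryHeight int64
}

// checkTxStabilizing models a CheckTx decision that satisfies Requirement 13:
// the tx is valid strictly below its expiry height and invalid at every height
// from ExpiryHeight onward, so the outcome stabilizes (b = false,
// h_stable = ExpiryHeight).
func checkTxStabilizing(tx toyTx, height int64) bool {
	return height < tx.ExpiryHeight
}

// checkTxOscillating violates the requirement: the answer flips forever with
// the parity of the height, so the mempool can neither keep nor evict the tx
// for good.
func checkTxOscillating(tx toyTx, height int64) bool {
	return height%2 == 0
}

func main() {
	tx := toyTx{Payload: []byte("demo"), ExpiryHeight: 10}
	for _, h := range []int64{5, 9, 10, 11, 12} {
		fmt.Printf("h=%d stabilizing=%v oscillating=%v\n",
			h, checkTxStabilizing(tx, h), checkTxOscillating(tx, h))
	}
}
```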
+In contrast, the value of *b* MUST be the same across all processes. + ## Managing the Application state and related topics ### Connection State From 9a453da40d65d89da94cb145c81e861c821cc376 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Wed, 14 Jun 2023 15:42:06 +0200 Subject: [PATCH 17/18] fix: avoid recursive call after rename to (*PeerState).MarshalJSON (#865) (#969) * avoid recursive call after rename to (*PeerState).MarshalJSON * add test * add change doc * explain for nolint * fix lint * fix golangci-lint to v1.52.2 * fix golangci-lint to v1.52.2 * Revert "fix golangci-lint to v1.52.2" This reverts commit 598a9ef4c86fc29cf038251676c33a222217826c. * Revert "fix golangci-lint to v1.52.2" This reverts commit a8aad121e27382813e95b1911b1b560c62e1c7c3. * Reintroduced `cmtjson` * Avoid copying Mutex * Avoid copying Mutex -- 2nd try, more succint * Update .changelog/unreleased/bug-fixes/865-fix-peerstate-marshaljson.md * Update consensus/reactor_test.go --------- Co-authored-by: Sergio Mena (cherry picked from commit f6ea09171a2bf9f695f59b65f5c51e4a8c168015) Co-authored-by: mmsqe --- .../865-fix-peerstate-marshaljson.md | 2 ++ consensus/reactor.go | 3 +- consensus/reactor_test.go | 30 +++++++++++++++++++ 3 files changed, 34 insertions(+), 1 deletion(-) create mode 100644 .changelog/unreleased/bug-fixes/865-fix-peerstate-marshaljson.md diff --git a/.changelog/unreleased/bug-fixes/865-fix-peerstate-marshaljson.md b/.changelog/unreleased/bug-fixes/865-fix-peerstate-marshaljson.md new file mode 100644 index 00000000000..318bda315c5 --- /dev/null +++ b/.changelog/unreleased/bug-fixes/865-fix-peerstate-marshaljson.md @@ -0,0 +1,2 @@ +- `[consensus]` Avoid recursive call after rename to (*PeerState).MarshalJSON + ([\#863](https://github.com/cometbft/cometbft/pull/863)) diff --git a/consensus/reactor.go b/consensus/reactor.go index ca410fa68bf..1c5de7f1f63 100644 --- a/consensus/reactor.go +++ b/consensus/reactor.go @@ -1067,7 +1067,8 @@ func (ps *PeerState) MarshalJSON() ([]byte, error) { ps.mtx.Lock() defer ps.mtx.Unlock() - return cmtjson.Marshal(ps) + type jsonPeerState PeerState + return cmtjson.Marshal((*jsonPeerState)(ps)) } // GetHeight returns an atomic snapshot of the PeerRoundState's height diff --git a/consensus/reactor_test.go b/consensus/reactor_test.go index 193b1b3d8a0..793f4e29e4a 100644 --- a/consensus/reactor_test.go +++ b/consensus/reactor_test.go @@ -26,6 +26,7 @@ import ( "github.com/cometbft/cometbft/crypto/tmhash" "github.com/cometbft/cometbft/libs/bits" "github.com/cometbft/cometbft/libs/bytes" + "github.com/cometbft/cometbft/libs/json" "github.com/cometbft/cometbft/libs/log" cmtsync "github.com/cometbft/cometbft/libs/sync" mempl "github.com/cometbft/cometbft/mempool" @@ -1005,3 +1006,32 @@ func TestVoteSetBitsMessageValidateBasic(t *testing.T) { }) } } + +func TestMarshalJSONPeerState(t *testing.T) { + ps := NewPeerState(nil) + data, err := json.Marshal(ps) + require.NoError(t, err) + require.JSONEq(t, `{ + "round_state":{ + "height": "0", + "round": -1, + "step": 0, + "start_time": "0001-01-01T00:00:00Z", + "proposal": false, + "proposal_block_part_set_header": + {"total":0, "hash":""}, + "proposal_block_parts": null, + "proposal_pol_round": -1, + "proposal_pol": null, + "prevotes": null, + "precommits": null, + "last_commit_round": -1, + "last_commit": null, + "catchup_commit_round": -1, + "catchup_commit": null + }, + "stats":{ + "votes":"0", + "block_parts":"0"} + }`, string(data)) +} From 
fe45483be36ebfea7e172a3ad949e8fe09a8fd95 Mon Sep 17 00:00:00 2001 From: Thane Thomson Date: Wed, 14 Jun 2023 16:12:33 -0400 Subject: [PATCH 18/18] Release v0.37.2 (#972) * changelog: Clean up and reorder Signed-off-by: Thane Thomson * changelog: Add severity to security fixes Signed-off-by: Thane Thomson * changelog: Add missing entries Signed-off-by: Thane Thomson * changelog: Release v0.37.2 Signed-off-by: Thane Thomson * Rebuild changelog Signed-off-by: Thane Thomson * version: Bump to v0.37.2 Signed-off-by: Thane Thomson * test/e2e: Use Debian Bullseye as base image Golang recently started offering Debian Bookworm as the default distro for `golang:1.20`, which provides a newer version of RocksDB than what we support in cometbft-db. For now this pins the image to Bullseye, which is the base image we have been using for some time now. Signed-off-by: Thane Thomson --------- Signed-off-by: Thane Thomson --- .../771-kvindexer-parsing-big-ints.md | 2 - .../bug-fixes/771-pubsub-parsing-big-ints.md | 3 -- .../865-fix-peerstate-marshaljson.md | 2 - .../bug-fixes/890-mempool-fix-cache.md | 1 - .../771-kvindexer-parsing-big-ints.md | 4 ++ .../bug-fixes/771-pubsub-parsing-big-ints.md | 4 ++ .../654-rpc-rm-response-data-logs.md | 0 .../security-fixes/787-rpc-client-pw.md | 3 ++ .../793-cli-debug-kill-unsafe-cast.md | 2 + .../865-fix-peerstate-marshaljson.md | 3 ++ .../security-fixes/890-mempool-fix-cache.md | 3 ++ .changelog/v0.37.2/summary.md | 4 ++ CHANGELOG.md | 38 +++++++++++++++++++ test/e2e/docker/Dockerfile | 2 +- version/version.go | 2 +- 15 files changed, 63 insertions(+), 10 deletions(-) delete mode 100644 .changelog/unreleased/bug-fixes/771-kvindexer-parsing-big-ints.md delete mode 100644 .changelog/unreleased/bug-fixes/771-pubsub-parsing-big-ints.md delete mode 100644 .changelog/unreleased/bug-fixes/865-fix-peerstate-marshaljson.md delete mode 100644 .changelog/unreleased/bug-fixes/890-mempool-fix-cache.md create mode 100644 .changelog/v0.37.2/bug-fixes/771-kvindexer-parsing-big-ints.md create mode 100644 .changelog/v0.37.2/bug-fixes/771-pubsub-parsing-big-ints.md rename .changelog/{unreleased => v0.37.2}/improvements/654-rpc-rm-response-data-logs.md (100%) create mode 100644 .changelog/v0.37.2/security-fixes/787-rpc-client-pw.md create mode 100644 .changelog/v0.37.2/security-fixes/793-cli-debug-kill-unsafe-cast.md create mode 100644 .changelog/v0.37.2/security-fixes/865-fix-peerstate-marshaljson.md create mode 100644 .changelog/v0.37.2/security-fixes/890-mempool-fix-cache.md create mode 100644 .changelog/v0.37.2/summary.md diff --git a/.changelog/unreleased/bug-fixes/771-kvindexer-parsing-big-ints.md b/.changelog/unreleased/bug-fixes/771-kvindexer-parsing-big-ints.md deleted file mode 100644 index 8114534c051..00000000000 --- a/.changelog/unreleased/bug-fixes/771-kvindexer-parsing-big-ints.md +++ /dev/null @@ -1,2 +0,0 @@ -- `[state/kvindex]` Querying event attributes that are bigger than int64 is now enabled. We are not supporting reading floats from the db into the indexer nor parsing them into BigFloats to not introduce breaking changes in minor releases. 
- ([\#771](https://github.com/cometbft/cometbft/pull/771)) \ No newline at end of file diff --git a/.changelog/unreleased/bug-fixes/771-pubsub-parsing-big-ints.md b/.changelog/unreleased/bug-fixes/771-pubsub-parsing-big-ints.md deleted file mode 100644 index 749b30d5b50..00000000000 --- a/.changelog/unreleased/bug-fixes/771-pubsub-parsing-big-ints.md +++ /dev/null @@ -1,3 +0,0 @@ -- `[pubsub]` Pubsub queries are now able to parse big integers (larger than int64). Very big floats - are also properly parsed into very big integers instead of being truncated to int64. - ([\#771](https://github.com/cometbft/cometbft/pull/771)) \ No newline at end of file diff --git a/.changelog/unreleased/bug-fixes/865-fix-peerstate-marshaljson.md b/.changelog/unreleased/bug-fixes/865-fix-peerstate-marshaljson.md deleted file mode 100644 index 318bda315c5..00000000000 --- a/.changelog/unreleased/bug-fixes/865-fix-peerstate-marshaljson.md +++ /dev/null @@ -1,2 +0,0 @@ -- `[consensus]` Avoid recursive call after rename to (*PeerState).MarshalJSON - ([\#863](https://github.com/cometbft/cometbft/pull/863)) diff --git a/.changelog/unreleased/bug-fixes/890-mempool-fix-cache.md b/.changelog/unreleased/bug-fixes/890-mempool-fix-cache.md deleted file mode 100644 index 34dae0463a7..00000000000 --- a/.changelog/unreleased/bug-fixes/890-mempool-fix-cache.md +++ /dev/null @@ -1 +0,0 @@ -- `[mempool/clist_mempool]` \#890 Prevent a transaction to appear twice in the mempool (@otrack) diff --git a/.changelog/v0.37.2/bug-fixes/771-kvindexer-parsing-big-ints.md b/.changelog/v0.37.2/bug-fixes/771-kvindexer-parsing-big-ints.md new file mode 100644 index 00000000000..ba19adbc8ba --- /dev/null +++ b/.changelog/v0.37.2/bug-fixes/771-kvindexer-parsing-big-ints.md @@ -0,0 +1,4 @@ +- `[state/kvindex]` Querying event attributes that are bigger than int64 is now + enabled. We are not supporting reading floats from the db into the indexer + nor parsing them into BigFloats to not introduce breaking changes in minor + releases. ([\#771](https://github.com/cometbft/cometbft/pull/771)) diff --git a/.changelog/v0.37.2/bug-fixes/771-pubsub-parsing-big-ints.md b/.changelog/v0.37.2/bug-fixes/771-pubsub-parsing-big-ints.md new file mode 100644 index 00000000000..fc5f25a90ff --- /dev/null +++ b/.changelog/v0.37.2/bug-fixes/771-pubsub-parsing-big-ints.md @@ -0,0 +1,4 @@ +- `[pubsub]` Pubsub queries are now able to parse big integers (larger than + int64). Very big floats are also properly parsed into very big integers + instead of being truncated to int64. 
+ ([\#771](https://github.com/cometbft/cometbft/pull/771)) diff --git a/.changelog/unreleased/improvements/654-rpc-rm-response-data-logs.md b/.changelog/v0.37.2/improvements/654-rpc-rm-response-data-logs.md similarity index 100% rename from .changelog/unreleased/improvements/654-rpc-rm-response-data-logs.md rename to .changelog/v0.37.2/improvements/654-rpc-rm-response-data-logs.md diff --git a/.changelog/v0.37.2/security-fixes/787-rpc-client-pw.md b/.changelog/v0.37.2/security-fixes/787-rpc-client-pw.md new file mode 100644 index 00000000000..209b799d9ad --- /dev/null +++ b/.changelog/v0.37.2/security-fixes/787-rpc-client-pw.md @@ -0,0 +1,3 @@ +- `[rpc/jsonrpc/client]` **Low severity** - Prevent RPC + client credentials from being inadvertently dumped to logs + ([\#787](https://github.com/cometbft/cometbft/pull/787)) \ No newline at end of file diff --git a/.changelog/v0.37.2/security-fixes/793-cli-debug-kill-unsafe-cast.md b/.changelog/v0.37.2/security-fixes/793-cli-debug-kill-unsafe-cast.md new file mode 100644 index 00000000000..7482a5ae039 --- /dev/null +++ b/.changelog/v0.37.2/security-fixes/793-cli-debug-kill-unsafe-cast.md @@ -0,0 +1,2 @@ +- `[cmd/cometbft/commands/debug/kill]` **Low severity** - Fix unsafe int cast in + `debug kill` command ([\#793](https://github.com/cometbft/cometbft/pull/793)) \ No newline at end of file diff --git a/.changelog/v0.37.2/security-fixes/865-fix-peerstate-marshaljson.md b/.changelog/v0.37.2/security-fixes/865-fix-peerstate-marshaljson.md new file mode 100644 index 00000000000..fdd9172c209 --- /dev/null +++ b/.changelog/v0.37.2/security-fixes/865-fix-peerstate-marshaljson.md @@ -0,0 +1,3 @@ +- `[consensus]` **Low severity** - Avoid recursive call after rename to + `(*PeerState).MarshalJSON` + ([\#863](https://github.com/cometbft/cometbft/pull/863)) diff --git a/.changelog/v0.37.2/security-fixes/890-mempool-fix-cache.md b/.changelog/v0.37.2/security-fixes/890-mempool-fix-cache.md new file mode 100644 index 00000000000..bad30efc7ab --- /dev/null +++ b/.changelog/v0.37.2/security-fixes/890-mempool-fix-cache.md @@ -0,0 +1,3 @@ +- `[mempool/clist_mempool]` **Low severity** - Prevent a transaction from + appearing twice in the mempool + ([\#890](https://github.com/cometbft/cometbft/pull/890): @otrack) diff --git a/.changelog/v0.37.2/summary.md b/.changelog/v0.37.2/summary.md new file mode 100644 index 00000000000..7ecb2739409 --- /dev/null +++ b/.changelog/v0.37.2/summary.md @@ -0,0 +1,4 @@ +*June 14, 2023* + +Provides several minor bug fixes, as well as fixes for several low-severity +security issues. diff --git a/CHANGELOG.md b/CHANGELOG.md index 964b002f45f..e26955a191a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,43 @@ # CHANGELOG +## v0.37.2 + +*June 14, 2023* + +Provides several minor bug fixes, as well as fixes for several low-severity +security issues. + +### BUG FIXES + +- `[state/kvindex]` Querying event attributes that are bigger than int64 is now + enabled. We are not supporting reading floats from the db into the indexer + nor parsing them into BigFloats to not introduce breaking changes in minor + releases. ([\#771](https://github.com/cometbft/cometbft/pull/771)) +- `[pubsub]` Pubsub queries are now able to parse big integers (larger than + int64). Very big floats are also properly parsed into very big integers + instead of being truncated to int64. 
+ ([\#771](https://github.com/cometbft/cometbft/pull/771)) + +### IMPROVEMENTS + +- `[rpc]` Remove response data from response failure logs in order + to prevent large quantities of log data from being produced + ([\#654](https://github.com/cometbft/cometbft/issues/654)) + +### SECURITY FIXES + +- `[rpc/jsonrpc/client]` **Low severity** - Prevent RPC + client credentials from being inadvertently dumped to logs + ([\#787](https://github.com/cometbft/cometbft/pull/787)) +- `[cmd/cometbft/commands/debug/kill]` **Low severity** - Fix unsafe int cast in + `debug kill` command ([\#793](https://github.com/cometbft/cometbft/pull/793)) +- `[consensus]` **Low severity** - Avoid recursive call after rename to + `(*PeerState).MarshalJSON` + ([\#863](https://github.com/cometbft/cometbft/pull/863)) +- `[mempool/clist_mempool]` **Low severity** - Prevent a transaction from + appearing twice in the mempool + ([\#890](https://github.com/cometbft/cometbft/pull/890): @otrack) + ## v0.37.1 *April 26, 2023* diff --git a/test/e2e/docker/Dockerfile b/test/e2e/docker/Dockerfile index cff113638f1..2ffe56d0b93 100644 --- a/test/e2e/docker/Dockerfile +++ b/test/e2e/docker/Dockerfile @@ -1,7 +1,7 @@ # We need to build in a Linux environment to support C libraries, e.g. RocksDB. # We use Debian instead of Alpine, so that we can use binary database packages # instead of spending time compiling them. -FROM golang:1.20 +FROM golang:1.20-bullseye RUN apt-get -qq update -y && apt-get -qq upgrade -y >/dev/null RUN apt-get -qq install -y libleveldb-dev librocksdb-dev >/dev/null diff --git a/version/version.go b/version/version.go index fc7b2096832..0c9057a7acc 100644 --- a/version/version.go +++ b/version/version.go @@ -5,7 +5,7 @@ const ( // The default version of TMCoreSemVer is the value used as the // fallback version of CometBFT when not using git describe. // It is formatted with semantic versioning. - TMCoreSemVer = "0.37.1" + TMCoreSemVer = "0.37.2" // ABCISemVer is the semantic version of the ABCI protocol ABCISemVer = "1.0.0" ABCIVersion = ABCISemVer