From 266d3ac6c32d4ffd48aa717555bb8671be8c78d4 Mon Sep 17 00:00:00 2001
From: chainchad <96362174+chainchad@users.noreply.github.com>
Date: Tue, 27 Jun 2023 19:52:18 -0400
Subject: [PATCH 01/40] Bump version and update CHANGELOG for core v2.3.0
---
VERSION | 2 +-
docs/CHANGELOG.md | 5 +++++
2 files changed, 6 insertions(+), 1 deletion(-)
diff --git a/VERSION b/VERSION
index ccbccc3dc62..276cbf9e285 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-2.2.0
+2.3.0
diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md
index 2c5d1a761e9..d347297fb61 100644
--- a/docs/CHANGELOG.md
+++ b/docs/CHANGELOG.md
@@ -8,6 +8,11 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [dev]
+
+...
+
+## 2.3.0 - UNRELEASED
+
### Added
- Add a new field called `Order` (range from 1 to 100) to `EVM.Nodes` that is used for the `PriorityLevel` node selector and also as a tie-breaker for `HighestHead` and `TotalDifficulty`. `Order` levels are considered in ascending order. If not defined it will default to `Order = 100` (last level).
- Added new node selection mode called `PriorityLevel` for EVM, it is a tiered round-robin in ascending order of the`Order` field. Example:
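A minimal sketch of the `PriorityLevel` node pool configuration described above, assuming the standard `[[EVM.Nodes]]` table-array layout (node name and URLs are placeholders):

```toml
[EVM.NodePool]
SelectionMode = 'PriorityLevel'

[[EVM.Nodes]]
Name = 'primary-1'
WSURL = 'wss://...'
HTTPURL = 'https://...'
Order = 5
```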
From a0ec6a526426c0597bc2c5b5ed0cb5ab99d2dc3b Mon Sep 17 00:00:00 2001
From: David Cauchi <13139524+davidcauchi@users.noreply.github.com>
Date: Thu, 20 Jul 2023 22:48:20 +0200
Subject: [PATCH 02/40] Add base config (#9865)
* Add base config
* Updated Docs
* Base Goerli config to match Optimism Goerli
---------
Co-authored-by: simsonraj
(cherry picked from commit ee724fc32951f2c8f7f234695afc4dbf0591eeea)
---
.../evm/config/v2/defaults/Base_Goerli.toml | 26 +++
.../evm/config/v2/defaults/Base_Mainnet.toml | 26 +++
docs/CONFIG.md | 156 ++++++++++++++++++
3 files changed, 208 insertions(+)
create mode 100644 core/chains/evm/config/v2/defaults/Base_Goerli.toml
create mode 100644 core/chains/evm/config/v2/defaults/Base_Mainnet.toml
diff --git a/core/chains/evm/config/v2/defaults/Base_Goerli.toml b/core/chains/evm/config/v2/defaults/Base_Goerli.toml
new file mode 100644
index 00000000000..2a9c59173c1
--- /dev/null
+++ b/core/chains/evm/config/v2/defaults/Base_Goerli.toml
@@ -0,0 +1,26 @@
+ChainID = '84531'
+ChainType = 'optimismBedrock'
+FinalityDepth = 200
+LogPollInterval = '2s'
+NoNewHeadsThreshold = '40s'
+MinIncomingConfirmations = 1
+
+[GasEstimator]
+EIP1559DynamicFees = true
+PriceMin = '1 wei'
+BumpMin = '100 wei'
+
+[GasEstimator.BlockHistory]
+BlockHistorySize = 60
+
+[Transactions]
+ResendAfterThreshold = '30s'
+
+[HeadTracker]
+HistoryDepth = 300
+
+[NodePool]
+SyncThreshold = 10
+
+[OCR]
+ContractConfirmations = 1
\ No newline at end of file
diff --git a/core/chains/evm/config/v2/defaults/Base_Mainnet.toml b/core/chains/evm/config/v2/defaults/Base_Mainnet.toml
new file mode 100644
index 00000000000..5455ad22c12
--- /dev/null
+++ b/core/chains/evm/config/v2/defaults/Base_Mainnet.toml
@@ -0,0 +1,26 @@
+ChainID = '8453'
+ChainType = 'optimismBedrock'
+FinalityDepth = 200
+LogPollInterval = '2s'
+NoNewHeadsThreshold = '40s'
+MinIncomingConfirmations = 1
+
+[GasEstimator]
+EIP1559DynamicFees = true
+PriceMin = '1 wei'
+BumpMin = '100 wei'
+
+[GasEstimator.BlockHistory]
+BlockHistorySize = 24
+
+[Transactions]
+ResendAfterThreshold = '30s'
+
+[HeadTracker]
+HistoryDepth = 300
+
+[NodePool]
+SyncThreshold = 10
+
+[OCR]
+ContractConfirmations = 1
\ No newline at end of file
diff --git a/docs/CONFIG.md b/docs/CONFIG.md
index b817ddd143e..fdbf92fed45 100644
--- a/docs/CONFIG.md
+++ b/docs/CONFIG.md
@@ -3119,6 +3119,84 @@ GasLimit = 5300000
+Base Mainnet (8453)
+
+```toml
+AutoCreateKey = true
+BlockBackfillDepth = 10
+BlockBackfillSkip = false
+ChainType = 'optimismBedrock'
+FinalityDepth = 200
+FinalityTagEnabled = false
+LogBackfillBatchSize = 1000
+LogPollInterval = '2s'
+LogKeepBlocksDepth = 100000
+MinIncomingConfirmations = 1
+MinContractPayment = '0.00001 link'
+NonceAutoSync = true
+NoNewHeadsThreshold = '40s'
+RPCDefaultBatchSize = 250
+RPCBlockQueryDelay = 1
+
+[Transactions]
+ForwardersEnabled = false
+MaxInFlight = 16
+MaxQueued = 250
+ReaperInterval = '1h0m0s'
+ReaperThreshold = '168h0m0s'
+ResendAfterThreshold = '30s'
+
+[BalanceMonitor]
+Enabled = true
+
+[GasEstimator]
+Mode = 'BlockHistory'
+PriceDefault = '20 gwei'
+PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether'
+PriceMin = '1 wei'
+LimitDefault = 500000
+LimitMax = 500000
+LimitMultiplier = '1'
+LimitTransfer = 21000
+BumpMin = '100 wei'
+BumpPercent = 20
+BumpThreshold = 3
+EIP1559DynamicFees = true
+FeeCapDefault = '100 gwei'
+TipCapDefault = '1 wei'
+TipCapMin = '1 wei'
+
+[GasEstimator.BlockHistory]
+BatchSize = 25
+BlockHistorySize = 24
+CheckInclusionBlocks = 12
+CheckInclusionPercentile = 90
+TransactionPercentile = 60
+
+[HeadTracker]
+HistoryDepth = 300
+MaxBufferSize = 3
+SamplingInterval = '1s'
+
+[NodePool]
+PollFailureThreshold = 5
+PollInterval = '10s'
+SelectionMode = 'HighestHead'
+SyncThreshold = 10
+
+[OCR]
+ContractConfirmations = 1
+ContractTransmitterTransmitTimeout = '10s'
+DatabaseTimeout = '10s'
+ObservationGracePeriod = '1s'
+
+[OCR2]
+[OCR2.Automation]
+GasLimit = 5300000
+```
+
+
+
Arbitrum Mainnet (42161)
```toml
@@ -3582,6 +3660,84 @@ GasLimit = 5300000
+Base Goerli (84531)
+
+```toml
+AutoCreateKey = true
+BlockBackfillDepth = 10
+BlockBackfillSkip = false
+ChainType = 'optimismBedrock'
+FinalityDepth = 200
+FinalityTagEnabled = false
+LogBackfillBatchSize = 1000
+LogPollInterval = '2s'
+LogKeepBlocksDepth = 100000
+MinIncomingConfirmations = 1
+MinContractPayment = '0.00001 link'
+NonceAutoSync = true
+NoNewHeadsThreshold = '40s'
+RPCDefaultBatchSize = 250
+RPCBlockQueryDelay = 1
+
+[Transactions]
+ForwardersEnabled = false
+MaxInFlight = 16
+MaxQueued = 250
+ReaperInterval = '1h0m0s'
+ReaperThreshold = '168h0m0s'
+ResendAfterThreshold = '30s'
+
+[BalanceMonitor]
+Enabled = true
+
+[GasEstimator]
+Mode = 'BlockHistory'
+PriceDefault = '20 gwei'
+PriceMax = '115792089237316195423570985008687907853269984665.640564039457584007913129639935 tether'
+PriceMin = '1 wei'
+LimitDefault = 500000
+LimitMax = 500000
+LimitMultiplier = '1'
+LimitTransfer = 21000
+BumpMin = '100 wei'
+BumpPercent = 20
+BumpThreshold = 3
+EIP1559DynamicFees = true
+FeeCapDefault = '100 gwei'
+TipCapDefault = '1 wei'
+TipCapMin = '1 wei'
+
+[GasEstimator.BlockHistory]
+BatchSize = 25
+BlockHistorySize = 60
+CheckInclusionBlocks = 12
+CheckInclusionPercentile = 90
+TransactionPercentile = 60
+
+[HeadTracker]
+HistoryDepth = 300
+MaxBufferSize = 3
+SamplingInterval = '1s'
+
+[NodePool]
+PollFailureThreshold = 5
+PollInterval = '10s'
+SelectionMode = 'HighestHead'
+SyncThreshold = 10
+
+[OCR]
+ContractConfirmations = 1
+ContractTransmitterTransmitTimeout = '10s'
+DatabaseTimeout = '10s'
+ObservationGracePeriod = '1s'
+
+[OCR2]
+[OCR2.Automation]
+GasLimit = 5300000
+```
+
+
+
Arbitrum Rinkeby (421611)
```toml
From 932b20c98f94208bebbc9c3558a39b80a351fe8c Mon Sep 17 00:00:00 2001
From: Simson
Date: Fri, 21 Jul 2023 02:03:57 -0400
Subject: [PATCH 03/40] Base(Mainnet) + Optimism hotfix (#9862)
* Base(Mainnet) + Optimism hotfix
* Revert "Base(Mainnet) + Optimism hotfix"
This reverts commit a13e8487eff0935b3eacf78bc020288842028c96.
* Base + Optimism hotfix for non-successful OCR rounds
* fix tests
(cherry picked from commit 38e9e4a0ba0e19e6a87a258e10c850921f2301c9)
---
core/services/ocr/contract_tracker.go | 2 +-
core/services/ocr/contract_tracker_test.go | 9 ---------
2 files changed, 1 insertion(+), 10 deletions(-)
diff --git a/core/services/ocr/contract_tracker.go b/core/services/ocr/contract_tracker.go
index cd04fc97ee7..671f6b8cfad 100644
--- a/core/services/ocr/contract_tracker.go
+++ b/core/services/ocr/contract_tracker.go
@@ -388,7 +388,7 @@ func (t *OCRContractTracker) ConfigFromLogs(ctx context.Context, changedInBlock
// LatestBlockHeight queries the eth node for the most recent header
func (t *OCRContractTracker) LatestBlockHeight(ctx context.Context) (blockheight uint64, err error) {
switch t.cfg.ChainType() {
- case config.ChainMetis, config.ChainOptimismBedrock:
+ case config.ChainMetis:
// We skip confirmation checking anyway on these L2s so there's no need to
// care about the block height; we have no way of getting the L1 block
// height anyway
diff --git a/core/services/ocr/contract_tracker_test.go b/core/services/ocr/contract_tracker_test.go
index d3646b30f16..33561a53023 100644
--- a/core/services/ocr/contract_tracker_test.go
+++ b/core/services/ocr/contract_tracker_test.go
@@ -24,7 +24,6 @@ import (
"github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
configtest "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/configtest/v2"
"github.com/smartcontractkit/chainlink/v2/core/internal/testutils/evmtest"
- v2 "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/evmtest/v2"
"github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest"
"github.com/smartcontractkit/chainlink/v2/core/logger"
"github.com/smartcontractkit/chainlink/v2/core/services/ocr"
@@ -107,14 +106,6 @@ func newContractTrackerUni(t *testing.T, opts ...interface{}) (uni contractTrack
func Test_OCRContractTracker_LatestBlockHeight(t *testing.T) {
t.Parallel()
- t.Run("on L2 chains, always returns 0", func(t *testing.T) {
- uni := newContractTrackerUni(t, v2.ChainOptimismMainnet(t))
- l, err := uni.tracker.LatestBlockHeight(testutils.Context(t))
- require.NoError(t, err)
-
- assert.Equal(t, uint64(0), l)
- })
-
t.Run("before first head incoming, looks up on-chain", func(t *testing.T) {
uni := newContractTrackerUni(t)
uni.ec.On("HeadByNumber", mock.AnythingOfType("*context.cancelCtx"), (*big.Int)(nil)).Return(&evmtypes.Head{Number: 42}, nil)
From 2e251f8cc4b4d3ac345c1c9c70bc28596277bd0a Mon Sep 17 00:00:00 2001
From: Simson
Date: Fri, 21 Jul 2023 03:37:58 -0400
Subject: [PATCH 04/40] Added automation gaslimit for Base (#9878)
(cherry picked from commit 22a60fbcad22367cca4d1ec7604b169e3a38108f)
---
core/chains/evm/config/v2/defaults/Base_Goerli.toml | 5 ++++-
core/chains/evm/config/v2/defaults/Base_Mainnet.toml | 5 ++++-
docs/CONFIG.md | 4 ++--
3 files changed, 10 insertions(+), 4 deletions(-)
diff --git a/core/chains/evm/config/v2/defaults/Base_Goerli.toml b/core/chains/evm/config/v2/defaults/Base_Goerli.toml
index 2a9c59173c1..5ecfd036f46 100644
--- a/core/chains/evm/config/v2/defaults/Base_Goerli.toml
+++ b/core/chains/evm/config/v2/defaults/Base_Goerli.toml
@@ -23,4 +23,7 @@ HistoryDepth = 300
SyncThreshold = 10
[OCR]
-ContractConfirmations = 1
\ No newline at end of file
+ContractConfirmations = 1
+
+[OCR2.Automation]
+GasLimit = 6500000
diff --git a/core/chains/evm/config/v2/defaults/Base_Mainnet.toml b/core/chains/evm/config/v2/defaults/Base_Mainnet.toml
index 5455ad22c12..314c12f8c54 100644
--- a/core/chains/evm/config/v2/defaults/Base_Mainnet.toml
+++ b/core/chains/evm/config/v2/defaults/Base_Mainnet.toml
@@ -23,4 +23,7 @@ HistoryDepth = 300
SyncThreshold = 10
[OCR]
-ContractConfirmations = 1
\ No newline at end of file
+ContractConfirmations = 1
+
+[OCR2.Automation]
+GasLimit = 6500000
diff --git a/docs/CONFIG.md b/docs/CONFIG.md
index fdbf92fed45..ea512078c3e 100644
--- a/docs/CONFIG.md
+++ b/docs/CONFIG.md
@@ -3192,7 +3192,7 @@ ObservationGracePeriod = '1s'
[OCR2]
[OCR2.Automation]
-GasLimit = 5300000
+GasLimit = 6500000
```
@@ -3733,7 +3733,7 @@ ObservationGracePeriod = '1s'
[OCR2]
[OCR2.Automation]
-GasLimit = 5300000
+GasLimit = 6500000
```
From 42c4be6cef1a24f389154a528a481210ce10f2c1 Mon Sep 17 00:00:00 2001
From: simsonraj
Date: Fri, 21 Jul 2023 20:05:22 +0530
Subject: [PATCH 05/40] re run config docs generate
---
docs/CONFIG.md | 2 --
1 file changed, 2 deletions(-)
diff --git a/docs/CONFIG.md b/docs/CONFIG.md
index ea512078c3e..9cc91c91e58 100644
--- a/docs/CONFIG.md
+++ b/docs/CONFIG.md
@@ -3127,7 +3127,6 @@ BlockBackfillDepth = 10
BlockBackfillSkip = false
ChainType = 'optimismBedrock'
FinalityDepth = 200
-FinalityTagEnabled = false
LogBackfillBatchSize = 1000
LogPollInterval = '2s'
LogKeepBlocksDepth = 100000
@@ -3668,7 +3667,6 @@ BlockBackfillDepth = 10
BlockBackfillSkip = false
ChainType = 'optimismBedrock'
FinalityDepth = 200
-FinalityTagEnabled = false
LogBackfillBatchSize = 1000
LogPollInterval = '2s'
LogKeepBlocksDepth = 100000
From d0c54ead97a07a9759d9a51736c219c2c6a18681 Mon Sep 17 00:00:00 2001
From: chainchad <96362174+chainchad@users.noreply.github.com>
Date: Wed, 26 Jul 2023 10:46:58 -0400
Subject: [PATCH 06/40] Bump version and update CHANGELOG for core v2.4.0
---
VERSION | 2 +-
docs/CHANGELOG.md | 3 +++
2 files changed, 4 insertions(+), 1 deletion(-)
diff --git a/VERSION b/VERSION
index 276cbf9e285..197c4d5c2d7 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-2.3.0
+2.4.0
diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md
index 615eaa4ab45..38ec4d263d7 100644
--- a/docs/CHANGELOG.md
+++ b/docs/CHANGELOG.md
@@ -9,6 +9,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [dev]
+...
+
+## 2.4.0 - UNRELEASED
### Fixed
- Updated `v2/keys/evm` and `v2/keys/eth` routes to return 400 and 404 status codes where appropriate. Previously 500s were returned when requested resources were not found or client requests could not be parsed.
From e6ebf1766958f3c63651f729cb17a00018868498 Mon Sep 17 00:00:00 2001
From: chainchad <96362174+chainchad@users.noreply.github.com>
Date: Fri, 28 Jul 2023 10:49:17 -0400
Subject: [PATCH 07/40] Finalize date on changelog for 2.3.0
---
docs/CHANGELOG.md | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md
index d347297fb61..b59e51c8859 100644
--- a/docs/CHANGELOG.md
+++ b/docs/CHANGELOG.md
@@ -11,7 +11,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
...
-## 2.3.0 - UNRELEASED
+
+
+## 2.3.0 - 2023-07-28
### Added
- Add a new field called `Order` (range from 1 to 100) to `EVM.Nodes` that is used for the `PriorityLevel` node selector and also as a tie-breaker for `HighestHead` and `TotalDifficulty`. `Order` levels are considered in ascending order. If not defined it will default to `Order = 100` (last level).
@@ -66,8 +68,6 @@ Node operators may wish to add alerting based around these metrics.
- Legacy chain types Optimism and Optimism2. OptimismBedrock is now used to handle Optimism's special cases.
- Optimism Kovan configurations along with legacy error messages.
-
-
# 2.2.0 - 2023-06-12
### Added
From f47eb347cd87a8aacde76789233a83f02739a3c9 Mon Sep 17 00:00:00 2001
From: Patrick
Date: Tue, 8 Aug 2023 16:23:16 -0400
Subject: [PATCH 08/40] fix/changelog-simple-passwords: adding notification for
upcoming breaking change wrt simple passwords in 2.5 (#10110)
(cherry picked from commit 33f4e1a852da99f3a58f2b834533c495d095f874)
---
docs/CHANGELOG.md | 11 ++++++++++-
1 file changed, 10 insertions(+), 1 deletion(-)
diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md
index 38ec4d263d7..d07d2f49e1e 100644
--- a/docs/CHANGELOG.md
+++ b/docs/CHANGELOG.md
@@ -18,7 +18,16 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
### Added
- Added the ability to specify and merge fields from multiple secrets files. Overrides of fields and keys are not allowed.
-## 2.3.0 - UNRELEASED
+### Upcoming Breaking Change
+- Starting in 2.5.0, chainlink nodes will no longer allow insecure configuration for production builds. Any TOML configuration that sets the following line will fail validation checks in `node start` or `node validate`:
+```
+AllowSimplePasswords=true
+```
+- To migrate on production builds, update the database password set in Database.URL to be 16 - 50 characters without leading or trailing whitespace. URI parsing rules apply to the chosen password - refer to RFC 3986 for special character escape rules.
+
+
+
+## 2.3.0 - 2023-07-28
### Added
- Add a new field called `Order` (range from 1 to 100) to `EVM.Nodes` that is used for the `PriorityLevel` node selector and also as a tie-breaker for `HighestHead` and `TotalDifficulty`. `Order` levels are considered in ascending order. If not defined it will default to `Order = 100` (last level).
From 6329455b1083cfa97d1c4d06cb869690d2ea021e Mon Sep 17 00:00:00 2001
From: patrickhuie19
Date: Fri, 11 Aug 2023 11:36:15 -0400
Subject: [PATCH 09/40] feature/simple-passwords-log: adding notification for
upcoming configuration change error logs for insecure configs on secure
builds
---
core/cmd/shell.go | 12 +++++++++---
core/cmd/shell_local.go | 10 ++++++++--
2 files changed, 17 insertions(+), 5 deletions(-)
diff --git a/core/cmd/shell.go b/core/cmd/shell.go
index 514450ad99c..42ede9b39ed 100644
--- a/core/cmd/shell.go
+++ b/core/cmd/shell.go
@@ -116,9 +116,15 @@ func (s *Shell) errorOut(err error) cli.ExitCoder {
func (s *Shell) configExitErr(validateFn func() error) cli.ExitCoder {
err := validateFn()
if err != nil {
- fmt.Println("Invalid configuration:", err)
- fmt.Println()
- return s.errorOut(errors.New("invalid configuration"))
+ if err.Error() != "invalid secrets: Database.AllowSimplePasswords: invalid value (true): insecure configs are not allowed on secure builds" {
+ fmt.Println("Invalid configuration:", err)
+ fmt.Println()
+ return s.errorOut(errors.New("invalid configuration"))
+ } else {
+ fmt.Printf("Notification for upcoming configuration change: %v\n", err)
+ fmt.Println("This configuration will be disallowed in future production releases.")
+ fmt.Println()
+ }
}
return nil
}
diff --git a/core/cmd/shell_local.go b/core/cmd/shell_local.go
index 4e0b97c894f..8323bbfbdc3 100644
--- a/core/cmd/shell_local.go
+++ b/core/cmd/shell_local.go
@@ -294,7 +294,10 @@ func (s *Shell) runNode(c *cli.Context) error {
err := s.Config.Validate()
if err != nil {
- return errors.Wrap(err, "config validation failed")
+ if err.Error() != "invalid secrets: Database.AllowSimplePasswords: invalid value (true): insecure configs are not allowed on secure builds" {
+ return errors.Wrap(err, "config validation failed")
+ }
+ lggr.Errorf("Notification for upcoming configuration change: %v", err)
}
lggr.Infow(fmt.Sprintf("Starting Chainlink Node %s at commit %s", static.Version, static.Sha), "Version", static.Version, "SHA", static.Sha)
@@ -627,7 +630,10 @@ func (s *Shell) RebroadcastTransactions(c *cli.Context) (err error) {
err = s.Config.Validate()
if err != nil {
- return s.errorOut(fmt.Errorf("error validating configuration: %+v", err))
+ if err.Error() != "invalid secrets: Database.AllowSimplePasswords: invalid value (true): insecure configs are not allowed on secure builds" {
+ return s.errorOut(fmt.Errorf("error validating configuration: %+v", err))
+ }
+ lggr.Errorf("Notification for required upcoming configuration change: %v", err)
}
err = keyStore.Unlock(s.Config.Password().Keystore())
From 0eb4542630105e98d9e4f77a813d0ef4a1ecaf79 Mon Sep 17 00:00:00 2001
From: Patrick
Date: Tue, 15 Aug 2023 09:26:41 -0400
Subject: [PATCH 10/40] release/2.4.0-fix: updating CHANGELOG for simple
passwords entry comments (#10165)
---
docs/CHANGELOG.md | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md
index d07d2f49e1e..7c995a64a60 100644
--- a/docs/CHANGELOG.md
+++ b/docs/CHANGELOG.md
@@ -18,12 +18,12 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
### Added
- Added the ability to specify and merge fields from multiple secrets files. Overrides of fields and keys are not allowed.
-### Upcoming Breaking Change
-- Starting in 2.5.0, chainlink nodes will no longer allow insecure configuration for production builds. Any TOML configuration that sets the following line will fail validation checks in `node start` or `node validate`:
+### Upcoming Required Configuration Change
+- Starting in 2.6.0, chainlink nodes will no longer allow insecure configuration for production builds. Any TOML configuration that sets the following line will fail validation checks in `node start` or `node validate`:
```
AllowSimplePasswords=true
```
-- To migrate on production builds, update the database password set in Database.URL to be 16 - 50 characters without leading or trailing whitespace. URI parsing rules apply to the chosen password - refer to RFC 3986 for special character escape rules.
+- To migrate on production builds, update the database password set in Database.URL to be 16 - 50 characters without leading or trailing whitespace. URI parsing rules apply to the chosen password - refer to [RFC 3986](https://datatracker.ietf.org/doc/html/rfc3986) for special character escape rules.
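For illustration, a hypothetical secrets TOML `Database` entry that satisfies the password rule above might look like the following sketch (credentials are placeholders; reserved characters in the password are percent-encoded per RFC 3986):

```toml
[Database]
# password is 16+ characters with no leading/trailing whitespace; '%21' encodes '!'
URL = 'postgresql://chainlink:Str0ngDbPassw0rd%21@localhost:5432/chainlink_db?sslmode=disable'
AllowSimplePasswords = false
```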
From bd2c6d7c621aaae9e6b0a14833d30eb2f9d7f120 Mon Sep 17 00:00:00 2001
From: Aaron Lu <50029043+aalu1418@users.noreply.github.com>
Date: Wed, 16 Aug 2023 12:46:14 -0600
Subject: [PATCH 11/40] add evm transfer validation for eip1559 (#10196)
* add transfer validation for eip1559
* evm-transfer-eip1559-validation-proposal - adding TxAmountWithFees to WrappedEvmEstimator
* small renaming + fix mocks
* fix dynamic fee + remove unnecessary check
* rename GetCost -> GetMaxCost
* address cleanup items + add test case for GetMaxCost
---------
Co-authored-by: patrickhuie19
---
.../chains/evm/gas/mocks/evm_fee_estimator.go | 37 ++++++++++++++-
core/chains/evm/gas/models.go | 21 +++++++++
core/chains/evm/gas/models_test.go | 27 +++++++++--
core/web/evm_transfer_controller.go | 18 +-------
core/web/evm_transfer_controller_test.go | 45 +++++++++++++++++++
5 files changed, 128 insertions(+), 20 deletions(-)
diff --git a/core/chains/evm/gas/mocks/evm_fee_estimator.go b/core/chains/evm/gas/mocks/evm_fee_estimator.go
index 67eed66a529..20e6c940f7e 100644
--- a/core/chains/evm/gas/mocks/evm_fee_estimator.go
+++ b/core/chains/evm/gas/mocks/evm_fee_estimator.go
@@ -3,10 +3,12 @@
package mocks
import (
- context "context"
+ big "math/big"
assets "github.com/smartcontractkit/chainlink/v2/core/assets"
+ context "context"
+
evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types"
gas "github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas"
@@ -104,6 +106,39 @@ func (_m *EvmFeeEstimator) GetFee(ctx context.Context, calldata []byte, feeLimit
return r0, r1, r2
}
+// GetMaxCost provides a mock function with given fields: ctx, amount, calldata, feeLimit, maxFeePrice, opts
+func (_m *EvmFeeEstimator) GetMaxCost(ctx context.Context, amount assets.Eth, calldata []byte, feeLimit uint32, maxFeePrice *assets.Wei, opts ...types.Opt) (*big.Int, error) {
+ _va := make([]interface{}, len(opts))
+ for _i := range opts {
+ _va[_i] = opts[_i]
+ }
+ var _ca []interface{}
+ _ca = append(_ca, ctx, amount, calldata, feeLimit, maxFeePrice)
+ _ca = append(_ca, _va...)
+ ret := _m.Called(_ca...)
+
+ var r0 *big.Int
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, assets.Eth, []byte, uint32, *assets.Wei, ...types.Opt) (*big.Int, error)); ok {
+ return rf(ctx, amount, calldata, feeLimit, maxFeePrice, opts...)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, assets.Eth, []byte, uint32, *assets.Wei, ...types.Opt) *big.Int); ok {
+ r0 = rf(ctx, amount, calldata, feeLimit, maxFeePrice, opts...)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*big.Int)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, assets.Eth, []byte, uint32, *assets.Wei, ...types.Opt) error); ok {
+ r1 = rf(ctx, amount, calldata, feeLimit, maxFeePrice, opts...)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
// HealthReport provides a mock function with given fields:
func (_m *EvmFeeEstimator) HealthReport() map[string]error {
ret := _m.Called()
diff --git a/core/chains/evm/gas/models.go b/core/chains/evm/gas/models.go
index 7c0571c897d..a9cbc0a924a 100644
--- a/core/chains/evm/gas/models.go
+++ b/core/chains/evm/gas/models.go
@@ -41,6 +41,9 @@ type EvmFeeEstimator interface {
GetFee(ctx context.Context, calldata []byte, feeLimit uint32, maxFeePrice *assets.Wei, opts ...feetypes.Opt) (fee EvmFee, chainSpecificFeeLimit uint32, err error)
BumpFee(ctx context.Context, originalFee EvmFee, feeLimit uint32, maxFeePrice *assets.Wei, attempts []EvmPriorAttempt) (bumpedFee EvmFee, chainSpecificFeeLimit uint32, err error)
+
+ // GetMaxCost returns the total value = max price x fee units + transferred value
+ GetMaxCost(ctx context.Context, amount assets.Eth, calldata []byte, feeLimit uint32, maxFeePrice *assets.Wei, opts ...feetypes.Opt) (*big.Int, error)
}
// NewEstimator returns the estimator for a given config
@@ -173,6 +176,24 @@ func (e WrappedEvmEstimator) GetFee(ctx context.Context, calldata []byte, feeLim
return
}
+func (e WrappedEvmEstimator) GetMaxCost(ctx context.Context, amount assets.Eth, calldata []byte, feeLimit uint32, maxFeePrice *assets.Wei, opts ...feetypes.Opt) (*big.Int, error) {
+ fees, gasLimit, err := e.GetFee(ctx, calldata, feeLimit, maxFeePrice, opts...)
+ if err != nil {
+ return nil, err
+ }
+
+ var gasPrice *assets.Wei
+ if e.EIP1559Enabled {
+ gasPrice = fees.DynamicFeeCap
+ } else {
+ gasPrice = fees.Legacy
+ }
+
+ fee := new(big.Int).Mul(gasPrice.ToInt(), big.NewInt(int64(gasLimit)))
+ amountWithFees := new(big.Int).Add(amount.ToInt(), fee)
+ return amountWithFees, nil
+}
+
func (e WrappedEvmEstimator) BumpFee(ctx context.Context, originalFee EvmFee, feeLimit uint32, maxFeePrice *assets.Wei, attempts []EvmPriorAttempt) (bumpedFee EvmFee, chainSpecificFeeLimit uint32, err error) {
// validate only 1 fee type is present
if (!originalFee.ValidDynamic() && originalFee.Legacy == nil) || (originalFee.ValidDynamic() && originalFee.Legacy != nil) {
diff --git a/core/chains/evm/gas/models_test.go b/core/chains/evm/gas/models_test.go
index 3e793d27348..048646a980c 100644
--- a/core/chains/evm/gas/models_test.go
+++ b/core/chains/evm/gas/models_test.go
@@ -2,6 +2,7 @@ package gas_test
import (
"context"
+ "math/big"
"testing"
"github.com/stretchr/testify/assert"
@@ -22,14 +23,14 @@ func TestWrappedEvmEstimator(t *testing.T) {
legacyFee := assets.NewWeiI(10)
dynamicFee := gas.DynamicFee{
FeeCap: assets.NewWeiI(20),
- TipCap: assets.NewWeiI(21),
+ TipCap: assets.NewWeiI(1),
}
e := mocks.NewEvmEstimator(t)
e.On("GetDynamicFee", mock.Anything, mock.Anything, mock.Anything).
- Return(dynamicFee, gasLimit, nil).Once()
+ Return(dynamicFee, gasLimit, nil).Twice()
e.On("GetLegacyGas", mock.Anything, mock.Anything, mock.Anything, mock.Anything).
- Return(legacyFee, gasLimit, nil).Once()
+ Return(legacyFee, gasLimit, nil).Twice()
e.On("BumpDynamicFee", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).
Return(dynamicFee, gasLimit, nil).Once()
e.On("BumpLegacyGas", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).
@@ -92,4 +93,24 @@ func TestWrappedEvmEstimator(t *testing.T) {
}, 0, nil, nil)
assert.Error(t, err)
})
+
+ t.Run("GetMaxCost", func(t *testing.T) {
+ val := assets.NewEthValue(1)
+
+ // expect legacy fee data
+ dynamicFees := false
+ estimator := gas.NewWrappedEvmEstimator(e, dynamicFees)
+ total, err := estimator.GetMaxCost(ctx, val, nil, gasLimit, nil)
+ require.NoError(t, err)
+ fee := new(big.Int).Mul(legacyFee.ToInt(), big.NewInt(int64(gasLimit)))
+ assert.Equal(t, new(big.Int).Add(val.ToInt(), fee), total)
+
+ // expect dynamic fee data
+ dynamicFees = true
+ estimator = gas.NewWrappedEvmEstimator(e, dynamicFees)
+ total, err = estimator.GetMaxCost(ctx, val, nil, gasLimit, nil)
+ require.NoError(t, err)
+ fee = new(big.Int).Mul(dynamicFee.FeeCap.ToInt(), big.NewInt(int64(gasLimit)))
+ assert.Equal(t, new(big.Int).Add(val.ToInt(), fee), total)
+ })
}
diff --git a/core/web/evm_transfer_controller.go b/core/web/evm_transfer_controller.go
index 7e7df04160c..e5ceded58d4 100644
--- a/core/web/evm_transfer_controller.go
+++ b/core/web/evm_transfer_controller.go
@@ -9,7 +9,6 @@ import (
"github.com/smartcontractkit/chainlink/v2/core/assets"
"github.com/smartcontractkit/chainlink/v2/core/chains/evm"
- "github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas"
"github.com/smartcontractkit/chainlink/v2/core/logger/audit"
"github.com/smartcontractkit/chainlink/v2/core/services/chainlink"
"github.com/smartcontractkit/chainlink/v2/core/store/models"
@@ -94,26 +93,13 @@ func ValidateEthBalanceForTransfer(c *gin.Context, chain evm.Chain, fromAddr com
return errors.Errorf("balance is too low for this transaction to be executed: %v", balance)
}
- var fees gas.EvmFee
-
gasLimit := chain.Config().EVM().GasEstimator().LimitTransfer()
estimator := chain.GasEstimator()
- fees, gasLimit, err = estimator.GetFee(c, nil, gasLimit, chain.Config().EVM().GasEstimator().PriceMaxKey(fromAddr))
+ amountWithFees, err := estimator.GetMaxCost(c, amount, nil, gasLimit, chain.Config().EVM().GasEstimator().PriceMaxKey(fromAddr))
if err != nil {
- return errors.Wrap(err, "failed to estimate gas")
- }
-
- // TODO: support EIP-1559 transactions
- if fees.Legacy == nil {
- return errors.New("estimator did not return legacy tx fee estimates")
+ return err
}
- gasPrice := fees.Legacy
-
- // Creating a `Big` struct to avoid having a mutation on `tr.Amount` and hence affecting the value stored in the DB
- amountAsBig := utils.NewBig(amount.ToInt())
- fee := new(big.Int).Mul(gasPrice.ToInt(), big.NewInt(int64(gasLimit)))
- amountWithFees := new(big.Int).Add(amountAsBig.ToInt(), fee)
if balance.Cmp(amountWithFees) < 0 {
// ETH balance is less than the sent amount + fees
return errors.Errorf("balance is too low for this transaction to be executed: %v", balance)
diff --git a/core/web/evm_transfer_controller_test.go b/core/web/evm_transfer_controller_test.go
index ce4e8c5172f..c58b810e67c 100644
--- a/core/web/evm_transfer_controller_test.go
+++ b/core/web/evm_transfer_controller_test.go
@@ -252,3 +252,48 @@ func TestTransfersController_JSONBindingError(t *testing.T) {
cltest.AssertServerResponse(t, resp, http.StatusBadRequest)
}
+
+func TestTransfersController_CreateSuccess_eip1559(t *testing.T) {
+ t.Parallel()
+
+ key := cltest.MustGenerateRandomKey(t)
+
+ ethClient := cltest.NewEthMocksWithTransactionsOnBlocksAssertions(t)
+
+ balance, err := assets.NewEthValueS("200")
+ require.NoError(t, err)
+
+ ethClient.On("PendingNonceAt", mock.Anything, key.Address).Return(uint64(1), nil)
+ ethClient.On("BalanceAt", mock.Anything, key.Address, (*big.Int)(nil)).Return(balance.ToInt(), nil)
+
+ config := configtest2.NewGeneralConfig(t, func(c *chainlink.Config, s *chainlink.Secrets) {
+ c.EVM[0].GasEstimator.EIP1559DynamicFees = ptr(true)
+ c.EVM[0].GasEstimator.Mode = ptr("FixedPrice")
+ })
+
+ app := cltest.NewApplicationWithConfigAndKey(t, config, ethClient, key)
+ require.NoError(t, app.Start(testutils.Context(t)))
+
+ client := app.NewHTTPClient(cltest.APIEmailAdmin)
+
+ amount, err := assets.NewEthValueS("100")
+ require.NoError(t, err)
+
+ request := models.SendEtherRequest{
+ DestinationAddress: common.HexToAddress("0xFA01FA015C8A5332987319823728982379128371"),
+ FromAddress: key.Address,
+ Amount: amount,
+ }
+
+ body, err := json.Marshal(&request)
+ assert.NoError(t, err)
+
+ resp, cleanup := client.Post("/v2/transfers", bytes.NewBuffer(body))
+ t.Cleanup(cleanup)
+
+ errors := cltest.ParseJSONAPIErrors(t, resp.Body)
+ assert.Equal(t, http.StatusOK, resp.StatusCode)
+ assert.Len(t, errors.Errors, 0)
+
+ cltest.AssertCount(t, app.GetSqlxDB(), "eth_txes", 1)
+}
From 3ceda48d7fa885cd32e7b3f508a1b84e830086c8 Mon Sep 17 00:00:00 2001
From: Aaron Lu <50029043+aalu1418@users.noreply.github.com>
Date: Wed, 16 Aug 2023 15:57:10 -0600
Subject: [PATCH 12/40] add changelog for fix in #10196 (#10235)
* add changelog for fix in #10196
* specify which flag for overriding in changelog
---
docs/CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md
index 7c995a64a60..5746682f930 100644
--- a/docs/CHANGELOG.md
+++ b/docs/CHANGELOG.md
@@ -14,6 +14,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## 2.4.0 - UNRELEASED
### Fixed
- Updated `v2/keys/evm` and `v2/keys/eth` routes to return 400 and 404 status codes where appropriate. Previously 500s were returned when requested resources were not found or client requests could not be parsed.
+- Fixed withdrawing ETH from CL node for EIP1559 enabled chains. Previously would error out unless validation was overridden with `allowHigherAmounts`.
### Added
- Added the ability to specify and merge fields from multiple secrets files. Overrides of fields and keys are not allowed.
From a76c31cf1058ca014c3bf58cf228accf14dcb9fe Mon Sep 17 00:00:00 2001
From: Patrick
Date: Mon, 7 Aug 2023 12:11:47 -0400
Subject: [PATCH 13/40] bug/2326-simple-passwords: re-enabling simple passwords
restriction for prod (#10021)
* bug/2326-simple-passwords: re-enabling simple passwords restriction for prod
* strong password for tests
---------
Co-authored-by: skudasov
---
core/config/toml/types.go | 2 +
core/config/toml/types_test.go | 77 ++++++++++++++++++++++++++++++++++
docs/CHANGELOG.md | 3 +-
3 files changed, 81 insertions(+), 1 deletion(-)
diff --git a/core/config/toml/types.go b/core/config/toml/types.go
index e4dd190d9a5..471d444339c 100644
--- a/core/config/toml/types.go
+++ b/core/config/toml/types.go
@@ -148,6 +148,8 @@ func validateDBURL(dbURI url.URL) error {
func (d *DatabaseSecrets) ValidateConfig() (err error) {
if d.URL == nil || (*url.URL)(d.URL).String() == "" {
err = multierr.Append(err, configutils.ErrEmpty{Name: "URL", Msg: "must be provided and non-empty"})
+ } else if *d.AllowSimplePasswords && build.IsProd() {
+ err = multierr.Append(err, configutils.ErrInvalid{Name: "AllowSimplePasswords", Value: true, Msg: "insecure configs are not allowed on secure builds"})
} else if !*d.AllowSimplePasswords {
if verr := validateDBURL((url.URL)(*d.URL)); verr != nil {
err = multierr.Append(err, configutils.ErrInvalid{Name: "URL", Value: "*****", Msg: dbURLPasswordComplexity(verr)})
diff --git a/core/config/toml/types_test.go b/core/config/toml/types_test.go
index a85138c91b3..e2eb5eed815 100644
--- a/core/config/toml/types_test.go
+++ b/core/config/toml/types_test.go
@@ -2,10 +2,13 @@ package toml
import (
"fmt"
+ "net/url"
+ "strings"
"testing"
"github.com/stretchr/testify/assert"
+ "github.com/smartcontractkit/chainlink/v2/core/build"
"github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
"github.com/smartcontractkit/chainlink/v2/core/store/models"
"github.com/smartcontractkit/chainlink/v2/core/utils"
@@ -97,3 +100,77 @@ func Test_validateDBURL(t *testing.T) {
})
}
}
+
+func TestValidateConfig(t *testing.T) {
+ validUrl := models.URL(url.URL{Scheme: "https", Host: "localhost"})
+ validSecretURL := *models.NewSecretURL(&validUrl)
+
+ invalidEmptyUrl := models.URL(url.URL{})
+ invalidEmptySecretURL := *models.NewSecretURL(&invalidEmptyUrl)
+
+ invalidBackupURL := models.URL(url.URL{Scheme: "http", Host: "localhost"})
+ invalidBackupSecretURL := *models.NewSecretURL(&invalidBackupURL)
+
+ tests := []struct {
+ name string
+ input *DatabaseSecrets
+ skip bool
+ expectedErrContains []string
+ }{
+ {
+ name: "Nil URL",
+ input: &DatabaseSecrets{
+ URL: nil,
+ },
+ expectedErrContains: []string{"URL: empty: must be provided and non-empty"},
+ },
+ {
+ name: "Empty URL",
+ input: &DatabaseSecrets{
+ URL: &invalidEmptySecretURL,
+ },
+ expectedErrContains: []string{"URL: empty: must be provided and non-empty"},
+ },
+ {
+ name: "Insecure Password in Production",
+ input: &DatabaseSecrets{
+ URL: &validSecretURL,
+ AllowSimplePasswords: &[]bool{true}[0],
+ },
+ skip: !build.IsProd(),
+ expectedErrContains: []string{"insecure configs are not allowed on secure builds"},
+ },
+ {
+ name: "Invalid Backup URL with Simple Passwords Not Allowed",
+ input: &DatabaseSecrets{
+ URL: &validSecretURL,
+ BackupURL: &invalidBackupSecretURL,
+ AllowSimplePasswords: &[]bool{false}[0],
+ },
+ expectedErrContains: []string{"missing or insufficiently complex password"},
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ // needed while -tags test is supported
+ if tt.skip {
+ t.SkipNow()
+ }
+ err := tt.input.ValidateConfig()
+ if err == nil && len(tt.expectedErrContains) > 0 {
+ t.Errorf("expected errors but got none")
+ return
+ }
+
+ if err != nil {
+ errStr := err.Error()
+ for _, expectedErrSubStr := range tt.expectedErrContains {
+ if !strings.Contains(errStr, expectedErrSubStr) {
+ t.Errorf("expected error to contain substring %q but got %v", expectedErrSubStr, errStr)
+ }
+ }
+ }
+ })
+ }
+}
diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md
index 5746682f930..0351648a50e 100644
--- a/docs/CHANGELOG.md
+++ b/docs/CHANGELOG.md
@@ -9,7 +9,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [dev]
-...
+### Changed
+- Simple passwords are no longer allowed for production builds
## 2.4.0 - UNRELEASED
### Fixed
From 76fb85cba425dbfe229db82a9ad7b18265a44f8c Mon Sep 17 00:00:00 2001
From: patrickhuie19
Date: Thu, 17 Aug 2023 10:01:57 -0400
Subject: [PATCH 14/40] fixing changelog
---
docs/CHANGELOG.md | 2 --
1 file changed, 2 deletions(-)
diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md
index 0351648a50e..e938159a48d 100644
--- a/docs/CHANGELOG.md
+++ b/docs/CHANGELOG.md
@@ -9,8 +9,6 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [dev]
-### Changed
-- Simple passwords are no longer allowed for production builds
## 2.4.0 - UNRELEASED
### Fixed
From c0865c6dc3cf2e0435b8edd8201f8ce21e75c603 Mon Sep 17 00:00:00 2001
From: chainchad <96362174+chainchad@users.noreply.github.com>
Date: Mon, 21 Aug 2023 14:09:49 -0400
Subject: [PATCH 15/40] Finalize date on changelog for 2.4.0
---
docs/CHANGELOG.md | 42 +++++++++++++++++++++++++++---------------
1 file changed, 27 insertions(+), 15 deletions(-)
diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md
index e938159a48d..5c53da2fee1 100644
--- a/docs/CHANGELOG.md
+++ b/docs/CHANGELOG.md
@@ -9,29 +9,36 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [dev]
+
+
+## 2.4.0 - 2023-08-21
-## 2.4.0 - UNRELEASED
### Fixed
-- Updated `v2/keys/evm` and `v2/keys/eth` routes to return 400 and 404 status codes where appropriate. Previously 500s were returned when requested resources were not found or client requests could not be parsed.
+
+- Updated `v2/keys/evm` and `v2/keys/eth` routes to return 400 and 404 status codes where appropriate. Previously 500s were returned when requested resources were not found or client requests could not be parsed.
- Fixed withdrawing ETH from CL node for EIP1559 enabled chains. Previously would error out unless validation was overridden with `allowHigherAmounts`.
### Added
+
- Added the ability to specify and merge fields from multiple secrets files. Overrides of fields and keys are not allowed.
### Upcoming Required Configuration Change
+
- Starting in 2.6.0, chainlink nodes will no longer allow insecure configuration for production builds. Any TOML configuration that sets the following line will fail validation checks in `node start` or `node validate`:
+
```
AllowSimplePasswords=true
```
-- To migrate on production builds, update the database password set in Database.URL to be 16 - 50 characters without leading or trailing whitespace. URI parsing rules apply to the chosen password - refer to [RFC 3986](https://datatracker.ietf.org/doc/html/rfc3986) for special character escape rules.
-
+- To migrate on production builds, update the database password set in Database.URL to be 16 - 50 characters without leading or trailing whitespace. URI parsing rules apply to the chosen password - refer to [RFC 3986](https://datatracker.ietf.org/doc/html/rfc3986) for special character escape rules.
## 2.3.0 - 2023-07-28
### Added
+
- Add a new field called `Order` (range from 1 to 100) to `EVM.Nodes` that is used for the `PriorityLevel` node selector and also as a tie-breaker for `HighestHead` and `TotalDifficulty`. `Order` levels are considered in ascending order. If not defined it will default to `Order = 100` (last level).
- Added new node selection mode called `PriorityLevel` for EVM, it is a tiered round-robin in ascending order of the`Order` field. Example:
+
```
[EVM.NodePool]
SelectionMode = 'PriorityLevel'
@@ -40,8 +47,9 @@ SelectionMode = 'PriorityLevel'
Name = '...'
WSURL = '...'
HTTPURL = '...'
-Order = 5
+Order = 5
```
+
- The config keys `WebServer.StartTimeout` and `WebServer.HTTPMaxSize`. These keys respectively set a timeout for the node server to
start and set the max request size for HTTP requests. Previously these attributes were set by
`JobPipeline.DefaultHTTPLimit`/`JobPipeline.DefaultHTTPTimeout`. To migrate to these new fields, set their values to be identical to
@@ -60,18 +68,20 @@ Order = 5
- Add two new config parameters `WebServer.ListenIP` and `WebServer.TLS.ListenIP` which allows binding Chainlink HTTP/HTTPS servers to a particular IP. The default is '0.0.0.0' which listens to all IP addresses (same behavior as before). Set to '127.0.0.1' to only allow connections from the local machine (this can be handy for local development).
- Add several new metrics for mercury feeds, related to WSRPC connections:
- - `mercury_transmit_timeout_count`
- - `mercury_dial_count`
- - `mercury_dial_success_count`
- - `mercury_dial_error_count`
- - `mercury_connection_reset_count`
+ - `mercury_transmit_timeout_count`
+ - `mercury_dial_count`
+ - `mercury_dial_success_count`
+ - `mercury_dial_error_count`
+ - `mercury_connection_reset_count`
Node operators may wish to add alerting based around these metrics.
### Fixed
+
- Fixed a bug in the `nodes xxx list` command that caused results to not be displayed correctly
### Changed
+
- Assumption violations for MaxFeePerGas >= BaseFeePerGas and MaxFeePerGas >= MaxPriorityFeePerGas in EIP-1559 effective gas price calculation will now use a gas price if specified
- Config validation now enforces protection against duplicate chain ids and node fields per provided TOML file. Duplicates accross multiple configuration files are still valid. If you have specified duplicate chain ids or nodes in a given configuration file, this change will error out of all `node` subcommands.
- Restricted scope of the `Evm.GasEstimator.LimitJobType.OCR`, `OCR.DefaultTransactionQueueDepth`, and `OCR.SimulateTransactions` settings so they
@@ -79,6 +89,7 @@ Node operators may wish to add alerting based around these metrics.
want your settings to apply to OCR2.
### Removed
+
- Legacy chain types Optimism and Optimism2. OptimismBedrock is now used to handle Optimism's special cases.
- Optimism Kovan configurations along with legacy error messages.
@@ -91,16 +102,18 @@ Node operators may wish to add alerting based around these metrics.
- New prometheus metric for mercury transmit queue: `mercury_transmit_queue_load`. This is a gauge, scoped by feed ID, that measures how many pending transmissions are in the queue. This should generally speaking be small (< 10 or so). Nops may wish to add alerting if this exceeds some amount.
- Experimental support of runtime process isolation for Solana data feeds. Requires plugin binaries to be installed and
configured via the env vars `CL_SOLANA_CMD` and `CL_MEDIAN_CMD`. See [plugins/README.md](../plugins/README.md).
+
### Fixed
- Fixed a bug which made it impossible to re-send the same transaction after abandoning it while manually changing the nonce.
### Changed
+
- Set default for EVM.GasEstimator.BumpTxDepth to EVM.Transactions.MaxInFlight.
- Bumped batch size defaults for EVM specific configuration. If you are overriding any of these fields in your local config, please consider if it is necessary:
- - `LogBackfillBatchSize = 1000`
- - `RPCDefaultBatchSize = 250`
- - `GasEstimator.BatchSize = 25`
+ - `LogBackfillBatchSize = 1000`
+ - `RPCDefaultBatchSize = 250`
+ - `GasEstimator.BatchSize = 25`
- Dropped support for Development Mode configuration. `CL_DEV` is now ignored on production builds.
- Updated Docker image's PostgreSQL client (used for backups) to v15 in order to support PostgreSQL v15 servers.
@@ -1871,8 +1884,7 @@ transactions that started hitting this limit and instead continue to resubmit
at the highest price that worked.
Node operators should check their geth nodes and remove this cap if configured,
-you can do this by running your geth node with `--rpc.gascap=0
---rpc.txfeecap=0` or setting these values in your config toml.
+you can do this by running your geth node with `--rpc.gascap=0 --rpc.txfeecap=0` or setting these values in your config toml.
- Make head backfill asynchronous. This should eliminate some harmless but
annoying errors related to backfilling heads, logged on startup and
From 6b0124e981b66685b5751d4f8081378e4f296d1f Mon Sep 17 00:00:00 2001
From: chainchad <96362174+chainchad@users.noreply.github.com>
Date: Tue, 26 Sep 2023 17:52:51 -0400
Subject: [PATCH 16/40] Bump version and update CHANGELOG for core v2.6.0
---
VERSION | 2 +-
docs/CHANGELOG.md | 35 +++++++++++++++++------------------
2 files changed, 18 insertions(+), 19 deletions(-)
diff --git a/VERSION b/VERSION
index 437459cd94c..e70b4523ae7 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-2.5.0
+2.6.0
diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md
index 7f0b80ebcfe..299e248428c 100644
--- a/docs/CHANGELOG.md
+++ b/docs/CHANGELOG.md
@@ -9,27 +9,33 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [dev]
+...
+
+## 2.6.0 - UNRELEASED
+
### Added
-- Simple password use in production builds is now disallowed - nodes with this configuration will not boot and will not pass config validation.
+- Simple password use in production builds is now disallowed - nodes with this configuration will not boot and will not pass config validation.
- Helper migrations function for injecting env vars into goose migrations. This was done to inject chainID into evm chain id not null in specs migrations.
- OCR2 jobs now support querying the state contract for configurations if it has been deployed. This can help on chains such as BSC which "manage" state bloat by arbitrarily deleting logs older than a certain date. In this case, if logs are missing we will query the contract directly and retrieve the latest config from chain state. Chainlink will perform no extra RPC calls unless the job spec has this feature explicitly enabled. On chains that require this, nops may see an increase in RPC calls. This can be enabled for OCR2 jobs by specifying `ConfigContractAddress` in the relay config TOML.
### Removed
-- Removed support for sending telemetry to the deprecated Explorer service. All nodes will have to remove `Explorer` related keys from TOML configuration and env vars.
+- Removed support for sending telemetry to the deprecated Explorer service. All nodes will have to remove `Explorer` related keys from TOML configuration and env vars.
- Removed default evmChainID logic where evmChainID was implicitly injected into the jobspecs based on node EVM chainID toml configuration. All newly created jobs(that have evmChainID field) will have to explicitly define evmChainID in the jobspec.
- Removed keyset migration that migrated v1 keys to v2 keys. All keys should've been migrated by now, and we don't permit creation of new v1 keys anymore
- All nodes will have to remove the following secret configurations:
- * `Explorer.AccessKey`
- * `Explorer.Secret`
-
- All nodes will have to remove the following configuration field: `ExplorerURL`
+All nodes will have to remove the following secret configurations:
+
+- `Explorer.AccessKey`
+- `Explorer.Secret`
+
+All nodes will have to remove the following configuration field: `ExplorerURL`
### Fixed
+
- Unauthenticated users executing CLI commands previously generated a confusing error log, which is now removed:
-```[ERROR] Error in transaction, rolling back: session missing or expired, please login again pg/transaction.go:118 ```
+ `[ERROR] Error in transaction, rolling back: session missing or expired, please login again pg/transaction.go:118 `
- Fixed a bug that was preventing job runs to be displayed when the job `chainID` was disabled.
- `chainlink txs evm create` returns a transaction hash for the attempted transaction in the CLI. Previously only the sender, recipient and `unstarted` state were returned.
- Fixed a bug where `evmChainId` is requested instead of `id` or `evm-chain-id` in CLI error verbatim
@@ -40,19 +46,12 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## 2.5.0 - 2023-09-13
-- Unauthenticated users executing CLI commands previously generated a confusing error log, which is now removed:
- ```
- [ERROR] Error in transaction, rolling back: session missing or expired, please login again pg/transaction.go:118
- ```
-- Fixed a bug that was preventing job runs to be displayed when the job `chainID` was disabled.
-- `chainlink txs evm create` returns a transaction hash for the attempted transaction in the CLI. Previously only the sender, receipient and `unstarted` state were returned.
-
### Added
- New prometheus metrics for mercury:
- - `mercury_price_feed_missing`
- - `mercury_price_feed_errors`
- Nops may wish to add alerting on these.
+ - `mercury_price_feed_missing`
+ - `mercury_price_feed_errors`
+ Nops may wish to add alerting on these.
### Upcoming Required Configuration Change
From 29735666263c08bcb925c9b349077f5318a5bbb0 Mon Sep 17 00:00:00 2001
From: Patrick
Date: Wed, 27 Sep 2023 14:14:41 -0300
Subject: [PATCH 17/40] core/plugins: fix logger field reference (#10815)
(#10817)
(cherry picked from commit c1348edea7f0a860ee80493886994eb07d992ae4)
Co-authored-by: Jordan Krage
---
plugins/server.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/plugins/server.go b/plugins/server.go
index b1d43612480..3b0348a68b4 100644
--- a/plugins/server.go
+++ b/plugins/server.go
@@ -68,7 +68,7 @@ func (s *Server) start() error {
if err != nil {
return fmt.Errorf("error getting environment configuration: %w", err)
}
- s.PromServer = NewPromServer(envCfg.PrometheusPort(), s.lggr)
+ s.PromServer = NewPromServer(envCfg.PrometheusPort(), s.Logger)
err = s.PromServer.Start()
if err != nil {
return fmt.Errorf("error starting prometheus server: %w", err)
From d02ebbd519c3824865d0265e8a4de9da86bfdbcf Mon Sep 17 00:00:00 2001
From: Akshay Aggarwal
Date: Wed, 27 Sep 2023 19:05:54 +0100
Subject: [PATCH 18/40] Fix automation - mercury v0.3 response decoding
(#10812) (#10818)
* Fix automation - mercury v0.3 response decoding
* update
(cherry picked from commit f7d0b38b6379d5cf3399b9d84064752a9ad4cdee)
---
.../ocr2keeper/evm21/streams_lookup.go | 15 ++++-
.../ocr2keeper/evm21/streams_lookup_test.go | 60 ++++++++++++++-----
2 files changed, 57 insertions(+), 18 deletions(-)
diff --git a/core/services/ocr2/plugins/ocr2keeper/evm21/streams_lookup.go b/core/services/ocr2/plugins/ocr2keeper/evm21/streams_lookup.go
index 2155b383002..6c1789de9c4 100644
--- a/core/services/ocr2/plugins/ocr2keeper/evm21/streams_lookup.go
+++ b/core/services/ocr2/plugins/ocr2keeper/evm21/streams_lookup.go
@@ -66,10 +66,10 @@ type MercuryV03Response struct {
}
type MercuryV03Report struct {
- FeedID []byte `json:"feedID"` // feed id in hex
+ FeedID string `json:"feedID"` // feed id in hex encoded
ValidFromTimestamp uint32 `json:"validFromTimestamp"`
ObservationsTimestamp uint32 `json:"observationsTimestamp"`
- FullReport []byte `json:"fullReport"` // the actual mercury report of this feed, can be sent to verifier
+ FullReport string `json:"fullReport"` // the actual hex encoded mercury report of this feed, can be sent to verifier
}
type MercuryData struct {
@@ -528,6 +528,7 @@ func (r *EvmRegistry) multiFeedsRequest(ctx context.Context, ch chan<- MercuryDa
} else if resp.StatusCode == 420 {
// in 0.3, this will happen when missing/malformed query args, missing or bad required headers, non-existent feeds, or no permissions for feeds
retryable = false
+ state = encoding.InvalidMercuryRequest
return fmt.Errorf("at timestamp %s upkeep %s received status code %d from mercury v0.3, most likely this is caused by missing/malformed query args, missing or bad required headers, non-existent feeds, or no permissions for feeds", sl.time.String(), sl.upkeepId.String(), resp.StatusCode)
} else if resp.StatusCode != http.StatusOK {
retryable = false
@@ -549,13 +550,21 @@ func (r *EvmRegistry) multiFeedsRequest(ctx context.Context, ch chan<- MercuryDa
// hence, retry in this case. retry will help when we send a very new timestamp and reports are not yet generated
if len(response.Reports) != len(sl.feeds) {
// TODO: AUTO-5044: calculate what reports are missing and log a warning
+ lggr.Warnf("at timestamp %s upkeep %s mercury v0.3 server returned 200 status with %d reports while we requested %d feeds, treating as 404 (not found) and retrying", sl.time.String(), sl.upkeepId.String(), len(response.Reports), len(sl.feeds))
retryable = true
state = encoding.MercuryFlakyFailure
return fmt.Errorf("%d", http.StatusNotFound)
}
var reportBytes [][]byte
for _, rsp := range response.Reports {
- reportBytes = append(reportBytes, rsp.FullReport)
+ b, err := hexutil.Decode(rsp.FullReport)
+ if err != nil {
+ lggr.Warnf("at timestamp %s upkeep %s failed to decode reportBlob %s: %v", sl.time.String(), sl.upkeepId.String(), rsp.FullReport, err)
+ retryable = false
+ state = encoding.InvalidMercuryResponse
+ return err
+ }
+ reportBytes = append(reportBytes, b)
}
ch <- MercuryData{
Index: 0,
diff --git a/core/services/ocr2/plugins/ocr2keeper/evm21/streams_lookup_test.go b/core/services/ocr2/plugins/ocr2keeper/evm21/streams_lookup_test.go
index 42aeecb64f6..f59cec18c1e 100644
--- a/core/services/ocr2/plugins/ocr2keeper/evm21/streams_lookup_test.go
+++ b/core/services/ocr2/plugins/ocr2keeper/evm21/streams_lookup_test.go
@@ -731,16 +731,16 @@ func TestEvmRegistry_MultiFeedRequest(t *testing.T) {
response: &MercuryV03Response{
Reports: []MercuryV03Report{
{
- FeedID: hexutil.MustDecode("0x4554482d5553442d415242495452554d2d544553544e45540000000000000000"),
+ FeedID: "0x4554482d5553442d415242495452554d2d544553544e45540000000000000000",
ValidFromTimestamp: 123456,
ObservationsTimestamp: 123456,
- FullReport: hexutil.MustDecode("0xab2123dc00000012"),
+ FullReport: "0xab2123dc00000012",
},
{
- FeedID: hexutil.MustDecode("0x4254432d5553442d415242495452554d2d544553544e45540000000000000000"),
+ FeedID: "0x4254432d5553442d415242495452554d2d544553544e45540000000000000000",
ValidFromTimestamp: 123458,
ObservationsTimestamp: 123458,
- FullReport: hexutil.MustDecode("0xab2123dc00000016"),
+ FullReport: "0xab2123dc00000016",
},
},
},
@@ -761,20 +761,49 @@ func TestEvmRegistry_MultiFeedRequest(t *testing.T) {
response: &MercuryV03Response{
Reports: []MercuryV03Report{
{
- FeedID: hexutil.MustDecode("0x4554482d5553442d415242495452554d2d544553544e45540000000000000000"),
+ FeedID: "0x4554482d5553442d415242495452554d2d544553544e45540000000000000000",
ValidFromTimestamp: 123456,
ObservationsTimestamp: 123456,
- FullReport: hexutil.MustDecode("0xab2123dc00000012"),
+ FullReport: "0xab2123dc00000012",
},
{
- FeedID: hexutil.MustDecode("0x4254432d5553442d415242495452554d2d544553544e45540000000000000000"),
+ FeedID: "0x4254432d5553442d415242495452554d2d544553544e45540000000000000000",
ValidFromTimestamp: 123458,
ObservationsTimestamp: 123458,
- FullReport: hexutil.MustDecode("0xab2123dc00000019"),
+ FullReport: "0xab2123dc00000019",
},
},
},
},
+ {
+ name: "failure - fail to decode reportBlob",
+ lookup: &StreamsLookup{
+ feedParamKey: feedIDs,
+ feeds: []string{"0x4554482d5553442d415242495452554d2d544553544e45540000000000000000", "0x4254432d5553442d415242495452554d2d544553544e45540000000000000000"},
+ timeParamKey: timestamp,
+ time: big.NewInt(123456),
+ upkeepId: upkeepId,
+ },
+ response: &MercuryV03Response{
+ Reports: []MercuryV03Report{
+ {
+ FeedID: "0x4554482d5553442d415242495452554d2d544553544e45540000000000000000",
+ ValidFromTimestamp: 123456,
+ ObservationsTimestamp: 123456,
+ FullReport: "qerwiu", // invalid hex blob
+ },
+ {
+ FeedID: "0x4254432d5553442d415242495452554d2d544553544e45540000000000000000",
+ ValidFromTimestamp: 123458,
+ ObservationsTimestamp: 123458,
+ FullReport: "0xab2123dc00000016",
+ },
+ },
+ },
+ statusCode: http.StatusOK,
+ retryable: false,
+ errorMessage: "All attempts fail:\n#1: hex string without 0x prefix",
+ },
{
name: "failure - returns retryable",
lookup: &StreamsLookup{
@@ -839,26 +868,26 @@ func TestEvmRegistry_MultiFeedRequest(t *testing.T) {
firstResponse: &MercuryV03Response{
Reports: []MercuryV03Report{
{
- FeedID: hexutil.MustDecode("0x4554482d5553442d415242495452554d2d544553544e45540000000000000000"),
+ FeedID: "0x4554482d5553442d415242495452554d2d544553544e45540000000000000000",
ValidFromTimestamp: 123456,
ObservationsTimestamp: 123456,
- FullReport: hexutil.MustDecode("0xab2123dc00000012"),
+ FullReport: "0xab2123dc00000012",
},
},
},
response: &MercuryV03Response{
Reports: []MercuryV03Report{
{
- FeedID: hexutil.MustDecode("0x4554482d5553442d415242495452554d2d544553544e45540000000000000000"),
+ FeedID: "0x4554482d5553442d415242495452554d2d544553544e45540000000000000000",
ValidFromTimestamp: 123456,
ObservationsTimestamp: 123456,
- FullReport: hexutil.MustDecode("0xab2123dc00000012"),
+ FullReport: "0xab2123dc00000012",
},
{
- FeedID: hexutil.MustDecode("0x4254432d5553442d415242495452554d2d544553544e45540000000000000000"),
+ FeedID: "0x4254432d5553442d415242495452554d2d544553544e45540000000000000000",
ValidFromTimestamp: 123458,
ObservationsTimestamp: 123458,
- FullReport: hexutil.MustDecode("0xab2123dc00000019"),
+ FullReport: "0xab2123dc00000019",
},
},
},
@@ -930,7 +959,8 @@ func TestEvmRegistry_MultiFeedRequest(t *testing.T) {
assert.Nil(t, m.Error)
var reports [][]byte
for _, rsp := range tt.response.Reports {
- reports = append(reports, rsp.FullReport)
+ b, _ := hexutil.Decode(rsp.FullReport)
+ reports = append(reports, b)
}
assert.Equal(t, reports, m.Bytes)
}
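The hunks above switch `MercuryV03Report.FeedID` and `FullReport` from pre-decoded bytes to hex strings, so the registry now decodes `FullReport` at use time via go-ethereum's `hexutil.Decode`. A minimal, self-contained sketch (not part of this patch) of the decode behavior the new failure case relies on:

```go
// Sketch only: shows hexutil.Decode on a valid report blob and on the invalid
// "qerwiu" blob used in the new test case. Assumes go-ethereum is available on
// the module path; the values mirror the test fixtures above.
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common/hexutil"
)

func main() {
	if b, err := hexutil.Decode("0xab2123dc00000016"); err == nil {
		fmt.Printf("decoded %d bytes\n", len(b)) // decoded 8 bytes
	}

	if _, err := hexutil.Decode("qerwiu"); err != nil {
		fmt.Println(err) // hex string without 0x prefix
	}
}
```

A blob without the `0x` prefix fails with `hexutil.ErrMissingPrefix` ("hex string without 0x prefix"), which is the error text the new test case expects inside the retry wrapper's "All attempts fail" message.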
From 8392f18c79cdae0c2fc44ec89c5fe2b0f1484a2f Mon Sep 17 00:00:00 2001
From: Sneha Agnihotri <180277+snehaagni@users.noreply.github.com>
Date: Wed, 18 Oct 2023 12:51:32 -0700
Subject: [PATCH 19/40] Finalize date on changelog for 2.6.0 (#10996)
Signed-off-by: Sneha Agnihotri
---
docs/CHANGELOG.md | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md
index 299e248428c..4032ce80687 100644
--- a/docs/CHANGELOG.md
+++ b/docs/CHANGELOG.md
@@ -11,7 +11,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
...
-## 2.6.0 - UNRELEASED
+
+
+## 2.6.0 - 2023-10-18
### Added
@@ -42,8 +44,6 @@ All nodes will have to remove the following configuration field: `ExplorerURL`
- Fixed a bug that would cause the node to shut down while performing backup
- Fixed health checker to include more services in the prometheus `health` metric and HTTP `/health` endpoint
-
-
## 2.5.0 - 2023-09-13
### Added
From 7248dc7b5cbac72417cea7b6b15aa80e762467ae Mon Sep 17 00:00:00 2001
From: Sneha Agnihotri
Date: Tue, 24 Oct 2023 14:29:15 -0700
Subject: [PATCH 20/40] Bump version and update CHANGELOG for core v2.7.0
---
VERSION | 2 +-
docs/CHANGELOG.md | 4 ++++
2 files changed, 5 insertions(+), 1 deletion(-)
diff --git a/VERSION b/VERSION
index e70b4523ae7..24ba9a38de6 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-2.6.0
+2.7.0
diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md
index 469cd0d1686..44d018769ec 100644
--- a/docs/CHANGELOG.md
+++ b/docs/CHANGELOG.md
@@ -9,6 +9,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [dev]
+...
+
+## 2.7.0 - UNRELEASED
+
### Added
- Added new configuration field named `LeaseDuration` for `EVM.NodePool` that will periodically check if internal subscriptions are connected to the "best" (as defined by the `SelectionMode`) node and switch to it if necessary. Setting this value to `0s` will disable this feature.
From 9dba1e479909f1e4f9a6252ae91a9124680304ec Mon Sep 17 00:00:00 2001
From: Jordan Krage
Date: Fri, 27 Oct 2023 08:52:16 -0500
Subject: [PATCH 21/40] core: log a warning when deprecated P2P.V1 config is
set in TOML (#11073)
* core: log a warning when deprecated P2P.V1 config is set in TOML
* change default P2P from V1 to V2
* bump operator ui
(cherry picked from commit 5808e734cf024a5e8c5450e939e1f2d5b3cba546)
---
.../evm/config/mocks/chain_scoped_config.go | 6 +-
core/cmd/shell_local.go | 5 +-
core/config/app_config.go | 2 +-
core/config/docs/core.toml | 7 +-
core/scripts/chaincli/handler/handler.go | 4 +-
.../common/vrf/docker/toml-config/base.toml | 1 -
core/scripts/ocr2vrf/util.go | 13 +-
core/services/chainlink/config.go | 44 +++
core/services/chainlink/config_general.go | 14 +-
core/services/chainlink/config_p2p_test.go | 7 +-
core/services/chainlink/config_test.go | 39 ++-
.../chainlink/mocks/general_config.go | 6 +-
.../testdata/config-empty-effective.toml | 4 +-
.../chainlink/testdata/config-full.toml | 4 +-
.../config-multi-chain-effective.toml | 4 +-
.../ocr2/plugins/mercury/helpers_test.go | 4 -
core/utils/config/validate.go | 14 +
.../testdata/config-empty-effective.toml | 4 +-
core/web/resolver/testdata/config-full.toml | 4 +-
.../config-multi-chain-effective.toml | 4 +-
docs/CHANGELOG.md | 10 +-
docs/CONFIG.md | 12 +-
integration-tests/benchmark/keeper_test.go | 1 -
.../chaos/automation_chaos_test.go | 1 -
integration-tests/config/config.go | 6 +-
integration-tests/performance/ocr_test.go | 4 +
.../reorg/automation_reorg_test.go | 6 +-
integration-tests/smoke/automation_test.go | 1 -
integration-tests/smoke/keeper_test.go | 2 +-
integration-tests/types/config/node/core.go | 11 +-
.../types/config/node/defaults/sample.toml | 1 -
testdata/scripts/node/validate/default.txtar | 4 +-
.../disk-based-logging-disabled.txtar | 4 +-
.../validate/disk-based-logging-no-dir.txtar | 4 +-
.../node/validate/disk-based-logging.txtar | 4 +-
testdata/scripts/node/validate/invalid.txtar | 4 +-
testdata/scripts/node/validate/valid.txtar | 4 +-
testdata/scripts/node/validate/warnings.txtar | 279 ++++++++++++++++++
38 files changed, 463 insertions(+), 85 deletions(-)
create mode 100644 testdata/scripts/node/validate/warnings.txtar
diff --git a/core/chains/evm/config/mocks/chain_scoped_config.go b/core/chains/evm/config/mocks/chain_scoped_config.go
index 0854b82165a..cb18282f495 100644
--- a/core/chains/evm/config/mocks/chain_scoped_config.go
+++ b/core/chains/evm/config/mocks/chain_scoped_config.go
@@ -252,9 +252,9 @@ func (_m *ChainScopedConfig) Log() coreconfig.Log {
return r0
}
-// LogConfiguration provides a mock function with given fields: log
-func (_m *ChainScopedConfig) LogConfiguration(log coreconfig.LogfFn) {
- _m.Called(log)
+// LogConfiguration provides a mock function with given fields: log, warn
+func (_m *ChainScopedConfig) LogConfiguration(log coreconfig.LogfFn, warn coreconfig.LogfFn) {
+ _m.Called(log, warn)
}
// Mercury provides a mock function with given fields:
diff --git a/core/cmd/shell_local.go b/core/cmd/shell_local.go
index f578604db33..401375238d8 100644
--- a/core/cmd/shell_local.go
+++ b/core/cmd/shell_local.go
@@ -290,7 +290,7 @@ func (s *Shell) runNode(c *cli.Context) error {
s.Config.SetPasswords(pwd, vrfpwd)
- s.Config.LogConfiguration(lggr.Debugf)
+ s.Config.LogConfiguration(lggr.Debugf, lggr.Warnf)
if err := s.Config.Validate(); err != nil {
return errors.Wrap(err, "config validation failed")
@@ -689,7 +689,8 @@ var errDBURLMissing = errors.New("You must set CL_DATABASE_URL env variable or p
// ConfigValidate validate the client configuration and pretty-prints results
func (s *Shell) ConfigFileValidate(_ *cli.Context) error {
- s.Config.LogConfiguration(func(f string, params ...any) { fmt.Printf(f, params...) })
+ fn := func(f string, params ...any) { fmt.Printf(f, params...) }
+ s.Config.LogConfiguration(fn, fn)
if err := s.configExitErr(s.Config.Validate); err != nil {
return err
}
diff --git a/core/config/app_config.go b/core/config/app_config.go
index ab8d9559673..648939b871b 100644
--- a/core/config/app_config.go
+++ b/core/config/app_config.go
@@ -28,7 +28,7 @@ type AppConfig interface {
Validate() error
ValidateDB() error
- LogConfiguration(log LogfFn)
+ LogConfiguration(log, warn LogfFn)
SetLogLevel(lvl zapcore.Level) error
SetLogSQL(logSQL bool)
SetPasswords(keystore, vrf *string)
diff --git a/core/config/docs/core.toml b/core/config/docs/core.toml
index 18f5810adcc..1ca4c656a7f 100644
--- a/core/config/docs/core.toml
+++ b/core/config/docs/core.toml
@@ -359,6 +359,8 @@ TraceLogging = false # Default
# automatically fall back to V1. If V2 starts working again later, it will automatically be preferred again. This is useful
# for migrating networks without downtime. Note that the two networking stacks _must not_ be configured to bind to the same IP/port.
#
+# Note: P2P.V1 is deprecated and will be removed in the future.
+#
# All nodes in the OCR network should share the same networking stack.
[P2P]
# IncomingMessageBufferSize is the per-remote number of incoming
@@ -377,9 +379,10 @@ PeerID = '12D3KooWMoejJznyDuEk5aX6GvbjaG12UzeornPCBNzMRqdwrFJw' # Example
# TraceLogging enables trace level logging.
TraceLogging = false # Default
+# P2P.V1 is deprecated and will be removed in a future version.
[P2P.V1]
# Enabled enables P2P V1.
-Enabled = true # Default
+Enabled = false # Default
# AnnounceIP should be set as the externally reachable IP address of the Chainlink node.
AnnounceIP = '1.2.3.4' # Example
# AnnouncePort should be set as the externally reachable port of the Chainlink node.
@@ -423,7 +426,7 @@ PeerstoreWriteInterval = '5m' # Default
[P2P.V2]
# Enabled enables P2P V2.
# Note: V1.Enabled is true by default, so it must be set false in order to run V2 only.
-Enabled = false # Default
+Enabled = true # Default
# AnnounceAddresses is the addresses the peer will advertise on the network in `host:port` form as accepted by the TCP version of Go’s `net.Dial`.
# The addresses should be reachable by other nodes on the network. When attempting to connect to another node,
# a node will attempt to dial all of the other node’s AnnounceAddresses in round-robin fashion.
diff --git a/core/scripts/chaincli/handler/handler.go b/core/scripts/chaincli/handler/handler.go
index c51792f9adc..f72e94605d4 100644
--- a/core/scripts/chaincli/handler/handler.go
+++ b/core/scripts/chaincli/handler/handler.go
@@ -64,9 +64,7 @@ HTTPSPort = 0
LogPoller = true
[OCR2]
Enabled = true
-[P2P]
-[P2P.V2]
-Enabled = true
+
[Keeper]
TurnLookBack = 0
[[EVM]]
diff --git a/core/scripts/common/vrf/docker/toml-config/base.toml b/core/scripts/common/vrf/docker/toml-config/base.toml
index 0bb83beb94a..39aab2e63ab 100644
--- a/core/scripts/common/vrf/docker/toml-config/base.toml
+++ b/core/scripts/common/vrf/docker/toml-config/base.toml
@@ -26,6 +26,5 @@ HTTPSPort = 0
[P2P]
[P2P.V2]
-Enabled = true
AnnounceAddresses = ['0.0.0.0:6690']
ListenAddresses = ['0.0.0.0:6690']
diff --git a/core/scripts/ocr2vrf/util.go b/core/scripts/ocr2vrf/util.go
index f8d104a5f3e..a2ff55524d3 100644
--- a/core/scripts/ocr2vrf/util.go
+++ b/core/scripts/ocr2vrf/util.go
@@ -14,16 +14,17 @@ import (
"github.com/ethereum/go-ethereum/common"
gethtypes "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethclient"
+ "github.com/urfave/cli"
+ "go.dedis.ch/kyber/v3"
+ "go.dedis.ch/kyber/v3/group/edwards25519"
+ "go.dedis.ch/kyber/v3/pairing"
+
"github.com/smartcontractkit/libocr/offchainreporting2plus/confighelper"
"github.com/smartcontractkit/libocr/offchainreporting2plus/types"
"github.com/smartcontractkit/ocr2vrf/altbn_128"
"github.com/smartcontractkit/ocr2vrf/dkg"
"github.com/smartcontractkit/ocr2vrf/ocr2vrf"
ocr2vrftypes "github.com/smartcontractkit/ocr2vrf/types"
- "github.com/urfave/cli"
- "go.dedis.ch/kyber/v3"
- "go.dedis.ch/kyber/v3/group/edwards25519"
- "go.dedis.ch/kyber/v3/pairing"
"github.com/smartcontractkit/chainlink/v2/core/cmd"
"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/authorized_forwarder"
@@ -43,11 +44,7 @@ var (
g1 = suite.G1()
g2 = suite.G2()
tomlConfigTemplate = `
- [P2P.V1]
- Enabled = false
-
[P2P.V2]
- Enabled = true
ListenAddresses = ["127.0.0.1:8000"]
[Feature]
diff --git a/core/services/chainlink/config.go b/core/services/chainlink/config.go
index 62be789f859..26e2d539bac 100644
--- a/core/services/chainlink/config.go
+++ b/core/services/chainlink/config.go
@@ -52,6 +52,50 @@ func (c *Config) TOMLString() (string, error) {
return string(b), nil
}
+// deprecationWarnings returns an error if the Config contains deprecated fields.
+// This is typically used before defaults have been applied, with input from the user.
+func (c *Config) deprecationWarnings() (err error) {
+ if c.P2P.V1 != (toml.P2PV1{}) {
+ err = multierr.Append(err, config.ErrDeprecated{Name: "P2P.V1"})
+ var err2 error
+ if c.P2P.V1.AnnounceIP != nil {
+ err2 = multierr.Append(err2, config.ErrDeprecated{Name: "AnnounceIP"})
+ }
+ if c.P2P.V1.AnnouncePort != nil {
+ err2 = multierr.Append(err2, config.ErrDeprecated{Name: "AnnouncePort"})
+ }
+ if c.P2P.V1.BootstrapCheckInterval != nil {
+ err2 = multierr.Append(err2, config.ErrDeprecated{Name: "BootstrapCheckInterval"})
+ }
+ if c.P2P.V1.DefaultBootstrapPeers != nil {
+ err2 = multierr.Append(err2, config.ErrDeprecated{Name: "DefaultBootstrapPeers"})
+ }
+ if c.P2P.V1.DHTAnnouncementCounterUserPrefix != nil {
+ err2 = multierr.Append(err2, config.ErrDeprecated{Name: "DHTAnnouncementCounterUserPrefix"})
+ }
+ if c.P2P.V1.DHTLookupInterval != nil {
+ err2 = multierr.Append(err2, config.ErrDeprecated{Name: "DHTLookupInterval"})
+ }
+ if c.P2P.V1.ListenIP != nil {
+ err2 = multierr.Append(err2, config.ErrDeprecated{Name: "ListenIP"})
+ }
+ if c.P2P.V1.ListenPort != nil {
+ err2 = multierr.Append(err2, config.ErrDeprecated{Name: "ListenPort"})
+ }
+ if c.P2P.V1.NewStreamTimeout != nil {
+ err2 = multierr.Append(err2, config.ErrDeprecated{Name: "NewStreamTimeout"})
+ }
+ if c.P2P.V1.PeerstoreWriteInterval != nil {
+ err2 = multierr.Append(err2, config.ErrDeprecated{Name: "PeerstoreWriteInterval"})
+ }
+ err2 = config.NamedMultiErrorList(err2, "P2P.V1")
+ err = multierr.Append(err, err2)
+ }
+ return
+}
+
+// Validate returns an error if the Config is not valid for use, as-is.
+// This is typically used after defaults have been applied.
func (c *Config) Validate() error {
if err := config.Validate(c); err != nil {
return fmt.Errorf("invalid configuration: %w", err)
diff --git a/core/services/chainlink/config_general.go b/core/services/chainlink/config_general.go
index 8e3dc100a44..6243146e91e 100644
--- a/core/services/chainlink/config_general.go
+++ b/core/services/chainlink/config_general.go
@@ -35,11 +35,13 @@ import (
type generalConfig struct {
inputTOML string // user input, normalized via de/re-serialization
effectiveTOML string // with default values included
- secretsTOML string // with env overdies includes, redacted
+	secretsTOML   string // with env overrides included, redacted
c *Config // all fields non-nil (unless the legacy method signature return a pointer)
secrets *Secrets
+ warning error // warnings about inputTOML, e.g. deprecated fields
+
logLevelDefault zapcore.Level
appIDOnce sync.Once
@@ -123,7 +125,7 @@ func (o *GeneralConfigOpts) parseSecrets(secrets string) error {
return nil
}
-// New returns a coreconfig.GeneralConfig for the given options.
+// New returns a GeneralConfig for the given options.
func (o GeneralConfigOpts) New() (GeneralConfig, error) {
err := o.parse()
if err != nil {
@@ -135,6 +137,8 @@ func (o GeneralConfigOpts) New() (GeneralConfig, error) {
return nil, err
}
+ _, warning := utils.MultiErrorList(o.Config.deprecationWarnings())
+
o.Config.setDefaults()
if !o.SkipEnv {
err = o.Secrets.setEnv()
@@ -163,6 +167,7 @@ func (o GeneralConfigOpts) New() (GeneralConfig, error) {
secretsTOML: secrets,
c: &o.Config,
secrets: &o.Secrets,
+ warning: warning,
}
if lvl := o.Config.Log.Level; lvl != nil {
cfg.logLevelDefault = zapcore.Level(*lvl)
@@ -253,10 +258,13 @@ func validateEnv() (err error) {
return
}
-func (g *generalConfig) LogConfiguration(log coreconfig.LogfFn) {
+func (g *generalConfig) LogConfiguration(log, warn coreconfig.LogfFn) {
log("# Secrets:\n%s\n", g.secretsTOML)
log("# Input Configuration:\n%s\n", g.inputTOML)
log("# Effective Configuration, with defaults applied:\n%s\n", g.effectiveTOML)
+ if g.warning != nil {
+ warn("# Configuration warning:\n%s\n", g.warning)
+ }
}
// ConfigTOML implements chainlink.ConfigV2
diff --git a/core/services/chainlink/config_p2p_test.go b/core/services/chainlink/config_p2p_test.go
index d6adfe7051c..21ce8f17e48 100644
--- a/core/services/chainlink/config_p2p_test.go
+++ b/core/services/chainlink/config_p2p_test.go
@@ -4,9 +4,10 @@ import (
"testing"
"time"
- "github.com/smartcontractkit/libocr/commontypes"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+
+ "github.com/smartcontractkit/libocr/commontypes"
)
func TestP2PConfig(t *testing.T) {
@@ -23,7 +24,7 @@ func TestP2PConfig(t *testing.T) {
assert.True(t, p2p.TraceLogging())
v1 := p2p.V1()
- assert.False(t, v1.Enabled())
+ assert.True(t, v1.Enabled())
assert.Equal(t, "1.2.3.4", v1.AnnounceIP().String())
assert.Equal(t, uint16(1234), v1.AnnouncePort())
assert.Equal(t, time.Minute, v1.BootstrapCheckInterval())
@@ -38,7 +39,7 @@ func TestP2PConfig(t *testing.T) {
assert.Equal(t, time.Minute, v1.PeerstoreWriteInterval())
v2 := p2p.V2()
- assert.True(t, v2.Enabled())
+ assert.False(t, v2.Enabled())
assert.Equal(t, []string{"a", "b", "c"}, v2.AnnounceAddresses())
assert.ElementsMatch(
t,
diff --git a/core/services/chainlink/config_test.go b/core/services/chainlink/config_test.go
index d811dd8209d..597dab6ba1c 100644
--- a/core/services/chainlink/config_test.go
+++ b/core/services/chainlink/config_test.go
@@ -389,7 +389,7 @@ func TestConfig_Marshal(t *testing.T) {
PeerID: mustPeerID("12D3KooWMoejJznyDuEk5aX6GvbjaG12UzeornPCBNzMRqdwrFJw"),
TraceLogging: ptr(true),
V1: toml.P2PV1{
- Enabled: ptr(false),
+ Enabled: ptr(true),
AnnounceIP: mustIP("1.2.3.4"),
AnnouncePort: ptr[uint16](1234),
BootstrapCheckInterval: models.MustNewDuration(time.Minute),
@@ -402,7 +402,7 @@ func TestConfig_Marshal(t *testing.T) {
PeerstoreWriteInterval: models.MustNewDuration(time.Minute),
},
V2: toml.P2PV2{
- Enabled: ptr(true),
+ Enabled: ptr(false),
AnnounceAddresses: &[]string{"a", "b", "c"},
DefaultBootstrappers: &[]ocrcommontypes.BootstrapperLocator{
{PeerID: "12D3KooWMoejJznyDuEk5aX6GvbjaG12UzeornPCBNzMRqdwrFJw", Addrs: []string{"foo:42", "bar:10"}},
@@ -820,7 +820,7 @@ PeerID = '12D3KooWMoejJznyDuEk5aX6GvbjaG12UzeornPCBNzMRqdwrFJw'
TraceLogging = true
[P2P.V1]
-Enabled = false
+Enabled = true
AnnounceIP = '1.2.3.4'
AnnouncePort = 1234
BootstrapCheckInterval = '1m0s'
@@ -833,7 +833,7 @@ NewStreamTimeout = '1s'
PeerstoreWriteInterval = '1m0s'
[P2P.V2]
-Enabled = true
+Enabled = false
AnnounceAddresses = ['a', 'b', 'c']
DefaultBootstrappers = ['12D3KooWMoejJznyDuEk5aX6GvbjaG12UzeornPCBNzMRqdwrFJw@foo:42/bar:10', '12D3KooWMoejJznyDuEk5aX6GvbjaG12UzeornPCBNzMRqdwrFJw@test:99']
DeltaDial = '1m0s'
@@ -1248,6 +1248,21 @@ func Test_generalConfig_LogConfiguration(t *testing.T) {
secrets = "# Secrets:\n"
input = "# Input Configuration:\n"
effective = "# Effective Configuration, with defaults applied:\n"
+ warning = "# Configuration warning:\n"
+
+ deprecated = `2 errors:
+ - P2P.V1: is deprecated and will be removed in a future version
+ - P2P.V1: 10 errors:
+ - AnnounceIP: is deprecated and will be removed in a future version
+ - AnnouncePort: is deprecated and will be removed in a future version
+ - BootstrapCheckInterval: is deprecated and will be removed in a future version
+ - DefaultBootstrapPeers: is deprecated and will be removed in a future version
+ - DHTAnnouncementCounterUserPrefix: is deprecated and will be removed in a future version
+ - DHTLookupInterval: is deprecated and will be removed in a future version
+ - ListenIP: is deprecated and will be removed in a future version
+ - ListenPort: is deprecated and will be removed in a future version
+ - NewStreamTimeout: is deprecated and will be removed in a future version
+ - PeerstoreWriteInterval: is deprecated and will be removed in a future version`
)
tests := []struct {
name string
@@ -1257,10 +1272,11 @@ func Test_generalConfig_LogConfiguration(t *testing.T) {
wantConfig string
wantEffective string
wantSecrets string
+ wantWarning string
}{
{name: "empty", wantEffective: emptyEffectiveTOML, wantSecrets: emptyEffectiveSecretsTOML},
{name: "full", inputSecrets: secretsFullTOML, inputConfig: fullTOML,
- wantConfig: fullTOML, wantEffective: fullTOML, wantSecrets: secretsFullRedactedTOML},
+ wantConfig: fullTOML, wantEffective: fullTOML, wantSecrets: secretsFullRedactedTOML, wantWarning: deprecated},
{name: "multi-chain", inputSecrets: secretsMultiTOML, inputConfig: multiChainTOML,
wantConfig: multiChainTOML, wantEffective: multiChainEffectiveTOML, wantSecrets: secretsMultiRedactedTOML},
}
@@ -1274,10 +1290,11 @@ func Test_generalConfig_LogConfiguration(t *testing.T) {
}
c, err := opts.New()
require.NoError(t, err)
- c.LogConfiguration(lggr.Infof)
+ c.LogConfiguration(lggr.Infof, lggr.Warnf)
inputLogs := observed.FilterMessageSnippet(secrets).All()
if assert.Len(t, inputLogs, 1) {
+ assert.Equal(t, zapcore.InfoLevel, inputLogs[0].Level)
got := strings.TrimPrefix(inputLogs[0].Message, secrets)
got = strings.TrimSuffix(got, "\n")
assert.Equal(t, tt.wantSecrets, got)
@@ -1285,6 +1302,7 @@ func Test_generalConfig_LogConfiguration(t *testing.T) {
inputLogs = observed.FilterMessageSnippet(input).All()
if assert.Len(t, inputLogs, 1) {
+ assert.Equal(t, zapcore.InfoLevel, inputLogs[0].Level)
got := strings.TrimPrefix(inputLogs[0].Message, input)
got = strings.TrimSuffix(got, "\n")
assert.Equal(t, tt.wantConfig, got)
@@ -1292,10 +1310,19 @@ func Test_generalConfig_LogConfiguration(t *testing.T) {
inputLogs = observed.FilterMessageSnippet(effective).All()
if assert.Len(t, inputLogs, 1) {
+ assert.Equal(t, zapcore.InfoLevel, inputLogs[0].Level)
got := strings.TrimPrefix(inputLogs[0].Message, effective)
got = strings.TrimSuffix(got, "\n")
assert.Equal(t, tt.wantEffective, got)
}
+
+ inputLogs = observed.FilterMessageSnippet(warning).All()
+ if tt.wantWarning != "" && assert.Len(t, inputLogs, 1) {
+ assert.Equal(t, zapcore.WarnLevel, inputLogs[0].Level)
+ got := strings.TrimPrefix(inputLogs[0].Message, warning)
+ got = strings.TrimSuffix(got, "\n")
+ assert.Equal(t, tt.wantWarning, got)
+ }
})
}
}
diff --git a/core/services/chainlink/mocks/general_config.go b/core/services/chainlink/mocks/general_config.go
index 8098b9634f5..0bc51ea4310 100644
--- a/core/services/chainlink/mocks/general_config.go
+++ b/core/services/chainlink/mocks/general_config.go
@@ -298,9 +298,9 @@ func (_m *GeneralConfig) Log() config.Log {
return r0
}
-// LogConfiguration provides a mock function with given fields: log
-func (_m *GeneralConfig) LogConfiguration(log config.LogfFn) {
- _m.Called(log)
+// LogConfiguration provides a mock function with given fields: log, warn
+func (_m *GeneralConfig) LogConfiguration(log config.LogfFn, warn config.LogfFn) {
+ _m.Called(log, warn)
}
// Mercury provides a mock function with given fields:
diff --git a/core/services/chainlink/testdata/config-empty-effective.toml b/core/services/chainlink/testdata/config-empty-effective.toml
index e746d66777d..48d432138a8 100644
--- a/core/services/chainlink/testdata/config-empty-effective.toml
+++ b/core/services/chainlink/testdata/config-empty-effective.toml
@@ -142,7 +142,7 @@ PeerID = ''
TraceLogging = false
[P2P.V1]
-Enabled = true
+Enabled = false
AnnounceIP = ''
AnnouncePort = 0
BootstrapCheckInterval = '20s'
@@ -155,7 +155,7 @@ NewStreamTimeout = '10s'
PeerstoreWriteInterval = '5m0s'
[P2P.V2]
-Enabled = false
+Enabled = true
AnnounceAddresses = []
DefaultBootstrappers = []
DeltaDial = '15s'
diff --git a/core/services/chainlink/testdata/config-full.toml b/core/services/chainlink/testdata/config-full.toml
index 3bd422f8923..1534a411dc1 100644
--- a/core/services/chainlink/testdata/config-full.toml
+++ b/core/services/chainlink/testdata/config-full.toml
@@ -148,7 +148,7 @@ PeerID = '12D3KooWMoejJznyDuEk5aX6GvbjaG12UzeornPCBNzMRqdwrFJw'
TraceLogging = true
[P2P.V1]
-Enabled = false
+Enabled = true
AnnounceIP = '1.2.3.4'
AnnouncePort = 1234
BootstrapCheckInterval = '1m0s'
@@ -161,7 +161,7 @@ NewStreamTimeout = '1s'
PeerstoreWriteInterval = '1m0s'
[P2P.V2]
-Enabled = true
+Enabled = false
AnnounceAddresses = ['a', 'b', 'c']
DefaultBootstrappers = ['12D3KooWMoejJznyDuEk5aX6GvbjaG12UzeornPCBNzMRqdwrFJw@foo:42/bar:10', '12D3KooWMoejJznyDuEk5aX6GvbjaG12UzeornPCBNzMRqdwrFJw@test:99']
DeltaDial = '1m0s'
diff --git a/core/services/chainlink/testdata/config-multi-chain-effective.toml b/core/services/chainlink/testdata/config-multi-chain-effective.toml
index 89b034169c6..1dcbfe3a830 100644
--- a/core/services/chainlink/testdata/config-multi-chain-effective.toml
+++ b/core/services/chainlink/testdata/config-multi-chain-effective.toml
@@ -142,7 +142,7 @@ PeerID = ''
TraceLogging = false
[P2P.V1]
-Enabled = true
+Enabled = false
AnnounceIP = ''
AnnouncePort = 0
BootstrapCheckInterval = '20s'
@@ -155,7 +155,7 @@ NewStreamTimeout = '10s'
PeerstoreWriteInterval = '5m0s'
[P2P.V2]
-Enabled = false
+Enabled = true
AnnounceAddresses = []
DefaultBootstrappers = []
DeltaDial = '15s'
diff --git a/core/services/ocr2/plugins/mercury/helpers_test.go b/core/services/ocr2/plugins/mercury/helpers_test.go
index ce4e0895164..60904b58139 100644
--- a/core/services/ocr2/plugins/mercury/helpers_test.go
+++ b/core/services/ocr2/plugins/mercury/helpers_test.go
@@ -190,10 +190,6 @@ func setupNode(
c.P2P.PeerID = ptr(p2pKey.PeerID())
c.P2P.TraceLogging = ptr(true)
- // [P2P.V1]
- // Enabled = false
- c.P2P.V1.Enabled = ptr(false)
-
// [P2P.V2]
// Enabled = true
// AnnounceAddresses = ['$EXT_IP:17775']
diff --git a/core/utils/config/validate.go b/core/utils/config/validate.go
index 3ed0ffbabba..32cb94b5205 100644
--- a/core/utils/config/validate.go
+++ b/core/utils/config/validate.go
@@ -6,6 +6,7 @@ import (
"strconv"
"strings"
+ "github.com/Masterminds/semver/v3"
"go.uber.org/multierr"
"github.com/smartcontractkit/chainlink-relay/pkg/config"
@@ -146,3 +147,16 @@ type ErrOverride struct {
func (e ErrOverride) Error() string {
return fmt.Sprintf("%s: overrides (duplicate keys or list elements) are not allowed for multiple secrets files", e.Name)
}
+
+type ErrDeprecated struct {
+ Name string
+ Version semver.Version
+}
+
+func (e ErrDeprecated) Error() string {
+ when := "a future version"
+ if e.Version != (semver.Version{}) {
+ when = fmt.Sprintf("version %s", e.Version)
+ }
+ return fmt.Sprintf("%s: is deprecated and will be removed in %s", e.Name, when)
+}
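For reference, a small self-contained sketch (the `ErrDeprecated` type is copied here rather than imported, purely for illustration) of how the message renders with and without a pinned removal version:

```go
// Sketch only: duplicates the ErrDeprecated type added above to show its
// Error() output. The real type lives in core/utils/config.
package main

import (
	"fmt"

	"github.com/Masterminds/semver/v3"
)

type ErrDeprecated struct {
	Name    string
	Version semver.Version
}

func (e ErrDeprecated) Error() string {
	when := "a future version"
	if e.Version != (semver.Version{}) {
		when = fmt.Sprintf("version %s", e.Version)
	}
	return fmt.Sprintf("%s: is deprecated and will be removed in %s", e.Name, when)
}

func main() {
	fmt.Println(ErrDeprecated{Name: "P2P.V1"})
	// P2P.V1: is deprecated and will be removed in a future version

	fmt.Println(ErrDeprecated{Name: "P2P.V1", Version: *semver.MustParse("2.9.0")})
	// P2P.V1: is deprecated and will be removed in version 2.9.0
}
```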
diff --git a/core/web/resolver/testdata/config-empty-effective.toml b/core/web/resolver/testdata/config-empty-effective.toml
index e746d66777d..48d432138a8 100644
--- a/core/web/resolver/testdata/config-empty-effective.toml
+++ b/core/web/resolver/testdata/config-empty-effective.toml
@@ -142,7 +142,7 @@ PeerID = ''
TraceLogging = false
[P2P.V1]
-Enabled = true
+Enabled = false
AnnounceIP = ''
AnnouncePort = 0
BootstrapCheckInterval = '20s'
@@ -155,7 +155,7 @@ NewStreamTimeout = '10s'
PeerstoreWriteInterval = '5m0s'
[P2P.V2]
-Enabled = false
+Enabled = true
AnnounceAddresses = []
DefaultBootstrappers = []
DeltaDial = '15s'
diff --git a/core/web/resolver/testdata/config-full.toml b/core/web/resolver/testdata/config-full.toml
index 5a815b2e012..4b53396b94c 100644
--- a/core/web/resolver/testdata/config-full.toml
+++ b/core/web/resolver/testdata/config-full.toml
@@ -148,7 +148,7 @@ PeerID = '12D3KooWMoejJznyDuEk5aX6GvbjaG12UzeornPCBNzMRqdwrFJw'
TraceLogging = true
[P2P.V1]
-Enabled = false
+Enabled = true
AnnounceIP = '1.2.3.4'
AnnouncePort = 1234
BootstrapCheckInterval = '1m0s'
@@ -161,7 +161,7 @@ NewStreamTimeout = '1s'
PeerstoreWriteInterval = '1m0s'
[P2P.V2]
-Enabled = true
+Enabled = false
AnnounceAddresses = ['a', 'b', 'c']
DefaultBootstrappers = ['12D3KooWMoejJznyDuEk5aX6GvbjaG12UzeornPCBNzMRqdwrFJw@foo:42/bar:10', '12D3KooWMoejJznyDuEk5aX6GvbjaG12UzeornPCBNzMRqdwrFJw@test:99']
DeltaDial = '1m0s'
diff --git a/core/web/resolver/testdata/config-multi-chain-effective.toml b/core/web/resolver/testdata/config-multi-chain-effective.toml
index 89b034169c6..1dcbfe3a830 100644
--- a/core/web/resolver/testdata/config-multi-chain-effective.toml
+++ b/core/web/resolver/testdata/config-multi-chain-effective.toml
@@ -142,7 +142,7 @@ PeerID = ''
TraceLogging = false
[P2P.V1]
-Enabled = true
+Enabled = false
AnnounceIP = ''
AnnouncePort = 0
BootstrapCheckInterval = '20s'
@@ -155,7 +155,7 @@ NewStreamTimeout = '10s'
PeerstoreWriteInterval = '5m0s'
[P2P.V2]
-Enabled = false
+Enabled = true
AnnounceAddresses = []
DefaultBootstrappers = []
DeltaDial = '15s'
diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md
index 44d018769ec..daeddf2ce66 100644
--- a/docs/CHANGELOG.md
+++ b/docs/CHANGELOG.md
@@ -29,9 +29,15 @@ These will eventually replace `TelemetryIngress.URL` and `TelemetryIngress.Serve
- LogPoller will now use finality tags to dynamically determine finality on evm chains if `UseFinalityTags=true`, rather than the fixed `FinalityDepth` specified in toml config
-### Upcoming Required Configuration Change
+### Changed
+
+- `P2P.V1` is now disabled by default (`Enabled = false`). To use it, it must be explicitly enabled by setting `Enabled = true`. However, it is deprecated and will be removed in a future version.
+- `P2P.V2` is now enabled (`Enabled = true`) by default.
-- Starting in 2.9.0, chainlink nodes will no longer allow `TelemetryIngress.URL` and `TelemetryIngress.ServerPubKey`. Any TOML configuration that sets this fields will prevent the node from booting. These fields will be replaced by `[[TelemetryIngress.Endpoints]]`
+### Upcoming Required Configuration Changes
+Starting in `v2.9.0`:
+- `TelemetryIngress.URL` and `TelemetryIngress.ServerPubKey` will no longer be allowed. Any TOML configuration that sets these fields will prevent the node from booting. These fields will be replaced by `[[TelemetryIngress.Endpoints]]`
+- `P2P.V1` will no longer be supported and must not be set in the TOML configuration in order for the node to boot. Use `P2P.V2` instead. If you are using both, `V1` can simply be removed.
### Removed
diff --git a/docs/CONFIG.md b/docs/CONFIG.md
index 9582a940c32..d97cbabd233 100644
--- a/docs/CONFIG.md
+++ b/docs/CONFIG.md
@@ -981,6 +981,8 @@ If both are configured, then for each link with another peer, V2 networking will
automatically fall back to V1. If V2 starts working again later, it will automatically be preferred again. This is useful
for migrating networks without downtime. Note that the two networking stacks _must not_ be configured to bind to the same IP/port.
+Note: P2P.V1 is deprecated and will be removed in the future.
+
All nodes in the OCR network should share the same networking stack.
### IncomingMessageBufferSize
@@ -1017,7 +1019,7 @@ TraceLogging enables trace level logging.
## P2P.V1
```toml
[P2P.V1]
-Enabled = true # Default
+Enabled = false # Default
AnnounceIP = '1.2.3.4' # Example
AnnouncePort = 1337 # Example
BootstrapCheckInterval = '20s' # Default
@@ -1029,11 +1031,11 @@ ListenPort = 1337 # Example
NewStreamTimeout = '10s' # Default
PeerstoreWriteInterval = '5m' # Default
```
-
+P2P.V1 is deprecated and will be removed in a future version.
### Enabled
```toml
-Enabled = true # Default
+Enabled = false # Default
```
Enabled enables P2P V1.
@@ -1119,7 +1121,7 @@ PeerstoreWriteInterval controls how often the peerstore for the OCR V1 networkin
## P2P.V2
```toml
[P2P.V2]
-Enabled = false # Default
+Enabled = true # Default
AnnounceAddresses = ['1.2.3.4:9999', '[a52d:0:a88:1274::abcd]:1337'] # Example
DefaultBootstrappers = ['12D3KooWMHMRLQkgPbFSYHwD3NBuwtS1AmxhvKVUrcfyaGDASR4U@1.2.3.4:9999', '12D3KooWM55u5Swtpw9r8aFLQHEtw7HR4t44GdNs654ej5gRs2Dh@example.com:1234'] # Example
DeltaDial = '15s' # Default
@@ -1130,7 +1132,7 @@ ListenAddresses = ['1.2.3.4:9999', '[a52d:0:a88:1274::abcd]:1337'] # Example
### Enabled
```toml
-Enabled = false # Default
+Enabled = true # Default
```
Enabled enables P2P V2.
Note: V1.Enabled is true by default, so it must be set false in order to run V2 only.
diff --git a/integration-tests/benchmark/keeper_test.go b/integration-tests/benchmark/keeper_test.go
index 7178ab854ea..a3db60f3b30 100644
--- a/integration-tests/benchmark/keeper_test.go
+++ b/integration-tests/benchmark/keeper_test.go
@@ -37,7 +37,6 @@ Enabled = true
[P2P]
[P2P.V2]
-Enabled = true
AnnounceAddresses = ["0.0.0.0:6690"]
ListenAddresses = ["0.0.0.0:6690"]
[Keeper]
diff --git a/integration-tests/chaos/automation_chaos_test.go b/integration-tests/chaos/automation_chaos_test.go
index 244f6c36ea9..6f2cacdb03e 100644
--- a/integration-tests/chaos/automation_chaos_test.go
+++ b/integration-tests/chaos/automation_chaos_test.go
@@ -37,7 +37,6 @@ Enabled = true
[P2P]
[P2P.V2]
-Enabled = true
AnnounceAddresses = ["0.0.0.0:6690"]
ListenAddresses = ["0.0.0.0:6690"]`
diff --git a/integration-tests/config/config.go b/integration-tests/config/config.go
index cd3f5983a28..44c108b0d7f 100644
--- a/integration-tests/config/config.go
+++ b/integration-tests/config/config.go
@@ -4,6 +4,10 @@ var (
BaseOCRP2PV1Config = `[OCR]
Enabled = true
+[P2P]
+[P2P.V2]
+Enabled = false
+
[P2P]
[P2P.V1]
Enabled = true
@@ -18,7 +22,6 @@ Enabled = true
[P2P]
[P2P.V2]
-Enabled = true
AnnounceAddresses = ["0.0.0.0:6690"]
ListenAddresses = ["0.0.0.0:6690"]`
@@ -67,7 +70,6 @@ CaptureEATelemetry = true
[P2P]
[P2P.V2]
-Enabled = true
ListenAddresses = ['0.0.0.0:6690']`
TelemetryIngressConfig = `[TelemetryIngress]
diff --git a/integration-tests/performance/ocr_test.go b/integration-tests/performance/ocr_test.go
index b18d7f1f791..f468a0e0370 100644
--- a/integration-tests/performance/ocr_test.go
+++ b/integration-tests/performance/ocr_test.go
@@ -102,6 +102,10 @@ func setupOCRTest(t *testing.T) (testEnvironment *environment.Environment, testN
baseTOML := `[OCR]
Enabled = true
+[P2P]
+[P2P.V2]
+Enabled = false
+
[P2P]
[P2P.V1]
Enabled = true
diff --git a/integration-tests/reorg/automation_reorg_test.go b/integration-tests/reorg/automation_reorg_test.go
index e94e5c28538..660d9c48e12 100644
--- a/integration-tests/reorg/automation_reorg_test.go
+++ b/integration-tests/reorg/automation_reorg_test.go
@@ -9,6 +9,9 @@ import (
"time"
"github.com/onsi/gomega"
+ "github.com/stretchr/testify/require"
+ "go.uber.org/zap/zapcore"
+
"github.com/smartcontractkit/chainlink-env/environment"
"github.com/smartcontractkit/chainlink-env/pkg/cdk8s/blockscout"
"github.com/smartcontractkit/chainlink-env/pkg/helm/chainlink"
@@ -17,8 +20,6 @@ import (
"github.com/smartcontractkit/chainlink-testing-framework/logging"
"github.com/smartcontractkit/chainlink-testing-framework/networks"
"github.com/smartcontractkit/chainlink-testing-framework/utils"
- "github.com/stretchr/testify/require"
- "go.uber.org/zap/zapcore"
"github.com/smartcontractkit/chainlink/integration-tests/actions"
"github.com/smartcontractkit/chainlink/integration-tests/client"
@@ -35,7 +36,6 @@ Enabled = true
[P2P]
[P2P.V2]
-Enabled = true
AnnounceAddresses = ["0.0.0.0:6690"]
ListenAddresses = ["0.0.0.0:6690"]`
networkTOML = `Enabled = true
diff --git a/integration-tests/smoke/automation_test.go b/integration-tests/smoke/automation_test.go
index 834de5ce481..9b79e1bea40 100644
--- a/integration-tests/smoke/automation_test.go
+++ b/integration-tests/smoke/automation_test.go
@@ -1038,7 +1038,6 @@ func setupAutomationTestDocker(
clNodeConfig.Keeper.TurnLookBack = it_utils.Ptr[int64](int64(0))
clNodeConfig.Keeper.Registry.SyncInterval = &syncInterval
clNodeConfig.Keeper.Registry.PerformGasOverhead = it_utils.Ptr[uint32](uint32(150000))
- clNodeConfig.P2P.V2.Enabled = it_utils.Ptr[bool](true)
clNodeConfig.P2P.V2.AnnounceAddresses = &[]string{"0.0.0.0:6690"}
clNodeConfig.P2P.V2.ListenAddresses = &[]string{"0.0.0.0:6690"}
diff --git a/integration-tests/smoke/keeper_test.go b/integration-tests/smoke/keeper_test.go
index 21dbeb8753c..d42944fd558 100644
--- a/integration-tests/smoke/keeper_test.go
+++ b/integration-tests/smoke/keeper_test.go
@@ -1098,7 +1098,7 @@ func setupKeeperTest(t *testing.T) (
contracts.LinkToken,
*test_env.CLClusterTestEnv,
) {
- clNodeConfig := node.NewConfig(node.NewBaseConfig())
+ clNodeConfig := node.NewConfig(node.NewBaseConfig(), node.WithP2Pv1())
turnLookBack := int64(0)
syncInterval := models.MustMakeDuration(5 * time.Second)
performGasOverhead := uint32(150000)
diff --git a/integration-tests/types/config/node/core.go b/integration-tests/types/config/node/core.go
index 966e270e518..37047cdb667 100644
--- a/integration-tests/types/config/node/core.go
+++ b/integration-tests/types/config/node/core.go
@@ -115,13 +115,14 @@ func WithP2Pv1() NodeConfigOpt {
ListenIP: utils2.MustIP("0.0.0.0"),
ListenPort: utils2.Ptr[uint16](6690),
}
+		// V2 now defaults to enabled, so explicitly disable it for the V1-only config
+ c.P2P.V2 = toml.P2PV2{Enabled: utils2.Ptr(false)}
}
}
func WithP2Pv2() NodeConfigOpt {
return func(c *chainlink.Config) {
c.P2P.V2 = toml.P2PV2{
- Enabled: utils2.Ptr(true),
ListenAddresses: &[]string{"0.0.0.0:6690"},
}
}
@@ -130,14 +131,14 @@ func WithP2Pv2() NodeConfigOpt {
func WithTracing() NodeConfigOpt {
return func(c *chainlink.Config) {
c.Tracing = toml.Tracing{
- Enabled: utils2.Ptr(true),
+ Enabled: utils2.Ptr(true),
CollectorTarget: utils2.Ptr("otel-collector:4317"),
// ksortable unique id
- NodeID: utils2.Ptr(ksuid.New().String()),
- Attributes: map[string]string{
+ NodeID: utils2.Ptr(ksuid.New().String()),
+ Attributes: map[string]string{
"env": "smoke",
},
- SamplingRatio: utils2.Ptr(1.0),
+ SamplingRatio: utils2.Ptr(1.0),
}
}
}
diff --git a/integration-tests/types/config/node/defaults/sample.toml b/integration-tests/types/config/node/defaults/sample.toml
index 3663998003c..b0e1bc2a07d 100644
--- a/integration-tests/types/config/node/defaults/sample.toml
+++ b/integration-tests/types/config/node/defaults/sample.toml
@@ -15,7 +15,6 @@ DefaultTransactionQueueDepth = 0
[P2P]
[P2P.V2]
-Enabled = true
ListenAddresses = ['0.0.0.0:6690']
AnnounceAddresses = ['0.0.0.0:6690']
DeltaDial = '500ms'
diff --git a/testdata/scripts/node/validate/default.txtar b/testdata/scripts/node/validate/default.txtar
index 06a623c9ca5..189476bfa84 100644
--- a/testdata/scripts/node/validate/default.txtar
+++ b/testdata/scripts/node/validate/default.txtar
@@ -154,7 +154,7 @@ PeerID = ''
TraceLogging = false
[P2P.V1]
-Enabled = true
+Enabled = false
AnnounceIP = ''
AnnouncePort = 0
BootstrapCheckInterval = '20s'
@@ -167,7 +167,7 @@ NewStreamTimeout = '10s'
PeerstoreWriteInterval = '5m0s'
[P2P.V2]
-Enabled = false
+Enabled = true
AnnounceAddresses = []
DefaultBootstrappers = []
DeltaDial = '15s'
diff --git a/testdata/scripts/node/validate/disk-based-logging-disabled.txtar b/testdata/scripts/node/validate/disk-based-logging-disabled.txtar
index 1c0956b10d8..593aa0b21d0 100644
--- a/testdata/scripts/node/validate/disk-based-logging-disabled.txtar
+++ b/testdata/scripts/node/validate/disk-based-logging-disabled.txtar
@@ -198,7 +198,7 @@ PeerID = ''
TraceLogging = false
[P2P.V1]
-Enabled = true
+Enabled = false
AnnounceIP = ''
AnnouncePort = 0
BootstrapCheckInterval = '20s'
@@ -211,7 +211,7 @@ NewStreamTimeout = '10s'
PeerstoreWriteInterval = '5m0s'
[P2P.V2]
-Enabled = false
+Enabled = true
AnnounceAddresses = []
DefaultBootstrappers = []
DeltaDial = '15s'
diff --git a/testdata/scripts/node/validate/disk-based-logging-no-dir.txtar b/testdata/scripts/node/validate/disk-based-logging-no-dir.txtar
index 19e180e0cff..7b8aa5e3836 100644
--- a/testdata/scripts/node/validate/disk-based-logging-no-dir.txtar
+++ b/testdata/scripts/node/validate/disk-based-logging-no-dir.txtar
@@ -198,7 +198,7 @@ PeerID = ''
TraceLogging = false
[P2P.V1]
-Enabled = true
+Enabled = false
AnnounceIP = ''
AnnouncePort = 0
BootstrapCheckInterval = '20s'
@@ -211,7 +211,7 @@ NewStreamTimeout = '10s'
PeerstoreWriteInterval = '5m0s'
[P2P.V2]
-Enabled = false
+Enabled = true
AnnounceAddresses = []
DefaultBootstrappers = []
DeltaDial = '15s'
diff --git a/testdata/scripts/node/validate/disk-based-logging.txtar b/testdata/scripts/node/validate/disk-based-logging.txtar
index 062a21b1967..ef6548619e1 100644
--- a/testdata/scripts/node/validate/disk-based-logging.txtar
+++ b/testdata/scripts/node/validate/disk-based-logging.txtar
@@ -198,7 +198,7 @@ PeerID = ''
TraceLogging = false
[P2P.V1]
-Enabled = true
+Enabled = false
AnnounceIP = ''
AnnouncePort = 0
BootstrapCheckInterval = '20s'
@@ -211,7 +211,7 @@ NewStreamTimeout = '10s'
PeerstoreWriteInterval = '5m0s'
[P2P.V2]
-Enabled = false
+Enabled = true
AnnounceAddresses = []
DefaultBootstrappers = []
DeltaDial = '15s'
diff --git a/testdata/scripts/node/validate/invalid.txtar b/testdata/scripts/node/validate/invalid.txtar
index 0fa16473812..87b877bc882 100644
--- a/testdata/scripts/node/validate/invalid.txtar
+++ b/testdata/scripts/node/validate/invalid.txtar
@@ -188,7 +188,7 @@ PeerID = ''
TraceLogging = false
[P2P.V1]
-Enabled = true
+Enabled = false
AnnounceIP = ''
AnnouncePort = 0
BootstrapCheckInterval = '20s'
@@ -201,7 +201,7 @@ NewStreamTimeout = '10s'
PeerstoreWriteInterval = '5m0s'
[P2P.V2]
-Enabled = false
+Enabled = true
AnnounceAddresses = []
DefaultBootstrappers = []
DeltaDial = '15s'
diff --git a/testdata/scripts/node/validate/valid.txtar b/testdata/scripts/node/validate/valid.txtar
index 002c249fb7c..c607da10644 100644
--- a/testdata/scripts/node/validate/valid.txtar
+++ b/testdata/scripts/node/validate/valid.txtar
@@ -195,7 +195,7 @@ PeerID = ''
TraceLogging = false
[P2P.V1]
-Enabled = true
+Enabled = false
AnnounceIP = ''
AnnouncePort = 0
BootstrapCheckInterval = '20s'
@@ -208,7 +208,7 @@ NewStreamTimeout = '10s'
PeerstoreWriteInterval = '5m0s'
[P2P.V2]
-Enabled = false
+Enabled = true
AnnounceAddresses = []
DefaultBootstrappers = []
DeltaDial = '15s'
diff --git a/testdata/scripts/node/validate/warnings.txtar b/testdata/scripts/node/validate/warnings.txtar
new file mode 100644
index 00000000000..ee7926f8f5f
--- /dev/null
+++ b/testdata/scripts/node/validate/warnings.txtar
@@ -0,0 +1,279 @@
+exec chainlink node -c config.toml -s secrets.toml validate
+cmp stdout out.txt
+
+-- config.toml --
+[P2P.V1]
+Enabled = true
+AnnounceIP = ''
+AnnouncePort = 0
+BootstrapCheckInterval = '20s'
+DefaultBootstrapPeers = []
+DHTAnnouncementCounterUserPrefix = 0
+DHTLookupInterval = 10
+ListenIP = '0.0.0.0'
+ListenPort = 0
+NewStreamTimeout = '10s'
+PeerstoreWriteInterval = '5m0s'
+
+-- secrets.toml --
+[Database]
+URL = 'postgresql://user:pass1234567890abcd@localhost:5432/dbname?sslmode=disable'
+
+[Password]
+Keystore = 'keystore_pass'
+
+-- out.txt --
+# Secrets:
+[Database]
+URL = 'xxxxx'
+AllowSimplePasswords = false
+
+[Password]
+Keystore = 'xxxxx'
+
+# Input Configuration:
+[P2P]
+[P2P.V1]
+Enabled = true
+AnnounceIP = ''
+AnnouncePort = 0
+BootstrapCheckInterval = '20s'
+DefaultBootstrapPeers = []
+DHTAnnouncementCounterUserPrefix = 0
+DHTLookupInterval = 10
+ListenIP = '0.0.0.0'
+ListenPort = 0
+NewStreamTimeout = '10s'
+PeerstoreWriteInterval = '5m0s'
+
+# Effective Configuration, with defaults applied:
+InsecureFastScrypt = false
+RootDir = '~/.chainlink'
+ShutdownGracePeriod = '5s'
+
+[Feature]
+FeedsManager = true
+LogPoller = false
+UICSAKeys = false
+
+[Database]
+DefaultIdleInTxSessionTimeout = '1h0m0s'
+DefaultLockTimeout = '15s'
+DefaultQueryTimeout = '10s'
+LogQueries = false
+MaxIdleConns = 10
+MaxOpenConns = 20
+MigrateOnStartup = true
+
+[Database.Backup]
+Dir = ''
+Frequency = '1h0m0s'
+Mode = 'none'
+OnVersionUpgrade = true
+
+[Database.Listener]
+MaxReconnectDuration = '10m0s'
+MinReconnectInterval = '1m0s'
+FallbackPollInterval = '30s'
+
+[Database.Lock]
+Enabled = true
+LeaseDuration = '10s'
+LeaseRefreshInterval = '1s'
+
+[TelemetryIngress]
+UniConn = true
+Logging = false
+BufferSize = 100
+MaxBatchSize = 50
+SendInterval = '500ms'
+SendTimeout = '10s'
+UseBatchSend = true
+URL = ''
+ServerPubKey = ''
+
+[AuditLogger]
+Enabled = false
+ForwardToUrl = ''
+JsonWrapperKey = ''
+Headers = []
+
+[Log]
+Level = 'info'
+JSONConsole = false
+UnixTS = false
+
+[Log.File]
+Dir = ''
+MaxSize = '5.12gb'
+MaxAgeDays = 0
+MaxBackups = 1
+
+[WebServer]
+AllowOrigins = 'http://localhost:3000,http://localhost:6688'
+BridgeResponseURL = ''
+BridgeCacheTTL = '0s'
+HTTPWriteTimeout = '10s'
+HTTPPort = 6688
+SecureCookies = true
+SessionTimeout = '15m0s'
+SessionReaperExpiration = '240h0m0s'
+HTTPMaxSize = '32.77kb'
+StartTimeout = '15s'
+ListenIP = '0.0.0.0'
+
+[WebServer.MFA]
+RPID = ''
+RPOrigin = ''
+
+[WebServer.RateLimit]
+Authenticated = 1000
+AuthenticatedPeriod = '1m0s'
+Unauthenticated = 5
+UnauthenticatedPeriod = '20s'
+
+[WebServer.TLS]
+CertPath = ''
+ForceRedirect = false
+Host = ''
+HTTPSPort = 6689
+KeyPath = ''
+ListenIP = '0.0.0.0'
+
+[JobPipeline]
+ExternalInitiatorsEnabled = false
+MaxRunDuration = '10m0s'
+MaxSuccessfulRuns = 10000
+ReaperInterval = '1h0m0s'
+ReaperThreshold = '24h0m0s'
+ResultWriteQueueDepth = 100
+
+[JobPipeline.HTTPRequest]
+DefaultTimeout = '15s'
+MaxSize = '32.77kb'
+
+[FluxMonitor]
+DefaultTransactionQueueDepth = 1
+SimulateTransactions = false
+
+[OCR2]
+Enabled = false
+ContractConfirmations = 3
+BlockchainTimeout = '20s'
+ContractPollInterval = '1m0s'
+ContractSubscribeInterval = '2m0s'
+ContractTransmitterTransmitTimeout = '10s'
+DatabaseTimeout = '10s'
+KeyBundleID = '0000000000000000000000000000000000000000000000000000000000000000'
+CaptureEATelemetry = false
+CaptureAutomationCustomTelemetry = false
+DefaultTransactionQueueDepth = 1
+SimulateTransactions = false
+TraceLogging = false
+
+[OCR]
+Enabled = false
+ObservationTimeout = '5s'
+BlockchainTimeout = '20s'
+ContractPollInterval = '1m0s'
+ContractSubscribeInterval = '2m0s'
+DefaultTransactionQueueDepth = 1
+KeyBundleID = '0000000000000000000000000000000000000000000000000000000000000000'
+SimulateTransactions = false
+TransmitterAddress = ''
+CaptureEATelemetry = false
+TraceLogging = false
+
+[P2P]
+IncomingMessageBufferSize = 10
+OutgoingMessageBufferSize = 10
+PeerID = ''
+TraceLogging = false
+
+[P2P.V1]
+Enabled = true
+AnnounceIP = ''
+AnnouncePort = 0
+BootstrapCheckInterval = '20s'
+DefaultBootstrapPeers = []
+DHTAnnouncementCounterUserPrefix = 0
+DHTLookupInterval = 10
+ListenIP = '0.0.0.0'
+ListenPort = 0
+NewStreamTimeout = '10s'
+PeerstoreWriteInterval = '5m0s'
+
+[P2P.V2]
+Enabled = true
+AnnounceAddresses = []
+DefaultBootstrappers = []
+DeltaDial = '15s'
+DeltaReconcile = '1m0s'
+ListenAddresses = []
+
+[Keeper]
+DefaultTransactionQueueDepth = 1
+GasPriceBufferPercent = 20
+GasTipCapBufferPercent = 20
+BaseFeeBufferPercent = 20
+MaxGracePeriod = 100
+TurnLookBack = 1000
+
+[Keeper.Registry]
+CheckGasOverhead = 200000
+PerformGasOverhead = 300000
+MaxPerformDataSize = 5000
+SyncInterval = '30m0s'
+SyncUpkeepQueueSize = 10
+
+[AutoPprof]
+Enabled = false
+ProfileRoot = ''
+PollInterval = '10s'
+GatherDuration = '10s'
+GatherTraceDuration = '5s'
+MaxProfileSize = '100.00mb'
+CPUProfileRate = 1
+MemProfileRate = 1
+BlockProfileRate = 1
+MutexProfileFraction = 1
+MemThreshold = '4.00gb'
+GoroutineThreshold = 5000
+
+[Pyroscope]
+ServerAddress = ''
+Environment = 'mainnet'
+
+[Sentry]
+Debug = false
+DSN = ''
+Environment = ''
+Release = ''
+
+[Insecure]
+DevWebServer = false
+OCRDevelopmentMode = false
+InfiniteDepthQueries = false
+DisableRateLimiting = false
+
+[Tracing]
+Enabled = false
+CollectorTarget = ''
+NodeID = ''
+SamplingRatio = 0.0
+
+# Configuration warning:
+2 errors:
+ - P2P.V1: is deprecated and will be removed in a future version
+ - P2P.V1: 10 errors:
+ - AnnounceIP: is deprecated and will be removed in a future version
+ - AnnouncePort: is deprecated and will be removed in a future version
+ - BootstrapCheckInterval: is deprecated and will be removed in a future version
+ - DefaultBootstrapPeers: is deprecated and will be removed in a future version
+ - DHTAnnouncementCounterUserPrefix: is deprecated and will be removed in a future version
+ - DHTLookupInterval: is deprecated and will be removed in a future version
+ - ListenIP: is deprecated and will be removed in a future version
+ - ListenPort: is deprecated and will be removed in a future version
+ - NewStreamTimeout: is deprecated and will be removed in a future version
+ - PeerstoreWriteInterval: is deprecated and will be removed in a future version
+Valid configuration.
From 7ac8696397a92270eab1905280f4debce68b69cc Mon Sep 17 00:00:00 2001
From: george-dorin <120329946+george-dorin@users.noreply.github.com>
Date: Fri, 27 Oct 2023 18:21:40 +0300
Subject: [PATCH 22/40] operator-ui deprecation warnings (#11104)
Co-authored-by: app-token-issuer-infra-releng[bot] <120227048+app-token-issuer-infra-releng[bot]@users.noreply.github.com>
Co-authored-by: github-merge-queue[bot]
Co-authored-by: Jordan Krage
---
operator_ui/TAG | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/operator_ui/TAG b/operator_ui/TAG
index 023fd8732cd..e08ca072670 100644
--- a/operator_ui/TAG
+++ b/operator_ui/TAG
@@ -1 +1 @@
-v0.8.0-06f745d
+v0.8.0-e10948a
From c2f75eb9fa79f49645595e555f3850cfa1ab4779 Mon Sep 17 00:00:00 2001
From: Erik Burton
Date: Mon, 6 Nov 2023 13:24:15 -0800
Subject: [PATCH 23/40] chore: bump sigstore/cosign-installer from 2.1.0 to
3.1.2
---
.github/actions/build-sign-publish-chainlink/action.yml | 2 +-
.github/actions/goreleaser-build-sign-publish/action.yml | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/.github/actions/build-sign-publish-chainlink/action.yml b/.github/actions/build-sign-publish-chainlink/action.yml
index 853702045e8..19b5c733e67 100644
--- a/.github/actions/build-sign-publish-chainlink/action.yml
+++ b/.github/actions/build-sign-publish-chainlink/action.yml
@@ -201,7 +201,7 @@ runs:
- if: inputs.sign-images == 'true'
name: Install cosign
- uses: sigstore/cosign-installer@581838fbedd492d2350a9ecd427a95d6de1e5d01 # v2.1.0
+ uses: sigstore/cosign-installer@11086d25041f77fe8fe7b9ea4e48e3b9192b8f19 # v3.1.2
with:
cosign-release: "v1.6.0"
diff --git a/.github/actions/goreleaser-build-sign-publish/action.yml b/.github/actions/goreleaser-build-sign-publish/action.yml
index 0cc144564c0..a9f32337221 100644
--- a/.github/actions/goreleaser-build-sign-publish/action.yml
+++ b/.github/actions/goreleaser-build-sign-publish/action.yml
@@ -84,7 +84,7 @@ runs:
version: ${{ inputs.zig-version }}
- name: Setup cosign
if: inputs.enable-cosign == 'true'
- uses: sigstore/cosign-installer@581838fbedd492d2350a9ecd427a95d6de1e5d01 # v2.1.0
+ uses: sigstore/cosign-installer@11086d25041f77fe8fe7b9ea4e48e3b9192b8f19 # v3.1.2
with:
cosign-release: ${{ inputs.cosign-version }}
- name: Login to docker registry
From c2ddce527d026416f7d8cbc223c4aed9d2b6f835 Mon Sep 17 00:00:00 2001
From: Sneha Agnihotri
Date: Tue, 14 Nov 2023 11:41:22 -0800
Subject: [PATCH 24/40] Finalize date on changelog for 2.7.0
---
docs/CHANGELOG.md | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md
index daeddf2ce66..f122b365d28 100644
--- a/docs/CHANGELOG.md
+++ b/docs/CHANGELOG.md
@@ -11,7 +11,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
...
-## 2.7.0 - UNRELEASED
+
+
+## 2.7.0 - 2023-11-14
### Added
@@ -43,8 +45,6 @@ Starting in `v2.9.0`:
- Removed the ability to set a next nonce value for an address through CLI
-
-
## 2.6.0 - 2023-10-18
### Added
From 120e7788c992773fe9511e85dd1394e8f4fa4319 Mon Sep 17 00:00:00 2001
From: Sneha Agnihotri
Date: Wed, 15 Nov 2023 15:40:45 -0800
Subject: [PATCH 25/40] Bump version and update CHANGELOG for core v2.7.1
Signed-off-by: Sneha Agnihotri
---
VERSION | 2 +-
docs/CHANGELOG.md | 2 ++
2 files changed, 3 insertions(+), 1 deletion(-)
diff --git a/VERSION b/VERSION
index 24ba9a38de6..860487ca19c 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-2.7.0
+2.7.1
diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md
index f122b365d28..b9ab38eb3bd 100644
--- a/docs/CHANGELOG.md
+++ b/docs/CHANGELOG.md
@@ -11,6 +11,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
...
+## 2.7.1 - UNRELEASED
+
## 2.7.0 - 2023-11-14
From 184cdfbf2274a8a01be084f127b82bc169eec67c Mon Sep 17 00:00:00 2001
From: amit-momin <108959691+amit-momin@users.noreply.github.com>
Date: Thu, 16 Nov 2023 14:44:39 -0600
Subject: [PATCH 26/40] [Hotfix] Update loading next sequence map to avoid
startup failure (#11319)
---
common/txmgr/broadcaster.go | 77 +++++----
core/chains/evm/txmgr/broadcaster_test.go | 180 +++++++++++++++-------
docs/CHANGELOG.md | 6 +
3 files changed, 181 insertions(+), 82 deletions(-)
diff --git a/common/txmgr/broadcaster.go b/common/txmgr/broadcaster.go
index 6512f67fe0b..1e1c0e0cff3 100644
--- a/common/txmgr/broadcaster.go
+++ b/common/txmgr/broadcaster.go
@@ -4,6 +4,7 @@ import (
"context"
"database/sql"
"fmt"
+ "slices"
"sync"
"time"
@@ -243,10 +244,7 @@ func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) star
eb.sequenceLock.Lock()
defer eb.sequenceLock.Unlock()
- eb.nextSequenceMap, err = eb.loadNextSequenceMap(eb.enabledAddresses)
- if err != nil {
- return errors.Wrap(err, "Broadcaster: failed to load next sequence map")
- }
+ eb.nextSequenceMap = eb.loadNextSequenceMap(eb.enabledAddresses)
eb.isStarted = true
return nil
@@ -326,30 +324,38 @@ func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) txIn
}
// Load the next sequence map using the tx table or on-chain (if not found in tx table)
-func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) loadNextSequenceMap(addresses []ADDR) (map[ADDR]SEQ, error) {
+func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) loadNextSequenceMap(addresses []ADDR) map[ADDR]SEQ {
ctx, cancel := eb.chStop.NewCtx()
defer cancel()
nextSequenceMap := make(map[ADDR]SEQ)
for _, address := range addresses {
- // Get the highest sequence from the tx table
- // Will need to be incremented since this sequence is already used
- seq, err := eb.txStore.FindLatestSequence(ctx, address, eb.chainID)
- if err != nil {
- // Look for nonce on-chain if no tx found for address in TxStore or if error occurred
- // Returns the nonce that should be used for the next transaction so no need to increment
- seq, err = eb.client.PendingSequenceAt(ctx, address)
- if err != nil {
- return nil, errors.New("failed to retrieve next sequence from on-chain causing failure to load next sequence map on broadcaster startup")
- }
-
+ seq, err := eb.getSequenceForAddr(ctx, address)
+ if err == nil {
nextSequenceMap[address] = seq
- } else {
- nextSequenceMap[address] = eb.generateNextSequence(seq)
}
}
- return nextSequenceMap, nil
+ return nextSequenceMap
+}
+
+func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) getSequenceForAddr(ctx context.Context, address ADDR) (seq SEQ, err error) {
+ // Get the highest sequence from the tx table
+ // Will need to be incremented since this sequence is already used
+ seq, err = eb.txStore.FindLatestSequence(ctx, address, eb.chainID)
+ if err == nil {
+ seq = eb.generateNextSequence(seq)
+ return seq, nil
+ }
+ // Look for nonce on-chain if no tx found for address in TxStore or if error occurred
+ // Returns the nonce that should be used for the next transaction so no need to increment
+ seq, err = eb.client.PendingSequenceAt(ctx, address)
+ if err == nil {
+ return seq, nil
+ }
+	eb.logger.Criticalw("failed to retrieve next sequence from on-chain for address", "address", address.String())
+ return seq, err
+
}
func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) newSequenceSyncBackoff() backoff.Backoff {
@@ -432,7 +438,7 @@ func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) moni
// syncSequence tries to sync the key sequence, retrying indefinitely until success or stop signal is sent
func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) SyncSequence(ctx context.Context, addr ADDR) {
sequenceSyncRetryBackoff := eb.newSequenceSyncBackoff()
- localSequence, err := eb.GetNextSequence(addr)
+ localSequence, err := eb.GetNextSequence(ctx, addr)
// Address not found in map so skip sync
if err != nil {
eb.logger.Criticalw("Failed to retrieve local next sequence for address", "address", addr.String(), "err", err)
@@ -646,7 +652,7 @@ func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) hand
observeTimeUntilBroadcast(eb.chainID, etx.CreatedAt, time.Now())
// Check if from_address exists in map to ensure it is valid before broadcasting
var sequence SEQ
- sequence, err = eb.GetNextSequence(etx.FromAddress)
+ sequence, err = eb.GetNextSequence(ctx, etx.FromAddress)
if err != nil {
return err, true
}
@@ -704,7 +710,7 @@ func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) hand
// Check if from_address exists in map to ensure it is valid before broadcasting
var sequence SEQ
- sequence, err = eb.GetNextSequence(etx.FromAddress)
+ sequence, err = eb.GetNextSequence(ctx, etx.FromAddress)
if err != nil {
return err, true
}
@@ -741,7 +747,7 @@ func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) next
return nil, errors.Wrap(err, "findNextUnstartedTransactionFromAddress failed")
}
- sequence, err := eb.GetNextSequence(etx.FromAddress)
+ sequence, err := eb.GetNextSequence(ctx, etx.FromAddress)
if err != nil {
return nil, err
}
@@ -826,15 +832,32 @@ func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) save
}
// Used to get the next usable sequence for a transaction
-func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) GetNextSequence(address ADDR) (seq SEQ, err error) {
+func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) GetNextSequence(ctx context.Context, address ADDR) (seq SEQ, err error) {
eb.sequenceLock.Lock()
defer eb.sequenceLock.Unlock()
// Get next sequence from map
seq, exists := eb.nextSequenceMap[address]
- if !exists {
- return seq, errors.New(fmt.Sprint("address not found in next sequence map: ", address))
+ if exists {
+ return seq, nil
+ }
+
+ eb.logger.Infow("address not found in local next sequence map. Attempting to search and populate sequence.", "address", address.String())
+ // Check if address is in the enabled address list
+ if !slices.Contains(eb.enabledAddresses, address) {
+ return seq, fmt.Errorf("address disabled: %s", address)
}
- return seq, nil
+
+ // Try to retrieve next sequence from tx table or on-chain to load the map
+ // A scenario could exist where loading the map during startup failed (e.g. all configured RPCs are unreachable at start)
+ // The expectation is that the node does not fail startup, so sequences need to be loaded at runtime
+ foundSeq, err := eb.getSequenceForAddr(ctx, address)
+ if err != nil {
+ return seq, fmt.Errorf("failed to find next sequence for address %s: %w", address, err)
+ }
+
+ // Set sequence in map
+ eb.nextSequenceMap[address] = foundSeq
+ return foundSeq, nil
}
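The runtime population added to GetNextSequence is a lazy cache guarded by a mutex: serve a cached entry if present, reject addresses outside the enabled set, otherwise perform the slower lookup once and cache the result. A minimal sketch of that pattern, with illustrative types in place of the generic Broadcaster ones:

package main

import (
	"context"
	"fmt"
	"slices"
	"sync"
)

// sequenceCache mirrors the lazy-population pattern above.
type sequenceCache struct {
	mu      sync.Mutex
	next    map[string]uint64
	enabled []string
	lookup  func(ctx context.Context, addr string) (uint64, error)
}

func (c *sequenceCache) Get(ctx context.Context, addr string) (uint64, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if seq, ok := c.next[addr]; ok {
		return seq, nil
	}
	if !slices.Contains(c.enabled, addr) {
		return 0, fmt.Errorf("address disabled: %s", addr)
	}
	seq, err := c.lookup(ctx, addr) // e.g. tx table first, then on-chain
	if err != nil {
		return 0, fmt.Errorf("failed to find next sequence for address %s: %w", addr, err)
	}
	c.next[addr] = seq
	return seq, nil
}

func main() {
	c := &sequenceCache{
		next:    map[string]uint64{},
		enabled: []string{"0xabc"},
		lookup:  func(context.Context, string) (uint64, error) { return 42, nil },
	}
	fmt.Println(c.Get(context.Background(), "0xabc")) // 42 <nil>, cached afterwards
	fmt.Println(c.Get(context.Background(), "0xdef")) // 0 address disabled: 0xdef
}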
// Used to increment the sequence in the mapping to have the next usable one available for the next transaction
diff --git a/core/chains/evm/txmgr/broadcaster_test.go b/core/chains/evm/txmgr/broadcaster_test.go
index 6f9308548b3..dd2a124be49 100644
--- a/core/chains/evm/txmgr/broadcaster_test.go
+++ b/core/chains/evm/txmgr/broadcaster_test.go
@@ -141,6 +141,43 @@ func TestEthBroadcaster_Lifecycle(t *testing.T) {
require.NoError(t, eb.XXXTestCloseInternal())
}
+// Failure to load next sequence map should not fail Broadcaster startup
+func TestEthBroadcaster_LoadNextSequenceMapFailure_StartupSuccess(t *testing.T) {
+ db := pgtest.NewSqlxDB(t)
+ cfg := configtest.NewTestGeneralConfig(t)
+ eventBroadcaster := cltest.NewEventBroadcaster(t, cfg.Database().URL())
+ err := eventBroadcaster.Start(testutils.Context(t))
+ require.NoError(t, err)
+ t.Cleanup(func() { assert.NoError(t, eventBroadcaster.Close()) })
+ txStore := cltest.NewTestTxStore(t, db, cfg.Database())
+ evmcfg := evmtest.NewChainScopedConfig(t, cfg)
+ ethClient := evmtest.NewEthClientMockWithDefaultChain(t)
+ ethKeyStore := cltest.NewKeyStore(t, db, cfg.Database()).Eth()
+ cltest.MustInsertRandomKeyReturningState(t, ethKeyStore)
+ estimator := gasmocks.NewEvmFeeEstimator(t)
+ txBuilder := txmgr.NewEvmTxAttemptBuilder(*ethClient.ConfiguredChainID(), evmcfg.EVM().GasEstimator(), ethKeyStore, estimator)
+ ethClient.On("PendingNonceAt", mock.Anything, mock.Anything).Return(uint64(0), errors.New("Getting on-chain nonce failed"))
+ eb := txmgr.NewEvmBroadcaster(
+ txStore,
+ txmgr.NewEvmTxmClient(ethClient),
+ txmgr.NewEvmTxmConfig(evmcfg.EVM()),
+ txmgr.NewEvmTxmFeeConfig(evmcfg.EVM().GasEstimator()),
+ evmcfg.EVM().Transactions(),
+ evmcfg.Database().Listener(),
+ ethKeyStore,
+ eventBroadcaster,
+ txBuilder,
+ nil,
+ logger.TestLogger(t),
+ &testCheckerFactory{},
+ false,
+ )
+
+ // Instance starts without error even if loading next sequence map fails
+ err = eb.Start(testutils.Context(t))
+ require.NoError(t, err)
+}
+
func TestEthBroadcaster_ProcessUnstartedEthTxs_Success(t *testing.T) {
db := pgtest.NewSqlxDB(t)
cfg := configtest.NewTestGeneralConfig(t)
@@ -961,7 +998,7 @@ func TestEthBroadcaster_ProcessUnstartedEthTxs_ResumingFromCrash(t *testing.T) {
}
func getLocalNextNonce(t *testing.T, eb *txmgr.Broadcaster, fromAddress gethCommon.Address) uint64 {
- n, err := eb.GetNextSequence(fromAddress)
+ n, err := eb.GetNextSequence(testutils.Context(t), fromAddress)
require.NoError(t, err)
require.NotNil(t, n)
return uint64(n)
@@ -987,6 +1024,7 @@ func TestEthBroadcaster_ProcessUnstartedEthTxs_Errors(t *testing.T) {
ethClient := evmtest.NewEthClientMockWithDefaultChain(t)
ethClient.On("PendingNonceAt", mock.Anything, fromAddress).Return(uint64(0), nil).Once()
eb := NewTestEthBroadcaster(t, txStore, ethClient, ethKeyStore, evmcfg, &testCheckerFactory{}, false)
+ ctx := testutils.Context(t)
require.NoError(t, utils.JustError(db.Exec(`SET CONSTRAINTS pipeline_runs_pipeline_spec_id_fkey DEFERRED`)))
@@ -998,7 +1036,7 @@ func TestEthBroadcaster_ProcessUnstartedEthTxs_Errors(t *testing.T) {
}), fromAddress).Return(clienttypes.Successful, errors.New("replacement transaction underpriced")).Once()
// Do the thing
- retryable, err := eb.ProcessUnstartedTxs(testutils.Context(t), fromAddress)
+ retryable, err := eb.ProcessUnstartedTxs(ctx, fromAddress)
assert.NoError(t, err)
assert.False(t, retryable)
@@ -1034,7 +1072,7 @@ func TestEthBroadcaster_ProcessUnstartedEthTxs_Errors(t *testing.T) {
return tx.Nonce() == localNextNonce
}), fromAddress).Return(clienttypes.Fatal, errors.New(fatalErrorExample)).Once()
- retryable, err := eb.ProcessUnstartedTxs(testutils.Context(t), fromAddress)
+ retryable, err := eb.ProcessUnstartedTxs(ctx, fromAddress)
assert.NoError(t, err)
assert.False(t, retryable)
@@ -1051,7 +1089,7 @@ func TestEthBroadcaster_ProcessUnstartedEthTxs_Errors(t *testing.T) {
// Check that the key had its nonce reset
var nonce evmtypes.Nonce
- nonce, err = eb.GetNextSequence(fromAddress)
+ nonce, err = eb.GetNextSequence(ctx, fromAddress)
require.NoError(t, err)
// Saved NextNonce must be the same as before because this transaction
// was not accepted by the eth node and never can be
@@ -1084,7 +1122,7 @@ func TestEthBroadcaster_ProcessUnstartedEthTxs_Errors(t *testing.T) {
return tx.Nonce() == localNextNonce
}), fromAddress).Return(clienttypes.Fatal, errors.New(fatalErrorExample)).Once()
- retryable, err := eb.ProcessUnstartedTxs(testutils.Context(t), fromAddress)
+ retryable, err := eb.ProcessUnstartedTxs(ctx, fromAddress)
require.Error(t, err)
require.Contains(t, err.Error(), "something exploded in the callback")
assert.True(t, retryable)
@@ -1106,7 +1144,7 @@ func TestEthBroadcaster_ProcessUnstartedEthTxs_Errors(t *testing.T) {
}), fromAddress).Return(clienttypes.Fatal, errors.New(fatalErrorExample)).Once()
{
- retryable, err := eb.ProcessUnstartedTxs(testutils.Context(t), fromAddress)
+ retryable, err := eb.ProcessUnstartedTxs(ctx, fromAddress)
assert.NoError(t, err)
assert.False(t, retryable)
}
@@ -1124,7 +1162,7 @@ func TestEthBroadcaster_ProcessUnstartedEthTxs_Errors(t *testing.T) {
ethClient.On("PendingNonceAt", mock.Anything, fromAddress).Return(uint64(localNextNonce), nil).Once()
eb2 := txmgr.NewEvmBroadcaster(txStore, txmgr.NewEvmTxmClient(ethClient), txmgr.NewEvmTxmConfig(evmcfg.EVM()), txmgr.NewEvmTxmFeeConfig(evmcfg.EVM().GasEstimator()), evmcfg.EVM().Transactions(), evmcfg.Database().Listener(), ethKeyStore, eventBroadcaster, txBuilder, nil, lggr, &testCheckerFactory{}, false)
require.NoError(t, err)
- retryable, err := eb2.ProcessUnstartedTxs(testutils.Context(t), fromAddress)
+ retryable, err := eb2.ProcessUnstartedTxs(ctx, fromAddress)
assert.NoError(t, err)
assert.False(t, retryable)
})
@@ -1146,7 +1184,7 @@ func TestEthBroadcaster_ProcessUnstartedEthTxs_Errors(t *testing.T) {
// another node even if the primary one returns "exceeds the configured
// cap"
- retryable, err := eb.ProcessUnstartedTxs(testutils.Context(t), fromAddress)
+ retryable, err := eb.ProcessUnstartedTxs(ctx, fromAddress)
require.Error(t, err)
assert.Contains(t, err.Error(), "tx fee (1.10 ether) exceeds the configured cap (1.00 ether)")
assert.Contains(t, err.Error(), "error while sending transaction")
@@ -1166,7 +1204,7 @@ func TestEthBroadcaster_ProcessUnstartedEthTxs_Errors(t *testing.T) {
// Check that the key had its nonce reset
var nonce evmtypes.Nonce
- nonce, err = eb.GetNextSequence(fromAddress)
+ nonce, err = eb.GetNextSequence(ctx, fromAddress)
require.NoError(t, err)
// Saved NextNonce must be the same as before because this transaction
// was not accepted by the eth node and never can be
@@ -1175,7 +1213,7 @@ func TestEthBroadcaster_ProcessUnstartedEthTxs_Errors(t *testing.T) {
// On the second try, the tx has been accepted into the mempool
ethClient.On("PendingNonceAt", mock.Anything, fromAddress).Return(uint64(localNextNonce+1), nil).Once()
- retryable, err = eb.ProcessUnstartedTxs(testutils.Context(t), fromAddress)
+ retryable, err = eb.ProcessUnstartedTxs(ctx, fromAddress)
assert.NoError(t, err)
assert.False(t, retryable)
@@ -1203,7 +1241,7 @@ func TestEthBroadcaster_ProcessUnstartedEthTxs_Errors(t *testing.T) {
ethClient.On("PendingNonceAt", mock.Anything, fromAddress).Return(uint64(localNextNonce), nil).Once()
// Do the thing
- retryable, err := eb.ProcessUnstartedTxs(testutils.Context(t), fromAddress)
+ retryable, err := eb.ProcessUnstartedTxs(ctx, fromAddress)
require.Error(t, err)
require.Contains(t, err.Error(), retryableErrorExample)
assert.True(t, retryable)
@@ -1226,7 +1264,7 @@ func TestEthBroadcaster_ProcessUnstartedEthTxs_Errors(t *testing.T) {
return tx.Nonce() == localNextNonce
}), fromAddress).Return(clienttypes.Successful, nil).Once()
- retryable, err = eb.ProcessUnstartedTxs(testutils.Context(t), fromAddress)
+ retryable, err = eb.ProcessUnstartedTxs(ctx, fromAddress)
assert.NoError(t, err)
assert.False(t, retryable)
@@ -1254,7 +1292,7 @@ func TestEthBroadcaster_ProcessUnstartedEthTxs_Errors(t *testing.T) {
ethClient.On("PendingNonceAt", mock.Anything, fromAddress).Return(uint64(0), errors.New("pending nonce fetch failed")).Once()
// Do the thing
- retryable, err := eb.ProcessUnstartedTxs(testutils.Context(t), fromAddress)
+ retryable, err := eb.ProcessUnstartedTxs(ctx, fromAddress)
require.Error(t, err)
require.Contains(t, err.Error(), retryableErrorExample)
require.Contains(t, err.Error(), "pending nonce fetch failed")
@@ -1278,7 +1316,7 @@ func TestEthBroadcaster_ProcessUnstartedEthTxs_Errors(t *testing.T) {
return tx.Nonce() == localNextNonce
}), fromAddress).Return(clienttypes.Successful, nil).Once()
- retryable, err = eb.ProcessUnstartedTxs(testutils.Context(t), fromAddress)
+ retryable, err = eb.ProcessUnstartedTxs(ctx, fromAddress)
assert.NoError(t, err)
assert.False(t, retryable)
@@ -1307,7 +1345,7 @@ func TestEthBroadcaster_ProcessUnstartedEthTxs_Errors(t *testing.T) {
ethClient.On("PendingNonceAt", mock.Anything, fromAddress).Return(uint64(localNextNonce+1), nil).Once()
// Do the thing
- retryable, err := eb.ProcessUnstartedTxs(testutils.Context(t), fromAddress)
+ retryable, err := eb.ProcessUnstartedTxs(ctx, fromAddress)
require.NoError(t, err)
assert.False(t, retryable)
@@ -1349,7 +1387,7 @@ func TestEthBroadcaster_ProcessUnstartedEthTxs_Errors(t *testing.T) {
}), fromAddress).Return(clienttypes.Successful, nil).Once()
// Do the thing
- retryable, err := eb.ProcessUnstartedTxs(testutils.Context(t), fromAddress)
+ retryable, err := eb.ProcessUnstartedTxs(ctx, fromAddress)
require.NoError(t, err)
assert.False(t, retryable)
@@ -1385,7 +1423,7 @@ func TestEthBroadcaster_ProcessUnstartedEthTxs_Errors(t *testing.T) {
}), fromAddress).Return(clienttypes.Retryable, failedToReachNodeError).Once()
// Do the thing
- retryable, err := eb.ProcessUnstartedTxs(testutils.Context(t), fromAddress)
+ retryable, err := eb.ProcessUnstartedTxs(ctx, fromAddress)
require.Error(t, err)
assert.Contains(t, err.Error(), "context deadline exceeded")
assert.True(t, retryable)
@@ -1416,7 +1454,7 @@ func TestEthBroadcaster_ProcessUnstartedEthTxs_Errors(t *testing.T) {
}), fromAddress).Return(clienttypes.Successful, errors.New(temporarilyUnderpricedError)).Once()
// Do the thing
- retryable, err := eb.ProcessUnstartedTxs(testutils.Context(t), fromAddress)
+ retryable, err := eb.ProcessUnstartedTxs(ctx, fromAddress)
assert.NoError(t, err)
assert.False(t, retryable)
@@ -1456,7 +1494,7 @@ func TestEthBroadcaster_ProcessUnstartedEthTxs_Errors(t *testing.T) {
}), fromAddress).Return(clienttypes.Underpriced, errors.New(underpricedError)).Once()
// Do the thing
- retryable, err := eb2.ProcessUnstartedTxs(testutils.Context(t), fromAddress)
+ retryable, err := eb2.ProcessUnstartedTxs(ctx, fromAddress)
require.Error(t, err)
require.Contains(t, err.Error(), "bumped fee price of 20 gwei is equal to original fee price of 20 gwei. ACTION REQUIRED: This is a configuration error, you must increase either FeeEstimator.BumpPercent or FeeEstimator.BumpMin")
assert.True(t, retryable)
@@ -1473,7 +1511,7 @@ func TestEthBroadcaster_ProcessUnstartedEthTxs_Errors(t *testing.T) {
return tx.Nonce() == localNextNonce
}), fromAddress).Return(clienttypes.InsufficientFunds, errors.New(insufficientEthError)).Once()
- retryable, err := eb.ProcessUnstartedTxs(testutils.Context(t), fromAddress)
+ retryable, err := eb.ProcessUnstartedTxs(ctx, fromAddress)
require.Error(t, err)
assert.Contains(t, err.Error(), "insufficient funds for transfer")
assert.True(t, retryable)
@@ -1503,7 +1541,7 @@ func TestEthBroadcaster_ProcessUnstartedEthTxs_Errors(t *testing.T) {
return tx.Nonce() == localNextNonce
}), fromAddress).Return(clienttypes.Retryable, errors.New(nonceGapError)).Once()
- retryable, err := eb.ProcessUnstartedTxs(testutils.Context(t), fromAddress)
+ retryable, err := eb.ProcessUnstartedTxs(ctx, fromAddress)
require.Error(t, err)
assert.Contains(t, err.Error(), nonceGapError)
assert.True(t, retryable)
@@ -1548,7 +1586,7 @@ func TestEthBroadcaster_ProcessUnstartedEthTxs_Errors(t *testing.T) {
}), fromAddress).Return(clienttypes.Underpriced, errors.New(underpricedError)).Once()
// Check gas tip cap verification
- retryable, err := eb2.ProcessUnstartedTxs(testutils.Context(t), fromAddress)
+ retryable, err := eb2.ProcessUnstartedTxs(ctx, fromAddress)
require.Error(t, err)
require.Contains(t, err.Error(), "bumped gas tip cap of 1 wei is less than or equal to original gas tip cap of 1 wei")
assert.True(t, retryable)
@@ -1572,7 +1610,7 @@ func TestEthBroadcaster_ProcessUnstartedEthTxs_Errors(t *testing.T) {
ethClient.On("PendingNonceAt", mock.Anything, fromAddress).Return(localNextNonce, nil).Once()
eb2 := NewTestEthBroadcaster(t, txStore, ethClient, ethKeyStore, evmcfg2, &testCheckerFactory{}, false)
- retryable, err := eb2.ProcessUnstartedTxs(testutils.Context(t), fromAddress)
+ retryable, err := eb2.ProcessUnstartedTxs(ctx, fromAddress)
require.Error(t, err)
require.Contains(t, err.Error(), "specified gas tip cap of 0 is below min configured gas tip of 1 wei for key")
assert.True(t, retryable)
@@ -1599,7 +1637,7 @@ func TestEthBroadcaster_ProcessUnstartedEthTxs_Errors(t *testing.T) {
return tx.Nonce() == localNextNonce && tx.GasTipCap().Cmp(big.NewInt(0).Add(gasTipCapDefault.ToInt(), big.NewInt(0).Mul(evmcfg2.EVM().GasEstimator().BumpMin().ToInt(), big.NewInt(2)))) == 0
}), fromAddress).Return(clienttypes.Successful, nil).Once()
- retryable, err = eb2.ProcessUnstartedTxs(testutils.Context(t), fromAddress)
+ retryable, err = eb2.ProcessUnstartedTxs(ctx, fromAddress)
require.NoError(t, err)
assert.False(t, retryable)
@@ -1631,7 +1669,8 @@ func TestEthBroadcaster_ProcessUnstartedEthTxs_KeystoreErrors(t *testing.T) {
kst.On("EnabledAddressesForChain", &cltest.FixtureChainID).Return(addresses, nil).Once()
ethClient.On("PendingNonceAt", mock.Anything, fromAddress).Return(uint64(0), nil).Once()
eb := NewTestEthBroadcaster(t, txStore, ethClient, kst, evmcfg, &testCheckerFactory{}, false)
- _, err := eb.GetNextSequence(fromAddress)
+ ctx := testutils.Context(t)
+ _, err := eb.GetNextSequence(ctx, fromAddress)
require.NoError(t, err)
t.Run("tx signing fails", func(t *testing.T) {
@@ -1645,7 +1684,7 @@ func TestEthBroadcaster_ProcessUnstartedEthTxs_KeystoreErrors(t *testing.T) {
})).Return(&tx, errors.New("could not sign transaction"))
// Do the thing
- retryable, err := eb.ProcessUnstartedTxs(testutils.Context(t), fromAddress)
+ retryable, err := eb.ProcessUnstartedTxs(ctx, fromAddress)
require.Error(t, err)
require.Contains(t, err.Error(), "could not sign transaction")
assert.True(t, retryable)
@@ -1659,7 +1698,7 @@ func TestEthBroadcaster_ProcessUnstartedEthTxs_KeystoreErrors(t *testing.T) {
// Check that the key did not have its nonce incremented
var nonce types.Nonce
- nonce, err = eb.GetNextSequence(fromAddress)
+ nonce, err = eb.GetNextSequence(ctx, fromAddress)
require.NoError(t, err)
require.Equal(t, int64(localNonce), int64(nonce))
})
@@ -1697,12 +1736,13 @@ func TestEthBroadcaster_IncrementNextNonce(t *testing.T) {
ethClient.On("PendingNonceAt", mock.Anything, fromAddress).Return(uint64(0), nil).Once()
eb := NewTestEthBroadcaster(t, txStore, ethClient, kst, evmcfg, &testCheckerFactory{}, false)
- nonce, err := eb.GetNextSequence(fromAddress)
+ ctx := testutils.Context(t)
+ nonce, err := eb.GetNextSequence(ctx, fromAddress)
require.NoError(t, err)
eb.IncrementNextSequence(fromAddress, nonce)
// Nonce bumped to 1
- nonce, err = eb.GetNextSequence(fromAddress)
+ nonce, err = eb.GetNextSequence(ctx, fromAddress)
require.NoError(t, err)
require.Equal(t, int64(1), int64(nonce))
}
@@ -1784,7 +1824,7 @@ func TestEthBroadcaster_SyncNonce(t *testing.T) {
kst.On("EnabledAddressesForChain", &cltest.FixtureChainID).Return(addresses, nil).Once()
ethClient.On("PendingNonceAt", mock.Anything, fromAddress).Return(uint64(0), nil).Once()
eb := txmgr.NewEvmBroadcaster(txStore, txmgr.NewEvmTxmClient(ethClient), evmTxmCfg, txmgr.NewEvmTxmFeeConfig(ge), evmcfg.EVM().Transactions(), cfg.Database().Listener(), kst, eventBroadcaster, txBuilder, nil, lggr, checkerFactory, false)
- err := eb.Start(testutils.Context(t))
+ err := eb.Start(ctx)
assert.NoError(t, err)
defer func() { assert.NoError(t, eb.Close()) }()
@@ -1810,12 +1850,12 @@ func TestEthBroadcaster_SyncNonce(t *testing.T) {
testutils.WaitForLogMessage(t, observed, "Fast-forward sequence")
// Check nextSequenceMap to make sure it has correct nonce assigned
- nonce, err := eb.GetNextSequence(fromAddress)
+ nonce, err := eb.GetNextSequence(ctx, fromAddress)
require.NoError(t, err)
- assert.Equal(t, strconv.FormatUint(ethNodeNonce, 10), nonce.String())
+ require.Equal(t, strconv.FormatUint(ethNodeNonce, 10), nonce.String())
// The disabled key did not get updated
- _, err = eb.GetNextSequence(disabledAddress)
+ _, err = eb.GetNextSequence(ctx, disabledAddress)
require.Error(t, err)
})
@@ -1844,19 +1884,19 @@ func TestEthBroadcaster_SyncNonce(t *testing.T) {
testutils.WaitForLogMessage(t, observed, "Fast-forward sequence")
// Check keyState to make sure it has correct nonce assigned
- nonce, err := eb.GetNextSequence(fromAddress)
+ nonce, err := eb.GetNextSequence(ctx, fromAddress)
require.NoError(t, err)
assert.Equal(t, int64(ethNodeNonce), int64(nonce))
// The disabled key did not get updated
- _, err = eb.GetNextSequence(disabledAddress)
+ _, err = eb.GetNextSequence(ctx, disabledAddress)
require.Error(t, err)
})
-
}
func Test_LoadSequenceMap(t *testing.T) {
t.Parallel()
+ ctx := testutils.Context(t)
t.Run("set next nonce using entries from tx table", func(t *testing.T) {
db := pgtest.NewSqlxDB(t)
cfg := configtest.NewTestGeneralConfig(t)
@@ -1871,9 +1911,9 @@ func Test_LoadSequenceMap(t *testing.T) {
cltest.MustInsertUnconfirmedEthTx(t, txStore, int64(1), fromAddress)
eb := NewTestEthBroadcaster(t, txStore, ethClient, ks, evmcfg, checkerFactory, false)
- nonce, err := eb.GetNextSequence(fromAddress)
+ nonce, err := eb.GetNextSequence(ctx, fromAddress)
require.NoError(t, err)
- assert.Equal(t, int64(2), int64(nonce))
+ require.Equal(t, int64(2), int64(nonce))
})
t.Run("set next nonce using client when not found in tx table", func(t *testing.T) {
@@ -1889,9 +1929,9 @@ func Test_LoadSequenceMap(t *testing.T) {
ethClient.On("PendingNonceAt", mock.Anything, fromAddress).Return(uint64(10), nil).Once()
eb := NewTestEthBroadcaster(t, txStore, ethClient, ks, evmcfg, checkerFactory, false)
- nonce, err := eb.GetNextSequence(fromAddress)
+ nonce, err := eb.GetNextSequence(ctx, fromAddress)
require.NoError(t, err)
- assert.Equal(t, int64(10), int64(nonce))
+ require.Equal(t, int64(10), int64(nonce))
})
}
@@ -1910,25 +1950,53 @@ func Test_NextNonce(t *testing.T) {
_, addr1 := cltest.MustInsertRandomKey(t, ks)
ethClient.On("PendingNonceAt", mock.Anything, addr1).Return(uint64(randNonce), nil).Once()
eb := NewTestEthBroadcaster(t, txStore, ethClient, ks, evmcfg, checkerFactory, false)
-
+ ctx := testutils.Context(t)
cltest.MustInsertRandomKey(t, ks, *utils.NewBig(testutils.FixtureChainID))
- nonce, err := eb.GetNextSequence(addr1)
+ nonce, err := eb.GetNextSequence(ctx, addr1)
require.NoError(t, err)
- assert.Equal(t, randNonce, int64(nonce))
+ require.Equal(t, randNonce, int64(nonce))
randAddr1 := utils.RandomAddress()
- _, err = eb.GetNextSequence(randAddr1)
+ _, err = eb.GetNextSequence(ctx, randAddr1)
require.Error(t, err)
- assert.Contains(t, err.Error(), fmt.Sprintf("address not found in next sequence map: %s", randAddr1.Hex()))
+ require.Contains(t, err.Error(), fmt.Sprintf("address disabled: %s", randAddr1.Hex()))
randAddr2 := utils.RandomAddress()
- _, err = eb.GetNextSequence(randAddr2)
+ _, err = eb.GetNextSequence(ctx, randAddr2)
require.Error(t, err)
- assert.Contains(t, err.Error(), fmt.Sprintf("address not found in next sequence map: %s", randAddr2.Hex()))
+ require.Contains(t, err.Error(), fmt.Sprintf("address disabled: %s", randAddr2.Hex()))
}
+func Test_SetNonceAfterInit(t *testing.T) {
+ t.Parallel()
+
+ db := pgtest.NewSqlxDB(t)
+ cfg := configtest.NewTestGeneralConfig(t)
+ txStore := cltest.NewTestTxStore(t, db, cfg.Database())
+ ks := cltest.NewKeyStore(t, db, cfg.Database()).Eth()
+
+ ethClient := evmtest.NewEthClientMockWithDefaultChain(t)
+ evmcfg := evmtest.NewChainScopedConfig(t, cfg)
+ checkerFactory := &txmgr.CheckerFactory{Client: ethClient}
+ randNonce := testutils.NewRandomPositiveInt64()
+ _, addr1 := cltest.MustInsertRandomKey(t, ks)
+ ethClient.On("PendingNonceAt", mock.Anything, addr1).Return(uint64(0), errors.New("failed to retrieve nonce at startup")).Once()
+ ethClient.On("PendingNonceAt", mock.Anything, addr1).Return(uint64(randNonce), nil).Once()
+ eb := NewTestEthBroadcaster(t, txStore, ethClient, ks, evmcfg, checkerFactory, false)
+
+ ctx := testutils.Context(t)
+ nonce, err := eb.GetNextSequence(ctx, addr1)
+ require.NoError(t, err)
+ require.Equal(t, randNonce, int64(nonce))
+
+ // Test that the new nonce is set in the map and does not need a client call to retrieve on subsequent calls
+ nonce, err = eb.GetNextSequence(ctx, addr1)
+ require.NoError(t, err)
+ require.Equal(t, randNonce, int64(nonce))
+}
+
func Test_IncrementNextNonce(t *testing.T) {
t.Parallel()
@@ -1945,26 +2013,27 @@ func Test_IncrementNextNonce(t *testing.T) {
ethClient.On("PendingNonceAt", mock.Anything, addr1).Return(uint64(randNonce), nil).Once()
eb := NewTestEthBroadcaster(t, txStore, ethClient, ks, evmcfg, checkerFactory, false)
- nonce, err := eb.GetNextSequence(addr1)
+ ctx := testutils.Context(t)
+ nonce, err := eb.GetNextSequence(ctx, addr1)
require.NoError(t, err)
eb.IncrementNextSequence(addr1, nonce)
- nonce, err = eb.GetNextSequence(addr1)
+ nonce, err = eb.GetNextSequence(ctx, addr1)
require.NoError(t, err)
assert.Equal(t, randNonce+1, int64(nonce))
eb.IncrementNextSequence(addr1, nonce)
- nonce, err = eb.GetNextSequence(addr1)
+ nonce, err = eb.GetNextSequence(ctx, addr1)
require.NoError(t, err)
assert.Equal(t, randNonce+2, int64(nonce))
randAddr1 := utils.RandomAddress()
- _, err = eb.GetNextSequence(randAddr1)
+ _, err = eb.GetNextSequence(ctx, randAddr1)
require.Error(t, err)
- assert.Contains(t, err.Error(), fmt.Sprintf("address not found in next sequence map: %s", randAddr1.Hex()))
+ assert.Contains(t, err.Error(), fmt.Sprintf("address disabled: %s", randAddr1.Hex()))
// verify it didnt get changed by any erroring calls
- nonce, err = eb.GetNextSequence(addr1)
+ nonce, err = eb.GetNextSequence(ctx, addr1)
require.NoError(t, err)
assert.Equal(t, randNonce+2, int64(nonce))
}
@@ -1983,14 +2052,15 @@ func Test_SetNextNonce(t *testing.T) {
_, fromAddress := cltest.MustInsertRandomKey(t, ks)
ethClient.On("PendingNonceAt", mock.Anything, fromAddress).Return(uint64(0), nil).Once()
eb := NewTestEthBroadcaster(t, txStore, ethClient, ks, evmcfg, checkerFactory, false)
+ ctx := testutils.Context(t)
t.Run("update next nonce", func(t *testing.T) {
- nonce, err := eb.GetNextSequence(fromAddress)
+ nonce, err := eb.GetNextSequence(ctx, fromAddress)
require.NoError(t, err)
assert.Equal(t, int64(0), int64(nonce))
eb.SetNextSequence(fromAddress, evmtypes.Nonce(24))
- newNextNonce, err := eb.GetNextSequence(fromAddress)
+ newNextNonce, err := eb.GetNextSequence(ctx, fromAddress)
require.NoError(t, err)
assert.Equal(t, int64(24), int64(newNextNonce))
})
diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md
index b9ab38eb3bd..a58a2eb4a76 100644
--- a/docs/CHANGELOG.md
+++ b/docs/CHANGELOG.md
@@ -13,6 +13,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## 2.7.1 - UNRELEASED
+### Fixed
+
+- Fixed a bug that caused the node to shut down if all configured RPCs are unreachable during startup.
+
## 2.7.0 - 2023-11-14
@@ -39,7 +43,9 @@ These will eventually replace `TelemetryIngress.URL` and `TelemetryIngress.Serve
- `P2P.V2` is now enabled (`Enabled = true`) by default.
### Upcoming Required Configuration Changes
+
Starting in `v2.9.0`:
+
- `TelemetryIngress.URL` and `TelemetryIngress.ServerPubKey` will no longer be allowed. Any TOML configuration that sets this fields will prevent the node from booting. These fields will be replaced by `[[TelemetryIngress.Endpoints]]`
- `P2P.V1` will no longer be supported and must not be set in TOML configuration in order to boot. Use `P2P.V2` instead. If you are using both, `V1` can simply be removed.
From a564461f6c2eedb3b324b37d8cdf56f7cec95a69 Mon Sep 17 00:00:00 2001
From: Sneha Agnihotri
Date: Tue, 21 Nov 2023 11:01:31 -0800
Subject: [PATCH 27/40] Finalize date on changelog for 2.7.1
---
docs/CHANGELOG.md | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md
index a58a2eb4a76..50c8a389932 100644
--- a/docs/CHANGELOG.md
+++ b/docs/CHANGELOG.md
@@ -11,14 +11,14 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
...
-## 2.7.1 - UNRELEASED
+
+
+## 2.7.1 - 2023-11-21
### Fixed
- Fixed a bug that caused the node to shut down if all configured RPCs are unreachable during startup.
-
-
## 2.7.0 - 2023-11-14
### Added
From 39ba467207bf2f5cf73f5a067616261bcba8499c Mon Sep 17 00:00:00 2001
From: anirudhwarrier <12178754+anirudhwarrier@users.noreply.github.com>
Date: Thu, 23 Nov 2023 10:49:53 +0400
Subject: [PATCH 28/40] fix failing CI tests for 2.7.1
---
integration-tests/go.mod | 2 +-
integration-tests/go.sum | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/integration-tests/go.mod b/integration-tests/go.mod
index 7dd2d017785..b90ba784a38 100644
--- a/integration-tests/go.mod
+++ b/integration-tests/go.mod
@@ -21,7 +21,7 @@ require (
github.com/segmentio/ksuid v1.0.4
github.com/slack-go/slack v0.12.2
github.com/smartcontractkit/chainlink-env v0.38.3
- github.com/smartcontractkit/chainlink-testing-framework v1.17.12-0.20231018101901-23824db88d36
+ github.com/smartcontractkit/chainlink-testing-framework v1.17.13
github.com/smartcontractkit/chainlink/v2 v2.0.0-00010101000000-000000000000
github.com/smartcontractkit/libocr v0.0.0-20231020123319-d255366a6545
github.com/smartcontractkit/ocr2keepers v0.7.27
diff --git a/integration-tests/go.sum b/integration-tests/go.sum
index 3be74077277..8c3dc2c65c2 100644
--- a/integration-tests/go.sum
+++ b/integration-tests/go.sum
@@ -2370,8 +2370,8 @@ github.com/smartcontractkit/chainlink-solana v1.0.3-0.20231023133638-72f4e799ab0
github.com/smartcontractkit/chainlink-solana v1.0.3-0.20231023133638-72f4e799ab05/go.mod h1:o0Pn1pbaUluboaK6/yhf8xf7TiFCkyFl6WUOdwqamuU=
github.com/smartcontractkit/chainlink-starknet/relayer v0.0.1-beta-test.0.20231024133459-1ef3a11319eb h1:HiluOfEVGOQTM6BTDImOqYdMZZ7qq7fkZ3TJdmItNr8=
github.com/smartcontractkit/chainlink-starknet/relayer v0.0.1-beta-test.0.20231024133459-1ef3a11319eb/go.mod h1:/30flFG4L/iCYAFeA3DUzR0xuHSxAMONiWTzyzvsNwo=
-github.com/smartcontractkit/chainlink-testing-framework v1.17.12-0.20231018101901-23824db88d36 h1:ow84QG8vEHMvfjGg0RF8HNYh80WcHci6PIenXyY6K8Y=
-github.com/smartcontractkit/chainlink-testing-framework v1.17.12-0.20231018101901-23824db88d36/go.mod h1:RWlmjwnjIGbQAnRfKwe02Ife82nNI3rZmdI0zgkfbyk=
+github.com/smartcontractkit/chainlink-testing-framework v1.17.13 h1:C8E+P3/ElUxmsIHE0fOd1hDoSc3AbbqcePmN9sNKYdM=
+github.com/smartcontractkit/chainlink-testing-framework v1.17.13/go.mod h1:RWlmjwnjIGbQAnRfKwe02Ife82nNI3rZmdI0zgkfbyk=
github.com/smartcontractkit/go-plugin v0.0.0-20231003134350-e49dad63b306 h1:ko88+ZznniNJZbZPWAvHQU8SwKAdHngdDZ+pvVgB5ss=
github.com/smartcontractkit/go-plugin v0.0.0-20231003134350-e49dad63b306/go.mod h1:w1sAEES3g3PuV/RzUrgow20W2uErMly84hhD3um1WL4=
github.com/smartcontractkit/grpc-proxy v0.0.0-20230731113816-f1be6620749f h1:hgJif132UCdjo8u43i7iPN1/MFnu49hv7lFGFftCHKU=
From 19e3b4ed835bb664cbb642af4c8355307153f93a Mon Sep 17 00:00:00 2001
From: Tate
Date: Mon, 27 Nov 2023 09:15:09 -0700
Subject: [PATCH 29/40] bump solana version in tests built
---
.github/workflows/integration-tests.yml | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml
index aadb14f1284..b1a1e663569 100644
--- a/.github/workflows/integration-tests.yml
+++ b/.github/workflows/integration-tests.yml
@@ -747,7 +747,7 @@ jobs:
uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0
with:
repository: smartcontractkit/chainlink-solana
- ref: 23816fcf7d380a30c87b6d87e4fb0ca94419b259 # swtich back to this after the next solana release${{ needs.get_solana_sha.outputs.sha }}
+ ref: a28100b7f2954604a8ca2ff9ec7bccc6ec952953 # switch back to this after the next solana release${{ needs.get_solana_sha.outputs.sha }}
- name: Build Test Image
if: needs.changes.outputs.src == 'true' && needs.solana-test-image-exists.outputs.exists == 'false'
uses: ./.github/actions/build-test-image
@@ -805,7 +805,7 @@ jobs:
uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0
with:
repository: smartcontractkit/chainlink-solana
- ref: ${{ needs.get_solana_sha.outputs.sha }}
+ ref: a28100b7f2954604a8ca2ff9ec7bccc6ec952953 # temporarily using specific commit for release branch ${{ needs.get_solana_sha.outputs.sha }}
- name: Run Setup
if: needs.changes.outputs.src == 'true'
uses: smartcontractkit/chainlink-github-actions/chainlink-testing-framework/setup-run-tests-environment@eccde1970eca69f079d3efb3409938a72ade8497 # v2.2.13
From 33a890f01e49da091fa8ec148b74b28f5edc5494 Mon Sep 17 00:00:00 2001
From: Lei
Date: Mon, 4 Dec 2023 11:27:30 -0800
Subject: [PATCH 30/40] Revert "make streams lookup modular (#11368)" (#11482)
This reverts commit fa0f16ad0acf417db1186728560278a049357914.
---
core/scripts/chaincli/handler/debug.go | 150 ++---
.../handler/mercury_lookup_handler.go | 534 ++++++++++++++++++
core/scripts/go.mod | 3 +-
core/scripts/go.sum | 2 +
.../v21/mercury/streams/streams.go | 143 +++--
.../v21/mercury/streams/streams_test.go | 20 +-
6 files changed, 666 insertions(+), 186 deletions(-)
create mode 100644 core/scripts/chaincli/handler/mercury_lookup_handler.go
diff --git a/core/scripts/chaincli/handler/debug.go b/core/scripts/chaincli/handler/debug.go
index 0075862d95d..fec8c6cd414 100644
--- a/core/scripts/chaincli/handler/debug.go
+++ b/core/scripts/chaincli/handler/debug.go
@@ -22,17 +22,12 @@ import (
ocr2keepers "github.com/smartcontractkit/chainlink-automation/pkg/v3/types"
- evm21 "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21"
-
"github.com/smartcontractkit/chainlink/core/scripts/chaincli/config"
"github.com/smartcontractkit/chainlink/core/scripts/common"
"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/automation_utils_2_1"
iregistry21 "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/i_keeper_registry_master_wrapper_2_1"
- "github.com/smartcontractkit/chainlink/v2/core/logger"
- "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/models"
"github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core"
"github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/encoding"
- "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/mercury"
"github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/mercury/streams"
"github.com/smartcontractkit/chainlink/v2/core/utils"
bigmath "github.com/smartcontractkit/chainlink/v2/core/utils/big_math"
@@ -41,7 +36,12 @@ import (
const (
ConditionTrigger uint8 = iota
LogTrigger
+
+ blockNumber = "blockNumber"
expectedTypeAndVersion = "KeeperRegistry 2.1.0"
+ feedIdHex = "feedIdHex"
+ feedIDs = "feedIDs"
+ timestamp = "timestamp"
)
var packer = encoding.NewAbiPacker()
@@ -125,8 +125,6 @@ func (k *Keeper) Debug(ctx context.Context, args []string) {
var checkResult iregistry21.CheckUpkeep
var blockNum uint64
var performData []byte
- var workID [32]byte
- var trigger ocr2keepers.Trigger
upkeepNeeded := false
// check upkeep
if triggerType == ConditionTrigger {
@@ -179,8 +177,7 @@ func (k *Keeper) Debug(ctx context.Context, args []string) {
}
// check that tx for this upkeep / tx was not already performed
message(fmt.Sprintf("LogTrigger{blockNum: %d, blockHash: %s, txHash: %s, logIndex: %d}", blockNum, receipt.BlockHash.Hex(), txHash, logIndex))
- trigger = mustAutomationTrigger(txHash, logIndex, blockNum, receipt.BlockHash)
- workID = mustUpkeepWorkID(upkeepID, trigger)
+ workID := mustUpkeepWorkID(upkeepID, blockNum, receipt.BlockHash, txHash, logIndex)
message(fmt.Sprintf("workID computed: %s", hex.EncodeToString(workID[:])))
hasKey, err := keeperRegistry21.HasDedupKey(latestCallOpts, workID)
if err != nil {
@@ -232,82 +229,73 @@ func (k *Keeper) Debug(ctx context.Context, args []string) {
if checkResult.UpkeepFailureReason != 0 {
message(fmt.Sprintf("checkUpkeep failed with UpkeepFailureReason %d", checkResult.UpkeepFailureReason))
}
-
if checkResult.UpkeepFailureReason == uint8(encoding.UpkeepFailureReasonTargetCheckReverted) {
- mc := &models.MercuryCredentials{k.cfg.MercuryLegacyURL, k.cfg.MercuryURL, k.cfg.MercuryID, k.cfg.MercuryKey}
- mercuryConfig := evm21.NewMercuryConfig(mc, core.StreamsCompatibleABI)
- lggr, _ := logger.NewLogger()
- blockSub := &blockSubscriber{k.client}
- streams := streams.NewStreamsLookup(packer, mercuryConfig, blockSub, k.rpcClient, keeperRegistry21, lggr)
+ // TODO use the new streams lookup lib
+ //mc := &models.MercuryCredentials{k.cfg.MercuryLegacyURL, k.cfg.MercuryURL, k.cfg.MercuryID, k.cfg.MercuryKey}
+ //mercuryConfig := evm.NewMercuryConfig(mc, core.StreamsCompatibleABI)
+ //lggr, _ := logger.NewLogger()
+ //blockSub := &blockSubscriber{k.client}
+ //_ = streams.NewStreamsLookup(packer, mercuryConfig, blockSub, keeperRegistry21, k.rpcClient, lggr)
streamsLookupErr, err := packer.DecodeStreamsLookupRequest(checkResult.PerformData)
if err == nil {
message("upkeep reverted with StreamsLookup")
message(fmt.Sprintf("StreamsLookup data: {FeedParamKey: %s, Feeds: %v, TimeParamKey: %s, Time: %d, ExtraData: %s}", streamsLookupErr.FeedParamKey, streamsLookupErr.Feeds, streamsLookupErr.TimeParamKey, streamsLookupErr.Time.Uint64(), hexutil.Encode(streamsLookupErr.ExtraData)))
-
- streamsLookup := &mercury.StreamsLookup{
- StreamsLookupError: &mercury.StreamsLookupError{
- FeedParamKey: streamsLookupErr.FeedParamKey,
- Feeds: streamsLookupErr.Feeds,
- TimeParamKey: streamsLookupErr.TimeParamKey,
- Time: streamsLookupErr.Time,
- ExtraData: streamsLookupErr.ExtraData,
- },
- UpkeepId: upkeepID,
- Block: blockNum,
- }
-
- if streamsLookup.IsMercuryV02() {
+ if streamsLookupErr.FeedParamKey == feedIdHex && streamsLookupErr.TimeParamKey == blockNumber {
message("using mercury lookup v0.2")
- // check if upkeep is allowed to use mercury v0.2
- _, _, _, allowed, err := streams.AllowedToUseMercury(latestCallOpts, upkeepID)
+ // handle v0.2
+ cfg, err := keeperRegistry21.GetUpkeepPrivilegeConfig(triggerCallOpts, upkeepID)
if err != nil {
- failUnknown("failed to check if upkeep is allowed to use mercury", err)
+ failUnknown("failed to get upkeep privilege config ", err)
+ }
+ allowed := false
+ if len(cfg) > 0 {
+ var privilegeConfig streams.UpkeepPrivilegeConfig
+ if err := json.Unmarshal(cfg, &privilegeConfig); err != nil {
+ failUnknown("failed to unmarshal privilege config ", err)
+ }
+ allowed = privilegeConfig.MercuryEnabled
}
if !allowed {
resolveIneligible("upkeep reverted with StreamsLookup but is not allowed to access streams")
}
- } else if streamsLookup.IsMercuryV03() {
+ } else if streamsLookupErr.FeedParamKey != feedIDs || streamsLookupErr.TimeParamKey != timestamp {
// handle v0.3
- message("using mercury lookup v0.3")
- } else {
resolveIneligible("upkeep reverted with StreamsLookup but the configuration is invalid")
+ } else {
+ message("using mercury lookup v0.3")
}
+ streamsLookup := &StreamsLookup{streamsLookupErr.FeedParamKey, streamsLookupErr.Feeds, streamsLookupErr.TimeParamKey, streamsLookupErr.Time, streamsLookupErr.ExtraData, upkeepID, blockNum}
if k.cfg.MercuryLegacyURL == "" || k.cfg.MercuryURL == "" || k.cfg.MercuryID == "" || k.cfg.MercuryKey == "" {
failCheckConfig("Mercury configs not set properly, check your MERCURY_LEGACY_URL, MERCURY_URL, MERCURY_ID and MERCURY_KEY", nil)
}
-
- // do mercury request
- automationCheckResult := mustAutomationCheckResult(upkeepID, checkResult, trigger)
- values, err := streams.DoMercuryRequest(ctx, streamsLookup, &automationCheckResult)
-
- if automationCheckResult.IneligibilityReason == uint8(mercury.MercuryUpkeepFailureReasonInvalidRevertDataInput) {
+ handler := NewMercuryLookupHandler(&MercuryCredentials{k.cfg.MercuryLegacyURL, k.cfg.MercuryURL, k.cfg.MercuryID, k.cfg.MercuryKey}, k.rpcClient)
+ state, failureReason, values, _, err := handler.doMercuryRequest(ctx, streamsLookup)
+ if failureReason == UpkeepFailureReasonInvalidRevertDataInput {
resolveIneligible("upkeep used invalid revert data")
}
- if automationCheckResult.PipelineExecutionState == uint8(mercury.InvalidMercuryRequest) {
+ if state == InvalidMercuryRequest {
resolveIneligible("the mercury request data is invalid")
}
if err != nil {
- resolveIneligible("failed to DoMercuryRequest")
+ failCheckConfig("failed to do mercury request ", err)
}
-
- // do checkCallback
- err = streams.CheckCallback(ctx, values, streamsLookup, &automationCheckResult)
+ callbackResult, err := keeperRegistry21.CheckCallback(triggerCallOpts, upkeepID, values, streamsLookup.extraData)
if err != nil {
failUnknown("failed to execute mercury callback ", err)
}
- if automationCheckResult.IneligibilityReason != 0 {
- message(fmt.Sprintf("checkCallback failed with UpkeepFailureReason %d", automationCheckResult.IneligibilityReason))
+ if callbackResult.UpkeepFailureReason != 0 {
+ message(fmt.Sprintf("checkCallback failed with UpkeepFailureReason %d", checkResult.UpkeepFailureReason))
}
- upkeepNeeded, performData = automationCheckResult.Eligible, automationCheckResult.PerformData
- // do tenderly simulations for checkCallback
- rawCall, err := core.RegistryABI.Pack("checkCallback", upkeepID, values, streamsLookup.ExtraData)
+ upkeepNeeded, performData = callbackResult.UpkeepNeeded, callbackResult.PerformData
+ // do tenderly simulations
+ rawCall, err := core.RegistryABI.Pack("checkCallback", upkeepID, values, streamsLookup.extraData)
if err != nil {
failUnknown("failed to pack raw checkCallback call", err)
}
addLink("checkCallback simulation", tenderlySimLink(k.cfg, chainID, blockNum, rawCall, registryAddress))
- rawCall, err = core.StreamsCompatibleABI.Pack("checkCallback", values, streamsLookup.ExtraData)
+ rawCall, err = core.StreamsCompatibleABI.Pack("checkCallback", values, streamsLookup.extraData)
if err != nil {
failUnknown("failed to pack raw checkCallback (direct) call", err)
}
@@ -329,23 +317,6 @@ func (k *Keeper) Debug(ctx context.Context, args []string) {
}
}
-func mustAutomationCheckResult(upkeepID *big.Int, checkResult iregistry21.CheckUpkeep, trigger ocr2keepers.Trigger) ocr2keepers.CheckResult {
- upkeepIdentifier := mustUpkeepIdentifier(upkeepID)
- checkResult2 := ocr2keepers.CheckResult{
- Eligible: checkResult.UpkeepNeeded,
- IneligibilityReason: checkResult.UpkeepFailureReason,
- UpkeepID: upkeepIdentifier,
- Trigger: trigger,
- WorkID: core.UpkeepWorkID(upkeepIdentifier, trigger),
- GasAllocated: 0,
- PerformData: checkResult.PerformData,
- FastGasWei: checkResult.FastGasWei,
- LinkNative: checkResult.LinkNative,
- }
-
- return checkResult2
-}
-
type blockSubscriber struct {
ethClient *ethclient.Client
}
@@ -399,27 +370,9 @@ func packTriggerData(log *types.Log, blockTime uint64) ([]byte, error) {
return b, nil
}
-func mustUpkeepWorkID(upkeepID *big.Int, trigger ocr2keepers.Trigger) [32]byte {
- upkeepIdentifier := mustUpkeepIdentifier(upkeepID)
-
- workID := core.UpkeepWorkID(upkeepIdentifier, trigger)
- workIDBytes, err := hex.DecodeString(workID)
- if err != nil {
- failUnknown("failed to decode workID", err)
- }
-
- var result [32]byte
- copy(result[:], workIDBytes[:])
- return result
-}
-
-func mustUpkeepIdentifier(upkeepID *big.Int) ocr2keepers.UpkeepIdentifier {
- upkeepIdentifier := &ocr2keepers.UpkeepIdentifier{}
- upkeepIdentifier.FromBigInt(upkeepID)
- return *upkeepIdentifier
-}
-
-func mustAutomationTrigger(txHash [32]byte, logIndex int64, blockNum uint64, blockHash [32]byte) ocr2keepers.Trigger {
+func mustUpkeepWorkID(upkeepID *big.Int, blockNum uint64, blockHash [32]byte, txHash [32]byte, logIndex int64) [32]byte {
+ // TODO - this is a copy of the code in core.UpkeepWorkID
+ // We should refactor that code to be more easily exported, e.g. not rely on Trigger structs
trigger := ocr2keepers.Trigger{
LogTriggerExtension: &ocr2keepers.LogTriggerExtension{
TxHash: txHash,
@@ -428,7 +381,16 @@ func mustAutomationTrigger(txHash [32]byte, logIndex int64, blockNum uint64, blo
BlockHash: blockHash,
},
}
- return trigger
+ upkeepIdentifier := &ocr2keepers.UpkeepIdentifier{}
+ upkeepIdentifier.FromBigInt(upkeepID)
+ workID := core.UpkeepWorkID(*upkeepIdentifier, trigger)
+ workIDBytes, err := hex.DecodeString(workID)
+ if err != nil {
+ failUnknown("failed to decode workID", err)
+ }
+ var result [32]byte
+ copy(result[:], workIDBytes[:])
+ return result
}
func message(msg string) {
@@ -440,11 +402,11 @@ func warning(msg string) {
}
func resolveIneligible(msg string) {
- exit(fmt.Sprintf("✅ %s: this upkeep is not currently eligible", msg), nil, 0)
+ exit(fmt.Sprintf("✅ %s: this upkeep is not currently elligible", msg), nil, 0)
}
func resolveEligible() {
- exit("❌ this upkeep is currently eligible", nil, 0)
+ exit("❌ this upkeep is currently elligible", nil, 0)
}
func rerun(msg string, err error) {
@@ -545,3 +507,5 @@ func tenderlySimLink(cfg *config.Config, chainID int64, blockNumber uint64, inpu
}
return common.TenderlySimLink(responseJSON.Simulation.Id)
}
+
+// TODO - link to performUpkeep tx if exists
diff --git a/core/scripts/chaincli/handler/mercury_lookup_handler.go b/core/scripts/chaincli/handler/mercury_lookup_handler.go
new file mode 100644
index 00000000000..1bd4b2e183c
--- /dev/null
+++ b/core/scripts/chaincli/handler/mercury_lookup_handler.go
@@ -0,0 +1,534 @@
+package handler
+
+import (
+ "context"
+ "crypto/hmac"
+ "crypto/sha256"
+ "encoding/hex"
+ "encoding/json"
+ "fmt"
+ "io"
+ "math/big"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/avast/retry-go"
+ ethabi "github.com/ethereum/go-ethereum/accounts/abi"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/ethereum/go-ethereum/rpc"
+ "github.com/pkg/errors"
+)
+
+// MercuryLookupHandler is responsible for initiating the calls to the Mercury server
+// to determine whether the upkeeps are eligible
+type MercuryLookupHandler struct {
+ credentials *MercuryCredentials
+ httpClient HttpClient
+ rpcClient *rpc.Client
+}
+
+func NewMercuryLookupHandler(
+ credentials *MercuryCredentials,
+ rpcClient *rpc.Client,
+) *MercuryLookupHandler {
+ return &MercuryLookupHandler{
+ credentials: credentials,
+ httpClient: http.DefaultClient,
+ rpcClient: rpcClient,
+ }
+}
+
+type MercuryVersion string
+
+type StreamsLookup struct {
+ feedParamKey string
+ feeds []string
+ timeParamKey string
+ time *big.Int
+ extraData []byte
+ upkeepId *big.Int
+ block uint64
+}
+
+//go:generate mockery --quiet --name HttpClient --output ./mocks/ --case=underscore
+type HttpClient interface {
+ Do(req *http.Request) (*http.Response, error)
+}
+
+type MercuryCredentials struct {
+ LegacyURL string
+ URL string
+ ClientID string
+ ClientKey string
+}
+
+func (mc *MercuryCredentials) Validate() bool {
+ return mc.URL != "" && mc.ClientID != "" && mc.ClientKey != ""
+}
+
+type MercuryData struct {
+ Index int
+ Error error
+ Retryable bool
+ Bytes [][]byte
+ State PipelineExecutionState
+}
+
+// MercuryV02Response represents a JSON structure used by Mercury v0.2
+type MercuryV02Response struct {
+ ChainlinkBlob string `json:"chainlinkBlob"`
+}
+
+// MercuryV03Response represents a JSON structure used by Mercury v0.3
+type MercuryV03Response struct {
+ Reports []MercuryV03Report `json:"reports"`
+}
+
+type MercuryV03Report struct {
+ FeedID string `json:"feedID"` // feed id, hex encoded
+ ValidFromTimestamp uint32 `json:"validFromTimestamp"`
+ ObservationsTimestamp uint32 `json:"observationsTimestamp"`
+ FullReport string `json:"fullReport"` // the actual hex encoded mercury report of this feed, can be sent to verifier
+}
+
+const (
+ // DefaultAllowListExpiration decides how long an upkeep's allow list info will be valid for.
+ DefaultAllowListExpiration = 20 * time.Minute
+ // CleanupInterval decides when the expired items in cache will be deleted.
+ CleanupInterval = 25 * time.Minute
+)
+
+const (
+ ApplicationJson = "application/json"
+ BlockNumber = "blockNumber" // valid for v0.2
+ FeedIDs = "feedIDs" // valid for v0.3
+ FeedIdHex = "feedIdHex" // valid for v0.2
+ HeaderAuthorization = "Authorization"
+ HeaderContentType = "Content-Type"
+ HeaderTimestamp = "X-Authorization-Timestamp"
+ HeaderSignature = "X-Authorization-Signature-SHA256"
+ HeaderUpkeepId = "X-Authorization-Upkeep-Id"
+ MercuryPathV2 = "/client?" // only used to access mercury v0.2 server
+ MercuryBatchPathV3 = "/api/v1/reports/bulk?" // only used to access mercury v0.3 server
+ RetryDelay = 500 * time.Millisecond
+ Timestamp = "timestamp" // valid for v0.3
+ TotalAttempt = 3
+ UserId = "userId"
+)
+
+type UpkeepFailureReason uint8
+type PipelineExecutionState uint8
+
+const (
+ // upkeep failure onchain reasons
+ UpkeepFailureReasonNone UpkeepFailureReason = 0
+ UpkeepFailureReasonUpkeepCancelled UpkeepFailureReason = 1
+ UpkeepFailureReasonUpkeepPaused UpkeepFailureReason = 2
+ UpkeepFailureReasonTargetCheckReverted UpkeepFailureReason = 3
+ UpkeepFailureReasonUpkeepNotNeeded UpkeepFailureReason = 4
+ UpkeepFailureReasonPerformDataExceedsLimit UpkeepFailureReason = 5
+ UpkeepFailureReasonInsufficientBalance UpkeepFailureReason = 6
+ UpkeepFailureReasonMercuryCallbackReverted UpkeepFailureReason = 7
+ UpkeepFailureReasonRevertDataExceedsLimit UpkeepFailureReason = 8
+ UpkeepFailureReasonRegistryPaused UpkeepFailureReason = 9
+ // leaving a gap here for more onchain failure reasons in the future
+ // upkeep failure offchain reasons
+ UpkeepFailureReasonMercuryAccessNotAllowed UpkeepFailureReason = 32
+ UpkeepFailureReasonTxHashNoLongerExists UpkeepFailureReason = 33
+ UpkeepFailureReasonInvalidRevertDataInput UpkeepFailureReason = 34
+ UpkeepFailureReasonSimulationFailed UpkeepFailureReason = 35
+ UpkeepFailureReasonTxHashReorged UpkeepFailureReason = 36
+
+ // pipeline execution error
+ NoPipelineError PipelineExecutionState = 0
+ CheckBlockTooOld PipelineExecutionState = 1
+ CheckBlockInvalid PipelineExecutionState = 2
+ RpcFlakyFailure PipelineExecutionState = 3
+ MercuryFlakyFailure PipelineExecutionState = 4
+ PackUnpackDecodeFailed PipelineExecutionState = 5
+ MercuryUnmarshalError PipelineExecutionState = 6
+ InvalidMercuryRequest PipelineExecutionState = 7
+ InvalidMercuryResponse PipelineExecutionState = 8 // this will only happen if Mercury server sends bad responses
+ UpkeepNotAuthorized PipelineExecutionState = 9
+)
+
+// UpkeepPrivilegeConfig represents the administrative offchain config for each upkeep. It can be set by s_upkeepPrivilegeManager
+// role on the registry. Upkeeps allowed to use Mercury server will have this set to true.
+type UpkeepPrivilegeConfig struct {
+ MercuryEnabled bool `json:"mercuryEnabled"`
+}
+
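The privilege config bytes fetched from the registry are JSON of exactly this shape. A small self-contained sketch of how they gate Mercury access, mirroring the GetUpkeepPrivilegeConfig handling in debug.go above; the payload shown is illustrative only:

package main

import (
	"encoding/json"
	"fmt"
)

type UpkeepPrivilegeConfig struct {
	MercuryEnabled bool `json:"mercuryEnabled"`
}

// mercuryAllowed reports whether the raw privilege config permits Mercury use.
func mercuryAllowed(raw []byte) (bool, error) {
	if len(raw) == 0 {
		return false, nil // no config set: not allowed
	}
	var cfg UpkeepPrivilegeConfig
	if err := json.Unmarshal(raw, &cfg); err != nil {
		return false, fmt.Errorf("failed to unmarshal privilege config: %w", err)
	}
	return cfg.MercuryEnabled, nil
}

func main() {
	ok, err := mercuryAllowed([]byte(`{"mercuryEnabled": true}`)) // hypothetical payload
	fmt.Println(ok, err)                                          // true <nil>
}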
+// generateHMAC calculates a user HMAC for Mercury server authentication.
+func (mlh *MercuryLookupHandler) generateHMAC(method string, path string, body []byte, clientId string, secret string, ts int64) string {
+ bodyHash := sha256.New()
+ bodyHash.Write(body)
+ hashString := fmt.Sprintf("%s %s %s %s %d",
+ method,
+ path,
+ hex.EncodeToString(bodyHash.Sum(nil)),
+ clientId,
+ ts)
+ signedMessage := hmac.New(sha256.New, []byte(secret))
+ signedMessage.Write([]byte(hashString))
+ userHmac := hex.EncodeToString(signedMessage.Sum(nil))
+ return userHmac
+}
+
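The signature produced by generateHMAC can only be checked by a party that holds the same client secret. A hedged sketch of the verification counterpart, assuming the server recomputes the identical digest over (method, path, body hash, client id, timestamp) and compares it in constant time; the request path and credentials below are made up for illustration:

package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// computeHMAC builds the same message layout as generateHMAC above.
func computeHMAC(method, path string, body []byte, clientID, secret string, ts int64) string {
	bodyHash := sha256.Sum256(body)
	msg := fmt.Sprintf("%s %s %s %s %d", method, path, hex.EncodeToString(bodyHash[:]), clientID, ts)
	mac := hmac.New(sha256.New, []byte(secret))
	mac.Write([]byte(msg))
	return hex.EncodeToString(mac.Sum(nil))
}

// verifyHMAC recomputes the digest and compares it in constant time.
func verifyHMAC(received, method, path string, body []byte, clientID, secret string, ts int64) bool {
	expected := computeHMAC(method, path, body, clientID, secret, ts)
	return hmac.Equal([]byte(received), []byte(expected))
}

func main() {
	const path = "/client?feedIdHex=0x01&blockNumber=100" // hypothetical v0.2 query
	sig := computeHMAC("GET", path, nil, "node-1", "secret", 1700000000000)
	fmt.Println(verifyHMAC(sig, "GET", path, nil, "node-1", "secret", 1700000000000)) // true
}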
+// singleFeedRequest sends a v0.2 Mercury request for a single feed report.
+func (mlh *MercuryLookupHandler) singleFeedRequest(ctx context.Context, ch chan<- MercuryData, index int, ml *StreamsLookup) {
+ q := url.Values{
+ ml.feedParamKey: {ml.feeds[index]},
+ ml.timeParamKey: {ml.time.String()},
+ }
+ mercuryURL := mlh.credentials.LegacyURL
+ reqUrl := fmt.Sprintf("%s%s%s", mercuryURL, MercuryPathV2, q.Encode())
+ // mlh.logger.Debugf("request URL for upkeep %s feed %s: %s", ml.upkeepId.String(), ml.feeds[index], reqUrl)
+
+ req, err := http.NewRequestWithContext(ctx, http.MethodGet, reqUrl, nil)
+ if err != nil {
+ ch <- MercuryData{Index: index, Error: err, Retryable: false, State: InvalidMercuryRequest}
+ return
+ }
+
+ ts := time.Now().UTC().UnixMilli()
+ signature := mlh.generateHMAC(http.MethodGet, MercuryPathV2+q.Encode(), []byte{}, mlh.credentials.ClientID, mlh.credentials.ClientKey, ts)
+ req.Header.Set(HeaderContentType, ApplicationJson)
+ req.Header.Set(HeaderAuthorization, mlh.credentials.ClientID)
+ req.Header.Set(HeaderTimestamp, strconv.FormatInt(ts, 10))
+ req.Header.Set(HeaderSignature, signature)
+
+ // in the case of multiple retries here, use the last attempt's data
+ state := NoPipelineError
+ retryable := false
+ sent := false
+ retryErr := retry.Do(
+ func() error {
+ retryable = false
+ resp, err1 := mlh.httpClient.Do(req)
+ if err1 != nil {
+ // mlh.logger.Errorw("StreamsLookup GET request failed", "upkeepID", ml.upkeepId.String(), "time", ml.time.String(), "feed", ml.feeds[index], "error", err1)
+ retryable = true
+ state = MercuryFlakyFailure
+ return err1
+ }
+ defer func(Body io.ReadCloser) {
+ err := Body.Close()
+ if err != nil {
+ // mlh.logger.Errorf("Encountered error when closing the body of the response in single feed: %s", err)
+ }
+ }(resp.Body)
+
+ body, err1 := io.ReadAll(resp.Body)
+ if err1 != nil {
+ retryable = false
+ state = InvalidMercuryResponse
+ return err1
+ }
+
+ if resp.StatusCode == http.StatusNotFound || resp.StatusCode == http.StatusInternalServerError {
+ // mlh.logger.Errorw("StreamsLookup received retryable status code", "upkeepID", ml.upkeepId.String(), "time", ml.time.String(), "statusCode", resp.StatusCode, "feed", ml.feeds[index])
+ retryable = true
+ state = MercuryFlakyFailure
+ return errors.New(strconv.FormatInt(int64(resp.StatusCode), 10))
+ } else if resp.StatusCode != http.StatusOK {
+ retryable = false
+ state = InvalidMercuryRequest
+ return fmt.Errorf("StreamsLookup upkeep %s block %s received status code %d for feed %s", ml.upkeepId.String(), ml.time.String(), resp.StatusCode, ml.feeds[index])
+ }
+
+ // mlh.logger.Debugf("at block %s upkeep %s received status code %d from mercury v0.2 with BODY=%s", ml.time.String(), ml.upkeepId.String(), resp.StatusCode, hexutil.Encode(body))
+
+ var m MercuryV02Response
+ err1 = json.Unmarshal(body, &m)
+ if err1 != nil {
+ // mlh.logger.Errorw("StreamsLookup failed to unmarshal body to MercuryResponse", "upkeepID", ml.upkeepId.String(), "time", ml.time.String(), "feed", ml.feeds[index], "error", err1)
+ retryable = false
+ state = MercuryUnmarshalError
+ return err1
+ }
+ blobBytes, err1 := hexutil.Decode(m.ChainlinkBlob)
+ if err1 != nil {
+ // mlh.logger.Errorw("StreamsLookup failed to decode chainlinkBlob for feed", "upkeepID", ml.upkeepId.String(), "time", ml.time.String(), "blob", m.ChainlinkBlob, "feed", ml.feeds[index], "error", err1)
+ retryable = false
+ state = InvalidMercuryResponse
+ return err1
+ }
+ ch <- MercuryData{
+ Index: index,
+ Bytes: [][]byte{blobBytes},
+ Retryable: false,
+ State: NoPipelineError,
+ }
+ sent = true
+ return nil
+ },
+ // only retry when the error is 404 Not Found or 500 Internal Server Error
+ retry.RetryIf(func(err error) bool {
+ return err.Error() == fmt.Sprintf("%d", http.StatusNotFound) || err.Error() == fmt.Sprintf("%d", http.StatusInternalServerError)
+ }),
+ retry.Context(ctx),
+ retry.Delay(RetryDelay),
+ retry.Attempts(TotalAttempt))
+
+ if !sent {
+ md := MercuryData{
+ Index: index,
+ Bytes: [][]byte{},
+ Retryable: retryable,
+ Error: fmt.Errorf("failed to request feed for %s: %w", ml.feeds[index], retryErr),
+ State: state,
+ }
+ ch <- md
+ }
+}
+
+// multiFeedsRequest sends a Mercury v0.3 request for a multi-feed report
+func (mlh *MercuryLookupHandler) multiFeedsRequest(ctx context.Context, ch chan<- MercuryData, ml *StreamsLookup) {
+ params := fmt.Sprintf("%s=%s&%s=%s", FeedIDs, strings.Join(ml.feeds, ","), Timestamp, ml.time.String())
+ reqUrl := fmt.Sprintf("%s%s%s", mlh.credentials.URL, MercuryBatchPathV3, params)
+ // mlh.logger.Debugf("request URL for upkeep %s userId %s: %s", ml.upkeepId.String(), mlh.credentials.ClientID, reqUrl)
+
+ req, err := http.NewRequestWithContext(ctx, http.MethodGet, reqUrl, nil)
+ if err != nil {
+ ch <- MercuryData{Index: 0, Error: err, Retryable: false, State: InvalidMercuryRequest}
+ return
+ }
+
+ ts := time.Now().UTC().UnixMilli()
+ signature := mlh.generateHMAC(http.MethodGet, MercuryBatchPathV3+params, []byte{}, mlh.credentials.ClientID, mlh.credentials.ClientKey, ts)
+ req.Header.Set(HeaderContentType, ApplicationJson)
+ // username here is often referred to as user id
+ req.Header.Set(HeaderAuthorization, mlh.credentials.ClientID)
+ req.Header.Set(HeaderTimestamp, strconv.FormatInt(ts, 10))
+ req.Header.Set(HeaderSignature, signature)
+ // mercury will inspect the authorization headers above to make sure this user (in automation's context, this node) is eligible to access mercury
+ // and has an automation role. It will then look at this upkeep id to check if it has access to all the requested feeds.
+ req.Header.Set(HeaderUpkeepId, ml.upkeepId.String())
+
+ // in the case of multiple retries here, use the last attempt's data
+ state := NoPipelineError
+ retryable := false
+ sent := false
+ retryErr := retry.Do(
+ func() error {
+ retryable = false
+ resp, err1 := mlh.httpClient.Do(req)
+ if err1 != nil {
+ // mlh.logger.Errorw("StreamsLookup GET request fails for multi feed", "upkeepID", ml.upkeepId.String(), "time", ml.time.String(), "error", err1)
+ retryable = true
+ state = MercuryFlakyFailure
+ return err1
+ }
+ defer func(Body io.ReadCloser) {
+ err := Body.Close()
+ if err != nil {
+ // mlh.logger.Errorf("Encountered error when closing the body of the response in the multi feed: %s", err)
+ }
+ }(resp.Body)
+ body, err1 := io.ReadAll(resp.Body)
+ if err1 != nil {
+ retryable = false
+ state = InvalidMercuryResponse
+ return err1
+ }
+
+ // mlh.logger.Infof("at timestamp %s upkeep %s received status code %d from mercury v0.3", ml.time.String(), ml.upkeepId.String(), resp.StatusCode)
+ if resp.StatusCode == http.StatusUnauthorized {
+ retryable = false
+ state = UpkeepNotAuthorized
+ return fmt.Errorf("at timestamp %s upkeep %s received status code %d from mercury v0.3, most likely this is caused by unauthorized upkeep", ml.time.String(), ml.upkeepId.String(), resp.StatusCode)
+ } else if resp.StatusCode == http.StatusBadRequest {
+ retryable = false
+ state = InvalidMercuryRequest
+ return fmt.Errorf("at timestamp %s upkeep %s received status code %d from mercury v0.3, most likely this is caused by invalid format of timestamp", ml.time.String(), ml.upkeepId.String(), resp.StatusCode)
+ } else if resp.StatusCode == http.StatusInternalServerError {
+ retryable = true
+ state = MercuryFlakyFailure
+ return fmt.Errorf("%d", http.StatusInternalServerError)
+ } else if resp.StatusCode == 420 {
+ // in 0.3, this will happen when missing/malformed query args, missing or bad required headers, non-existent feeds, or no permissions for feeds
+ retryable = false
+ state = InvalidMercuryRequest
+ return fmt.Errorf("at timestamp %s upkeep %s received status code %d from mercury v0.3, most likely this is caused by missing/malformed query args, missing or bad required headers, non-existent feeds, or no permissions for feeds", ml.time.String(), ml.upkeepId.String(), resp.StatusCode)
+ } else if resp.StatusCode != http.StatusOK {
+ retryable = false
+ state = InvalidMercuryRequest
+ return fmt.Errorf("at timestamp %s upkeep %s received status code %d from mercury v0.3", ml.time.String(), ml.upkeepId.String(), resp.StatusCode)
+ }
+
+ var response MercuryV03Response
+ err1 = json.Unmarshal(body, &response)
+ if err1 != nil {
+ // mlh.logger.Errorw("StreamsLookup failed to unmarshal body to MercuryResponse for multi feed", "upkeepID", ml.upkeepId.String(), "time", ml.time.String(), "error", err1)
+ retryable = false
+ state = MercuryUnmarshalError
+ return err1
+ }
+ // in v0.3, if some feeds are not available, the server will only return the available feeds, but we need to make sure ALL feeds are retrieved before calling the user contract
+ // hence, retry in this case. Retrying also helps when we send a very new timestamp and the reports are not yet generated
+ if len(response.Reports) != len(ml.feeds) {
+ // TODO: AUTO-5044: calculate what reports are missing and log a warning
+ retryable = true
+ state = MercuryFlakyFailure
+ return fmt.Errorf("%d", http.StatusNotFound)
+ }
+ var reportBytes [][]byte
+ for _, rsp := range response.Reports {
+ b, err := hexutil.Decode(rsp.FullReport)
+ if err != nil {
+ retryable = false
+ state = InvalidMercuryResponse
+ return err
+ }
+ reportBytes = append(reportBytes, b)
+ }
+ ch <- MercuryData{
+ Index: 0,
+ Bytes: reportBytes,
+ Retryable: false,
+ State: NoPipelineError,
+ }
+ sent = true
+ return nil
+ },
+ // only retry when the error is 404 Not Found or 500 Internal Server Error
+ retry.RetryIf(func(err error) bool {
+ return err.Error() == fmt.Sprintf("%d", http.StatusNotFound) || err.Error() == fmt.Sprintf("%d", http.StatusInternalServerError)
+ }),
+ retry.Context(ctx),
+ retry.Delay(RetryDelay),
+ retry.Attempts(TotalAttempt))
+
+ if !sent {
+ md := MercuryData{
+ Index: 0,
+ Bytes: [][]byte{},
+ Retryable: retryable,
+ Error: retryErr,
+ State: state,
+ }
+ ch <- md
+ }
+}
+
+// doMercuryRequest sends requests to the Mercury API to retrieve the ChainlinkBlob.
+func (mlh *MercuryLookupHandler) doMercuryRequest(ctx context.Context, ml *StreamsLookup) (PipelineExecutionState, UpkeepFailureReason, [][]byte, bool, error) {
+ var isMercuryV03 bool
+ resultLen := len(ml.feeds)
+ ch := make(chan MercuryData, resultLen)
+ if len(ml.feeds) == 0 {
+ return NoPipelineError, UpkeepFailureReasonInvalidRevertDataInput, nil, false, fmt.Errorf("invalid revert data input: feed param key %s, time param key %s, feeds %s", ml.feedParamKey, ml.timeParamKey, ml.feeds)
+ }
+ if ml.feedParamKey == FeedIdHex && ml.timeParamKey == BlockNumber {
+ // only v0.2
+ for i := range ml.feeds {
+ go mlh.singleFeedRequest(ctx, ch, i, ml)
+ }
+ } else if ml.feedParamKey == FeedIDs && ml.timeParamKey == Timestamp {
+ // only v0.3
+ resultLen = 1
+ isMercuryV03 = true
+ ch = make(chan MercuryData, resultLen)
+ go mlh.multiFeedsRequest(ctx, ch, ml)
+ } else {
+ return NoPipelineError, UpkeepFailureReasonInvalidRevertDataInput, nil, false, fmt.Errorf("invalid revert data input: feed param key %s, time param key %s, feeds %s", ml.feedParamKey, ml.timeParamKey, ml.feeds)
+ }
+
+ var reqErr error
+ results := make([][]byte, len(ml.feeds))
+ retryable := true
+ allSuccess := true
+ // in v0.2, use the last execution error as the state; if there are no execution errors, the state will be no error
+ state := NoPipelineError
+ for i := 0; i < resultLen; i++ {
+ m := <-ch
+ if m.Error != nil {
+ if reqErr == nil {
+ reqErr = errors.New(m.Error.Error())
+ } else {
+ reqErr = errors.New(reqErr.Error() + m.Error.Error())
+ }
+ retryable = retryable && m.Retryable
+ allSuccess = false
+ if m.State != NoPipelineError {
+ state = m.State
+ }
+ continue
+ }
+ if isMercuryV03 {
+ results = m.Bytes
+ } else {
+ results[m.Index] = m.Bytes[0]
+ }
+ }
+ // only retry when at least one feed failed AND every failure was retryable
+ return state, UpkeepFailureReasonNone, results, retryable && !allSuccess, reqErr
+}
+
+// decodeStreamsLookup decodes the revert error StreamsLookup(string feedParamKey, string[] feeds, string timeParamKey, uint256 time, byte[] extraData)
+// func (mlh *MercuryLookupHandler) decodeStreamsLookup(data []byte) (*StreamsLookup, error) {
+// e := mlh.mercuryConfig.Abi.Errors["StreamsLookup"]
+// unpack, err := e.Unpack(data)
+// if err != nil {
+// return nil, fmt.Errorf("unpack error: %w", err)
+// }
+// errorParameters := unpack.([]interface{})
+
+// return &StreamsLookup{
+// feedParamKey: *abi.ConvertType(errorParameters[0], new(string)).(*string),
+// feeds: *abi.ConvertType(errorParameters[1], new([]string)).(*[]string),
+// timeParamKey: *abi.ConvertType(errorParameters[2], new(string)).(*string),
+// time: *abi.ConvertType(errorParameters[3], new(*big.Int)).(**big.Int),
+// extraData: *abi.ConvertType(errorParameters[4], new([]byte)).(*[]byte),
+// }, nil
+// }
+
+// allowedToUseMercury retrieves the upkeep's administrative offchain config and decodes a mercuryEnabled bool to indicate whether
+// this upkeep is allowed to use the Mercury service.
+// func (mlh *MercuryLookupHandler) allowedToUseMercury(upkeep models.Upkeep) (bool, error) {
+// allowed, ok := mlh.mercuryConfig.AllowListCache.Get(upkeep.Admin.Hex())
+// if ok {
+// return allowed.(bool), nil
+// }
+
+// if upkeep.UpkeepPrivilegeConfig == nil {
+// return false, fmt.Errorf("the upkeep privilege config was not retrieved for upkeep with ID %s", upkeep.UpkeepID)
+// }
+
+// if len(upkeep.UpkeepPrivilegeConfig) == 0 {
+// return false, fmt.Errorf("the upkeep privilege config is empty")
+// }
+
+// var a UpkeepPrivilegeConfig
+// err := json.Unmarshal(upkeep.UpkeepPrivilegeConfig, &a)
+// if err != nil {
+// return false, fmt.Errorf("failed to unmarshal privilege config for upkeep ID %s: %v", upkeep.UpkeepID, err)
+// }
+
+// mlh.mercuryConfig.AllowListCache.Set(upkeep.Admin.Hex(), a.MercuryEnabled, cache.DefaultExpiration)
+// return a.MercuryEnabled, nil
+// }
+
+func (mlh *MercuryLookupHandler) CheckCallback(ctx context.Context, values [][]byte, lookup *StreamsLookup, registryABI ethabi.ABI, registryAddress common.Address) (hexutil.Bytes, error) {
+ payload, err := registryABI.Pack("checkCallback", lookup.upkeepId, values, lookup.extraData)
+ if err != nil {
+ return nil, err
+ }
+
+ var theBytes hexutil.Bytes
+ args := map[string]interface{}{
+ "to": registryAddress.Hex(),
+ "data": hexutil.Bytes(payload),
+ }
+
+ // call checkCallback function at the block which OCR3 has agreed upon
+ err = mlh.rpcClient.CallContext(ctx, &theBytes, "eth_call", args, hexutil.EncodeUint64(lookup.block))
+ if err != nil {
+ return nil, err
+ }
+ return theBytes, nil
+}
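
The handler above uses github.com/avast/retry-go so that only transient conditions are retried: an HTTP 500 from the Mercury server, or the synthetic 404 returned when the batch response is missing some of the requested feeds (typically a very fresh timestamp whose reports have not been generated yet). A minimal, self-contained sketch of that retry policy follows; the fetchWithRetry helper, its URL parameter, and the delay/attempt values are illustrative assumptions rather than part of the handler, and a literal 404 response is treated the same way for simplicity.

```go
package mercuryretry

import (
	"context"
	"fmt"
	"io"
	"net/http"
	"time"

	"github.com/avast/retry-go"
)

// fetchWithRetry is a hypothetical helper mirroring the policy above: the GET is
// attempted up to three times, but only an error string-matched by RetryIf
// ("404" or "500") triggers another attempt.
func fetchWithRetry(ctx context.Context, client *http.Client, url string) ([]byte, error) {
	var body []byte
	err := retry.Do(
		func() error {
			req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
			if err != nil {
				return err // not matched by RetryIf below, so never retried
			}
			resp, err := client.Do(req)
			if err != nil {
				return err // transport errors are not retried here either
			}
			defer resp.Body.Close()
			if resp.StatusCode == http.StatusNotFound || resp.StatusCode == http.StatusInternalServerError {
				return fmt.Errorf("%d", resp.StatusCode) // transient: retried below
			}
			if resp.StatusCode != http.StatusOK {
				return fmt.Errorf("terminal status %d", resp.StatusCode)
			}
			body, err = io.ReadAll(resp.Body)
			return err
		},
		retry.RetryIf(func(err error) bool {
			return err.Error() == fmt.Sprintf("%d", http.StatusNotFound) ||
				err.Error() == fmt.Sprintf("%d", http.StatusInternalServerError)
		}),
		retry.Context(ctx),
		retry.Delay(500*time.Millisecond),
		retry.Attempts(3),
	)
	return body, err
}
```

Matching on the formatted status-code string mirrors the retry.RetryIf predicate used above; a production implementation would more likely use typed sentinel errors.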
diff --git a/core/scripts/go.mod b/core/scripts/go.mod
index 21f00df02de..c7af0541c12 100644
--- a/core/scripts/go.mod
+++ b/core/scripts/go.mod
@@ -7,6 +7,7 @@ replace github.com/smartcontractkit/chainlink/v2 => ../../
require (
github.com/ava-labs/coreth v0.12.1
+ github.com/avast/retry-go v3.0.0+incompatible
github.com/docker/docker v24.0.7+incompatible
github.com/docker/go-connections v0.4.0
github.com/ethereum/go-ethereum v1.12.0
@@ -18,6 +19,7 @@ require (
github.com/montanaflynn/stats v0.7.1
github.com/olekukonko/tablewriter v0.0.5
github.com/pelletier/go-toml/v2 v2.1.0
+ github.com/pkg/errors v0.9.1
github.com/shopspring/decimal v1.3.1
github.com/smartcontractkit/chainlink-automation v1.0.1
github.com/smartcontractkit/chainlink-vrf v0.0.0-20231120191722-fef03814f868
@@ -281,7 +283,6 @@ require (
github.com/patrickmn/go-cache v2.1.0+incompatible // indirect
github.com/pelletier/go-toml v1.9.5 // indirect
github.com/petermattis/goid v0.0.0-20230317030725-371a4b8eda08 // indirect
- github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
github.com/pressly/goose/v3 v3.16.0 // indirect
diff --git a/core/scripts/go.sum b/core/scripts/go.sum
index 36504924e16..7cea79eb76e 100644
--- a/core/scripts/go.sum
+++ b/core/scripts/go.sum
@@ -152,6 +152,8 @@ github.com/ava-labs/avalanchego v1.10.1 h1:lBeamJ1iNq+p2oKg2nAs+A65m8vhSDjkiTDbw
github.com/ava-labs/avalanchego v1.10.1/go.mod h1:ZvSXWlbkUKlbk3BsWx29a+8eVHe/WBsOxh55BSGoeRk=
github.com/ava-labs/coreth v0.12.1 h1:EWSkFGHGVUxmu1pnSK/2pdcxaAVHbGspHqO3Ag+i7sA=
github.com/ava-labs/coreth v0.12.1/go.mod h1:/5x54QlIKjlPebkdzTA5ic9wXdejbWOnQosztkv9jxo=
+github.com/avast/retry-go v3.0.0+incompatible h1:4SOWQ7Qs+oroOTQOYnAHqelpCO0biHSxpiH9JdtuBj0=
+github.com/avast/retry-go v3.0.0+incompatible/go.mod h1:XtSnn+n/sHqQIpZ10K1qAevBhOOCWBLXXy3hyiqqBrY=
github.com/avast/retry-go/v4 v4.5.1 h1:AxIx0HGi4VZ3I02jr78j5lZ3M6x1E0Ivxa6b0pUUh7o=
github.com/avast/retry-go/v4 v4.5.1/go.mod h1:/sipNsvNB3RRuT5iNcb6h73nw3IBmXJ/H3XrCQYSOpc=
github.com/aws/aws-sdk-go v1.22.1/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/mercury/streams/streams.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/mercury/streams/streams.go
index cb9e2dd6752..aec23431921 100644
--- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/mercury/streams/streams.go
+++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/mercury/streams/streams.go
@@ -16,6 +16,7 @@ import (
"github.com/patrickmn/go-cache"
ocr2keepers "github.com/smartcontractkit/chainlink-automation/pkg/v3/types"
+
"github.com/smartcontractkit/chainlink-common/pkg/services"
iregistry21 "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/i_keeper_registry_master_wrapper_2_1"
@@ -91,9 +92,8 @@ func NewStreamsLookup(
// Lookup looks through check upkeep results looking for any that need off chain lookup
func (s *streams) Lookup(ctx context.Context, checkResults []ocr2keepers.CheckResult) []ocr2keepers.CheckResult {
lookups := map[int]*mercury.StreamsLookup{}
- for _, checkResult := range checkResults {
- copyCheckResult := checkResult
- s.buildResult(ctx, ©CheckResult, lookups)
+ for i, checkResult := range checkResults {
+ s.buildResult(ctx, i, checkResult, checkResults, lookups)
}
var wg sync.WaitGroup
@@ -101,7 +101,7 @@ func (s *streams) Lookup(ctx context.Context, checkResults []ocr2keepers.CheckRe
wg.Add(1)
func(i int, lookup *mercury.StreamsLookup) {
s.threadCtrl.Go(func(ctx context.Context) {
- s.doLookup(ctx, &wg, lookup, &checkResults[i])
+ s.doLookup(ctx, &wg, lookup, i, checkResults)
})
}(i, lookup)
}
@@ -112,7 +112,7 @@ func (s *streams) Lookup(ctx context.Context, checkResults []ocr2keepers.CheckRe
}
// buildResult checks if the upkeep is allowed by Mercury and builds a streams lookup request from the check result
-func (s *streams) buildResult(ctx context.Context, checkResult *ocr2keepers.CheckResult, lookups map[int]*mercury.StreamsLookup) {
+func (s *streams) buildResult(ctx context.Context, i int, checkResult ocr2keepers.CheckResult, checkResults []ocr2keepers.CheckResult, lookups map[int]*mercury.StreamsLookup) {
lookupLggr := s.lggr.With("where", "StreamsLookup")
if checkResult.IneligibilityReason != uint8(mercury.MercuryUpkeepFailureReasonTargetCheckReverted) {
// Streams Lookup only works when upkeep target check reverts
@@ -129,7 +129,7 @@ func (s *streams) buildResult(ctx context.Context, checkResult *ocr2keepers.Chec
// Try to decode the revert error into streams lookup format. User upkeeps can revert with any reason, see if they
// tried to call mercury
- lookupLggr.Infof("at block %d upkeep %s trying to DecodeStreamsLookupRequest performData=%s", block, upkeepId, hexutil.Encode(checkResult.PerformData))
+ lookupLggr.Infof("at block %d upkeep %s trying to DecodeStreamsLookupRequest performData=%s", block, upkeepId, hexutil.Encode(checkResults[i].PerformData))
streamsLookupErr, err := s.packer.DecodeStreamsLookupRequest(checkResult.PerformData)
if err != nil {
lookupLggr.Debugf("at block %d upkeep %s DecodeStreamsLookupRequest failed: %v", block, upkeepId, err)
@@ -139,7 +139,7 @@ func (s *streams) buildResult(ctx context.Context, checkResult *ocr2keepers.Chec
streamsLookupResponse := &mercury.StreamsLookup{StreamsLookupError: streamsLookupErr}
if len(streamsLookupResponse.Feeds) == 0 {
- checkResult.IneligibilityReason = uint8(mercury.MercuryUpkeepFailureReasonInvalidRevertDataInput)
+ checkResults[i].IneligibilityReason = uint8(mercury.MercuryUpkeepFailureReasonInvalidRevertDataInput)
lookupLggr.Debugf("at block %s upkeep %s has empty feeds array", block, upkeepId)
return
}
@@ -148,21 +148,21 @@ func (s *streams) buildResult(ctx context.Context, checkResult *ocr2keepers.Chec
if streamsLookupResponse.IsMercuryV02() {
// check permission on the registry for mercury v0.2
opts := s.buildCallOpts(ctx, block)
- if state, reason, retryable, allowed, err := s.AllowedToUseMercury(opts, upkeepId.BigInt()); err != nil {
+ if state, reason, retryable, allowed, err := s.allowedToUseMercury(opts, upkeepId.BigInt()); err != nil {
lookupLggr.Warnf("at block %s upkeep %s failed to query mercury allow list: %s", block, upkeepId, err)
- checkResult.PipelineExecutionState = uint8(state)
- checkResult.IneligibilityReason = uint8(reason)
- checkResult.Retryable = retryable
+ checkResults[i].PipelineExecutionState = uint8(state)
+ checkResults[i].IneligibilityReason = uint8(reason)
+ checkResults[i].Retryable = retryable
return
} else if !allowed {
lookupLggr.Debugf("at block %d upkeep %s NOT allowed to query Mercury server", block, upkeepId)
- checkResult.IneligibilityReason = uint8(mercury.MercuryUpkeepFailureReasonMercuryAccessNotAllowed)
+ checkResults[i].IneligibilityReason = uint8(mercury.MercuryUpkeepFailureReasonMercuryAccessNotAllowed)
return
}
} else if streamsLookupResponse.IsMercuryVersionUnkown() {
// if mercury version cannot be determined, set failure reason
lookupLggr.Debugf("at block %d upkeep %s NOT allowed to query Mercury server", block, upkeepId)
- checkResult.IneligibilityReason = uint8(mercury.MercuryUpkeepFailureReasonInvalidRevertDataInput)
+ checkResults[i].IneligibilityReason = uint8(mercury.MercuryUpkeepFailureReasonInvalidRevertDataInput)
return
}
@@ -171,103 +171,71 @@ func (s *streams) buildResult(ctx context.Context, checkResult *ocr2keepers.Chec
// in the revert for mercury v0.2, which is denoted by time in the struct bc starting from v0.3, only timestamp will be supported
streamsLookupResponse.Block = uint64(block.Int64())
lookupLggr.Infof("at block %d upkeep %s DecodeStreamsLookupRequest feedKey=%s timeKey=%s feeds=%v time=%s extraData=%s", block, upkeepId, streamsLookupResponse.FeedParamKey, streamsLookupResponse.TimeParamKey, streamsLookupResponse.Feeds, streamsLookupResponse.Time, hexutil.Encode(streamsLookupResponse.ExtraData))
- lookups[len(lookups)] = streamsLookupResponse
+ lookups[i] = streamsLookupResponse
}
-func (s *streams) doLookup(ctx context.Context, wg *sync.WaitGroup, lookup *mercury.StreamsLookup, checkResult *ocr2keepers.CheckResult) {
+func (s *streams) doLookup(ctx context.Context, wg *sync.WaitGroup, lookup *mercury.StreamsLookup, i int, checkResults []ocr2keepers.CheckResult) {
defer wg.Done()
- values, err := s.DoMercuryRequest(ctx, lookup, checkResult)
- if err != nil {
- s.lggr.Errorf("at block %d upkeep %s requested time %s DoMercuryRequest err: %s", lookup.Block, lookup.UpkeepId, lookup.Time, err.Error())
- }
+ state, reason, values, retryable, retryInterval, err := mercury.NoPipelineError, mercury.MercuryUpkeepFailureReasonInvalidRevertDataInput, [][]byte{}, false, 0*time.Second, fmt.Errorf("invalid revert data input: feed param key %s, time param key %s, feeds %s", lookup.FeedParamKey, lookup.TimeParamKey, lookup.Feeds)
+ pluginRetryKey := generatePluginRetryKey(checkResults[i].WorkID, lookup.Block)
- if err := s.CheckCallback(ctx, values, lookup, checkResult); err != nil {
- s.lggr.Errorf("at block %d upkeep %s requested time %s CheckCallback err: %s", lookup.Block, lookup.UpkeepId, lookup.Time, err.Error())
+ if lookup.IsMercuryV02() {
+ state, reason, values, retryable, retryInterval, err = s.v02Client.DoRequest(ctx, lookup, pluginRetryKey)
+ } else if lookup.IsMercuryV03() {
+ state, reason, values, retryable, retryInterval, err = s.v03Client.DoRequest(ctx, lookup, pluginRetryKey)
}
-}
-func (s *streams) CheckCallback(ctx context.Context, values [][]byte, lookup *mercury.StreamsLookup, checkResult *ocr2keepers.CheckResult) error {
- payload, err := s.abi.Pack("checkCallback", lookup.UpkeepId, values, lookup.ExtraData)
if err != nil {
- s.lggr.Errorf("at block %d upkeep %s checkCallback packing err: %s", lookup.Block, lookup.UpkeepId, err.Error())
- checkResult.Retryable = false
- checkResult.PipelineExecutionState = uint8(mercury.PackUnpackDecodeFailed)
- return err
+ s.lggr.Errorf("at block %d upkeep %s requested time %s retryable %v retryInterval %s doMercuryRequest: %s", lookup.Block, lookup.UpkeepId, lookup.Time, retryable, retryInterval, err.Error())
+ checkResults[i].Retryable = retryable
+ checkResults[i].RetryInterval = retryInterval
+ checkResults[i].PipelineExecutionState = uint8(state)
+ checkResults[i].IneligibilityReason = uint8(reason)
+ return
}
- var mercuryBytes hexutil.Bytes
- args := map[string]interface{}{
- "to": s.registry.Address().Hex(),
- "data": hexutil.Bytes(payload),
+ for j, v := range values {
+ s.lggr.Infof("at block %d upkeep %s requested time %s doMercuryRequest values[%d]: %s", lookup.Block, lookup.UpkeepId, lookup.Time, j, hexutil.Encode(v))
}
- // call checkCallback function at the block which OCR3 has agreed upon
- if err = s.client.CallContext(ctx, &mercuryBytes, "eth_call", args, hexutil.EncodeUint64(lookup.Block)); err != nil {
+ state, retryable, mercuryBytes, err := s.checkCallback(ctx, values, lookup)
+ if err != nil {
s.lggr.Errorf("at block %d upkeep %s checkCallback err: %s", lookup.Block, lookup.UpkeepId, err.Error())
- checkResult.Retryable = true
- checkResult.PipelineExecutionState = uint8(mercury.RpcFlakyFailure)
- return err
+ checkResults[i].Retryable = retryable
+ checkResults[i].PipelineExecutionState = uint8(state)
+ return
}
-
s.lggr.Infof("at block %d upkeep %s requested time %s checkCallback mercuryBytes: %s", lookup.Block, lookup.UpkeepId, lookup.Time, hexutil.Encode(mercuryBytes))
unpackCallBackState, needed, performData, failureReason, _, err := s.packer.UnpackCheckCallbackResult(mercuryBytes)
if err != nil {
s.lggr.Errorf("at block %d upkeep %s requested time %s UnpackCheckCallbackResult err: %s", lookup.Block, lookup.UpkeepId, lookup.Time, err.Error())
- checkResult.PipelineExecutionState = unpackCallBackState
- return err
+ checkResults[i].PipelineExecutionState = unpackCallBackState
+ return
}
if failureReason == uint8(mercury.MercuryUpkeepFailureReasonMercuryCallbackReverted) {
- checkResult.IneligibilityReason = uint8(mercury.MercuryUpkeepFailureReasonMercuryCallbackReverted)
+ checkResults[i].IneligibilityReason = uint8(mercury.MercuryUpkeepFailureReasonMercuryCallbackReverted)
s.lggr.Debugf("at block %d upkeep %s requested time %s mercury callback reverts", lookup.Block, lookup.UpkeepId, lookup.Time)
- return fmt.Errorf("at block %d upkeep %s requested time %s mercury callback reverts", lookup.Block, lookup.UpkeepId, lookup.Time)
-
+ return
}
if !needed {
- checkResult.IneligibilityReason = uint8(mercury.MercuryUpkeepFailureReasonUpkeepNotNeeded)
+ checkResults[i].IneligibilityReason = uint8(mercury.MercuryUpkeepFailureReasonUpkeepNotNeeded)
s.lggr.Debugf("at block %d upkeep %s requested time %s callback reports upkeep not needed", lookup.Block, lookup.UpkeepId, lookup.Time)
- return fmt.Errorf("at block %d upkeep %s requested time %s callback reports upkeep not needed", lookup.Block, lookup.UpkeepId, lookup.Time)
+ return
}
- checkResult.IneligibilityReason = uint8(mercury.MercuryUpkeepFailureReasonNone)
- checkResult.Eligible = true
- checkResult.PerformData = performData
+ checkResults[i].IneligibilityReason = uint8(mercury.MercuryUpkeepFailureReasonNone)
+ checkResults[i].Eligible = true
+ checkResults[i].PerformData = performData
s.lggr.Infof("at block %d upkeep %s requested time %s successful with perform data: %s", lookup.Block, lookup.UpkeepId, lookup.Time, hexutil.Encode(performData))
-
- return nil
-}
-
-func (s *streams) DoMercuryRequest(ctx context.Context, lookup *mercury.StreamsLookup, checkResult *ocr2keepers.CheckResult) ([][]byte, error) {
- state, reason, values, retryable, retryInterval, err := mercury.NoPipelineError, mercury.MercuryUpkeepFailureReasonInvalidRevertDataInput, [][]byte{}, false, 0*time.Second, fmt.Errorf("invalid revert data input: feed param key %s, time param key %s, feeds %s", lookup.FeedParamKey, lookup.TimeParamKey, lookup.Feeds)
- pluginRetryKey := generatePluginRetryKey(checkResult.WorkID, lookup.Block)
-
- if lookup.IsMercuryV02() {
- state, reason, values, retryable, retryInterval, err = s.v02Client.DoRequest(ctx, lookup, pluginRetryKey)
- } else if lookup.IsMercuryV03() {
- state, reason, values, retryable, retryInterval, err = s.v03Client.DoRequest(ctx, lookup, pluginRetryKey)
- }
-
- if err != nil {
- s.lggr.Errorf("at block %d upkeep %s requested time %s retryable %v retryInterval %s doMercuryRequest: %s", lookup.Block, lookup.UpkeepId, lookup.Time, retryable, retryInterval, err.Error())
- checkResult.Retryable = retryable
- checkResult.RetryInterval = retryInterval
- checkResult.PipelineExecutionState = uint8(state)
- checkResult.IneligibilityReason = uint8(reason)
- return nil, err
- }
-
- for j, v := range values {
- s.lggr.Infof("at block %d upkeep %s requested time %s doMercuryRequest values[%d]: %s", lookup.Block, lookup.UpkeepId, lookup.Time, j, hexutil.Encode(v))
- }
- return values, nil
}
-// AllowedToUseMercury retrieves upkeep's administrative offchain config and decode a mercuryEnabled bool to indicate if
+// allowedToUseMercury retrieves upkeep's administrative offchain config and decode a mercuryEnabled bool to indicate if
// this upkeep is allowed to use Mercury service.
-func (s *streams) AllowedToUseMercury(opts *bind.CallOpts, upkeepId *big.Int) (state mercury.MercuryUpkeepState, reason mercury.MercuryUpkeepFailureReason, retryable bool, allow bool, err error) {
+func (s *streams) allowedToUseMercury(opts *bind.CallOpts, upkeepId *big.Int) (state mercury.MercuryUpkeepState, reason mercury.MercuryUpkeepFailureReason, retryable bool, allow bool, err error) {
allowed, ok := s.mercuryConfig.IsUpkeepAllowed(upkeepId.String())
if ok {
return mercury.NoPipelineError, mercury.MercuryUpkeepFailureReasonNone, false, allowed.(bool), nil
@@ -287,6 +255,7 @@ func (s *streams) AllowedToUseMercury(opts *bind.CallOpts, upkeepId *big.Int) (s
"data": hexutil.Bytes(payload),
}
+ // fetch the upkeep privilege config at the block which OCR3 has agreed upon
if err = s.client.CallContext(opts.Context, &resultBytes, "eth_call", args, hexutil.EncodeBig(opts.BlockNumber)); err != nil {
return mercury.RpcFlakyFailure, mercury.MercuryUpkeepFailureReasonNone, true, false, fmt.Errorf("failed to get upkeep privilege config: %v", err)
}
@@ -312,6 +281,26 @@ func (s *streams) AllowedToUseMercury(opts *bind.CallOpts, upkeepId *big.Int) (s
return mercury.NoPipelineError, mercury.MercuryUpkeepFailureReasonNone, false, privilegeConfig.MercuryEnabled, nil
}
+func (s *streams) checkCallback(ctx context.Context, values [][]byte, lookup *mercury.StreamsLookup) (mercury.MercuryUpkeepState, bool, hexutil.Bytes, error) {
+ payload, err := s.abi.Pack("checkCallback", lookup.UpkeepId, values, lookup.ExtraData)
+ if err != nil {
+ return mercury.PackUnpackDecodeFailed, false, nil, err
+ }
+
+ var b hexutil.Bytes
+ args := map[string]interface{}{
+ "to": s.registry.Address().Hex(),
+ "data": hexutil.Bytes(payload),
+ }
+
+ // call checkCallback function at the block which OCR3 has agreed upon
+ if err := s.client.CallContext(ctx, &b, "eth_call", args, hexutil.EncodeUint64(lookup.Block)); err != nil {
+ return mercury.RpcFlakyFailure, true, nil, err
+ }
+
+ return mercury.NoPipelineError, false, b, nil
+}
+
func (s *streams) buildCallOpts(ctx context.Context, block *big.Int) *bind.CallOpts {
opts := bind.CallOpts{
Context: ctx,
diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/mercury/streams/streams_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/mercury/streams/streams_test.go
index 2475244b4d0..abcc37dca18 100644
--- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/mercury/streams/streams_test.go
+++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/mercury/streams/streams_test.go
@@ -126,7 +126,6 @@ func TestStreams_CheckCallback(t *testing.T) {
tests := []struct {
name string
lookup *mercury.StreamsLookup
- input []ocr2keepers.CheckResult
values [][]byte
statusCode int
@@ -154,9 +153,6 @@ func TestStreams_CheckCallback(t *testing.T) {
UpkeepId: upkeepId,
Block: bn,
},
- input: []ocr2keepers.CheckResult{
- {},
- },
values: values,
statusCode: http.StatusOK,
callbackResp: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 48, 120, 48, 48, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
@@ -189,9 +185,6 @@ func TestStreams_CheckCallback(t *testing.T) {
UpkeepId: upkeepId,
Block: bn,
},
- input: []ocr2keepers.CheckResult{
- {},
- },
values: values,
statusCode: http.StatusOK,
callbackResp: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
@@ -223,9 +216,6 @@ func TestStreams_CheckCallback(t *testing.T) {
UpkeepId: upkeepId,
Block: bn,
},
- input: []ocr2keepers.CheckResult{
- {},
- },
values: values,
statusCode: http.StatusOK,
callbackResp: []byte{},
@@ -265,10 +255,10 @@ func TestStreams_CheckCallback(t *testing.T) {
}).Once()
s.client = client
- err = s.CheckCallback(testutils.Context(t), tt.values, tt.lookup, &tt.input[0])
- tt.wantErr(t, err, fmt.Sprintf("Error assertion failed: %v", tt.name))
- assert.Equal(t, uint8(tt.state), tt.input[0].PipelineExecutionState)
- assert.Equal(t, tt.retryable, tt.input[0].Retryable)
+ state, retryable, _, err := s.checkCallback(testutils.Context(t), tt.values, tt.lookup)
+ tt.wantErr(t, err, fmt.Sprintf("Error assertion failed: %v", tt.name))
+ assert.Equal(t, tt.state, state)
+ assert.Equal(t, tt.retryable, retryable)
})
}
}
@@ -444,7 +434,7 @@ func TestStreams_AllowedToUseMercury(t *testing.T) {
BlockNumber: big.NewInt(10),
}
- state, reason, retryable, allowed, err := s.AllowedToUseMercury(opts, upkeepId)
+ state, reason, retryable, allowed, err := s.allowedToUseMercury(opts, upkeepId)
assert.Equal(t, tt.err, err)
assert.Equal(t, tt.allowed, allowed)
assert.Equal(t, tt.state, state)
From bc8023bef6e8c56812833681e909595922ba888b Mon Sep 17 00:00:00 2001
From: Sneha Agnihotri
Date: Thu, 7 Dec 2023 08:54:12 -0800
Subject: [PATCH 31/40] Bump version for keepers hotfix
Signed-off-by: Sneha Agnihotri
---
VERSION | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/VERSION b/VERSION
index 860487ca19c..37c2961c243 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-2.7.1
+2.7.2
From 20cc4a426574e199249775797539a3b02b57ebef Mon Sep 17 00:00:00 2001
From: Patrick
Date: Thu, 7 Dec 2023 18:05:42 -0500
Subject: [PATCH 32/40] Cherry-pick P2P config fix (#11516)
* core/services/chainlink: skip P2P Peer Wrapper when unused (#11411)
(cherry picked from commit 7e7b84bb1b8a03d17c9b4dd49932854b0ea36c11)
* fix/p2p - adding changelog
* fix failing CI tests for 2.7.1
(cherry picked from commit 39ba467207bf2f5cf73f5a067616261bcba8499c)
* bump solana version in tests built
(cherry picked from commit 19e3b4ed835bb664cbb642af4c8355307153f93a)
---------
Co-authored-by: Jordan Krage
Co-authored-by: anirudhwarrier <12178754+anirudhwarrier@users.noreply.github.com>
Co-authored-by: Tate
---
.github/workflows/integration-tests.yml | 4 ++--
core/services/chainlink/application.go | 4 +++-
docs/CHANGELOG.md | 5 +++++
integration-tests/go.mod | 2 +-
integration-tests/go.sum | 4 ++--
5 files changed, 13 insertions(+), 6 deletions(-)
diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml
index aadb14f1284..b1a1e663569 100644
--- a/.github/workflows/integration-tests.yml
+++ b/.github/workflows/integration-tests.yml
@@ -747,7 +747,7 @@ jobs:
uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0
with:
repository: smartcontractkit/chainlink-solana
- ref: 23816fcf7d380a30c87b6d87e4fb0ca94419b259 # switch back to this after the next solana release${{ needs.get_solana_sha.outputs.sha }}
+ ref: a28100b7f2954604a8ca2ff9ec7bccc6ec952953 # switch back to this after the next solana release${{ needs.get_solana_sha.outputs.sha }}
- name: Build Test Image
if: needs.changes.outputs.src == 'true' && needs.solana-test-image-exists.outputs.exists == 'false'
uses: ./.github/actions/build-test-image
@@ -805,7 +805,7 @@ jobs:
uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0
with:
repository: smartcontractkit/chainlink-solana
- ref: ${{ needs.get_solana_sha.outputs.sha }}
+ ref: a28100b7f2954604a8ca2ff9ec7bccc6ec952953 # temporarily using specific commit for release branch ${{ needs.get_solana_sha.outputs.sha }}
- name: Run Setup
if: needs.changes.outputs.src == 'true'
uses: smartcontractkit/chainlink-github-actions/chainlink-testing-framework/setup-run-tests-environment@eccde1970eca69f079d3efb3409938a72ade8497 # v2.2.13
diff --git a/core/services/chainlink/application.go b/core/services/chainlink/application.go
index 354f0479042..9829036e13f 100644
--- a/core/services/chainlink/application.go
+++ b/core/services/chainlink/application.go
@@ -325,7 +325,9 @@ func NewApplication(opts ApplicationOpts) (Application, error) {
}
var peerWrapper *ocrcommon.SingletonPeerWrapper
- if cfg.P2P().Enabled() {
+ if !cfg.OCR().Enabled() && !cfg.OCR2().Enabled() {
+ globalLogger.Debug("P2P stack not needed")
+ } else if cfg.P2P().Enabled() {
if err := ocrcommon.ValidatePeerWrapperConfig(cfg.P2P()); err != nil {
return nil, err
}
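
To make the effect of this guard concrete, here is a hedged TOML sketch (section and field names assumed from the standard core node configuration) of a node that runs neither OCR nor OCR2: with the change above the P2P peer wrapper is skipped entirely, so `[P2P.V2]` no longer needs to be disabled explicitly just to pass config validation.

```toml
# Hypothetical minimal excerpt of a node config with no OCR usage.
[OCR]
Enabled = false

[OCR2]
Enabled = false

# Before this fix, omitting the block below could fail config validation;
# with the fix it can simply be left out.
# [P2P.V2]
# Enabled = false
```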
diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md
index 50c8a389932..2a293253132 100644
--- a/docs/CHANGELOG.md
+++ b/docs/CHANGELOG.md
@@ -12,6 +12,11 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
...
+## 2.7.2 - 2023-12-7
+
+### Fixed
+
+- Fixed a bug that caused nodes without OCR or OCR2 enabled to fail config validation if `P2P.V2` was not explicitly disabled. With this fix, NOPs will not have to make changes to their config.
## 2.7.1 - 2023-11-21
diff --git a/integration-tests/go.mod b/integration-tests/go.mod
index 7dd2d017785..b90ba784a38 100644
--- a/integration-tests/go.mod
+++ b/integration-tests/go.mod
@@ -21,7 +21,7 @@ require (
github.com/segmentio/ksuid v1.0.4
github.com/slack-go/slack v0.12.2
github.com/smartcontractkit/chainlink-env v0.38.3
- github.com/smartcontractkit/chainlink-testing-framework v1.17.12-0.20231018101901-23824db88d36
+ github.com/smartcontractkit/chainlink-testing-framework v1.17.13
github.com/smartcontractkit/chainlink/v2 v2.0.0-00010101000000-000000000000
github.com/smartcontractkit/libocr v0.0.0-20231020123319-d255366a6545
github.com/smartcontractkit/ocr2keepers v0.7.27
diff --git a/integration-tests/go.sum b/integration-tests/go.sum
index 3be74077277..8c3dc2c65c2 100644
--- a/integration-tests/go.sum
+++ b/integration-tests/go.sum
@@ -2370,8 +2370,8 @@ github.com/smartcontractkit/chainlink-solana v1.0.3-0.20231023133638-72f4e799ab0
github.com/smartcontractkit/chainlink-solana v1.0.3-0.20231023133638-72f4e799ab05/go.mod h1:o0Pn1pbaUluboaK6/yhf8xf7TiFCkyFl6WUOdwqamuU=
github.com/smartcontractkit/chainlink-starknet/relayer v0.0.1-beta-test.0.20231024133459-1ef3a11319eb h1:HiluOfEVGOQTM6BTDImOqYdMZZ7qq7fkZ3TJdmItNr8=
github.com/smartcontractkit/chainlink-starknet/relayer v0.0.1-beta-test.0.20231024133459-1ef3a11319eb/go.mod h1:/30flFG4L/iCYAFeA3DUzR0xuHSxAMONiWTzyzvsNwo=
-github.com/smartcontractkit/chainlink-testing-framework v1.17.12-0.20231018101901-23824db88d36 h1:ow84QG8vEHMvfjGg0RF8HNYh80WcHci6PIenXyY6K8Y=
-github.com/smartcontractkit/chainlink-testing-framework v1.17.12-0.20231018101901-23824db88d36/go.mod h1:RWlmjwnjIGbQAnRfKwe02Ife82nNI3rZmdI0zgkfbyk=
+github.com/smartcontractkit/chainlink-testing-framework v1.17.13 h1:C8E+P3/ElUxmsIHE0fOd1hDoSc3AbbqcePmN9sNKYdM=
+github.com/smartcontractkit/chainlink-testing-framework v1.17.13/go.mod h1:RWlmjwnjIGbQAnRfKwe02Ife82nNI3rZmdI0zgkfbyk=
github.com/smartcontractkit/go-plugin v0.0.0-20231003134350-e49dad63b306 h1:ko88+ZznniNJZbZPWAvHQU8SwKAdHngdDZ+pvVgB5ss=
github.com/smartcontractkit/go-plugin v0.0.0-20231003134350-e49dad63b306/go.mod h1:w1sAEES3g3PuV/RzUrgow20W2uErMly84hhD3um1WL4=
github.com/smartcontractkit/grpc-proxy v0.0.0-20230731113816-f1be6620749f h1:hgJif132UCdjo8u43i7iPN1/MFnu49hv7lFGFftCHKU=
From 8cab430f34d32455577edea4def42521aad32a8d Mon Sep 17 00:00:00 2001
From: Sneha Agnihotri
Date: Thu, 14 Dec 2023 13:18:47 -0800
Subject: [PATCH 33/40] Finalize date on changelog for 2.7.2
---
docs/CHANGELOG.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md
index 2a293253132..6d25f55ba44 100644
--- a/docs/CHANGELOG.md
+++ b/docs/CHANGELOG.md
@@ -12,7 +12,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
...
-## 2.7.2 - 2023-12-7
+## 2.7.2 - 2023-12-14
### Fixed
From dde004ea5184d8789e9c099f42a6f49dd72c4a00 Mon Sep 17 00:00:00 2001
From: Jordan Krage
Date: Thu, 21 Dec 2023 14:32:31 -0600
Subject: [PATCH 34/40] core/services/relay/evm: start RequestRoundTracker;
report full health (#11643)
* core/services/relay/evm: start RequestRoundTracker; report full health
* Tests round requests and implicit changes separately
* Add test to CI
* Fixes other OCR2 checks
---------
Co-authored-by: Adam Hamrick
(cherry picked from commit 7236361119339369127a488a7c238745e4c44099)
---
core/services/relay/evm/evm.go | 25 +++---
core/services/relay/evm/median.go | 23 ++++-
integration-tests/actions/ocr2_helpers.go | 27 +++++-
.../smoke/forwarders_ocr2_test.go | 4 +-
integration-tests/smoke/ocr2_test.go | 88 +++++++++++++++++--
5 files changed, 139 insertions(+), 28 deletions(-)
diff --git a/core/services/relay/evm/evm.go b/core/services/relay/evm/evm.go
index 088a69a2582..b3e22ec5dbc 100644
--- a/core/services/relay/evm/evm.go
+++ b/core/services/relay/evm/evm.go
@@ -12,7 +12,6 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/jmoiron/sqlx"
pkgerrors "github.com/pkg/errors"
- "go.uber.org/multierr"
"github.com/smartcontractkit/libocr/gethwrappers2/ocr2aggregator"
"github.com/smartcontractkit/libocr/offchainreporting2/reportingplugin/median"
@@ -519,6 +518,7 @@ func (r *Relayer) NewMedianProvider(rargs commontypes.RelayArgs, pargs commontyp
return nil, err
}
return &medianProvider{
+ lggr: lggr.Named("MedianProvider"),
configWatcher: configWatcher,
reportCodec: reportCodec,
contractTransmitter: contractTransmitter,
@@ -529,6 +529,7 @@ func (r *Relayer) NewMedianProvider(rargs commontypes.RelayArgs, pargs commontyp
var _ commontypes.MedianProvider = (*medianProvider)(nil)
type medianProvider struct {
+ lggr logger.Logger
configWatcher *configWatcher
contractTransmitter ContractTransmitter
reportCodec median.ReportCodec
@@ -537,26 +538,22 @@ type medianProvider struct {
ms services.MultiStart
}
-func (p *medianProvider) Name() string {
- return "EVM.MedianProvider"
-}
+func (p *medianProvider) Name() string { return p.lggr.Name() }
func (p *medianProvider) Start(ctx context.Context) error {
- return p.ms.Start(ctx, p.configWatcher, p.contractTransmitter)
+ return p.ms.Start(ctx, p.configWatcher, p.contractTransmitter, p.medianContract)
}
-func (p *medianProvider) Close() error {
- return p.ms.Close()
-}
+func (p *medianProvider) Close() error { return p.ms.Close() }
-func (p *medianProvider) Ready() error {
- return multierr.Combine(p.configWatcher.Ready(), p.contractTransmitter.Ready())
-}
+func (p *medianProvider) Ready() error { return nil }
func (p *medianProvider) HealthReport() map[string]error {
- report := p.configWatcher.HealthReport()
- services.CopyHealth(report, p.contractTransmitter.HealthReport())
- return report
+ hp := map[string]error{p.Name(): p.Ready()}
+ services.CopyHealth(hp, p.configWatcher.HealthReport())
+ services.CopyHealth(hp, p.contractTransmitter.HealthReport())
+ services.CopyHealth(hp, p.medianContract.HealthReport())
+ return hp
}
func (p *medianProvider) ContractTransmitter() ocrtypes.ContractTransmitter {
diff --git a/core/services/relay/evm/median.go b/core/services/relay/evm/median.go
index db521a97208..3f86d6f8233 100644
--- a/core/services/relay/evm/median.go
+++ b/core/services/relay/evm/median.go
@@ -14,6 +14,8 @@ import (
"github.com/smartcontractkit/libocr/offchainreporting2plus/types"
ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
+ "github.com/smartcontractkit/chainlink-common/pkg/services"
+
"github.com/smartcontractkit/chainlink/v2/core/chains/legacyevm"
offchain_aggregator_wrapper "github.com/smartcontractkit/chainlink/v2/core/internal/gethwrappers2/generated/offchainaggregator"
"github.com/smartcontractkit/chainlink/v2/core/logger"
@@ -22,12 +24,15 @@ import (
var _ median.MedianContract = &medianContract{}
type medianContract struct {
+ services.StateMachine
+ lggr logger.Logger
configTracker types.ContractConfigTracker
contractCaller *ocr2aggregator.OCR2AggregatorCaller
requestRoundTracker *RequestRoundTracker
}
func newMedianContract(configTracker types.ContractConfigTracker, contractAddress common.Address, chain legacyevm.Chain, specID int32, db *sqlx.DB, lggr logger.Logger) (*medianContract, error) {
+ lggr = lggr.Named("MedianContract")
contract, err := offchain_aggregator_wrapper.NewOffchainAggregator(contractAddress, chain.Client())
if err != nil {
return nil, errors.Wrap(err, "could not instantiate NewOffchainAggregator")
@@ -44,6 +49,7 @@ func newMedianContract(configTracker types.ContractConfigTracker, contractAddres
}
return &medianContract{
+ lggr: lggr,
configTracker: configTracker,
contractCaller: contractCaller,
requestRoundTracker: NewRequestRoundTracker(
@@ -60,13 +66,22 @@ func newMedianContract(configTracker types.ContractConfigTracker, contractAddres
),
}, nil
}
-
-func (oc *medianContract) Start() error {
- return oc.requestRoundTracker.Start()
+func (oc *medianContract) Start(context.Context) error {
+ return oc.StartOnce("MedianContract", func() error {
+ return oc.requestRoundTracker.Start()
+ })
}
func (oc *medianContract) Close() error {
- return oc.requestRoundTracker.Close()
+ return oc.StopOnce("MedianContract", func() error {
+ return oc.requestRoundTracker.Close()
+ })
+}
+
+func (oc *medianContract) Name() string { return oc.lggr.Name() }
+
+func (oc *medianContract) HealthReport() map[string]error {
+ return map[string]error{oc.Name(): oc.Ready()}
}
func (oc *medianContract) LatestTransmissionDetails(ctx context.Context) (ocrtypes.ConfigDigest, uint32, uint8, *big.Int, time.Time, error) {
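
The median.go changes in this patch follow the common service pattern used throughout the relayer: embed services.StateMachine, wrap startup and shutdown in StartOnce/StopOnce, and expose Name/HealthReport so the owning provider can merge the component's health with services.CopyHealth (as medianProvider.HealthReport does above). The condensed sketch below shows that pattern with a placeholder inner component; it is illustrative, not the production type.

```go
package sketch

import (
	"context"

	"github.com/smartcontractkit/chainlink-common/pkg/services"

	"github.com/smartcontractkit/chainlink/v2/core/logger"
)

// wrappedTracker stands in for the RequestRoundTracker owned by medianContract.
type wrappedTracker interface {
	Start() error
	Close() error
}

type healthReportingContract struct {
	services.StateMachine
	lggr    logger.Logger
	tracker wrappedTracker
}

// Start is idempotent: StartOnce transitions the state machine and rejects double-starts.
func (c *healthReportingContract) Start(context.Context) error {
	return c.StartOnce("HealthReportingContract", func() error {
		return c.tracker.Start()
	})
}

// Close mirrors Start via StopOnce.
func (c *healthReportingContract) Close() error {
	return c.StopOnce("HealthReportingContract", func() error {
		return c.tracker.Close()
	})
}

// Name and HealthReport let an owning provider fold this component's readiness
// into its own report with services.CopyHealth.
func (c *healthReportingContract) Name() string { return c.lggr.Name() }

func (c *healthReportingContract) HealthReport() map[string]error {
	return map[string]error{c.Name(): c.Ready()}
}
```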
diff --git a/integration-tests/actions/ocr2_helpers.go b/integration-tests/actions/ocr2_helpers.go
index 02ce73e813e..7b0700c3452 100644
--- a/integration-tests/actions/ocr2_helpers.go
+++ b/integration-tests/actions/ocr2_helpers.go
@@ -21,12 +21,13 @@ import (
"github.com/smartcontractkit/chainlink-testing-framework/blockchain"
ctfClient "github.com/smartcontractkit/chainlink-testing-framework/client"
- "github.com/smartcontractkit/chainlink/v2/core/services/job"
- "github.com/smartcontractkit/chainlink/v2/core/services/keystore/chaintype"
- "github.com/smartcontractkit/chainlink/v2/core/store/models"
"github.com/smartcontractkit/chainlink/integration-tests/client"
"github.com/smartcontractkit/chainlink/integration-tests/contracts"
+
+ "github.com/smartcontractkit/chainlink/v2/core/services/job"
+ "github.com/smartcontractkit/chainlink/v2/core/services/keystore/chaintype"
+ "github.com/smartcontractkit/chainlink/v2/core/store/models"
)
// DeployOCRv2Contracts deploys a number of OCRv2 contracts and configures them with defaults
@@ -379,3 +380,23 @@ func StartNewOCR2Round(
}
return nil
}
+
+// WatchNewOCR2Round is the same as StartNewOCR2Round but does NOT explicitly request a new round
+// as that can cause odd behavior in tandem with changing adapter values in OCR2
+func WatchNewOCR2Round(
+ roundNumber int64,
+ ocrInstances []contracts.OffchainAggregatorV2,
+ client blockchain.EVMClient,
+ timeout time.Duration,
+ logger zerolog.Logger,
+) error {
+ for i := 0; i < len(ocrInstances); i++ {
+ ocrRound := contracts.NewOffchainAggregatorV2RoundConfirmer(ocrInstances[i], big.NewInt(roundNumber), timeout, logger)
+ client.AddHeaderEventSubscription(ocrInstances[i].Address(), ocrRound)
+ err := client.WaitForEvents()
+ if err != nil {
+ return fmt.Errorf("failed to wait for event subscriptions of OCR instance %d: %w", i+1, err)
+ }
+ }
+ return nil
+}
diff --git a/integration-tests/smoke/forwarders_ocr2_test.go b/integration-tests/smoke/forwarders_ocr2_test.go
index c9fe3cb11d9..7b775cf437f 100644
--- a/integration-tests/smoke/forwarders_ocr2_test.go
+++ b/integration-tests/smoke/forwarders_ocr2_test.go
@@ -89,7 +89,7 @@ func TestForwarderOCR2Basic(t *testing.T) {
err = actions.ConfigureOCRv2AggregatorContracts(env.EVMClient, ocrv2Config, ocrInstances)
require.NoError(t, err, "Error configuring OCRv2 aggregator contracts")
- err = actions.StartNewOCR2Round(1, ocrInstances, env.EVMClient, time.Minute*10, l)
+ err = actions.WatchNewOCR2Round(1, ocrInstances, env.EVMClient, time.Minute*10, l)
require.NoError(t, err)
answer, err := ocrInstances[0].GetLatestAnswer(testcontext.Get(t))
@@ -100,7 +100,7 @@ func TestForwarderOCR2Basic(t *testing.T) {
ocrRoundVal := (5 + i) % 10
err = env.MockAdapter.SetAdapterBasedIntValuePath("ocr2", []string{http.MethodGet, http.MethodPost}, ocrRoundVal)
require.NoError(t, err)
- err = actions.StartNewOCR2Round(int64(i), ocrInstances, env.EVMClient, time.Minute*10, l)
+ err = actions.WatchNewOCR2Round(int64(i), ocrInstances, env.EVMClient, time.Minute*10, l)
require.NoError(t, err)
answer, err = ocrInstances[0].GetLatestAnswer(testcontext.Get(t))
diff --git a/integration-tests/smoke/ocr2_test.go b/integration-tests/smoke/ocr2_test.go
index 0676ed03004..9b30ab497b0 100644
--- a/integration-tests/smoke/ocr2_test.go
+++ b/integration-tests/smoke/ocr2_test.go
@@ -11,6 +11,7 @@ import (
"github.com/smartcontractkit/chainlink-testing-framework/logging"
"github.com/smartcontractkit/chainlink-testing-framework/utils/testcontext"
+
"github.com/smartcontractkit/chainlink/integration-tests/actions"
"github.com/smartcontractkit/chainlink/integration-tests/contracts"
"github.com/smartcontractkit/chainlink/integration-tests/docker/test_env"
@@ -70,7 +71,7 @@ func TestOCRv2Basic(t *testing.T) {
err = actions.ConfigureOCRv2AggregatorContracts(env.EVMClient, ocrv2Config, aggregatorContracts)
require.NoError(t, err, "Error configuring OCRv2 aggregator contracts")
- err = actions.StartNewOCR2Round(1, aggregatorContracts, env.EVMClient, time.Minute*5, l)
+ err = actions.WatchNewOCR2Round(1, aggregatorContracts, env.EVMClient, time.Minute*5, l)
require.NoError(t, err, "Error starting new OCR2 round")
roundData, err := aggregatorContracts[0].GetRound(testcontext.Get(t), big.NewInt(1))
require.NoError(t, err, "Getting latest answer from OCR contract shouldn't fail")
@@ -81,7 +82,7 @@ func TestOCRv2Basic(t *testing.T) {
err = env.MockAdapter.SetAdapterBasedIntValuePath("ocr2", []string{http.MethodGet, http.MethodPost}, 10)
require.NoError(t, err)
- err = actions.StartNewOCR2Round(2, aggregatorContracts, env.EVMClient, time.Minute*5, l)
+ err = actions.WatchNewOCR2Round(2, aggregatorContracts, env.EVMClient, time.Minute*5, l)
require.NoError(t, err)
roundData, err = aggregatorContracts[0].GetRound(testcontext.Get(t), big.NewInt(2))
@@ -92,6 +93,83 @@ func TestOCRv2Basic(t *testing.T) {
)
}
+// Tests that just calling requestNewRound() will properly induce more rounds
+func TestOCRv2Request(t *testing.T) {
+ t.Parallel()
+ l := logging.GetTestLogger(t)
+
+ env, err := test_env.NewCLTestEnvBuilder().
+ WithTestLogger(t).
+ WithGeth().
+ WithMockAdapter().
+ WithCLNodeConfig(node.NewConfig(node.NewBaseConfig(),
+ node.WithOCR2(),
+ node.WithP2Pv2(),
+ node.WithTracing(),
+ )).
+ WithCLNodes(6).
+ WithFunding(big.NewFloat(.1)).
+ WithStandardCleanup().
+ Build()
+ require.NoError(t, err)
+
+ env.ParallelTransactions(true)
+
+ nodeClients := env.ClCluster.NodeAPIs()
+ bootstrapNode, workerNodes := nodeClients[0], nodeClients[1:]
+
+ linkToken, err := env.ContractDeployer.DeployLinkTokenContract()
+ require.NoError(t, err, "Deploying Link Token Contract shouldn't fail")
+
+ err = actions.FundChainlinkNodesLocal(workerNodes, env.EVMClient, big.NewFloat(.05))
+ require.NoError(t, err, "Error funding Chainlink nodes")
+
+ // Gather transmitters
+ var transmitters []string
+ for _, node := range workerNodes {
+ addr, err := node.PrimaryEthAddress()
+ if err != nil {
+ require.NoError(t, fmt.Errorf("error getting node's primary ETH address: %w", err))
+ }
+ transmitters = append(transmitters, addr)
+ }
+
+ ocrOffchainOptions := contracts.DefaultOffChainAggregatorOptions()
+ aggregatorContracts, err := actions.DeployOCRv2Contracts(1, linkToken, env.ContractDeployer, transmitters, env.EVMClient, ocrOffchainOptions)
+ require.NoError(t, err, "Error deploying OCRv2 aggregator contracts")
+
+ err = actions.CreateOCRv2JobsLocal(aggregatorContracts, bootstrapNode, workerNodes, env.MockAdapter, "ocr2", 5, env.EVMClient.GetChainID().Uint64(), false)
+ require.NoError(t, err, "Error creating OCRv2 jobs")
+
+ ocrv2Config, err := actions.BuildMedianOCR2ConfigLocal(workerNodes, ocrOffchainOptions)
+ require.NoError(t, err, "Error building OCRv2 config")
+
+ err = actions.ConfigureOCRv2AggregatorContracts(env.EVMClient, ocrv2Config, aggregatorContracts)
+ require.NoError(t, err, "Error configuring OCRv2 aggregator contracts")
+
+ err = actions.WatchNewOCR2Round(1, aggregatorContracts, env.EVMClient, time.Minute*5, l)
+ require.NoError(t, err, "Error starting new OCR2 round")
+ roundData, err := aggregatorContracts[0].GetRound(testcontext.Get(t), big.NewInt(1))
+ require.NoError(t, err, "Getting latest answer from OCR contract shouldn't fail")
+ require.Equal(t, int64(5), roundData.Answer.Int64(),
+ "Expected latest answer from OCR contract to be 5 but got %d",
+ roundData.Answer.Int64(),
+ )
+
+ // Keep the mockserver value the same and continually request new rounds
+ for round := 2; round <= 4; round++ {
+ err = actions.StartNewOCR2Round(int64(round), aggregatorContracts, env.EVMClient, time.Minute*5, l)
+ require.NoError(t, err, "Error starting new OCR2 round")
+ roundData, err := aggregatorContracts[0].GetRound(testcontext.Get(t), big.NewInt(int64(round)))
+ require.NoError(t, err, "Getting latest answer from OCR contract shouldn't fail")
+ require.Equal(t, int64(5), roundData.Answer.Int64(),
+ "Expected round %d answer from OCR contract to be 5 but got %d",
+ round,
+ roundData.Answer.Int64(),
+ )
+ }
+}
+
func TestOCRv2JobReplacement(t *testing.T) {
l := logging.GetTestLogger(t)
@@ -144,7 +222,7 @@ func TestOCRv2JobReplacement(t *testing.T) {
err = actions.ConfigureOCRv2AggregatorContracts(env.EVMClient, ocrv2Config, aggregatorContracts)
require.NoError(t, err, "Error configuring OCRv2 aggregator contracts")
- err = actions.StartNewOCR2Round(1, aggregatorContracts, env.EVMClient, time.Minute*5, l)
+ err = actions.WatchNewOCR2Round(1, aggregatorContracts, env.EVMClient, time.Minute*5, l)
require.NoError(t, err, "Error starting new OCR2 round")
roundData, err := aggregatorContracts[0].GetRound(testcontext.Get(t), big.NewInt(1))
require.NoError(t, err, "Getting latest answer from OCR contract shouldn't fail")
@@ -155,7 +233,7 @@ func TestOCRv2JobReplacement(t *testing.T) {
err = env.MockAdapter.SetAdapterBasedIntValuePath("ocr2", []string{http.MethodGet, http.MethodPost}, 10)
require.NoError(t, err)
- err = actions.StartNewOCR2Round(2, aggregatorContracts, env.EVMClient, time.Minute*5, l)
+ err = actions.WatchNewOCR2Round(2, aggregatorContracts, env.EVMClient, time.Minute*5, l)
require.NoError(t, err)
roundData, err = aggregatorContracts[0].GetRound(testcontext.Get(t), big.NewInt(2))
@@ -174,7 +252,7 @@ func TestOCRv2JobReplacement(t *testing.T) {
err = actions.CreateOCRv2JobsLocal(aggregatorContracts, bootstrapNode, workerNodes, env.MockAdapter, "ocr2", 15, env.EVMClient.GetChainID().Uint64(), false)
require.NoError(t, err, "Error creating OCRv2 jobs")
- err = actions.StartNewOCR2Round(3, aggregatorContracts, env.EVMClient, time.Minute*3, l)
+ err = actions.WatchNewOCR2Round(3, aggregatorContracts, env.EVMClient, time.Minute*3, l)
require.NoError(t, err, "Error starting new OCR2 round")
roundData, err = aggregatorContracts[0].GetRound(testcontext.Get(t), big.NewInt(3))
require.NoError(t, err, "Getting latest answer from OCR contract shouldn't fail")
From ca2ec35dbd70780fa4292ce433ded39834da549e Mon Sep 17 00:00:00 2001
From: Dmytro Haidashenko
Date: Fri, 19 Jan 2024 12:59:59 +0100
Subject: [PATCH 35/40] Document metrics renaming
---
docs/CHANGELOG.md | 13 +++++++++++++
1 file changed, 13 insertions(+)
diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md
index 62236558c4c..5b34d36ec9b 100644
--- a/docs/CHANGELOG.md
+++ b/docs/CHANGELOG.md
@@ -64,6 +64,19 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- `PromReporter` no longer directly reads txm related status from the db, and instead uses the txStore API.
- `L2Suggested` mode is now called `SuggestedPrice`
- Console logs will now escape (non-whitespace) control characters
+- The following EVM Pool metrics were renamed:
+ - `evm_pool_rpc_node_states` → `multi_node_states`
+ - `evm_pool_rpc_node_num_transitions_to_alive` → `pool_rpc_node_num_transitions_to_alive`
+ - `evm_pool_rpc_node_num_transitions_to_in_sync` → `pool_rpc_node_num_transitions_to_in_sync`
+ - `evm_pool_rpc_node_num_transitions_to_out_of_sync` → `pool_rpc_node_num_transitions_to_out_of_sync`
+ - `evm_pool_rpc_node_num_transitions_to_unreachable` → `pool_rpc_node_num_transitions_to_unreachable`
+ - `evm_pool_rpc_node_num_transitions_to_invalid_chain_id` → `pool_rpc_node_num_transitions_to_invalid_chain_id`
+ - `evm_pool_rpc_node_num_transitions_to_unusable` → `pool_rpc_node_num_transitions_to_unusable`
+ - `evm_pool_rpc_node_highest_seen_block` → `pool_rpc_node_highest_seen_block`
+ - `evm_pool_rpc_node_num_seen_blocks` → `pool_rpc_node_num_seen_blocks`
+ - `evm_pool_rpc_node_polls_total` → `pool_rpc_node_polls_total`
+ - `evm_pool_rpc_node_polls_failed` → `pool_rpc_node_polls_failed`
+ - `evm_pool_rpc_node_polls_success` → `pool_rpc_node_polls_success`
### Removed
From 0d4f23243d0235e4430410d0e35d0b4f96f1e03f Mon Sep 17 00:00:00 2001
From: Dylan Tinianov
Date: Mon, 22 Jan 2024 11:36:59 -0500
Subject: [PATCH 36/40] Hotfix attempt for Canary : v2.8.0-rc0 upgrade fails on
Polygon. (#11828) (#11839)
* Remove start lock + add logging
* Add loggs
* Update tracker.go
* Update tracker.go
* Update common/txmgr/txmgr.go
Co-authored-by: Jordan Krage
* Update logging
---------
Co-authored-by: Jordan Krage
(cherry picked from commit 6133df8a2a8b527155a8a822d2924d5ca4bfd122)
---
common/txmgr/tracker.go | 18 +++++++++++++-----
common/txmgr/txmgr.go | 2 ++
2 files changed, 15 insertions(+), 5 deletions(-)
diff --git a/common/txmgr/tracker.go b/common/txmgr/tracker.go
index 1a24dd5b5fe..f143f639aa8 100644
--- a/common/txmgr/tracker.go
+++ b/common/txmgr/tracker.go
@@ -92,19 +92,22 @@ func NewTracker[
}
func (tr *Tracker[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) Start(_ context.Context) (err error) {
- tr.lock.Lock()
- defer tr.lock.Unlock()
+ tr.lggr.Info("Abandoned transaction tracking enabled")
return tr.StartOnce("Tracker", func() error {
return tr.startInternal()
})
}
func (tr *Tracker[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) startInternal() (err error) {
+ tr.lock.Lock()
+ defer tr.lock.Unlock()
+
tr.ctx, tr.ctxCancel = context.WithCancel(context.Background())
if err := tr.setEnabledAddresses(); err != nil {
return fmt.Errorf("failed to set enabled addresses: %w", err)
}
+ tr.lggr.Info("Enabled addresses set")
if err := tr.trackAbandonedTxes(tr.ctx); err != nil {
return fmt.Errorf("failed to track abandoned txes: %w", err)
@@ -112,9 +115,11 @@ func (tr *Tracker[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) startIntern
tr.isStarted = true
if len(tr.txCache) == 0 {
- tr.lggr.Infow("no abandoned txes found, skipping runLoop")
+ tr.lggr.Info("no abandoned txes found, skipping runLoop")
return nil
}
+
+ tr.lggr.Infof("%d abandoned txes found, starting runLoop", len(tr.txCache))
tr.wg.Add(1)
go tr.runLoop()
return nil
@@ -129,7 +134,7 @@ func (tr *Tracker[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) Close() err
}
func (tr *Tracker[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) closeInternal() error {
- tr.lggr.Infow("stopping tracker")
+ tr.lggr.Info("stopping tracker")
if !tr.isStarted {
return fmt.Errorf("tracker not started")
}
@@ -159,7 +164,7 @@ func (tr *Tracker[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) runLoop() {
}
}
case <-ttlExceeded.C:
- tr.lggr.Infow("ttl exceeded")
+ tr.lggr.Info("ttl exceeded")
tr.MarkAllTxesFatal(tr.ctx)
return
case <-tr.ctx.Done():
@@ -211,6 +216,7 @@ func (tr *Tracker[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) trackAbando
return fmt.Errorf("tracker already started")
}
+ tr.lggr.Info("Retrieving non fatal transactions from txStore")
nonFatalTxes, err := tr.txStore.GetNonFatalTransactions(ctx, tr.chainID)
if err != nil {
return fmt.Errorf("failed to get non fatal txes from txStore: %w", err)
@@ -239,6 +245,8 @@ func (tr *Tracker[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) HandleTxesB
}
func (tr *Tracker[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) handleTxesByState(ctx context.Context, blockHeight int64) error {
+ tr.lggr.Info("Handling transactions by state")
+
for id, atx := range tr.txCache {
tx, err := tr.txStore.GetTxByID(ctx, atx.id)
if err != nil {
diff --git a/common/txmgr/txmgr.go b/common/txmgr/txmgr.go
index 228ab4ec8bf..fb8e5fbd401 100644
--- a/common/txmgr/txmgr.go
+++ b/common/txmgr/txmgr.go
@@ -189,10 +189,12 @@ func (b *Txm[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) Start(ctx
return fmt.Errorf("Txm: Estimator failed to start: %w", err)
}
+ b.logger.Info("Txm starting tracker")
if err := ms.Start(ctx, b.tracker); err != nil {
return fmt.Errorf("Txm: Tracker failed to start: %w", err)
}
+ b.logger.Info("Txm starting runLoop")
b.wg.Add(1)
go b.runLoop()
<-b.chSubbed
From 238ca388acccce41b0f6027cc3b879dd6fb1d76a Mon Sep 17 00:00:00 2001
From: Dylan Tinianov
Date: Tue, 23 Jan 2024 12:49:37 -0500
Subject: [PATCH 37/40] Temporarily disable tracker (#11857)
* Disable tracker
* Remove all tracker calls
(cherry picked from commit 5057899e96a1b914ca4b785eacf898827ab742fe)
---
common/txmgr/resender.go | 3 ++-
common/txmgr/txmgr.go | 9 ++++++++-
core/chains/evm/txmgr/tracker_test.go | 4 ++++
3 files changed, 14 insertions(+), 2 deletions(-)
diff --git a/common/txmgr/resender.go b/common/txmgr/resender.go
index 06c466e1730..d93f20095f1 100644
--- a/common/txmgr/resender.go
+++ b/common/txmgr/resender.go
@@ -140,7 +140,8 @@ func (er *Resender[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) resendUnco
return fmt.Errorf("Resender failed getting enabled keys for chain %s: %w", er.chainID.String(), err)
}
- resendAddresses = append(resendAddresses, er.tracker.GetAbandonedAddresses()...)
+ // Tracker currently disabled for BCI-2638; refactor required
+ // resendAddresses = append(resendAddresses, er.tracker.GetAbandonedAddresses()...)
ageThreshold := er.txConfig.ResendAfterThreshold()
maxInFlightTransactions := er.txConfig.MaxInFlight()
diff --git a/common/txmgr/txmgr.go b/common/txmgr/txmgr.go
index fb8e5fbd401..de96ca0ff05 100644
--- a/common/txmgr/txmgr.go
+++ b/common/txmgr/txmgr.go
@@ -189,10 +189,12 @@ func (b *Txm[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) Start(ctx
return fmt.Errorf("Txm: Estimator failed to start: %w", err)
}
+ /* Tracker currently disabled for BCI-2638; refactor required
b.logger.Info("Txm starting tracker")
if err := ms.Start(ctx, b.tracker); err != nil {
return fmt.Errorf("Txm: Tracker failed to start: %w", err)
}
+ */
b.logger.Info("Txm starting runLoop")
b.wg.Add(1)
@@ -272,9 +274,11 @@ func (b *Txm[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) Close() (m
merr = errors.Join(merr, fmt.Errorf("Txm: failed to close TxAttemptBuilder: %w", err))
}
+ /* Tracker currently disabled for BCI-2638; refactor required
if err := b.tracker.Close(); err != nil {
merr = errors.Join(merr, fmt.Errorf("Txm: failed to close Tracker: %w", err))
}
+ */
return nil
})
@@ -389,7 +393,8 @@ func (b *Txm[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) runLoop()
b.broadcaster.Trigger(address)
case head := <-b.chHeads:
b.confirmer.mb.Deliver(head)
- b.tracker.mb.Deliver(head.BlockNumber())
+ // Tracker currently disabled for BCI-2638; refactor required
+ // b.tracker.mb.Deliver(head.BlockNumber())
case reset := <-b.reset:
// This check prevents the weird edge-case where you can select
// into this block after chStop has already been closed and the
@@ -417,10 +422,12 @@ func (b *Txm[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) runLoop()
if err != nil && (!errors.Is(err, services.ErrAlreadyStopped) || !errors.Is(err, services.ErrCannotStopUnstarted)) {
b.logger.Errorw(fmt.Sprintf("Failed to Close Confirmer: %v", err), "err", err)
}
+ /* Tracker currently disabled for BCI-2638; refactor required
err = b.tracker.Close()
if err != nil && (!errors.Is(err, services.ErrAlreadyStopped) || !errors.Is(err, services.ErrCannotStopUnstarted)) {
b.logger.Errorw(fmt.Sprintf("Failed to Close Tracker: %v", err), "err", err)
}
+ */
return
case <-keysChanged:
// This check prevents the weird edge-case where you can select
diff --git a/core/chains/evm/txmgr/tracker_test.go b/core/chains/evm/txmgr/tracker_test.go
index a31187f04e8..af41aaeffa5 100644
--- a/core/chains/evm/txmgr/tracker_test.go
+++ b/core/chains/evm/txmgr/tracker_test.go
@@ -49,6 +49,7 @@ func containsID(txes []*txmgr.Tx, id int64) bool {
}
func TestEvmTracker_Initialization(t *testing.T) {
+ t.Skip("BCI-2638 tracker disabled")
t.Parallel()
tracker, _, _, _ := newTestEvmTrackerSetup(t)
@@ -65,6 +66,7 @@ func TestEvmTracker_Initialization(t *testing.T) {
}
func TestEvmTracker_AddressTracking(t *testing.T) {
+ t.Skip("BCI-2638 tracker disabled")
t.Parallel()
t.Run("track abandoned addresses", func(t *testing.T) {
@@ -94,6 +96,7 @@ func TestEvmTracker_AddressTracking(t *testing.T) {
})
t.Run("stop tracking finalized tx", func(t *testing.T) {
+ t.Skip("BCI-2638 tracker disabled")
tracker, txStore, _, _ := newTestEvmTrackerSetup(t)
confirmedAddr := cltest.MustGenerateRandomKey(t).Address
_ = mustInsertConfirmedEthTxWithReceipt(t, txStore, confirmedAddr, 123, 1)
@@ -118,6 +121,7 @@ func TestEvmTracker_AddressTracking(t *testing.T) {
}
func TestEvmTracker_ExceedingTTL(t *testing.T) {
+ t.Skip("BCI-2638 tracker disabled")
t.Parallel()
t.Run("confirmed but unfinalized transaction still tracked", func(t *testing.T) {
From 773a2393d45148f19f13ce44efdd75abe26e4f06 Mon Sep 17 00:00:00 2001
From: Dylan Tinianov
Date: Tue, 23 Jan 2024 13:41:54 -0500
Subject: [PATCH 38/40] Update CHANGELOG.md
---
docs/CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md
index 5b34d36ec9b..57702efce31 100644
--- a/docs/CHANGELOG.md
+++ b/docs/CHANGELOG.md
@@ -15,6 +15,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
### Added
+- Disabled the tracker component as part of a hotfix. This feature will be re-enabled in a future release.
- Added a tracker component to the txmgr for tracking and gracefully handling abandoned transactions. Abandoned transactions occur when a fromAddress is removed from the keystore by a node operator. The tracker gives abandoned transactions a chance to be finalized on chain, or marks them as fatal_error if they are not finalized within a specified time to live (default 6hrs).
- Added distributed tracing in the OpenTelemetry trace format to the node, currently focused at the LOOPP Plugin development effort. This includes a new set of `Tracing` TOML configurations. The default for collecting traces is off - you must explicitly enable traces and setup a valid OpenTelemetry collector. Refer to `.github/tracing/README.md` for more details.
- Added a new, optional WebServer authentication option that supports LDAP as a user identity provider. This enables user login access and user roles to be managed and provisioned via a centralized remote server that supports the LDAP protocol, which can be helpful when running multiple nodes. See the documentation for more information and config setup instructions. There is a new `[WebServer].AuthenticationMethod` config option, when set to `ldap` requires the new `[WebServer.LDAP]` config section to be defined, see the reference `docs/core.toml`.
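The changelog entry kept as context above spells out the tracker's policy: an abandoned transaction, whose fromAddress was removed from the keystore, gets a time-to-live (default 6 hours) to finalize on chain before being marked fatal_error. A minimal sketch of that decision, reusing the documented default but with otherwise hypothetical types (not the txmgr API):

```go
package main

import (
	"fmt"
	"time"
)

// Default TTL matches the changelog entry above (6 hours); everything else
// in this sketch (types, field names) is hypothetical.
const abandonedTxTTL = 6 * time.Hour

type abandonedTx struct {
	ID          int64
	Finalized   bool
	AbandonedAt time.Time // when its fromAddress was removed from the keystore
}

// resolve reports what should happen to an abandoned transaction: keep
// tracking it, stop because it finalized on chain, or mark it fatal_error
// because the TTL ran out.
func resolve(tx abandonedTx, now time.Time) string {
	switch {
	case tx.Finalized:
		return "stop tracking: finalized on chain"
	case now.Sub(tx.AbandonedAt) > abandonedTxTTL:
		return "mark fatal_error: TTL exceeded"
	default:
		return "keep tracking"
	}
}

func main() {
	now := time.Now()
	fmt.Println(resolve(abandonedTx{ID: 1, AbandonedAt: now.Add(-7 * time.Hour)}, now))
	fmt.Println(resolve(abandonedTx{ID: 2, Finalized: true, AbandonedAt: now}, now))
}
```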
From 7deb0ce98c3903b6f841db87f5afe79778de4708 Mon Sep 17 00:00:00 2001
From: Dylan Tinianov
Date: Tue, 23 Jan 2024 14:14:37 -0500
Subject: [PATCH 39/40] Update docs/CHANGELOG.md
Co-authored-by: Jordan Krage
---
docs/CHANGELOG.md | 2 --
1 file changed, 2 deletions(-)
diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md
index 57702efce31..92ff603228a 100644
--- a/docs/CHANGELOG.md
+++ b/docs/CHANGELOG.md
@@ -15,8 +15,6 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
### Added
-- Disabled the tracker component as part of a hotfix. This feature will be re-enabled in a future release.
-- Added a tracker component to the txmgr for tracking and gracefully handling abandoned transactions. Abandoned transactions occur when a fromAddress is removed from the keystore by a node operator. The tracker gives abandoned transactions a chance to be finalized on chain, or marks them as fatal_error if they are not finalized within a specified time to live (default 6hrs).
- Added distributed tracing in the OpenTelemetry trace format to the node, currently focused at the LOOPP Plugin development effort. This includes a new set of `Tracing` TOML configurations. The default for collecting traces is off - you must explicitly enable traces and setup a valid OpenTelemetry collector. Refer to `.github/tracing/README.md` for more details.
- Added a new, optional WebServer authentication option that supports LDAP as a user identity provider. This enables user login access and user roles to be managed and provisioned via a centralized remote server that supports the LDAP protocol, which can be helpful when running multiple nodes. See the documentation for more information and config setup instructions. There is a new `[WebServer].AuthenticationMethod` config option, when set to `ldap` requires the new `[WebServer.LDAP]` config section to be defined, see the reference `docs/core.toml`.
- New prom metrics for mercury transmit queue:
From 04fbe42af1f8389ba215826a4c07f2acc86d9844 Mon Sep 17 00:00:00 2001
From: Sneha Agnihotri <180277+snehaagni@users.noreply.github.com>
Date: Wed, 24 Jan 2024 11:26:57 -0800
Subject: [PATCH 40/40] Finalize date on changelog for 2.8.0 (#11881)
---
docs/CHANGELOG.md | 10 +++++++---
1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md
index 92ff603228a..4d3a02c0eae 100644
--- a/docs/CHANGELOG.md
+++ b/docs/CHANGELOG.md
@@ -11,7 +11,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
...
-## 2.8.0 - UNRELEASED
+
+
+## 2.8.0 - 2024-01-24
### Added
@@ -91,9 +93,11 @@ Starting in `v2.9.0`:
- `TelemetryIngress.URL` and `TelemetryIngress.ServerPubKey` will no longer be allowed. Any TOML configuration that sets this fields will prevent the node from booting. These fields will be replaced by `[[TelemetryIngress.Endpoints]]`
- `P2P.V1` will no longer be supported and must not be set in TOML configuration in order to boot. Use `P2P.V2` instead. If you are using both, `V1` can simply be removed.
-...
+## 2.7.2 - 2023-12-14
-
+### Fixed
+
+- Fixed a bug that caused nodes without OCR or OCR2 enabled to fail config validation if `P2P.V2` was not explicitly disabled. With this fix, NOPs will not have to make changes to their config.
## 2.7.1 - 2023-11-21
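The 2.7.2 entry above describes the corrected validation rule: `P2P.V2` only needs to be configured when OCR or OCR2 is enabled, so nodes running neither no longer fail validation. A hedged sketch of that rule with a pared-down, hypothetical config struct (not the actual core config types):

```go
package main

import (
	"errors"
	"fmt"
)

// config is a hypothetical, simplified view of the node settings named in
// the 2.7.2 entry above; it is not the actual core config type.
type config struct {
	OCREnabled   bool
	OCR2Enabled  bool
	P2PV2Enabled bool
}

// validateP2P captures the corrected rule: P2P.V2 only matters when OCR or
// OCR2 is enabled, so nodes with both off pass even if P2P.V2 was never
// explicitly disabled.
func validateP2P(c config) error {
	if (c.OCREnabled || c.OCR2Enabled) && !c.P2PV2Enabled {
		return errors.New("P2P.V2 must be enabled when OCR or OCR2 is enabled")
	}
	return nil
}

func main() {
	fmt.Println(validateP2P(config{}))                                      // <nil>: no OCR, no P2P.V2 needed
	fmt.Println(validateP2P(config{OCR2Enabled: true}))                     // error: OCR2 needs P2P.V2
	fmt.Println(validateP2P(config{OCR2Enabled: true, P2PV2Enabled: true})) // <nil>
}
```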