From 1ac2902c6c514c00341568b8c6dfd799cd262222 Mon Sep 17 00:00:00 2001 From: Ilja Pavlovs <5300706+iljapavlovs@users.noreply.github.com> Date: Mon, 5 Aug 2024 15:00:39 +0300 Subject: [PATCH 1/6] VRF-1138: Make CTF tests to reuse existing VRF Wrapper (#13854) * VRF-1138: Make CTF tests to reuse existing VRF Wrapper * VRF-1138: remove old code * VRF-1138: remove comments * VRF-1138: refactoring * VRF-1138: pr comments * VRF-1138: pr comments * VRF-1138: fixing lint issues * VRF-1138: PR comments --- integration-tests/actions/actions.go | 6 +- .../actions/vrf/common/actions.go | 30 ++ .../actions/vrf/common/models.go | 1 + .../actions/vrf/vrfv2/contract_steps.go | 4 +- .../actions/vrf/vrfv2/setup_steps.go | 40 +-- .../actions/vrf/vrfv2plus/contract_steps.go | 68 ++++- .../actions/vrf/vrfv2plus/models.go | 4 +- .../actions/vrf/vrfv2plus/setup_steps.go | 270 +++++++++++++----- .../contracts/contract_vrf_models.go | 4 + .../contracts/ethereum_contracts.go | 4 +- .../contracts/ethereum_vrf_contracts.go | 20 ++ .../contracts/ethereum_vrfv2_contracts.go | 24 ++ .../contracts/ethereum_vrfv2plus_contracts.go | 24 ++ integration-tests/smoke/vrfv2plus_test.go | 45 ++- .../testconfig/common/vrf/common.go | 36 ++- integration-tests/testconfig/default.toml | 152 ++++++++++ integration-tests/testconfig/vrfv2/config.go | 6 - integration-tests/testconfig/vrfv2/vrfv2.toml | 12 +- .../testconfig/vrfv2plus/vrfv2plus.toml | 23 +- 19 files changed, 599 insertions(+), 174 deletions(-) diff --git a/integration-tests/actions/actions.go b/integration-tests/actions/actions.go index 65db18ad6f7..8487e3a264e 100644 --- a/integration-tests/actions/actions.go +++ b/integration-tests/actions/actions.go @@ -1216,7 +1216,7 @@ func RandBool() bool { return rand.Intn(2) == 1 } -func ContinuouslyGenerateTXsOnChain(sethClient *seth.Client, stopChannel chan bool, l zerolog.Logger) (bool, error) { +func ContinuouslyGenerateTXsOnChain(sethClient *seth.Client, stopChannel chan bool, wg *sync.WaitGroup, l zerolog.Logger) (bool, error) { counterContract, err := contracts.DeployCounterContract(sethClient) if err != nil { return false, err @@ -1230,6 +1230,10 @@ func ContinuouslyGenerateTXsOnChain(sethClient *seth.Client, stopChannel chan bo select { case <-stopChannel: l.Info().Str("Number of generated transactions on chain", count.String()).Msg("Stopping generating txs on chain. 
Desired block number reached.") + sleepDuration := time.Second * 10 + l.Info().Str("Waiting for", sleepDuration.String()).Msg("Waiting for transactions to be mined and avoid nonce issues") + time.Sleep(sleepDuration) + wg.Done() return true, nil default: err = counterContract.Increment() diff --git a/integration-tests/actions/vrf/common/actions.go b/integration-tests/actions/vrf/common/actions.go index e599c705ef0..1300ac8b726 100644 --- a/integration-tests/actions/vrf/common/actions.go +++ b/integration-tests/actions/vrf/common/actions.go @@ -20,6 +20,7 @@ import ( ctf_test_env "github.com/smartcontractkit/chainlink-testing-framework/docker/test_env" "github.com/smartcontractkit/chainlink-testing-framework/utils/conversions" seth_utils "github.com/smartcontractkit/chainlink-testing-framework/utils/seth" + "github.com/smartcontractkit/chainlink-testing-framework/utils/testcontext" "github.com/smartcontractkit/chainlink/integration-tests/actions" "github.com/smartcontractkit/chainlink/integration-tests/client" @@ -384,6 +385,35 @@ func BuildNewCLEnvForVRF(l zerolog.Logger, t *testing.T, envConfig VRFEnvConfig, return env, sethClient, nil } +func LoadExistingCLEnvForVRF( + t *testing.T, + envConfig VRFEnvConfig, + commonExistingEnvConfig *vrf_common_config.ExistingEnvConfig, + l zerolog.Logger, +) (*test_env.CLClusterTestEnv, *seth.Client, error) { + env, err := test_env.NewCLTestEnvBuilder(). + WithTestInstance(t). + WithTestConfig(&envConfig.TestConfig). + WithCustomCleanup(envConfig.CleanupFn). + Build() + if err != nil { + return nil, nil, fmt.Errorf("%s, err: %w", "error creating test env", err) + } + evmNetwork, err := env.GetFirstEvmNetwork() + if err != nil { + return nil, nil, err + } + sethClient, err := seth_utils.GetChainClient(envConfig.TestConfig, *evmNetwork) + if err != nil { + return nil, nil, err + } + err = FundNodesIfNeeded(testcontext.Get(t), commonExistingEnvConfig, sethClient, l) + if err != nil { + return nil, nil, err + } + return env, sethClient, nil +} + func GetRPCUrl(env *test_env.CLClusterTestEnv, chainID int64) (string, error) { provider, err := env.GetRpcProvider(chainID) if err != nil { diff --git a/integration-tests/actions/vrf/common/models.go b/integration-tests/actions/vrf/common/models.go index 9baa5c96e1d..f51fd84ba07 100644 --- a/integration-tests/actions/vrf/common/models.go +++ b/integration-tests/actions/vrf/common/models.go @@ -55,6 +55,7 @@ type VRFContracts struct { VRFV2PlusConsumer []contracts.VRFv2PlusLoadTestConsumer LinkToken contracts.LinkToken MockETHLINKFeed contracts.VRFMockETHLINKFeed + LinkNativeFeedAddress string } type VRFOwnerConfig struct { diff --git a/integration-tests/actions/vrf/vrfv2/contract_steps.go b/integration-tests/actions/vrf/vrfv2/contract_steps.go index 324b65b5d6c..1b909be9b83 100644 --- a/integration-tests/actions/vrf/vrfv2/contract_steps.go +++ b/integration-tests/actions/vrf/vrfv2/contract_steps.go @@ -635,7 +635,7 @@ func SetupNewConsumersAndSubs( ) ([]contracts.VRFv2LoadTestConsumer, []uint64, error) { consumers, err := DeployVRFV2Consumers(sethClient, coordinator.Address(), numberOfConsumerContractsToDeployAndAddToSub) if err != nil { - return nil, nil, fmt.Errorf("err: %w", err) + return nil, nil, err } l.Info(). Str("Coordinator", *testConfig.VRFv2.ExistingEnvConfig.ExistingEnvConfig.CoordinatorAddress). 
@@ -649,7 +649,7 @@ func SetupNewConsumersAndSubs( numberOfSubToCreate, ) if err != nil { - return nil, nil, fmt.Errorf("err: %w", err) + return nil, nil, err } return consumers, subIDs, nil } diff --git a/integration-tests/actions/vrf/vrfv2/setup_steps.go b/integration-tests/actions/vrf/vrfv2/setup_steps.go index c13aed807a9..b5852f82815 100644 --- a/integration-tests/actions/vrf/vrfv2/setup_steps.go +++ b/integration-tests/actions/vrf/vrfv2/setup_steps.go @@ -8,8 +8,6 @@ import ( "github.com/smartcontractkit/seth" - seth_utils "github.com/smartcontractkit/chainlink-testing-framework/utils/seth" - "github.com/ethereum/go-ethereum/common" "github.com/rs/zerolog" "golang.org/x/sync/errgroup" @@ -371,38 +369,30 @@ func SetupVRFV2ForNewEnv( func SetupVRFV2ForExistingEnv(t *testing.T, envConfig vrfcommon.VRFEnvConfig, l zerolog.Logger) (*vrfcommon.VRFContracts, *vrfcommon.VRFKeyData, *test_env.CLClusterTestEnv, *seth.Client, error) { commonExistingEnvConfig := envConfig.TestConfig.VRFv2.ExistingEnvConfig.ExistingEnvConfig - env, err := test_env.NewCLTestEnvBuilder(). - WithTestInstance(t). - WithTestConfig(&envConfig.TestConfig). - WithCustomCleanup(envConfig.CleanupFn). - Build() - if err != nil { - return nil, nil, nil, nil, fmt.Errorf("%s, err: %w", "error creating test env", err) - } - evmNetwork, err := env.GetFirstEvmNetwork() - if err != nil { - return nil, nil, nil, nil, err - } - sethClient, err := seth_utils.GetChainClient(envConfig.TestConfig, *evmNetwork) + env, sethClient, err := vrfcommon.LoadExistingCLEnvForVRF( + t, + envConfig, + commonExistingEnvConfig, + l, + ) if err != nil { - return nil, nil, nil, nil, err + return nil, nil, nil, nil, fmt.Errorf("%s, err: %w", "error loading existing CL env", err) } coordinator, err := contracts.LoadVRFCoordinatorV2(sethClient, *commonExistingEnvConfig.ConsumerAddress) if err != nil { return nil, nil, nil, nil, fmt.Errorf("%s, err: %w", "error loading VRFCoordinator2", err) } - linkAddr := common.HexToAddress(*commonExistingEnvConfig.LinkAddress) - linkToken, err := contracts.LoadLinkTokenContract(l, sethClient, linkAddr) + linkAddress, err := coordinator.GetLinkAddress(testcontext.Get(t)) if err != nil { - return nil, nil, nil, nil, fmt.Errorf("%s, err: %w", "error loading LinkToken", err) + return nil, nil, nil, nil, fmt.Errorf("%s, err: %w", "error getting Link address from Coordinator", err) } - err = vrfcommon.FundNodesIfNeeded(testcontext.Get(t), commonExistingEnvConfig, sethClient, l) + linkToken, err := contracts.LoadLinkTokenContract(l, sethClient, common.HexToAddress(linkAddress.String())) if err != nil { - return nil, nil, nil, nil, fmt.Errorf("err: %w", err) + return nil, nil, nil, nil, fmt.Errorf("%s, err: %w", "error loading LinkToken", err) } blockHashStoreAddress, err := coordinator.GetBlockHashStoreAddress(testcontext.Get(t)) if err != nil { - return nil, nil, nil, nil, fmt.Errorf("err: %w", err) + return nil, nil, nil, nil, err } blockHashStore, err := contracts.LoadBlockHashStore(sethClient, blockHashStoreAddress.String()) if err != nil { @@ -449,13 +439,13 @@ func SetupSubsAndConsumersForExistingEnv( l, ) if err != nil { - return nil, nil, fmt.Errorf("err: %w", err) + return nil, nil, err } } else { addr := common.HexToAddress(*commonExistingEnvConfig.ConsumerAddress) consumer, err := contracts.LoadVRFv2LoadTestConsumer(sethClient, addr) if err != nil { - return nil, nil, fmt.Errorf("err: %w", err) + return nil, nil, err } consumers = append(consumers, consumer) subIDs = append(subIDs, 
*testConfig.VRFv2.ExistingEnvConfig.SubID) @@ -471,7 +461,7 @@ func SetupSubsAndConsumersForExistingEnv( l, ) if err != nil { - return nil, nil, fmt.Errorf("err: %w", err) + return nil, nil, err } } return subIDs, consumers, nil diff --git a/integration-tests/actions/vrf/vrfv2plus/contract_steps.go b/integration-tests/actions/vrf/vrfv2plus/contract_steps.go index 479b00d952e..5a4ec9ba11a 100644 --- a/integration-tests/actions/vrf/vrfv2plus/contract_steps.go +++ b/integration-tests/actions/vrf/vrfv2plus/contract_steps.go @@ -56,7 +56,7 @@ func DeployVRFV2_5Contracts( } batchCoordinator, err := contracts.DeployBatchVRFCoordinatorV2Plus(chainClient, coordinator.Address()) if err != nil { - return nil, fmt.Errorf("%s, err %w", ErrDeployBatchCoordinatorV2Plus, err) + return nil, fmt.Errorf(vrfcommon.ErrGenericFormat, ErrDeployBatchCoordinatorV2Plus, err) } return &vrfcommon.VRFContracts{ CoordinatorV2Plus: coordinator, @@ -407,7 +407,7 @@ func DeployVRFV2PlusDirectFundingContracts( linkTokenAddress string, linkEthFeedAddress string, coordinator contracts.VRFCoordinatorV2_5, - consumerContractsAmount int, + numberOfConsumerContracts int, wrapperSubId *big.Int, configGeneral *vrfv2plusconfig.General, ) (*VRFV2PlusWrapperContracts, error) { @@ -432,7 +432,7 @@ func DeployVRFV2PlusDirectFundingContracts( return nil, fmt.Errorf(vrfcommon.ErrGenericFormat, ErrDeployWrapper, err) } } - consumers, err := DeployVRFV2PlusWrapperConsumers(sethClient, vrfv2PlusWrapper, consumerContractsAmount) + consumers, err := DeployVRFV2PlusWrapperConsumers(sethClient, vrfv2PlusWrapper, numberOfConsumerContracts) if err != nil { return nil, err } @@ -545,9 +545,9 @@ func WaitRandomWordsFulfilledEvent( return randomWordsFulfilledEvent, err } -func DeployVRFV2PlusWrapperConsumers(client *seth.Client, vrfV2PlusWrapper contracts.VRFV2PlusWrapper, consumerContractsAmount int) ([]contracts.VRFv2PlusWrapperLoadTestConsumer, error) { +func DeployVRFV2PlusWrapperConsumers(client *seth.Client, vrfV2PlusWrapper contracts.VRFV2PlusWrapper, numberOfConsumerContracts int) ([]contracts.VRFv2PlusWrapperLoadTestConsumer, error) { var consumers []contracts.VRFv2PlusWrapperLoadTestConsumer - for i := 1; i <= consumerContractsAmount; i++ { + for i := 1; i <= numberOfConsumerContracts; i++ { loadTestConsumer, err := contracts.DeployVRFV2PlusWrapperLoadTestConsumer(client, vrfV2PlusWrapper.Address()) if err != nil { return nil, fmt.Errorf(vrfcommon.ErrGenericFormat, ErrAdvancedConsumer, err) @@ -609,7 +609,7 @@ func SetupNewConsumersAndSubs( ) ([]contracts.VRFv2PlusLoadTestConsumer, []*big.Int, error) { consumers, err := DeployVRFV2PlusConsumers(sethClient, coordinator, consumerContractsAmount) if err != nil { - return nil, nil, fmt.Errorf("err: %w", err) + return nil, nil, err } l.Info(). Str("Coordinator", *testConfig.VRFv2Plus.ExistingEnvConfig.ExistingEnvConfig.CoordinatorAddress). 
@@ -627,7 +627,7 @@ func SetupNewConsumersAndSubs( *testConfig.VRFv2Plus.General.SubscriptionBillingType, ) if err != nil { - return nil, nil, fmt.Errorf("err: %w", err) + return nil, nil, err } return consumers, subIDs, nil } @@ -652,3 +652,57 @@ func CancelSubsAndReturnFunds(ctx context.Context, vrfContracts *vrfcommon.VRFCo } } } + +func FundWrapperConsumer( + sethClient *seth.Client, + subFundingType string, + linkToken contracts.LinkToken, + wrapperConsumer contracts.VRFv2PlusWrapperLoadTestConsumer, + vrfv2PlusConfig *vrfv2plusconfig.General, + l zerolog.Logger, +) error { + fundConsumerWithLink := func() error { + //fund consumer with Link + linkAmount := big.NewInt(0).Mul(big.NewInt(1e18), big.NewInt(*vrfv2PlusConfig.WrapperConsumerFundingAmountLink)) + l.Info(). + Str("Link Amount", linkAmount.String()). + Str("WrapperConsumerAddress", wrapperConsumer.Address()).Msg("Funding WrapperConsumer with Link") + return linkToken.Transfer( + wrapperConsumer.Address(), + linkAmount, + ) + } + fundConsumerWithNative := func() error { + //fund consumer with Eth (native token) + _, err := actions.SendFunds(l, sethClient, actions.FundsToSendPayload{ + ToAddress: common.HexToAddress(wrapperConsumer.Address()), + Amount: conversions.EtherToWei(big.NewFloat(*vrfv2PlusConfig.WrapperConsumerFundingAmountNativeToken)), + PrivateKey: sethClient.PrivateKeys[0], + }) + return err + } + switch vrfv2plusconfig.BillingType(subFundingType) { + case vrfv2plusconfig.BillingType_Link: + err := fundConsumerWithLink() + if err != nil { + return err + } + case vrfv2plusconfig.BillingType_Native: + err := fundConsumerWithNative() + if err != nil { + return err + } + case vrfv2plusconfig.BillingType_Link_and_Native: + err := fundConsumerWithLink() + if err != nil { + return err + } + err = fundConsumerWithNative() + if err != nil { + return err + } + default: + return fmt.Errorf("invalid billing type: %s", subFundingType) + } + return nil +} diff --git a/integration-tests/actions/vrf/vrfv2plus/models.go b/integration-tests/actions/vrf/vrfv2plus/models.go index a2ca8ec582b..5198439c050 100644 --- a/integration-tests/actions/vrf/vrfv2plus/models.go +++ b/integration-tests/actions/vrf/vrfv2plus/models.go @@ -5,6 +5,6 @@ import ( ) type VRFV2PlusWrapperContracts struct { - VRFV2PlusWrapper contracts.VRFV2PlusWrapper - LoadTestConsumers []contracts.VRFv2PlusWrapperLoadTestConsumer + VRFV2PlusWrapper contracts.VRFV2PlusWrapper + WrapperConsumers []contracts.VRFv2PlusWrapperLoadTestConsumer } diff --git a/integration-tests/actions/vrf/vrfv2plus/setup_steps.go b/integration-tests/actions/vrf/vrfv2plus/setup_steps.go index f3c7d53d6ee..4833afb9fef 100644 --- a/integration-tests/actions/vrf/vrfv2plus/setup_steps.go +++ b/integration-tests/actions/vrf/vrfv2plus/setup_steps.go @@ -8,8 +8,6 @@ import ( "github.com/smartcontractkit/seth" - seth_utils "github.com/smartcontractkit/chainlink-testing-framework/utils/seth" - "github.com/shopspring/decimal" "golang.org/x/sync/errgroup" @@ -17,7 +15,6 @@ import ( "github.com/google/uuid" "github.com/rs/zerolog" - "github.com/smartcontractkit/chainlink-testing-framework/utils/conversions" "github.com/smartcontractkit/chainlink-testing-framework/utils/testcontext" "github.com/smartcontractkit/chainlink/integration-tests/actions" vrfcommon "github.com/smartcontractkit/chainlink/integration-tests/actions/vrf/common" @@ -28,7 +25,7 @@ import ( "github.com/smartcontractkit/chainlink/integration-tests/client" "github.com/smartcontractkit/chainlink/integration-tests/contracts" 
"github.com/smartcontractkit/chainlink/integration-tests/docker/test_env" - vrfv2plus_config "github.com/smartcontractkit/chainlink/integration-tests/testconfig/vrfv2plus" + vrfv2plusconfig "github.com/smartcontractkit/chainlink/integration-tests/testconfig/vrfv2plus" "github.com/smartcontractkit/chainlink/integration-tests/types" ) @@ -201,7 +198,7 @@ func SetupVRFV2_5Environment( return vrfContracts, &vrfKeyData, nodeTypeToNodeMap, nil } -func setupVRFNode(contracts *vrfcommon.VRFContracts, chainID *big.Int, config *vrfv2plus_config.General, pubKeyCompressed string, l zerolog.Logger, vrfNode *vrfcommon.VRFNode) error { +func setupVRFNode(contracts *vrfcommon.VRFContracts, chainID *big.Int, config *vrfv2plusconfig.General, pubKeyCompressed string, l zerolog.Logger, vrfNode *vrfcommon.VRFNode) error { vrfJobSpecConfig := vrfcommon.VRFJobSpecConfig{ ForwardingAllowed: *config.VRFJobForwardingAllowed, CoordinatorAddress: contracts.CoordinatorV2Plus.Address(), @@ -235,7 +232,10 @@ func setupVRFNode(contracts *vrfcommon.VRFContracts, chainID *big.Int, config *v nodeConfig := node.NewConfig(vrfNode.CLNode.NodeConfig, node.WithKeySpecificMaxGasPrice(vrfNode.TXKeyAddressStrings, *config.CLNodeMaxGasPriceGWei), ) - l.Info().Msg("Restarting Node with new sending key PriceMax configuration") + l.Info(). + Strs("Sending Keys", vrfNode.TXKeyAddressStrings). + Int64("Price Max Setting", *config.CLNodeMaxGasPriceGWei). + Msg("Restarting Node with new sending key PriceMax configuration") err = vrfNode.CLNode.Restart(nodeConfig) if err != nil { return fmt.Errorf(vrfcommon.ErrGenericFormat, vrfcommon.ErrRestartCLNode, err) @@ -243,29 +243,119 @@ func setupVRFNode(contracts *vrfcommon.VRFContracts, chainID *big.Int, config *v return nil } -func SetupVRFV2PlusWrapperEnvironment( +func SetupVRFV2PlusWrapperForExistingEnv( ctx context.Context, + sethClient *seth.Client, + vrfContracts *vrfcommon.VRFContracts, + keyHash [32]byte, + vrfv2PlusTestConfig types.VRFv2PlusTestConfig, + numberOfConsumerContracts int, l zerolog.Logger, +) (*VRFV2PlusWrapperContracts, *big.Int, error) { + config := *vrfv2PlusTestConfig.GetVRFv2PlusConfig() + var wrapper contracts.VRFV2PlusWrapper + var err error + if *config.ExistingEnvConfig.UseExistingWrapper { + wrapper, err = contracts.LoadVRFV2PlusWrapper(sethClient, *config.ExistingEnvConfig.WrapperAddress) + if err != nil { + return nil, nil, fmt.Errorf(vrfcommon.ErrGenericFormat, "error loading VRFV2PlusWrapper", err) + } + } else { + wrapperSubId, err := CreateSubAndFindSubID(ctx, sethClient, vrfContracts.CoordinatorV2Plus) + if err != nil { + return nil, nil, err + } + wrapper, err = contracts.DeployVRFV2PlusWrapper(sethClient, vrfContracts.LinkToken.Address(), vrfContracts.LinkNativeFeedAddress, vrfContracts.CoordinatorV2Plus.Address(), wrapperSubId) + if err != nil { + return nil, nil, fmt.Errorf(vrfcommon.ErrGenericFormat, ErrDeployWrapper, err) + } + err = FundSubscriptions( + big.NewFloat(*config.General.SubscriptionFundingAmountNative), + big.NewFloat(*config.General.SubscriptionFundingAmountLink), + vrfContracts.LinkToken, + vrfContracts.CoordinatorV2Plus, + []*big.Int{wrapperSubId}, + *config.General.SubscriptionBillingType, + ) + if err != nil { + return nil, nil, err + } + err = vrfContracts.CoordinatorV2Plus.AddConsumer(wrapperSubId, wrapper.Address()) + if err != nil { + return nil, nil, err + } + err = wrapper.SetConfig( + *config.General.WrapperGasOverhead, + *config.General.CoordinatorGasOverheadNative, + *config.General.CoordinatorGasOverheadLink, + 
*config.General.CoordinatorGasOverheadPerWord, + *config.General.CoordinatorNativePremiumPercentage, + *config.General.CoordinatorLinkPremiumPercentage, + keyHash, + *config.General.WrapperMaxNumberOfWords, + *config.General.StalenessSeconds, + decimal.RequireFromString(*config.General.FallbackWeiPerUnitLink).BigInt(), + *config.General.FulfillmentFlatFeeNativePPM, + *config.General.FulfillmentFlatFeeLinkDiscountPPM, + ) + if err != nil { + return nil, nil, err + } + } + wrapperSubID, err := wrapper.GetSubID(ctx) + if err != nil { + return nil, nil, fmt.Errorf(vrfcommon.ErrGenericFormat, "error getting subID", err) + } + var wrapperConsumers []contracts.VRFv2PlusWrapperLoadTestConsumer + if *config.ExistingEnvConfig.CreateFundAddWrapperConsumers { + wrapperConsumers, err = DeployVRFV2PlusWrapperConsumers(sethClient, wrapper, numberOfConsumerContracts) + if err != nil { + return nil, nil, err + } + } else { + wrapperConsumer, err := contracts.LoadVRFV2WrapperLoadTestConsumer(sethClient, *config.ExistingEnvConfig.WrapperConsumerAddress) + if err != nil { + return nil, nil, fmt.Errorf(vrfcommon.ErrGenericFormat, "error loading VRFV2WrapperLoadTestConsumer", err) + } + wrapperConsumers = append(wrapperConsumers, wrapperConsumer) + } + wrapperContracts := &VRFV2PlusWrapperContracts{wrapper, wrapperConsumers} + for _, consumer := range wrapperConsumers { + err = FundWrapperConsumer( + sethClient, + *config.General.SubscriptionBillingType, + vrfContracts.LinkToken, + consumer, + config.General, + l, + ) + if err != nil { + return nil, nil, err + } + } + return wrapperContracts, wrapperSubID, nil +} + +func SetupVRFV2PlusWrapperForNewEnv( + ctx context.Context, sethClient *seth.Client, vrfv2PlusTestConfig types.VRFv2PlusTestConfig, - linkToken contracts.LinkToken, - mockNativeLINKFeed contracts.MockETHLINKFeed, - coordinator contracts.VRFCoordinatorV2_5, + vrfContracts *vrfcommon.VRFContracts, keyHash [32]byte, wrapperConsumerContractsAmount int, + l zerolog.Logger, ) (*VRFV2PlusWrapperContracts, *big.Int, error) { // external EOA has to create a subscription for the wrapper first - wrapperSubId, err := CreateSubAndFindSubID(ctx, sethClient, coordinator) + wrapperSubId, err := CreateSubAndFindSubID(ctx, sethClient, vrfContracts.CoordinatorV2Plus) if err != nil { return nil, nil, err } - vrfv2PlusConfig := vrfv2PlusTestConfig.GetVRFv2PlusConfig().General wrapperContracts, err := DeployVRFV2PlusDirectFundingContracts( sethClient, - linkToken.Address(), - mockNativeLINKFeed.Address(), - coordinator, + vrfContracts.LinkToken.Address(), + vrfContracts.MockETHLINKFeed.Address(), + vrfContracts.CoordinatorV2Plus, wrapperConsumerContractsAmount, wrapperSubId, vrfv2PlusConfig, @@ -273,13 +363,11 @@ func SetupVRFV2PlusWrapperEnvironment( if err != nil { return nil, nil, fmt.Errorf(vrfcommon.ErrGenericFormat, vrfcommon.ErrWaitTXsComplete, err) } - // once the wrapper is deployed, wrapper address will become consumer of external EOA subscription - err = coordinator.AddConsumer(wrapperSubId, wrapperContracts.VRFV2PlusWrapper.Address()) + err = vrfContracts.CoordinatorV2Plus.AddConsumer(wrapperSubId, wrapperContracts.VRFV2PlusWrapper.Address()) if err != nil { return nil, nil, err } - err = wrapperContracts.VRFV2PlusWrapper.SetConfig( *vrfv2PlusConfig.WrapperGasOverhead, *vrfv2PlusConfig.CoordinatorGasOverheadNative, @@ -297,53 +385,35 @@ func SetupVRFV2PlusWrapperEnvironment( if err != nil { return nil, nil, err } - //fund sub wrapperSubID, err := wrapperContracts.VRFV2PlusWrapper.GetSubID(ctx) if err != nil { 
return nil, nil, err } - err = FundSubscriptions( big.NewFloat(*vrfv2PlusTestConfig.GetVRFv2PlusConfig().General.SubscriptionFundingAmountNative), big.NewFloat(*vrfv2PlusTestConfig.GetVRFv2PlusConfig().General.SubscriptionFundingAmountLink), - linkToken, - coordinator, + vrfContracts.LinkToken, + vrfContracts.CoordinatorV2Plus, []*big.Int{wrapperSubID}, *vrfv2PlusConfig.SubscriptionBillingType, ) if err != nil { return nil, nil, err } - - //fund consumer with Link - err = linkToken.Transfer( - wrapperContracts.LoadTestConsumers[0].Address(), - big.NewInt(0).Mul(big.NewInt(1e18), big.NewInt(*vrfv2PlusConfig.WrapperConsumerFundingAmountLink)), - ) - if err != nil { - return nil, nil, err - } - - //fund consumer with Eth (native token) - _, err = actions.SendFunds(l, sethClient, actions.FundsToSendPayload{ - ToAddress: common.HexToAddress(wrapperContracts.LoadTestConsumers[0].Address()), - Amount: conversions.EtherToWei(big.NewFloat(*vrfv2PlusConfig.WrapperConsumerFundingAmountNativeToken)), - PrivateKey: sethClient.PrivateKeys[0], - }) - if err != nil { - return nil, nil, err - } - - wrapperConsumerBalanceBeforeRequestWei, err := sethClient.Client.BalanceAt(ctx, common.HexToAddress(wrapperContracts.LoadTestConsumers[0].Address()), nil) - if err != nil { - return nil, nil, err + for _, consumer := range wrapperContracts.WrapperConsumers { + err = FundWrapperConsumer( + sethClient, + *vrfv2PlusConfig.SubscriptionBillingType, + vrfContracts.LinkToken, + consumer, + vrfv2PlusConfig, + l, + ) + if err != nil { + return nil, nil, err + } } - l.Info(). - Str("WrapperConsumerBalanceBeforeRequestWei", wrapperConsumerBalanceBeforeRequestWei.String()). - Str("WrapperConsumerAddress", wrapperContracts.LoadTestConsumers[0].Address()). - Msg("WrapperConsumerBalanceBeforeRequestWei") - return wrapperContracts, wrapperSubID, nil } @@ -421,47 +491,45 @@ func SetupVRFV2PlusForNewEnv( func SetupVRFV2PlusForExistingEnv(t *testing.T, envConfig vrfcommon.VRFEnvConfig, l zerolog.Logger) (*vrfcommon.VRFContracts, *vrfcommon.VRFKeyData, *test_env.CLClusterTestEnv, *seth.Client, error) { commonExistingEnvConfig := envConfig.TestConfig.VRFv2Plus.ExistingEnvConfig.ExistingEnvConfig - env, err := test_env.NewCLTestEnvBuilder(). - WithTestInstance(t). - WithTestConfig(&envConfig.TestConfig). - WithCustomCleanup(envConfig.CleanupFn). 
- Build() - if err != nil { - return nil, nil, nil, nil, fmt.Errorf("%s, err: %w", "error creating test env", err) - } - evmNetwork, err := env.GetFirstEvmNetwork() - if err != nil { - return nil, nil, nil, nil, err - } - sethClient, err := seth_utils.GetChainClient(envConfig.TestConfig, *evmNetwork) + env, sethClient, err := vrfcommon.LoadExistingCLEnvForVRF( + t, + envConfig, + commonExistingEnvConfig, + l, + ) if err != nil { - return nil, nil, nil, nil, err + return nil, nil, nil, nil, fmt.Errorf("%s, err: %w", "error loading existing CL env", err) } coordinator, err := contracts.LoadVRFCoordinatorV2_5(sethClient, *commonExistingEnvConfig.CoordinatorAddress) if err != nil { return nil, nil, nil, nil, fmt.Errorf("%s, err: %w", "error loading VRFCoordinator2_5", err) } - linkToken, err := contracts.LoadLinkTokenContract(l, sethClient, common.HexToAddress(*commonExistingEnvConfig.LinkAddress)) + linkAddress, err := coordinator.GetLinkAddress(testcontext.Get(t)) + if err != nil { + return nil, nil, nil, nil, fmt.Errorf("%s, err: %w", "error getting Link address from Coordinator", err) + } + linkToken, err := contracts.LoadLinkTokenContract(l, sethClient, common.HexToAddress(linkAddress.String())) if err != nil { return nil, nil, nil, nil, fmt.Errorf("%s, err: %w", "error loading LinkToken", err) } - err = vrfcommon.FundNodesIfNeeded(testcontext.Get(t), commonExistingEnvConfig, sethClient, l) + linkNativeFeedAddress, err := coordinator.GetLinkNativeFeed(testcontext.Get(t)) if err != nil { - return nil, nil, nil, nil, fmt.Errorf("err: %w", err) + return nil, nil, nil, nil, fmt.Errorf("%s, err: %w", "error getting Link address from Coordinator", err) } blockHashStoreAddress, err := coordinator.GetBlockHashStoreAddress(testcontext.Get(t)) if err != nil { - return nil, nil, nil, nil, fmt.Errorf("err: %w", err) + return nil, nil, nil, nil, err } blockHashStore, err := contracts.LoadBlockHashStore(sethClient, blockHashStoreAddress.String()) if err != nil { return nil, nil, nil, nil, fmt.Errorf("%s, err: %w", "error loading BlockHashStore", err) } vrfContracts := &vrfcommon.VRFContracts{ - CoordinatorV2Plus: coordinator, - VRFV2PlusConsumer: nil, - LinkToken: linkToken, - BHS: blockHashStore, + CoordinatorV2Plus: coordinator, + VRFV2PlusConsumer: nil, + LinkToken: linkToken, + BHS: blockHashStore, + LinkNativeFeedAddress: linkNativeFeedAddress.String(), } vrfKey := &vrfcommon.VRFKeyData{ VRFKey: nil, @@ -500,12 +568,12 @@ func SetupSubsAndConsumersForExistingEnv( l, ) if err != nil { - return nil, nil, fmt.Errorf("err: %w", err) + return nil, nil, err } } else { consumer, err := contracts.LoadVRFv2PlusLoadTestConsumer(sethClient, *commonExistingEnvConfig.ConsumerAddress) if err != nil { - return nil, nil, fmt.Errorf("err: %w", err) + return nil, nil, err } consumers = append(consumers, consumer) var ok bool @@ -527,21 +595,65 @@ func SetupSubsAndConsumersForExistingEnv( l, ) if err != nil { - return nil, nil, fmt.Errorf("err: %w", err) + return nil, nil, err } } return subIDs, consumers, nil } func SelectBillingTypeWithDistribution(billingType string, distributionFn func() bool) (bool, error) { - switch vrfv2plus_config.BillingType(billingType) { - case vrfv2plus_config.BillingType_Link: + switch vrfv2plusconfig.BillingType(billingType) { + case vrfv2plusconfig.BillingType_Link: return false, nil - case vrfv2plus_config.BillingType_Native: + case vrfv2plusconfig.BillingType_Native: return true, nil - case vrfv2plus_config.BillingType_Link_and_Native: + case 
vrfv2plusconfig.BillingType_Link_and_Native: return distributionFn(), nil default: return false, fmt.Errorf("invalid billing type: %s", billingType) } } + +func SetupVRFV2PlusWrapperUniverse( + ctx context.Context, + sethClient *seth.Client, + vrfContracts *vrfcommon.VRFContracts, + config *tc.TestConfig, + keyHash [32]byte, + numberOfConsumerContracts int, + l zerolog.Logger, +) (*VRFV2PlusWrapperContracts, *big.Int, error) { + var ( + wrapperContracts *VRFV2PlusWrapperContracts + wrapperSubID *big.Int + err error + ) + if *config.VRFv2Plus.General.UseExistingEnv { + wrapperContracts, wrapperSubID, err = SetupVRFV2PlusWrapperForExistingEnv( + ctx, + sethClient, + vrfContracts, + keyHash, + config, + numberOfConsumerContracts, + l, + ) + if err != nil { + return nil, nil, err + } + } else { + wrapperContracts, wrapperSubID, err = SetupVRFV2PlusWrapperForNewEnv( + ctx, + sethClient, + config, + vrfContracts, + keyHash, + numberOfConsumerContracts, + l, + ) + if err != nil { + return nil, nil, err + } + } + return wrapperContracts, wrapperSubID, nil +} diff --git a/integration-tests/contracts/contract_vrf_models.go b/integration-tests/contracts/contract_vrf_models.go index 45825a18ff3..c798c4921c6 100644 --- a/integration-tests/contracts/contract_vrf_models.go +++ b/integration-tests/contracts/contract_vrf_models.go @@ -76,6 +76,8 @@ type VRFCoordinatorV2 interface { WaitForConfigSetEvent(timeout time.Duration) (*CoordinatorConfigSet, error) OracleWithdraw(recipient common.Address, amount *big.Int) error GetBlockHashStoreAddress(ctx context.Context) (common.Address, error) + GetLinkAddress(ctx context.Context) (common.Address, error) + GetLinkNativeFeed(ctx context.Context) (common.Address, error) } type VRFCoordinatorV2_5 interface { @@ -121,6 +123,8 @@ type VRFCoordinatorV2_5 interface { ParseRandomWordsFulfilled(log types.Log) (*CoordinatorRandomWordsFulfilled, error) WaitForConfigSetEvent(timeout time.Duration) (*CoordinatorConfigSet, error) GetBlockHashStoreAddress(ctx context.Context) (common.Address, error) + GetLinkAddress(ctx context.Context) (common.Address, error) + GetLinkNativeFeed(ctx context.Context) (common.Address, error) } type VRFCoordinatorV2PlusUpgradedVersion interface { diff --git a/integration-tests/contracts/ethereum_contracts.go b/integration-tests/contracts/ethereum_contracts.go index 2db6aeb4637..5b08c9a9fbf 100644 --- a/integration-tests/contracts/ethereum_contracts.go +++ b/integration-tests/contracts/ethereum_contracts.go @@ -770,12 +770,12 @@ func DeployLinkTokenContract(l zerolog.Logger, client *seth.Client) (*EthereumLi } func LoadLinkTokenContract(l zerolog.Logger, client *seth.Client, address common.Address) (*EthereumLinkToken, error) { - abi, err := link_token_interface.LinkTokenMetaData.GetAbi() + linkABI, err := link_token_interface.LinkTokenMetaData.GetAbi() if err != nil { return &EthereumLinkToken{}, fmt.Errorf("failed to get LinkToken ABI: %w", err) } - client.ContractStore.AddABI("LinkToken", *abi) + client.ContractStore.AddABI("LinkToken", *linkABI) client.ContractStore.AddBIN("LinkToken", common.FromHex(link_token_interface.LinkTokenMetaData.Bin)) linkToken, err := link_token_interface.NewLinkToken(address, wrappers.MustNewWrappedContractBackend(nil, client)) diff --git a/integration-tests/contracts/ethereum_vrf_contracts.go b/integration-tests/contracts/ethereum_vrf_contracts.go index e4dbb87d0b2..a09cc809c63 100644 --- a/integration-tests/contracts/ethereum_vrf_contracts.go +++ b/integration-tests/contracts/ethereum_vrf_contracts.go @@ -13,6 
+13,7 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/vrf_coordinator_v2_5" "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/vrf_v2plus_load_test_with_metrics" "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/vrfv2plus_wrapper" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/vrfv2plus_wrapper_load_test_consumer" "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/vrfv2plus_wrapper_optimism" "github.com/smartcontractkit/seth" @@ -546,3 +547,22 @@ func LoadVRFV2PlusWrapperOptimism(seth *seth.Client, addr string) (*EthereumVRFV wrapper: contract, }, nil } + +func LoadVRFV2WrapperLoadTestConsumer(seth *seth.Client, addr string) (*EthereumVRFV2PlusWrapperLoadTestConsumer, error) { + address := common.HexToAddress(addr) + abi, err := vrfv2plus_wrapper_load_test_consumer.VRFV2PlusWrapperLoadTestConsumerMetaData.GetAbi() + if err != nil { + return nil, fmt.Errorf("failed to get VRFV2PlusWrapperLoadTestConsumer ABI: %w", err) + } + seth.ContractStore.AddABI("VRFV2PlusWrapperLoadTestConsumer", *abi) + seth.ContractStore.AddBIN("VRFV2PlusWrapperLoadTestConsumer", common.FromHex(vrfv2plus_wrapper_load_test_consumer.VRFV2PlusWrapperLoadTestConsumerMetaData.Bin)) + contract, err := vrfv2plus_wrapper_load_test_consumer.NewVRFV2PlusWrapperLoadTestConsumer(address, wrappers.MustNewWrappedContractBackend(nil, seth)) + if err != nil { + return nil, fmt.Errorf("failed to instantiate VRFV2PlusWrapperLoadTestConsumer instance: %w", err) + } + return &EthereumVRFV2PlusWrapperLoadTestConsumer{ + client: seth, + address: address, + consumer: contract, + }, nil +} diff --git a/integration-tests/contracts/ethereum_vrfv2_contracts.go b/integration-tests/contracts/ethereum_vrfv2_contracts.go index a9d1a93769d..df4a6fb9fbe 100644 --- a/integration-tests/contracts/ethereum_vrfv2_contracts.go +++ b/integration-tests/contracts/ethereum_vrfv2_contracts.go @@ -594,6 +594,30 @@ func (v *EthereumVRFCoordinatorV2) ParseLog(log types.Log) (generated.AbigenLog, return v.coordinator.ParseLog(log) } +func (v *EthereumVRFCoordinatorV2) GetLinkAddress(ctx context.Context) (common.Address, error) { + opts := &bind.CallOpts{ + From: v.client.MustGetRootKeyAddress(), + Context: ctx, + } + address, err := v.coordinator.LINK(opts) + if err != nil { + return common.Address{}, err + } + return address, nil +} + +func (v *EthereumVRFCoordinatorV2) GetLinkNativeFeed(ctx context.Context) (common.Address, error) { + opts := &bind.CallOpts{ + From: v.client.MustGetRootKeyAddress(), + Context: ctx, + } + address, err := v.coordinator.LINKETHFEED(opts) + if err != nil { + return common.Address{}, err + } + return address, nil +} + // CancelSubscription cancels subscription by Sub owner, // return funds to specified address, // checks if pending requests for a sub exist diff --git a/integration-tests/contracts/ethereum_vrfv2plus_contracts.go b/integration-tests/contracts/ethereum_vrfv2plus_contracts.go index 8e099b4f6bc..9b286a1d057 100644 --- a/integration-tests/contracts/ethereum_vrfv2plus_contracts.go +++ b/integration-tests/contracts/ethereum_vrfv2plus_contracts.go @@ -340,6 +340,30 @@ func (v *EthereumVRFCoordinatorV2_5) GetBlockHashStoreAddress(ctx context.Contex return blockHashStoreAddress, nil } +func (v *EthereumVRFCoordinatorV2_5) GetLinkAddress(ctx context.Context) (common.Address, error) { + opts := &bind.CallOpts{ + From: v.client.MustGetRootKeyAddress(), + Context: ctx, + } + address, err := v.coordinator.LINK(opts) 
+ if err != nil { + return common.Address{}, err + } + return address, nil +} + +func (v *EthereumVRFCoordinatorV2_5) GetLinkNativeFeed(ctx context.Context) (common.Address, error) { + opts := &bind.CallOpts{ + From: v.client.MustGetRootKeyAddress(), + Context: ctx, + } + address, err := v.coordinator.LINKNATIVEFEED(opts) + if err != nil { + return common.Address{}, err + } + return address, nil +} + // OwnerCancelSubscription cancels subscription by Coordinator owner // return funds to sub owner, // does not check if pending requests for a sub exist diff --git a/integration-tests/smoke/vrfv2plus_test.go b/integration-tests/smoke/vrfv2plus_test.go index da2989d8fc9..a1ac5fd5544 100644 --- a/integration-tests/smoke/vrfv2plus_test.go +++ b/integration-tests/smoke/vrfv2plus_test.go @@ -289,16 +289,14 @@ func TestVRFv2Plus(t *testing.T) { t.Run("Direct Funding", func(t *testing.T) { configCopy := config.MustCopy().(tc.TestConfig) - wrapperContracts, wrapperSubID, err := vrfv2plus.SetupVRFV2PlusWrapperEnvironment( + wrapperContracts, wrapperSubID, err := vrfv2plus.SetupVRFV2PlusWrapperUniverse( testcontext.Get(t), - l, sethClient, + vrfContracts, &configCopy, - vrfContracts.LinkToken, - vrfContracts.MockETHLINKFeed, - vrfContracts.CoordinatorV2Plus, vrfKey.KeyHash, 1, + l, ) require.NoError(t, err) @@ -307,7 +305,7 @@ func TestVRFv2Plus(t *testing.T) { testConfig := configCopy.VRFv2Plus.General var isNativeBilling = false - wrapperConsumerJuelsBalanceBeforeRequest, err := vrfContracts.LinkToken.BalanceOf(testcontext.Get(t), wrapperContracts.LoadTestConsumers[0].Address()) + wrapperConsumerJuelsBalanceBeforeRequest, err := vrfContracts.LinkToken.BalanceOf(testcontext.Get(t), wrapperContracts.WrapperConsumers[0].Address()) require.NoError(t, err, "error getting wrapper consumer balance") wrapperSubscription, err := vrfContracts.CoordinatorV2Plus.GetSubscription(testcontext.Get(t), wrapperSubID) @@ -315,7 +313,7 @@ func TestVRFv2Plus(t *testing.T) { subBalanceBeforeRequest := wrapperSubscription.Balance randomWordsFulfilledEvent, err := vrfv2plus.DirectFundingRequestRandomnessAndWaitForFulfillment( - wrapperContracts.LoadTestConsumers[0], + wrapperContracts.WrapperConsumers[0], vrfContracts.CoordinatorV2Plus, vrfKey, wrapperSubID, @@ -331,13 +329,13 @@ func TestVRFv2Plus(t *testing.T) { subBalanceAfterRequest := wrapperSubscription.Balance require.Equal(t, expectedSubBalanceJuels, subBalanceAfterRequest) - consumerStatus, err := wrapperContracts.LoadTestConsumers[0].GetRequestStatus(testcontext.Get(t), randomWordsFulfilledEvent.RequestId) + consumerStatus, err := wrapperContracts.WrapperConsumers[0].GetRequestStatus(testcontext.Get(t), randomWordsFulfilledEvent.RequestId) require.NoError(t, err, "error getting rand request status") require.True(t, consumerStatus.Fulfilled) expectedWrapperConsumerJuelsBalance := new(big.Int).Sub(wrapperConsumerJuelsBalanceBeforeRequest, consumerStatus.Paid) - wrapperConsumerJuelsBalanceAfterRequest, err := vrfContracts.LinkToken.BalanceOf(testcontext.Get(t), wrapperContracts.LoadTestConsumers[0].Address()) + wrapperConsumerJuelsBalanceAfterRequest, err := vrfContracts.LinkToken.BalanceOf(testcontext.Get(t), wrapperContracts.WrapperConsumers[0].Address()) require.NoError(t, err, "error getting wrapper consumer balance") require.Equal(t, expectedWrapperConsumerJuelsBalance, wrapperConsumerJuelsBalanceAfterRequest) @@ -356,7 +354,7 @@ func TestVRFv2Plus(t *testing.T) { testConfig := configCopy.VRFv2Plus.General var isNativeBilling = true - 
wrapperConsumerBalanceBeforeRequestWei, err := sethClient.Client.BalanceAt(testcontext.Get(t), common.HexToAddress(wrapperContracts.LoadTestConsumers[0].Address()), nil) + wrapperConsumerBalanceBeforeRequestWei, err := sethClient.Client.BalanceAt(testcontext.Get(t), common.HexToAddress(wrapperContracts.WrapperConsumers[0].Address()), nil) require.NoError(t, err, "error getting wrapper consumer balance") wrapperSubscription, err := vrfContracts.CoordinatorV2Plus.GetSubscription(testcontext.Get(t), wrapperSubID) @@ -364,7 +362,7 @@ func TestVRFv2Plus(t *testing.T) { subBalanceBeforeRequest := wrapperSubscription.NativeBalance randomWordsFulfilledEvent, err := vrfv2plus.DirectFundingRequestRandomnessAndWaitForFulfillment( - wrapperContracts.LoadTestConsumers[0], + wrapperContracts.WrapperConsumers[0], vrfContracts.CoordinatorV2Plus, vrfKey, wrapperSubID, @@ -380,13 +378,13 @@ func TestVRFv2Plus(t *testing.T) { subBalanceAfterRequest := wrapperSubscription.NativeBalance require.Equal(t, expectedSubBalanceWei, subBalanceAfterRequest) - consumerStatus, err := wrapperContracts.LoadTestConsumers[0].GetRequestStatus(testcontext.Get(t), randomWordsFulfilledEvent.RequestId) + consumerStatus, err := wrapperContracts.WrapperConsumers[0].GetRequestStatus(testcontext.Get(t), randomWordsFulfilledEvent.RequestId) require.NoError(t, err, "error getting rand request status") require.True(t, consumerStatus.Fulfilled) expectedWrapperConsumerWeiBalance := new(big.Int).Sub(wrapperConsumerBalanceBeforeRequestWei, consumerStatus.Paid) - wrapperConsumerBalanceAfterRequestWei, err := sethClient.Client.BalanceAt(testcontext.Get(t), common.HexToAddress(wrapperContracts.LoadTestConsumers[0].Address()), nil) + wrapperConsumerBalanceAfterRequestWei, err := sethClient.Client.BalanceAt(testcontext.Get(t), common.HexToAddress(wrapperContracts.WrapperConsumers[0].Address()), nil) require.NoError(t, err, "error getting wrapper consumer balance") require.Equal(t, expectedWrapperConsumerWeiBalance, wrapperConsumerBalanceAfterRequestWei) @@ -1063,16 +1061,14 @@ func TestVRFv2PlusMigration(t *testing.T) { t.Run("Test migration of direct billing using VRFV2PlusWrapper subID", func(t *testing.T) { configCopy := config.MustCopy().(tc.TestConfig) - wrapperContracts, wrapperSubID, err := vrfv2plus.SetupVRFV2PlusWrapperEnvironment( + wrapperContracts, wrapperSubID, err := vrfv2plus.SetupVRFV2PlusWrapperUniverse( testcontext.Get(t), - l, sethClient, + vrfContracts, &configCopy, - vrfContracts.LinkToken, - vrfContracts.MockETHLINKFeed, - vrfContracts.CoordinatorV2Plus, vrfKey.KeyHash, 1, + l, ) require.NoError(t, err) subID := wrapperSubID @@ -1203,7 +1199,7 @@ func TestVRFv2PlusMigration(t *testing.T) { // Verify rand requests fulfills with Link Token billing isNativeBilling := false randomWordsFulfilledEvent, err := vrfv2plus.DirectFundingRequestRandomnessAndWaitForFulfillment( - wrapperContracts.LoadTestConsumers[0], + wrapperContracts.WrapperConsumers[0], newCoordinator, vrfKey, subID, @@ -1212,14 +1208,14 @@ func TestVRFv2PlusMigration(t *testing.T) { l, ) require.NoError(t, err, "error requesting randomness and waiting for fulfilment") - consumerStatus, err := wrapperContracts.LoadTestConsumers[0].GetRequestStatus(testcontext.Get(t), randomWordsFulfilledEvent.RequestId) + consumerStatus, err := wrapperContracts.WrapperConsumers[0].GetRequestStatus(testcontext.Get(t), randomWordsFulfilledEvent.RequestId) require.NoError(t, err, "error getting rand request status") require.True(t, consumerStatus.Fulfilled) // Verify rand requests 
fulfills with Native Token billing isNativeBilling = true randomWordsFulfilledEvent, err = vrfv2plus.DirectFundingRequestRandomnessAndWaitForFulfillment( - wrapperContracts.LoadTestConsumers[0], + wrapperContracts.WrapperConsumers[0], newCoordinator, vrfKey, subID, @@ -1228,7 +1224,7 @@ func TestVRFv2PlusMigration(t *testing.T) { l, ) require.NoError(t, err, "error requesting randomness and waiting for fulfilment") - consumerStatus, err = wrapperContracts.LoadTestConsumers[0].GetRequestStatus(testcontext.Get(t), randomWordsFulfilledEvent.RequestId) + consumerStatus, err = wrapperContracts.WrapperConsumers[0].GetRequestStatus(testcontext.Get(t), randomWordsFulfilledEvent.RequestId) require.NoError(t, err, "error getting rand request status") require.True(t, consumerStatus.Fulfilled) }) @@ -1347,11 +1343,10 @@ func TestVRFV2PlusWithBHS(t *testing.T) { }() if *configCopy.VRFv2Plus.General.GenerateTXsOnChain { + wg.Add(1) go func() { - _, err := actions.ContinuouslyGenerateTXsOnChain(sethClient, desiredBlockNumberReached, l) + _, err := actions.ContinuouslyGenerateTXsOnChain(sethClient, desiredBlockNumberReached, &wg, l) require.NoError(t, err) - // Wait to let the transactions be mined and avoid nonce issues - time.Sleep(time.Second * 5) }() } wg.Wait() diff --git a/integration-tests/testconfig/common/vrf/common.go b/integration-tests/testconfig/common/vrf/common.go index e213191075f..326f7c98c76 100644 --- a/integration-tests/testconfig/common/vrf/common.go +++ b/integration-tests/testconfig/common/vrf/common.go @@ -71,10 +71,13 @@ func (c *PerformanceConfig) Validate() error { type ExistingEnvConfig struct { CoordinatorAddress *string `toml:"coordinator_address"` + UseExistingWrapper *bool `toml:"use_existing_wrapper"` + WrapperAddress *string `toml:"wrapper_address"` ConsumerAddress *string `toml:"consumer_address"` - LinkAddress *string `toml:"link_address"` + WrapperConsumerAddress *string `toml:"wrapper_consumer_address"` KeyHash *string `toml:"key_hash"` CreateFundSubsAndAddConsumers *bool `toml:"create_fund_subs_and_add_consumers"` + CreateFundAddWrapperConsumers *bool `toml:"create_fund_add_wrapper_consumers"` NodeSendingKeys []string `toml:"node_sending_keys"` Funding } @@ -83,23 +86,33 @@ func (c *ExistingEnvConfig) Validate() error { if c.CreateFundSubsAndAddConsumers == nil { return errors.New("create_fund_subs_and_add_consumers must be set ") } + if c.CreateFundAddWrapperConsumers == nil { + return errors.New("create_fund_add_wrapper_consumers must be set ") + } if c.CoordinatorAddress == nil { return errors.New("coordinator_address must be set when using existing environment") } if !common.IsHexAddress(*c.CoordinatorAddress) { return errors.New("coordinator_address must be a valid hex address") } + if c.UseExistingWrapper == nil { + return errors.New("use_existing_wrapper must be set ") + } + if *c.UseExistingWrapper { + if c.WrapperAddress == nil { + return errors.New("wrapper_address must be set when using `use_existing_wrapper=true`") + } + if !common.IsHexAddress(*c.WrapperAddress) { + return errors.New("wrapper_address must be a valid hex address") + } + } if c.KeyHash == nil { return errors.New("key_hash must be set when using existing environment") } if *c.KeyHash == "" { return errors.New("key_hash must be a non-empty string") } - if *c.CreateFundSubsAndAddConsumers { - if err := c.Funding.Validate(); err != nil { - return err - } - } else { + if !*c.CreateFundSubsAndAddConsumers { if c.ConsumerAddress == nil || *c.ConsumerAddress == "" { return 
errors.New("consumer_address must be set when using existing environment") } @@ -107,7 +120,14 @@ func (c *ExistingEnvConfig) Validate() error { return errors.New("consumer_address must be a valid hex address") } } - + if !*c.CreateFundAddWrapperConsumers { + if c.WrapperConsumerAddress == nil || *c.WrapperConsumerAddress == "" { + return errors.New("wrapper_consumer_address must be set when using existing environment") + } + if !common.IsHexAddress(*c.WrapperConsumerAddress) { + return errors.New("wrapper_consumer_address must be a valid hex address") + } + } if c.NodeSendingKeys != nil { for _, key := range c.NodeSendingKeys { if !common.IsHexAddress(key) { @@ -115,7 +135,6 @@ func (c *ExistingEnvConfig) Validate() error { } } } - return nil } @@ -127,7 +146,6 @@ func (c *Funding) Validate() error { if c.NodeSendingKeyFundingMin != nil && *c.NodeSendingKeyFundingMin <= 0 { return errors.New("when set node_sending_key_funding_min must be a positive value") } - return nil } diff --git a/integration-tests/testconfig/default.toml b/integration-tests/testconfig/default.toml index d317d05bc49..e4e216cf4a8 100644 --- a/integration-tests/testconfig/default.toml +++ b/integration-tests/testconfig/default.toml @@ -413,3 +413,155 @@ gas_price_estimation_enabled = true gas_price_estimation_blocks = 100 # priority of the transaction, can be "fast", "standard" or "slow" (the higher the priority, the higher adjustment factor will be used for gas estimation) [default: "standard"] gas_price_estimation_tx_priority = "standard" + + +[[Seth.networks]] +name = "Nexon Mainnet" +transaction_timeout = "3m" +eip_1559_dynamic_fees = true +transfer_gas_fee = 21_000 + +# manual settings, used when gas_price_estimation_enabled is false or when it fails +# legacy transactions +gas_price = 30_000_000_000 + +# EIP-1559 transactions +gas_fee_cap = 30_000_000_000 +gas_tip_cap = 1_800_000_000 + + +[Network.EVMNetworks.NEXON_MAINNET] +evm_name = "NEXON_MAINNET" +#evm_urls = ["rpc ws endpoint"] +#evm_http_urls = ["rpc http endpoint"] +client_implementation = "Ethereum" +#evm_keys = ["private keys you want to use"] +evm_simulated = false +evm_chainlink_transaction_limit = 5000 +evm_minimum_confirmations = 1 +evm_gas_estimation_buffer = 10000 +evm_supports_eip1559 = true +evm_default_gas_limit = 6000000 +evm_chain_id = 60118 + +[[Seth.networks]] +name = "Nexon Stage" +transaction_timeout = "3m" +eip_1559_dynamic_fees = true +transfer_gas_fee = 21_000 + +# manual settings, used when gas_price_estimation_enabled is false or when it fails +# legacy transactions +gas_price = 30_000_000_000 + +# EIP-1559 transactions +gas_fee_cap = 30_000_000_000 +gas_tip_cap = 1_800_000_000 + + +[Network.EVMNetworks.NEXON_STAGE] +evm_name = "NEXON_STAGE" +#evm_urls = ["rpc ws endpoint"] +#evm_http_urls = ["rpc http endpoint"] +client_implementation = "Ethereum" +#evm_keys = ["private keys you want to use"] +evm_simulated = false +evm_chainlink_transaction_limit = 5000 +evm_minimum_confirmations = 1 +evm_gas_estimation_buffer = 10000 +evm_supports_eip1559 = true +evm_default_gas_limit = 6000000 +evm_chain_id = 847799 + + +#### + +[[Seth.networks]] +name = "Nexon QA" +transaction_timeout = "3m" +eip_1559_dynamic_fees = true +transfer_gas_fee = 21_000 + +# manual settings, used when gas_price_estimation_enabled is false or when it fails +# legacy transactions +gas_price = 30_000_000_000 + +# EIP-1559 transactions +gas_fee_cap = 30_000_000_000 +gas_tip_cap = 1_800_000_000 + + +[Network.EVMNetworks.NEXON_QA] +evm_name = "NEXON_QA" +#evm_urls = ["rpc 
ws endpoint"] +#evm_http_urls = ["rpc http endpoint"] +client_implementation = "Ethereum" +#evm_keys = ["private keys you want to use"] +evm_simulated = false +evm_chainlink_transaction_limit = 5000 +evm_minimum_confirmations = 1 +evm_gas_estimation_buffer = 10000 +evm_supports_eip1559 = true +evm_default_gas_limit = 6000000 +evm_chain_id = 807424 + +##### + +[[Seth.networks]] +name = "Nexon Test" +transaction_timeout = "3m" +eip_1559_dynamic_fees = true +transfer_gas_fee = 21_000 + +# manual settings, used when gas_price_estimation_enabled is false or when it fails +# legacy transactions +gas_price = 30_000_000_000 + +# EIP-1559 transactions +gas_fee_cap = 30_000_000_000 +gas_tip_cap = 1_800_000_000 + + +[Network.EVMNetworks.NEXON_TEST] +evm_name = "NEXON_TEST" +#evm_urls = ["rpc ws endpoint"] +#evm_http_urls = ["rpc http endpoint"] +client_implementation = "Ethereum" +#evm_keys = ["private keys you want to use"] +evm_simulated = false +evm_chainlink_transaction_limit = 5000 +evm_minimum_confirmations = 1 +evm_gas_estimation_buffer = 10000 +evm_supports_eip1559 = true +evm_default_gas_limit = 6000000 +evm_chain_id = 595581 + +##### +[[Seth.networks]] +name = "Nexon Dev" +transaction_timeout = "3m" +eip_1559_dynamic_fees = true +transfer_gas_fee = 21_000 + +# manual settings, used when gas_price_estimation_enabled is false or when it fails +# legacy transactions +gas_price = 30_000_000_000 + +# EIP-1559 transactions +gas_fee_cap = 30_000_000_000 +gas_tip_cap = 1_800_000_000 + + +[Network.EVMNetworks.NEXON_DEV] +evm_name = "NEXON_DEV" +#evm_urls = ["rpc ws endpoint"] +#evm_http_urls = ["rpc http endpoint"] +client_implementation = "Ethereum" +#evm_keys = ["private keys you want to use"] +evm_simulated = false +evm_chainlink_transaction_limit = 5000 +evm_minimum_confirmations = 1 +evm_gas_estimation_buffer = 10000 +evm_supports_eip1559 = true +evm_default_gas_limit = 6000000 +evm_chain_id = 5668 diff --git a/integration-tests/testconfig/vrfv2/config.go b/integration-tests/testconfig/vrfv2/config.go index 76d54a45d5b..5e940403961 100644 --- a/integration-tests/testconfig/vrfv2/config.go +++ b/integration-tests/testconfig/vrfv2/config.go @@ -3,8 +3,6 @@ package testconfig import ( "errors" - "github.com/ethereum/go-ethereum/common" - vrf_common_config "github.com/smartcontractkit/chainlink/integration-tests/testconfig/common/vrf" ) @@ -52,10 +50,6 @@ func (c *ExistingEnvConfig) Validate() error { if *c.SubID == 0 { return errors.New("sub_id must be positive value") } - - if c.LinkAddress != nil && !common.IsHexAddress(*c.LinkAddress) { - return errors.New("link_address must be a valid hex address") - } } return c.Funding.Validate() diff --git a/integration-tests/testconfig/vrfv2/vrfv2.toml b/integration-tests/testconfig/vrfv2/vrfv2.toml index 011e90c15fd..de7200b1e79 100644 --- a/integration-tests/testconfig/vrfv2/vrfv2.toml +++ b/integration-tests/testconfig/vrfv2/vrfv2.toml @@ -113,11 +113,17 @@ bhf_job_run_timeout = "1h" [VRFv2.ExistingEnv] coordinator_address = "" -consumer_address = "" -sub_id = 1 key_hash = "" + +use_existing_wrapper = false +wrapper_address = "" create_fund_subs_and_add_consumers = true -link_address = "" +sub_id = 1 +consumer_address = "" + +create_fund_add_wrapper_consumers = true +wrapper_consumer_address = "" + node_sending_key_funding_min = 10 node_sending_keys = [ "", diff --git a/integration-tests/testconfig/vrfv2plus/vrfv2plus.toml b/integration-tests/testconfig/vrfv2plus/vrfv2plus.toml index 8f8aa9530e7..88ca12975f6 100644 --- 
a/integration-tests/testconfig/vrfv2plus/vrfv2plus.toml +++ b/integration-tests/testconfig/vrfv2plus/vrfv2plus.toml @@ -57,7 +57,7 @@ BatchSize = 100 """ [Common] -chainlink_node_funding = 0.5 +chainlink_node_funding = 0.7 [VRFv2Plus] [VRFv2Plus.General] @@ -138,11 +138,17 @@ bhf_job_run_timeout = "1h" [VRFv2Plus.ExistingEnv] coordinator_address = "" -consumer_address = "" -sub_id = "" key_hash = "" + +use_existing_wrapper = false +wrapper_address = "" create_fund_subs_and_add_consumers = true -link_address = "" +sub_id = "" +consumer_address = "" + +create_fund_add_wrapper_consumers = true +wrapper_consumer_address = "" + node_sending_key_funding_min = 1 node_sending_keys = [] @@ -272,7 +278,6 @@ key_hash = "0xd360445bacd26df47086ccf255c4f932d297ed8d5c7334b51eed32f61c541601" #key_hash = "0x2328cbee29e32d0b6662d6df82ff0fea7be300bd310561c92f515c9ee19464f1" #key_hash = "0x25f4e2d0509f42ec77db5380f3433a89fe623fa75f65d5b398d5f498327be4dd" create_fund_subs_and_add_consumers = true -link_address = "0x0Fd9e8d3aF1aaee056EB9e802c3A762a667b1904" node_sending_key_funding_min = 10 node_sending_keys = [ "0xD96013C241f1741C35a135321969f92Aae02A12F", @@ -407,7 +412,6 @@ consumer_address = "" sub_id = "" key_hash = "0xe13aa26fe94bfcd2ae055911f4d3bf1aed54ca6cf77af34e17f918802fd69ba1" create_fund_subs_and_add_consumers = true -link_address = "0xb1D4538B4571d411F07960EF2838Ce337FE1E80E" node_sending_key_funding_min = 20 node_sending_keys = [ "0xbE21ae371FcA1aC2d8A152e707D21e68d7d99252", @@ -551,7 +555,6 @@ consumer_address = "" sub_id = "" key_hash = "0x5b03254a80ea3eb72139ff0423cb88be42612780c3dd25f1d95a5ba7708a4be1" create_fund_subs_and_add_consumers = true -link_address = "0x0b9d5D9136855f6FEc3c0993feE6E9CE8a297846" node_sending_key_funding_min = 50 node_sending_keys = [ "0x3D7Da5D6A23CA2240CE576C8638C1798a023920a", @@ -676,7 +679,6 @@ consumer_address = "" sub_id = "" key_hash = "0xf5b4a359df0598eef89872ea2170f2afa844dbf74b417e6d44d4bda9420aceb2" create_fund_subs_and_add_consumers = true -link_address = "0x779877A7B0D9E8603169DdbD7836e478b4624789" node_sending_key_funding_min = 50 node_sending_keys = [ "0x0c0DC7f33A1256f0247c5ea75861d385fa5FED31", @@ -799,7 +801,6 @@ consumer_address = "" sub_id = "" key_hash = "0x4d43763d3eff849a89cf578a42787baa32132d7a80032125710e95b3972cd214" create_fund_subs_and_add_consumers = true -link_address = "0x84b9B910527Ad5C03A9Ca831909E21e236EA7b06" node_sending_key_funding_min = 150 node_sending_keys = [ "0x4EE2Cc6D50E8acb6BaEf673B03559525a6c92fB8", @@ -878,7 +879,6 @@ consumer_address = "" sub_id = "" key_hash = "0x7d5692e71807c4c02f5a109627a9ad2b12a361a346790a306983af9a5e3a186f" create_fund_subs_and_add_consumers = true -link_address = "0x92Bd61014c5BDc4A43BBbaAEa63d0694BE43ECDd" node_sending_key_funding_min = 30 node_sending_keys = [ "0xB97c0C52A2B957b45DA213e652c76090DDd0FEc6", @@ -965,7 +965,6 @@ consumer_address = "" sub_id = "" key_hash = "0xdc023892a41e5fe74ec7c4c2e8c0a808b01aea7acaf2b2ae30f4e08df877c48b" create_fund_subs_and_add_consumers = true -link_address = "0xE4DDEDb5A220eC218791dC35b1b4D737ba813EE7" node_sending_key_funding_min = 30 node_sending_keys = [ "0xF3d9879a75BBD85890056D7c6cB37C555F9b41A3", @@ -1051,7 +1050,6 @@ consumer_address = "" sub_id = "" key_hash = "0x0cb2a18e8b762cb4c8f7b17a6cc02ac7b9d2a3346f048cfd2f5d37677f8747d8" create_fund_subs_and_add_consumers = true -link_address = "0xD694472F1CD02E1f3fc3534386bda6802fCFe0f7" node_sending_key_funding_min = 30 node_sending_keys = [ "0xBFD780Af421e98C35918e10B9d6da7389C3e1D10", @@ -1138,7 +1136,6 @@ 
consumer_address = "" sub_id = "" key_hash = "0xbc9f525e3e1d9e2336f7c77d5f33f5b60aab3765944617fed7f66a6afecac616" create_fund_subs_and_add_consumers = true -link_address = "0x8E3f5E6dFeb4498437149b0d347ef51427dB1DE2" node_sending_key_funding_min = 30 node_sending_keys = [ ] From 7ec99efc64832750825f8bc6711fb9794d6e40df Mon Sep 17 00:00:00 2001 From: Matthew Pendrey Date: Mon, 5 Aug 2024 14:29:14 +0100 Subject: [PATCH 2/6] changes to support deterministic message hash in the remote target (#13935) --- .changeset/polite-crabs-pretend.md | 5 ++ .../keystone_contracts_setup.go | 60 +++++++++++-------- .../integration_tests/mock_libocr.go | 15 +++-- core/capabilities/integration_tests/setup.go | 11 ++-- .../integration_tests/streams_test.go | 6 +- core/capabilities/launcher.go | 2 + core/capabilities/launcher_test.go | 2 +- .../remote/target/endtoend_test.go | 2 +- core/capabilities/remote/target/server.go | 44 ++++++++++++-- .../capabilities/remote/target/server_test.go | 47 +++++++++++++-- core/capabilities/remote/trigger_publisher.go | 8 ++- .../remote/trigger_publisher_test.go | 2 +- .../capabilities/remote/trigger_subscriber.go | 8 ++- .../remote/trigger_subscriber_test.go | 2 +- core/capabilities/streams/trigger_test.go | 2 +- core/scripts/go.mod | 2 +- core/scripts/go.sum | 4 +- ...deploy_initialize_capabilities_registry.go | 15 +++-- core/services/registrysyncer/syncer.go | 26 +++++--- core/services/registrysyncer/syncer_test.go | 3 +- go.mod | 2 +- go.sum | 4 +- integration-tests/go.mod | 2 +- integration-tests/go.sum | 4 +- integration-tests/load/go.mod | 2 +- integration-tests/load/go.sum | 4 +- 26 files changed, 202 insertions(+), 82 deletions(-) create mode 100644 .changeset/polite-crabs-pretend.md diff --git a/.changeset/polite-crabs-pretend.md b/.changeset/polite-crabs-pretend.md new file mode 100644 index 00000000000..f8ea63b45c1 --- /dev/null +++ b/.changeset/polite-crabs-pretend.md @@ -0,0 +1,5 @@ +--- +"chainlink": patch +--- + +#internal ensure remote target request hash is deterministic diff --git a/core/capabilities/integration_tests/keystone_contracts_setup.go b/core/capabilities/integration_tests/keystone_contracts_setup.go index 42269d1bd45..004a4c32a3a 100644 --- a/core/capabilities/integration_tests/keystone_contracts_setup.go +++ b/core/capabilities/integration_tests/keystone_contracts_setup.go @@ -91,8 +91,8 @@ func peerToNode(nopID uint32, p peer) (kcr.CapabilitiesRegistryNodeParams, error }, nil } -func setupCapabilitiesRegistryContract(ctx context.Context, t *testing.T, workflowDonPeers []peer, triggerDonPeers []peer, - targetDonPeerIDs []peer, +func setupCapabilitiesRegistryContract(ctx context.Context, t *testing.T, workflowDon donInfo, triggerDon donInfo, + targetDon donInfo, transactOpts *bind.TransactOpts, backend *ethBackend) common.Address { addr, _, reg, err := kcr.DeployCapabilitiesRegistry(transactOpts, backend) require.NoError(t, err) @@ -157,7 +157,7 @@ func setupCapabilitiesRegistryContract(ctx context.Context, t *testing.T, workfl nopID := recLog.NodeOperatorId nodes := []kcr.CapabilitiesRegistryNodeParams{} - for _, wfPeer := range workflowDonPeers { + for _, wfPeer := range workflowDon.peerIDs { n, innerErr := peerToNode(nopID, wfPeer) require.NoError(t, innerErr) @@ -165,7 +165,7 @@ func setupCapabilitiesRegistryContract(ctx context.Context, t *testing.T, workfl nodes = append(nodes, n) } - for _, triggerPeer := range triggerDonPeers { + for _, triggerPeer := range triggerDon.peerIDs { n, innerErr := peerToNode(nopID, triggerPeer) 
require.NoError(t, innerErr) @@ -173,7 +173,7 @@ func setupCapabilitiesRegistryContract(ctx context.Context, t *testing.T, workfl nodes = append(nodes, n) } - for _, targetPeer := range targetDonPeerIDs { + for _, targetPeer := range targetDon.peerIDs { n, innerErr := peerToNode(nopID, targetPeer) require.NoError(t, innerErr) @@ -185,7 +185,7 @@ func setupCapabilitiesRegistryContract(ctx context.Context, t *testing.T, workfl require.NoError(t, err) // workflow DON - ps, err := peers(workflowDonPeers) + ps, err := peers(workflowDon.peerIDs) require.NoError(t, err) cc := newCapabilityConfig() @@ -199,22 +199,24 @@ func setupCapabilitiesRegistryContract(ctx context.Context, t *testing.T, workfl }, } - workflowDonF := uint8(2) - _, err = reg.AddDON(transactOpts, ps, cfgs, false, true, workflowDonF) + _, err = reg.AddDON(transactOpts, ps, cfgs, false, true, workflowDon.F) require.NoError(t, err) // trigger DON - ps, err = peers(triggerDonPeers) + ps, err = peers(triggerDon.peerIDs) require.NoError(t, err) - triggerDonF := 1 - config := &pb.RemoteTriggerConfig{ - RegistrationRefresh: durationpb.New(20000 * time.Millisecond), - RegistrationExpiry: durationpb.New(60000 * time.Millisecond), - // F + 1 - MinResponsesToAggregate: uint32(triggerDonF) + 1, + triggerCapabilityConfig := newCapabilityConfig() + triggerCapabilityConfig.RemoteConfig = &pb.CapabilityConfig_RemoteTriggerConfig{ + RemoteTriggerConfig: &pb.RemoteTriggerConfig{ + RegistrationRefresh: durationpb.New(60000 * time.Millisecond), + RegistrationExpiry: durationpb.New(60000 * time.Millisecond), + // F + 1 + MinResponsesToAggregate: uint32(triggerDon.F) + 1, + }, } - configb, err := proto.Marshal(config) + + configb, err := proto.Marshal(triggerCapabilityConfig) require.NoError(t, err) cfgs = []kcr.CapabilitiesRegistryCapabilityConfiguration{ @@ -224,22 +226,31 @@ func setupCapabilitiesRegistryContract(ctx context.Context, t *testing.T, workfl }, } - _, err = reg.AddDON(transactOpts, ps, cfgs, true, false, uint8(triggerDonF)) + _, err = reg.AddDON(transactOpts, ps, cfgs, true, false, triggerDon.F) require.NoError(t, err) // target DON - ps, err = peers(targetDonPeerIDs) + ps, err = peers(targetDon.peerIDs) + require.NoError(t, err) + + targetCapabilityConfig := newCapabilityConfig() + targetCapabilityConfig.RemoteConfig = &pb.CapabilityConfig_RemoteTargetConfig{ + RemoteTargetConfig: &pb.RemoteTargetConfig{ + RequestHashExcludedAttributes: []string{"signed_report.Signatures"}, + }, + } + + remoteTargetConfigBytes, err := proto.Marshal(targetCapabilityConfig) require.NoError(t, err) cfgs = []kcr.CapabilitiesRegistryCapabilityConfiguration{ { CapabilityId: wid, - Config: ccb, + Config: remoteTargetConfigBytes, }, } - targetDonF := uint8(1) - _, err = reg.AddDON(transactOpts, ps, cfgs, true, false, targetDonF) + _, err = reg.AddDON(transactOpts, ps, cfgs, true, false, targetDon.F) require.NoError(t, err) backend.Commit() @@ -253,19 +264,18 @@ func newCapabilityConfig() *pb.CapabilityConfig { } } -func setupForwarderContract(t *testing.T, workflowDonPeers []peer, workflowDonId uint32, - configVersion uint32, f uint8, +func setupForwarderContract(t *testing.T, workflowDon donInfo, transactOpts *bind.TransactOpts, backend *ethBackend) (common.Address, *forwarder.KeystoneForwarder) { addr, _, fwd, err := forwarder.DeployKeystoneForwarder(transactOpts, backend) require.NoError(t, err) backend.Commit() var signers []common.Address - for _, p := range workflowDonPeers { + for _, p := range workflowDon.peerIDs { signers = append(signers, 
common.HexToAddress(p.Signer)) } - _, err = fwd.SetConfig(transactOpts, workflowDonId, configVersion, f, signers) + _, err = fwd.SetConfig(transactOpts, workflowDon.ID, workflowDon.ConfigVersion, workflowDon.F, signers) require.NoError(t, err) backend.Commit() diff --git a/core/capabilities/integration_tests/mock_libocr.go b/core/capabilities/integration_tests/mock_libocr.go index 39c53d48aff..14ccdce6000 100644 --- a/core/capabilities/integration_tests/mock_libocr.go +++ b/core/capabilities/integration_tests/mock_libocr.go @@ -157,10 +157,6 @@ func (m *mockLibOCR) simulateProtocolRound(ctx context.Context) error { Signer: commontypes.OracleID(i), Signature: sig, }) - - if uint8(len(signatures)) == m.f+1 { - break - } } for _, node := range m.nodes { @@ -181,7 +177,16 @@ func (m *mockLibOCR) simulateProtocolRound(ctx context.Context) error { continue } - err = node.Transmit(ctx, types.ConfigDigest{}, 0, report, signatures) + // For each node select a random set of f+1 signatures to mimic libocr behaviour + s := rand.NewSource(time.Now().UnixNano()) + r := rand.New(s) + indices := r.Perm(len(signatures)) + selectedSignatures := make([]types.AttributedOnchainSignature, m.f+1) + for i := 0; i < int(m.f+1); i++ { + selectedSignatures[i] = signatures[indices[i]] + } + + err = node.Transmit(ctx, types.ConfigDigest{}, 0, report, selectedSignatures) if err != nil { return fmt.Errorf("failed to transmit report: %w", err) } diff --git a/core/capabilities/integration_tests/setup.go b/core/capabilities/integration_tests/setup.go index 0095d2fd9de..69b8c3eaa0a 100644 --- a/core/capabilities/integration_tests/setup.go +++ b/core/capabilities/integration_tests/setup.go @@ -68,8 +68,8 @@ func setupStreamDonsWithTransmissionSchedule(ctx context.Context, t *testing.T, lggr.SetLogLevel(TestLogLevel) ethBlockchain, transactor := setupBlockchain(t, 1000, 1*time.Second) - capabilitiesRegistryAddr := setupCapabilitiesRegistryContract(ctx, t, workflowDonInfo.peerIDs, triggerDonInfo.peerIDs, targetDonInfo.peerIDs, transactor, ethBlockchain) - forwarderAddr, _ := setupForwarderContract(t, workflowDonInfo.peerIDs, workflowDonInfo.ID, 1, workflowDonInfo.F, transactor, ethBlockchain) + capabilitiesRegistryAddr := setupCapabilitiesRegistryContract(ctx, t, workflowDonInfo, triggerDonInfo, targetDonInfo, transactor, ethBlockchain) + forwarderAddr, _ := setupForwarderContract(t, workflowDonInfo, transactor, ethBlockchain) consumerAddr, consumer := setupConsumerContract(t, transactor, ethBlockchain, forwarderAddr, workflowOwnerID, workflowName) var feedIDs []string @@ -259,9 +259,10 @@ func createDonInfo(t *testing.T, don don) donInfo { triggerDonInfo := donInfo{ DON: commoncap.DON{ - ID: don.id, - Members: donPeers, - F: don.f, + ID: don.id, + Members: donPeers, + F: don.f, + ConfigVersion: 1, }, peerIDs: peerIDs, keys: donKeys, diff --git a/core/capabilities/integration_tests/streams_test.go b/core/capabilities/integration_tests/streams_test.go index 6216e36c856..7be392932f8 100644 --- a/core/capabilities/integration_tests/streams_test.go +++ b/core/capabilities/integration_tests/streams_test.go @@ -22,9 +22,9 @@ func Test_AllAtOnceTransmissionSchedule(t *testing.T) { // The don IDs set in the below calls are inferred from the order in which the dons are added to the capabilities registry // in the setupCapabilitiesRegistryContract function, should this order change the don IDs will need updating. 
- workflowDonInfo := createDonInfo(t, don{id: 1, numNodes: 5, f: 1}) - triggerDonInfo := createDonInfo(t, don{id: 2, numNodes: 7, f: 1}) - targetDonInfo := createDonInfo(t, don{id: 3, numNodes: 4, f: 1}) + workflowDonInfo := createDonInfo(t, don{id: 1, numNodes: 7, f: 2}) + triggerDonInfo := createDonInfo(t, don{id: 2, numNodes: 7, f: 2}) + targetDonInfo := createDonInfo(t, don{id: 3, numNodes: 4, f: 2}) consumer, feedIDs, triggerSink := setupStreamDonsWithTransmissionSchedule(ctx, t, workflowDonInfo, triggerDonInfo, targetDonInfo, 3, "2s", "allAtOnce") diff --git a/core/capabilities/launcher.go b/core/capabilities/launcher.go index b4ade04127b..b30477e4c83 100644 --- a/core/capabilities/launcher.go +++ b/core/capabilities/launcher.go @@ -216,6 +216,7 @@ func (w *launcher) addRemoteCapabilities(ctx context.Context, myDON registrysync int(remoteDON.F+1), w.lggr, ) + // TODO: We need to implement a custom, Mercury-specific // aggregator here, because there is no guarantee that // all trigger events in the workflow will have the same @@ -358,6 +359,7 @@ func (w *launcher) exposeCapabilities(ctx context.Context, myPeerID p2ptypes.Pee case capabilities.CapabilityTypeTarget: newTargetServer := func(capability capabilities.BaseCapability, info capabilities.CapabilityInfo) (receiverService, error) { return target.NewServer( + c.RemoteTargetConfig, myPeerID, capability.(capabilities.TargetCapability), info, diff --git a/core/capabilities/launcher_test.go b/core/capabilities/launcher_test.go index fb3e6837d00..82b03edcecb 100644 --- a/core/capabilities/launcher_test.go +++ b/core/capabilities/launcher_test.go @@ -323,7 +323,7 @@ func TestLauncher_WiresUpClientsForPublicWorkflowDON(t *testing.T) { // The below state describes a Workflow DON (AcceptsWorkflows = true), // which exposes the streams-trigger and write_chain capabilities. // We expect receivers to be wired up and both capabilities to be added to the registry. 
- var rtc capabilities.RemoteTriggerConfig + rtc := &capabilities.RemoteTriggerConfig{} rtc.ApplyDefaults() state := ®istrysyncer.LocalRegistry{ diff --git a/core/capabilities/remote/target/endtoend_test.go b/core/capabilities/remote/target/endtoend_test.go index 9bbb53d4f66..cfab50f0fe7 100644 --- a/core/capabilities/remote/target/endtoend_test.go +++ b/core/capabilities/remote/target/endtoend_test.go @@ -226,7 +226,7 @@ func testRemoteTarget(ctx context.Context, t *testing.T, underlying commoncap.Ta for i := 0; i < numCapabilityPeers; i++ { capabilityPeer := capabilityPeers[i] capabilityDispatcher := broker.NewDispatcherForNode(capabilityPeer) - capabilityNode := target.NewServer(capabilityPeer, underlying, capInfo, capDonInfo, workflowDONs, capabilityDispatcher, + capabilityNode := target.NewServer(&commoncap.RemoteTargetConfig{RequestHashExcludedAttributes: []string{}}, capabilityPeer, underlying, capInfo, capDonInfo, workflowDONs, capabilityDispatcher, capabilityNodeResponseTimeout, lggr) servicetest.Run(t, capabilityNode) broker.RegisterReceiverNode(capabilityPeer, capabilityNode) diff --git a/core/capabilities/remote/target/server.go b/core/capabilities/remote/target/server.go index ea9caf81eff..39023ffb3fa 100644 --- a/core/capabilities/remote/target/server.go +++ b/core/capabilities/remote/target/server.go @@ -4,10 +4,12 @@ import ( "context" "crypto/sha256" "encoding/hex" + "fmt" "sync" "time" commoncap "github.com/smartcontractkit/chainlink-common/pkg/capabilities" + "github.com/smartcontractkit/chainlink-common/pkg/capabilities/pb" "github.com/smartcontractkit/chainlink-common/pkg/services" "github.com/smartcontractkit/chainlink/v2/core/capabilities/remote/target/request" "github.com/smartcontractkit/chainlink/v2/core/capabilities/remote/types" @@ -24,7 +26,9 @@ import ( // server communicates with corresponding client on remote nodes. 
type server struct { services.StateMachine - lggr logger.Logger + lggr logger.Logger + + config *commoncap.RemoteTargetConfig peerID p2ptypes.PeerID underlying commoncap.TargetCapability capInfo commoncap.CapabilityInfo @@ -51,9 +55,14 @@ type requestAndMsgID struct { messageID string } -func NewServer(peerID p2ptypes.PeerID, underlying commoncap.TargetCapability, capInfo commoncap.CapabilityInfo, localDonInfo commoncap.DON, +func NewServer(config *commoncap.RemoteTargetConfig, peerID p2ptypes.PeerID, underlying commoncap.TargetCapability, capInfo commoncap.CapabilityInfo, localDonInfo commoncap.DON, workflowDONs map[uint32]commoncap.DON, dispatcher types.Dispatcher, requestTimeout time.Duration, lggr logger.Logger) *server { + if config == nil { + lggr.Info("no config provided, using default values") + config = &commoncap.RemoteTargetConfig{} + } return &server{ + config: config, underlying: underlying, peerID: peerID, capInfo: capInfo, @@ -126,11 +135,16 @@ func (r *server) Receive(ctx context.Context, msg *types.MessageBody) { return } + msgHash, err := r.getMessageHash(msg) + if err != nil { + r.lggr.Errorw("failed to get message hash", "err", err) + return + } + // A request is uniquely identified by the message id and the hash of the payload to prevent a malicious // actor from sending a different payload with the same message id messageId := GetMessageID(msg) - hash := sha256.Sum256(msg.Payload) - requestID := messageId + hex.EncodeToString(hash[:]) + requestID := messageId + hex.EncodeToString(msgHash[:]) if requestIDs, ok := r.messageIDToRequestIDsCount[messageId]; ok { requestIDs[requestID] = requestIDs[requestID] + 1 @@ -161,12 +175,32 @@ func (r *server) Receive(ctx context.Context, msg *types.MessageBody) { reqAndMsgID := r.requestIDToRequest[requestID] - err := reqAndMsgID.request.OnMessage(ctx, msg) + err = reqAndMsgID.request.OnMessage(ctx, msg) if err != nil { r.lggr.Errorw("request failed to OnMessage new message", "request", reqAndMsgID, "err", err) } } +func (r *server) getMessageHash(msg *types.MessageBody) ([32]byte, error) { + req, err := pb.UnmarshalCapabilityRequest(msg.Payload) + if err != nil { + return [32]byte{}, fmt.Errorf("failed to unmarshal capability request: %w", err) + } + + for _, path := range r.config.RequestHashExcludedAttributes { + if !req.Inputs.DeleteAtPath(path) { + return [32]byte{}, fmt.Errorf("failed to delete attribute from map at path: %s", path) + } + } + + reqBytes, err := pb.MarshalCapabilityRequest(req) + if err != nil { + return [32]byte{}, fmt.Errorf("failed to marshal capability request: %w", err) + } + hash := sha256.Sum256(reqBytes) + return hash, nil +} + func GetMessageID(msg *types.MessageBody) string { return string(msg.MessageId) } diff --git a/core/capabilities/remote/target/server_test.go b/core/capabilities/remote/target/server_test.go index a5aa45efd06..2460a2dd0f7 100644 --- a/core/capabilities/remote/target/server_test.go +++ b/core/capabilities/remote/target/server_test.go @@ -2,6 +2,7 @@ package target_test import ( "context" + "strconv" "testing" "time" @@ -11,6 +12,7 @@ import ( commoncap "github.com/smartcontractkit/chainlink-common/pkg/capabilities" "github.com/smartcontractkit/chainlink-common/pkg/capabilities/pb" "github.com/smartcontractkit/chainlink-common/pkg/services" + "github.com/smartcontractkit/chainlink-common/pkg/values" "github.com/smartcontractkit/chainlink/v2/core/capabilities/remote/target" remotetypes "github.com/smartcontractkit/chainlink/v2/core/capabilities/remote/types" 
"github.com/smartcontractkit/chainlink/v2/core/internal/testutils" @@ -18,12 +20,48 @@ import ( p2ptypes "github.com/smartcontractkit/chainlink/v2/core/services/p2p/types" ) +func Test_Server_ExcludesNonDeterministicInputAttributes(t *testing.T) { + ctx := testutils.Context(t) + + numCapabilityPeers := 4 + + callers, srvcs := testRemoteTargetServer(ctx, t, &commoncap.RemoteTargetConfig{RequestHashExcludedAttributes: []string{"signed_report.Signatures"}}, + &TestCapability{}, 10, 9, numCapabilityPeers, 3, 10*time.Minute) + + for idx, caller := range callers { + rawInputs := map[string]any{ + "signed_report": map[string]any{"Signatures": "sig" + strconv.Itoa(idx), "Price": 20}, + } + + inputs, err := values.NewMap(rawInputs) + require.NoError(t, err) + + _, err = caller.Execute(context.Background(), + commoncap.CapabilityRequest{ + Metadata: commoncap.RequestMetadata{ + WorkflowID: "workflowID", + WorkflowExecutionID: "workflowExecutionID", + }, + Inputs: inputs, + }) + require.NoError(t, err) + } + + for _, caller := range callers { + for i := 0; i < numCapabilityPeers; i++ { + msg := <-caller.receivedMessages + assert.Equal(t, remotetypes.Error_OK, msg.Error) + } + } + closeServices(t, srvcs) +} + func Test_Server_RespondsAfterSufficientRequests(t *testing.T) { ctx := testutils.Context(t) numCapabilityPeers := 4 - callers, srvcs := testRemoteTargetServer(ctx, t, &TestCapability{}, 10, 9, numCapabilityPeers, 3, 10*time.Minute) + callers, srvcs := testRemoteTargetServer(ctx, t, &commoncap.RemoteTargetConfig{}, &TestCapability{}, 10, 9, numCapabilityPeers, 3, 10*time.Minute) for _, caller := range callers { _, err := caller.Execute(context.Background(), @@ -50,7 +88,7 @@ func Test_Server_InsufficientCallers(t *testing.T) { numCapabilityPeers := 4 - callers, srvcs := testRemoteTargetServer(ctx, t, &TestCapability{}, 10, 10, numCapabilityPeers, 3, 100*time.Millisecond) + callers, srvcs := testRemoteTargetServer(ctx, t, &commoncap.RemoteTargetConfig{}, &TestCapability{}, 10, 10, numCapabilityPeers, 3, 100*time.Millisecond) for _, caller := range callers { _, err := caller.Execute(context.Background(), @@ -77,7 +115,7 @@ func Test_Server_CapabilityError(t *testing.T) { numCapabilityPeers := 4 - callers, srvcs := testRemoteTargetServer(ctx, t, &TestErrorCapability{}, 10, 9, numCapabilityPeers, 3, 100*time.Millisecond) + callers, srvcs := testRemoteTargetServer(ctx, t, &commoncap.RemoteTargetConfig{}, &TestErrorCapability{}, 10, 9, numCapabilityPeers, 3, 100*time.Millisecond) for _, caller := range callers { _, err := caller.Execute(context.Background(), @@ -100,6 +138,7 @@ func Test_Server_CapabilityError(t *testing.T) { } func testRemoteTargetServer(ctx context.Context, t *testing.T, + config *commoncap.RemoteTargetConfig, underlying commoncap.TargetCapability, numWorkflowPeers int, workflowDonF uint8, numCapabilityPeers int, capabilityDonF uint8, capabilityNodeResponseTimeout time.Duration) ([]*serverTestClient, []services.Service) { @@ -150,7 +189,7 @@ func testRemoteTargetServer(ctx context.Context, t *testing.T, for i := 0; i < numCapabilityPeers; i++ { capabilityPeer := capabilityPeers[i] capabilityDispatcher := broker.NewDispatcherForNode(capabilityPeer) - capabilityNode := target.NewServer(capabilityPeer, underlying, capInfo, capDonInfo, workflowDONs, capabilityDispatcher, + capabilityNode := target.NewServer(config, capabilityPeer, underlying, capInfo, capDonInfo, workflowDONs, capabilityDispatcher, capabilityNodeResponseTimeout, lggr) require.NoError(t, capabilityNode.Start(ctx)) 
broker.RegisterReceiverNode(capabilityPeer, capabilityNode) diff --git a/core/capabilities/remote/trigger_publisher.go b/core/capabilities/remote/trigger_publisher.go index 35ce41118f5..c1f2fb32c5a 100644 --- a/core/capabilities/remote/trigger_publisher.go +++ b/core/capabilities/remote/trigger_publisher.go @@ -21,7 +21,7 @@ import ( // // TriggerPublisher communicates with corresponding TriggerSubscribers on remote nodes. type triggerPublisher struct { - config capabilities.RemoteTriggerConfig + config *capabilities.RemoteTriggerConfig underlying commoncap.TriggerCapability capInfo commoncap.CapabilityInfo capDonInfo commoncap.DON @@ -48,7 +48,11 @@ type pubRegState struct { var _ types.Receiver = &triggerPublisher{} var _ services.Service = &triggerPublisher{} -func NewTriggerPublisher(config capabilities.RemoteTriggerConfig, underlying commoncap.TriggerCapability, capInfo commoncap.CapabilityInfo, capDonInfo commoncap.DON, workflowDONs map[uint32]commoncap.DON, dispatcher types.Dispatcher, lggr logger.Logger) *triggerPublisher { +func NewTriggerPublisher(config *capabilities.RemoteTriggerConfig, underlying commoncap.TriggerCapability, capInfo commoncap.CapabilityInfo, capDonInfo commoncap.DON, workflowDONs map[uint32]commoncap.DON, dispatcher types.Dispatcher, lggr logger.Logger) *triggerPublisher { + if config == nil { + lggr.Info("no config provided, using default values") + config = &capabilities.RemoteTriggerConfig{} + } config.ApplyDefaults() return &triggerPublisher{ config: config, diff --git a/core/capabilities/remote/trigger_publisher_test.go b/core/capabilities/remote/trigger_publisher_test.go index 1e3000d20ca..2c4a8518965 100644 --- a/core/capabilities/remote/trigger_publisher_test.go +++ b/core/capabilities/remote/trigger_publisher_test.go @@ -42,7 +42,7 @@ func TestTriggerPublisher_Register(t *testing.T) { } dispatcher := remoteMocks.NewDispatcher(t) - config := capabilities.RemoteTriggerConfig{ + config := &capabilities.RemoteTriggerConfig{ RegistrationRefresh: 100 * time.Millisecond, RegistrationExpiry: 100 * time.Second, MinResponsesToAggregate: 1, diff --git a/core/capabilities/remote/trigger_subscriber.go b/core/capabilities/remote/trigger_subscriber.go index 0ccbf37c61a..2d038e45c08 100644 --- a/core/capabilities/remote/trigger_subscriber.go +++ b/core/capabilities/remote/trigger_subscriber.go @@ -23,7 +23,7 @@ import ( // // TriggerSubscriber communicates with corresponding TriggerReceivers on remote nodes. 
type triggerSubscriber struct { - config capabilities.RemoteTriggerConfig + config *capabilities.RemoteTriggerConfig capInfo commoncap.CapabilityInfo capDonInfo capabilities.DON capDonMembers map[p2ptypes.PeerID]struct{} @@ -55,11 +55,15 @@ var _ services.Service = &triggerSubscriber{} // TODO makes this configurable with a default const defaultSendChannelBufferSize = 1000 -func NewTriggerSubscriber(config capabilities.RemoteTriggerConfig, capInfo commoncap.CapabilityInfo, capDonInfo capabilities.DON, localDonInfo capabilities.DON, dispatcher types.Dispatcher, aggregator types.Aggregator, lggr logger.Logger) *triggerSubscriber { +func NewTriggerSubscriber(config *capabilities.RemoteTriggerConfig, capInfo commoncap.CapabilityInfo, capDonInfo capabilities.DON, localDonInfo capabilities.DON, dispatcher types.Dispatcher, aggregator types.Aggregator, lggr logger.Logger) *triggerSubscriber { if aggregator == nil { lggr.Warnw("no aggregator provided, using default MODE aggregator", "capabilityId", capInfo.ID) aggregator = NewDefaultModeAggregator(uint32(capDonInfo.F + 1)) } + if config == nil { + lggr.Info("no config provided, using default values") + config = &capabilities.RemoteTriggerConfig{} + } config.ApplyDefaults() capDonMembers := make(map[p2ptypes.PeerID]struct{}) for _, member := range capDonInfo.Members { diff --git a/core/capabilities/remote/trigger_subscriber_test.go b/core/capabilities/remote/trigger_subscriber_test.go index 93e962215ab..2e34b03ec5c 100644 --- a/core/capabilities/remote/trigger_subscriber_test.go +++ b/core/capabilities/remote/trigger_subscriber_test.go @@ -63,7 +63,7 @@ func TestTriggerSubscriber_RegisterAndReceive(t *testing.T) { }) // register trigger - config := capabilities.RemoteTriggerConfig{ + config := &capabilities.RemoteTriggerConfig{ RegistrationRefresh: 100 * time.Millisecond, RegistrationExpiry: 100 * time.Second, MinResponsesToAggregate: 1, diff --git a/core/capabilities/streams/trigger_test.go b/core/capabilities/streams/trigger_test.go index cb4cfaa36bc..853f07f2aae 100644 --- a/core/capabilities/streams/trigger_test.go +++ b/core/capabilities/streams/trigger_test.go @@ -87,7 +87,7 @@ func TestStreamsTrigger(t *testing.T) { Members: capMembers, F: uint8(F), } - config := capabilities.RemoteTriggerConfig{ + config := &capabilities.RemoteTriggerConfig{ MinResponsesToAggregate: uint32(F + 1), } subscriber := remote.NewTriggerSubscriber(config, capInfo, capDonInfo, capabilities.DON{}, nil, agg, lggr) diff --git a/core/scripts/go.mod b/core/scripts/go.mod index 4ee443d46f8..4e250038710 100644 --- a/core/scripts/go.mod +++ b/core/scripts/go.mod @@ -22,7 +22,7 @@ require ( github.com/prometheus/client_golang v1.17.0 github.com/shopspring/decimal v1.4.0 github.com/smartcontractkit/chainlink-automation v1.0.4 - github.com/smartcontractkit/chainlink-common v0.2.2-0.20240731184516-249ef7ad0cdc + github.com/smartcontractkit/chainlink-common v0.2.2-0.20240801092904-114abb088409 github.com/smartcontractkit/chainlink/v2 v2.0.0-00010101000000-000000000000 github.com/smartcontractkit/libocr v0.0.0-20240717100443-f6226e09bee7 github.com/spf13/cobra v1.8.0 diff --git a/core/scripts/go.sum b/core/scripts/go.sum index 3ae26beb633..4c8eee4a1db 100644 --- a/core/scripts/go.sum +++ b/core/scripts/go.sum @@ -1184,8 +1184,8 @@ github.com/smartcontractkit/chain-selectors v1.0.10 h1:t9kJeE6B6G+hKD0GYR4kGJSCq github.com/smartcontractkit/chain-selectors v1.0.10/go.mod h1:d4Hi+E1zqjy9HqMkjBE5q1vcG9VGgxf5VxiRHfzi2kE= github.com/smartcontractkit/chainlink-automation v1.0.4 
h1:iyW181JjKHLNMnDleI8umfIfVVlwC7+n5izbLSFgjw8= github.com/smartcontractkit/chainlink-automation v1.0.4/go.mod h1:u4NbPZKJ5XiayfKHD/v3z3iflQWqvtdhj13jVZXj/cM= -github.com/smartcontractkit/chainlink-common v0.2.2-0.20240731184516-249ef7ad0cdc h1:nNZqLasN8y5huDKX76JUZtni7WkUI36J61//czbJpDM= -github.com/smartcontractkit/chainlink-common v0.2.2-0.20240731184516-249ef7ad0cdc/go.mod h1:Jg1sCTsbxg76YByI8ifpFby3FvVqISStHT8ypy9ocmY= +github.com/smartcontractkit/chainlink-common v0.2.2-0.20240801092904-114abb088409 h1:rwo/bzqzbhSPBn1CHFfHiQPcMlpBV/hau4TrpJngTJc= +github.com/smartcontractkit/chainlink-common v0.2.2-0.20240801092904-114abb088409/go.mod h1:Jg1sCTsbxg76YByI8ifpFby3FvVqISStHT8ypy9ocmY= github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240710121324-3ed288aa9b45 h1:NBQLtqk8zsyY4qTJs+NElI3aDFTcAo83JHvqD04EvB0= github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240710121324-3ed288aa9b45/go.mod h1:LV0h7QBQUpoC2UUi6TcUvcIFm1xjP/DtEcqV8+qeLUs= github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240801131703-fd75761c982f h1:I9fTBJpHkeldFplXUy71eLIn6A6GxuR4xrABoUeD+CM= diff --git a/core/scripts/keystone/src/05_deploy_initialize_capabilities_registry.go b/core/scripts/keystone/src/05_deploy_initialize_capabilities_registry.go index 87622415430..3352267d149 100644 --- a/core/scripts/keystone/src/05_deploy_initialize_capabilities_registry.go +++ b/core/scripts/keystone/src/05_deploy_initialize_capabilities_registry.go @@ -11,10 +11,11 @@ import ( "time" "github.com/ethereum/go-ethereum/common" - ragetypes "github.com/smartcontractkit/libocr/ragep2p/types" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/types/known/durationpb" + ragetypes "github.com/smartcontractkit/libocr/ragep2p/types" + capabilitiespb "github.com/smartcontractkit/chainlink-common/pkg/capabilities/pb" "github.com/smartcontractkit/chainlink-common/pkg/values" @@ -373,8 +374,14 @@ func (c *deployAndInitializeCapabilitiesRegistryCommand) Run(args []string) { panic(err) } - cc = newCapabilityConfig() - ccb, err = proto.Marshal(cc) + targetCapabilityConfig := newCapabilityConfig() + targetCapabilityConfig.RemoteConfig = &capabilitiespb.CapabilityConfig_RemoteTargetConfig{ + RemoteTargetConfig: &capabilitiespb.RemoteTargetConfig{ + RequestHashExcludedAttributes: []string{"signed_report.Signatures"}, + }, + } + + remoteTargetConfigBytes, err := proto.Marshal(targetCapabilityConfig) if err != nil { panic(err) } @@ -382,7 +389,7 @@ func (c *deployAndInitializeCapabilitiesRegistryCommand) Run(args []string) { cfgs = []kcr.CapabilitiesRegistryCapabilityConfiguration{ { CapabilityId: wid, - Config: ccb, + Config: remoteTargetConfigBytes, }, } _, err = reg.AddDON(env.Owner, ps, cfgs, true, false, 1) diff --git a/core/services/registrysyncer/syncer.go b/core/services/registrysyncer/syncer.go index 4bbfaef5040..9675d86dc86 100644 --- a/core/services/registrysyncer/syncer.go +++ b/core/services/registrysyncer/syncer.go @@ -165,12 +165,21 @@ func unmarshalCapabilityConfig(data []byte) (capabilities.CapabilityConfiguratio return capabilities.CapabilityConfiguration{}, err } - var rtc capabilities.RemoteTriggerConfig - if prtc := cconf.GetRemoteTriggerConfig(); prtc != nil { - rtc.RegistrationRefresh = prtc.RegistrationRefresh.AsDuration() - rtc.RegistrationExpiry = prtc.RegistrationExpiry.AsDuration() - rtc.MinResponsesToAggregate = prtc.MinResponsesToAggregate - rtc.MessageExpiry = prtc.MessageExpiry.AsDuration() + var remoteTriggerConfig *capabilities.RemoteTriggerConfig + var remoteTargetConfig 
*capabilities.RemoteTargetConfig + + switch cconf.GetRemoteConfig().(type) { + case *capabilitiespb.CapabilityConfig_RemoteTriggerConfig: + prtc := cconf.GetRemoteTriggerConfig() + remoteTriggerConfig = &capabilities.RemoteTriggerConfig{} + remoteTriggerConfig.RegistrationRefresh = prtc.RegistrationRefresh.AsDuration() + remoteTriggerConfig.RegistrationExpiry = prtc.RegistrationExpiry.AsDuration() + remoteTriggerConfig.MinResponsesToAggregate = prtc.MinResponsesToAggregate + remoteTriggerConfig.MessageExpiry = prtc.MessageExpiry.AsDuration() + case *capabilitiespb.CapabilityConfig_RemoteTargetConfig: + prtc := cconf.GetRemoteTargetConfig() + remoteTargetConfig = &capabilities.RemoteTargetConfig{} + remoteTargetConfig.RequestHashExcludedAttributes = prtc.RequestHashExcludedAttributes } dc, err := values.FromMapValueProto(cconf.DefaultConfig) @@ -180,7 +189,8 @@ func unmarshalCapabilityConfig(data []byte) (capabilities.CapabilityConfiguratio return capabilities.CapabilityConfiguration{ DefaultConfig: dc, - RemoteTriggerConfig: rtc, + RemoteTriggerConfig: remoteTriggerConfig, + RemoteTargetConfig: remoteTargetConfig, }, nil } @@ -223,8 +233,6 @@ func (s *registrySyncer) localRegistry(ctx context.Context) (*LocalRegistry, err return nil, innerErr } - cconf.RemoteTriggerConfig.ApplyDefaults() - cc[cid] = cconf } diff --git a/core/services/registrysyncer/syncer_test.go b/core/services/registrysyncer/syncer_test.go index b926183394e..c13cc904909 100644 --- a/core/services/registrysyncer/syncer_test.go +++ b/core/services/registrysyncer/syncer_test.go @@ -210,6 +210,7 @@ func TestReader_Integration(t *testing.T) { RegistrationExpiry: durationpb.New(60 * time.Second), // F + 1 MinResponsesToAggregate: uint32(1) + 1, + MessageExpiry: durationpb.New(120 * time.Second), }, }, } @@ -256,7 +257,7 @@ func TestReader_Integration(t *testing.T) { }, gotCap) assert.Len(t, s.IDsToDONs, 1) - rtc := capabilities.RemoteTriggerConfig{ + rtc := &capabilities.RemoteTriggerConfig{ RegistrationRefresh: 20 * time.Second, MinResponsesToAggregate: 2, RegistrationExpiry: 60 * time.Second, diff --git a/go.mod b/go.mod index 8e2103eb246..3aa878d1ab2 100644 --- a/go.mod +++ b/go.mod @@ -72,7 +72,7 @@ require ( github.com/shopspring/decimal v1.4.0 github.com/smartcontractkit/chain-selectors v1.0.10 github.com/smartcontractkit/chainlink-automation v1.0.4 - github.com/smartcontractkit/chainlink-common v0.2.2-0.20240731184516-249ef7ad0cdc + github.com/smartcontractkit/chainlink-common v0.2.2-0.20240801092904-114abb088409 github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240710121324-3ed288aa9b45 github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240801131703-fd75761c982f github.com/smartcontractkit/chainlink-feeds v0.0.0-20240710170203-5b41615da827 diff --git a/go.sum b/go.sum index 73d6d5b227a..0264dcc01c3 100644 --- a/go.sum +++ b/go.sum @@ -1136,8 +1136,8 @@ github.com/smartcontractkit/chain-selectors v1.0.10 h1:t9kJeE6B6G+hKD0GYR4kGJSCq github.com/smartcontractkit/chain-selectors v1.0.10/go.mod h1:d4Hi+E1zqjy9HqMkjBE5q1vcG9VGgxf5VxiRHfzi2kE= github.com/smartcontractkit/chainlink-automation v1.0.4 h1:iyW181JjKHLNMnDleI8umfIfVVlwC7+n5izbLSFgjw8= github.com/smartcontractkit/chainlink-automation v1.0.4/go.mod h1:u4NbPZKJ5XiayfKHD/v3z3iflQWqvtdhj13jVZXj/cM= -github.com/smartcontractkit/chainlink-common v0.2.2-0.20240731184516-249ef7ad0cdc h1:nNZqLasN8y5huDKX76JUZtni7WkUI36J61//czbJpDM= -github.com/smartcontractkit/chainlink-common v0.2.2-0.20240731184516-249ef7ad0cdc/go.mod 
h1:Jg1sCTsbxg76YByI8ifpFby3FvVqISStHT8ypy9ocmY= +github.com/smartcontractkit/chainlink-common v0.2.2-0.20240801092904-114abb088409 h1:rwo/bzqzbhSPBn1CHFfHiQPcMlpBV/hau4TrpJngTJc= +github.com/smartcontractkit/chainlink-common v0.2.2-0.20240801092904-114abb088409/go.mod h1:Jg1sCTsbxg76YByI8ifpFby3FvVqISStHT8ypy9ocmY= github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240710121324-3ed288aa9b45 h1:NBQLtqk8zsyY4qTJs+NElI3aDFTcAo83JHvqD04EvB0= github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240710121324-3ed288aa9b45/go.mod h1:LV0h7QBQUpoC2UUi6TcUvcIFm1xjP/DtEcqV8+qeLUs= github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240801131703-fd75761c982f h1:I9fTBJpHkeldFplXUy71eLIn6A6GxuR4xrABoUeD+CM= diff --git a/integration-tests/go.mod b/integration-tests/go.mod index ed693f4fccc..d7c1918c927 100644 --- a/integration-tests/go.mod +++ b/integration-tests/go.mod @@ -28,7 +28,7 @@ require ( github.com/shopspring/decimal v1.4.0 github.com/slack-go/slack v0.12.2 github.com/smartcontractkit/chainlink-automation v1.0.4 - github.com/smartcontractkit/chainlink-common v0.2.2-0.20240731184516-249ef7ad0cdc + github.com/smartcontractkit/chainlink-common v0.2.2-0.20240801092904-114abb088409 github.com/smartcontractkit/chainlink-testing-framework v1.33.0 github.com/smartcontractkit/chainlink-testing-framework/grafana v0.0.0-20240405215812-5a72bc9af239 github.com/smartcontractkit/chainlink/v2 v2.0.0-00010101000000-000000000000 diff --git a/integration-tests/go.sum b/integration-tests/go.sum index ca3ce8d903e..2854b2d599b 100644 --- a/integration-tests/go.sum +++ b/integration-tests/go.sum @@ -1486,8 +1486,8 @@ github.com/smartcontractkit/chain-selectors v1.0.10 h1:t9kJeE6B6G+hKD0GYR4kGJSCq github.com/smartcontractkit/chain-selectors v1.0.10/go.mod h1:d4Hi+E1zqjy9HqMkjBE5q1vcG9VGgxf5VxiRHfzi2kE= github.com/smartcontractkit/chainlink-automation v1.0.4 h1:iyW181JjKHLNMnDleI8umfIfVVlwC7+n5izbLSFgjw8= github.com/smartcontractkit/chainlink-automation v1.0.4/go.mod h1:u4NbPZKJ5XiayfKHD/v3z3iflQWqvtdhj13jVZXj/cM= -github.com/smartcontractkit/chainlink-common v0.2.2-0.20240731184516-249ef7ad0cdc h1:nNZqLasN8y5huDKX76JUZtni7WkUI36J61//czbJpDM= -github.com/smartcontractkit/chainlink-common v0.2.2-0.20240731184516-249ef7ad0cdc/go.mod h1:Jg1sCTsbxg76YByI8ifpFby3FvVqISStHT8ypy9ocmY= +github.com/smartcontractkit/chainlink-common v0.2.2-0.20240801092904-114abb088409 h1:rwo/bzqzbhSPBn1CHFfHiQPcMlpBV/hau4TrpJngTJc= +github.com/smartcontractkit/chainlink-common v0.2.2-0.20240801092904-114abb088409/go.mod h1:Jg1sCTsbxg76YByI8ifpFby3FvVqISStHT8ypy9ocmY= github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240710121324-3ed288aa9b45 h1:NBQLtqk8zsyY4qTJs+NElI3aDFTcAo83JHvqD04EvB0= github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240710121324-3ed288aa9b45/go.mod h1:LV0h7QBQUpoC2UUi6TcUvcIFm1xjP/DtEcqV8+qeLUs= github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240801131703-fd75761c982f h1:I9fTBJpHkeldFplXUy71eLIn6A6GxuR4xrABoUeD+CM= diff --git a/integration-tests/load/go.mod b/integration-tests/load/go.mod index 3d1ae6c7a98..f0485082ecb 100644 --- a/integration-tests/load/go.mod +++ b/integration-tests/load/go.mod @@ -16,7 +16,7 @@ require ( github.com/rs/zerolog v1.31.0 github.com/slack-go/slack v0.12.2 github.com/smartcontractkit/chainlink-automation v1.0.4 - github.com/smartcontractkit/chainlink-common v0.2.2-0.20240731184516-249ef7ad0cdc + github.com/smartcontractkit/chainlink-common v0.2.2-0.20240801092904-114abb088409 github.com/smartcontractkit/chainlink-testing-framework v1.33.0 
github.com/smartcontractkit/chainlink/integration-tests v0.0.0-20240214231432-4ad5eb95178c github.com/smartcontractkit/chainlink/v2 v2.9.0-beta0.0.20240216210048-da02459ddad8 diff --git a/integration-tests/load/go.sum b/integration-tests/load/go.sum index 2a54ec9254f..647a02b9b0e 100644 --- a/integration-tests/load/go.sum +++ b/integration-tests/load/go.sum @@ -1468,8 +1468,8 @@ github.com/smartcontractkit/chain-selectors v1.0.10 h1:t9kJeE6B6G+hKD0GYR4kGJSCq github.com/smartcontractkit/chain-selectors v1.0.10/go.mod h1:d4Hi+E1zqjy9HqMkjBE5q1vcG9VGgxf5VxiRHfzi2kE= github.com/smartcontractkit/chainlink-automation v1.0.4 h1:iyW181JjKHLNMnDleI8umfIfVVlwC7+n5izbLSFgjw8= github.com/smartcontractkit/chainlink-automation v1.0.4/go.mod h1:u4NbPZKJ5XiayfKHD/v3z3iflQWqvtdhj13jVZXj/cM= -github.com/smartcontractkit/chainlink-common v0.2.2-0.20240731184516-249ef7ad0cdc h1:nNZqLasN8y5huDKX76JUZtni7WkUI36J61//czbJpDM= -github.com/smartcontractkit/chainlink-common v0.2.2-0.20240731184516-249ef7ad0cdc/go.mod h1:Jg1sCTsbxg76YByI8ifpFby3FvVqISStHT8ypy9ocmY= +github.com/smartcontractkit/chainlink-common v0.2.2-0.20240801092904-114abb088409 h1:rwo/bzqzbhSPBn1CHFfHiQPcMlpBV/hau4TrpJngTJc= +github.com/smartcontractkit/chainlink-common v0.2.2-0.20240801092904-114abb088409/go.mod h1:Jg1sCTsbxg76YByI8ifpFby3FvVqISStHT8ypy9ocmY= github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240710121324-3ed288aa9b45 h1:NBQLtqk8zsyY4qTJs+NElI3aDFTcAo83JHvqD04EvB0= github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240710121324-3ed288aa9b45/go.mod h1:LV0h7QBQUpoC2UUi6TcUvcIFm1xjP/DtEcqV8+qeLUs= github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240801131703-fd75761c982f h1:I9fTBJpHkeldFplXUy71eLIn6A6GxuR4xrABoUeD+CM= From d90bb66934a46bb1c6d376b000d860e1588d91c7 Mon Sep 17 00:00:00 2001 From: Matthew Pendrey Date: Mon, 5 Aug 2024 17:46:31 +0100 Subject: [PATCH 3/6] common version update to head of develop (#14030) --- .changeset/ninety-cougars-tease.md | 5 +++++ core/scripts/go.mod | 2 +- core/scripts/go.sum | 4 ++-- go.mod | 2 +- go.sum | 4 ++-- integration-tests/go.mod | 2 +- integration-tests/go.sum | 4 ++-- integration-tests/load/go.mod | 2 +- integration-tests/load/go.sum | 4 ++-- 9 files changed, 17 insertions(+), 12 deletions(-) create mode 100644 .changeset/ninety-cougars-tease.md diff --git a/.changeset/ninety-cougars-tease.md b/.changeset/ninety-cougars-tease.md new file mode 100644 index 00000000000..ab12a571914 --- /dev/null +++ b/.changeset/ninety-cougars-tease.md @@ -0,0 +1,5 @@ +--- +"chainlink": patch +--- + +#internal restore common version to head of develop diff --git a/core/scripts/go.mod b/core/scripts/go.mod index 4e250038710..fe4ee2c9748 100644 --- a/core/scripts/go.mod +++ b/core/scripts/go.mod @@ -22,7 +22,7 @@ require ( github.com/prometheus/client_golang v1.17.0 github.com/shopspring/decimal v1.4.0 github.com/smartcontractkit/chainlink-automation v1.0.4 - github.com/smartcontractkit/chainlink-common v0.2.2-0.20240801092904-114abb088409 + github.com/smartcontractkit/chainlink-common v0.2.2-0.20240805160614-501c4f40b98c github.com/smartcontractkit/chainlink/v2 v2.0.0-00010101000000-000000000000 github.com/smartcontractkit/libocr v0.0.0-20240717100443-f6226e09bee7 github.com/spf13/cobra v1.8.0 diff --git a/core/scripts/go.sum b/core/scripts/go.sum index 4c8eee4a1db..76eaf615279 100644 --- a/core/scripts/go.sum +++ b/core/scripts/go.sum @@ -1184,8 +1184,8 @@ github.com/smartcontractkit/chain-selectors v1.0.10 h1:t9kJeE6B6G+hKD0GYR4kGJSCq github.com/smartcontractkit/chain-selectors 
v1.0.10/go.mod h1:d4Hi+E1zqjy9HqMkjBE5q1vcG9VGgxf5VxiRHfzi2kE= github.com/smartcontractkit/chainlink-automation v1.0.4 h1:iyW181JjKHLNMnDleI8umfIfVVlwC7+n5izbLSFgjw8= github.com/smartcontractkit/chainlink-automation v1.0.4/go.mod h1:u4NbPZKJ5XiayfKHD/v3z3iflQWqvtdhj13jVZXj/cM= -github.com/smartcontractkit/chainlink-common v0.2.2-0.20240801092904-114abb088409 h1:rwo/bzqzbhSPBn1CHFfHiQPcMlpBV/hau4TrpJngTJc= -github.com/smartcontractkit/chainlink-common v0.2.2-0.20240801092904-114abb088409/go.mod h1:Jg1sCTsbxg76YByI8ifpFby3FvVqISStHT8ypy9ocmY= +github.com/smartcontractkit/chainlink-common v0.2.2-0.20240805160614-501c4f40b98c h1:3apUsez/6Pkp1ckXzSwIhzPRuWjDGjzMjKapEKi0Fcw= +github.com/smartcontractkit/chainlink-common v0.2.2-0.20240805160614-501c4f40b98c/go.mod h1:Jg1sCTsbxg76YByI8ifpFby3FvVqISStHT8ypy9ocmY= github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240710121324-3ed288aa9b45 h1:NBQLtqk8zsyY4qTJs+NElI3aDFTcAo83JHvqD04EvB0= github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240710121324-3ed288aa9b45/go.mod h1:LV0h7QBQUpoC2UUi6TcUvcIFm1xjP/DtEcqV8+qeLUs= github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240801131703-fd75761c982f h1:I9fTBJpHkeldFplXUy71eLIn6A6GxuR4xrABoUeD+CM= diff --git a/go.mod b/go.mod index 3aa878d1ab2..45e0b62d52e 100644 --- a/go.mod +++ b/go.mod @@ -72,7 +72,7 @@ require ( github.com/shopspring/decimal v1.4.0 github.com/smartcontractkit/chain-selectors v1.0.10 github.com/smartcontractkit/chainlink-automation v1.0.4 - github.com/smartcontractkit/chainlink-common v0.2.2-0.20240801092904-114abb088409 + github.com/smartcontractkit/chainlink-common v0.2.2-0.20240805160614-501c4f40b98c github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240710121324-3ed288aa9b45 github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240801131703-fd75761c982f github.com/smartcontractkit/chainlink-feeds v0.0.0-20240710170203-5b41615da827 diff --git a/go.sum b/go.sum index 0264dcc01c3..4a6b294c122 100644 --- a/go.sum +++ b/go.sum @@ -1136,8 +1136,8 @@ github.com/smartcontractkit/chain-selectors v1.0.10 h1:t9kJeE6B6G+hKD0GYR4kGJSCq github.com/smartcontractkit/chain-selectors v1.0.10/go.mod h1:d4Hi+E1zqjy9HqMkjBE5q1vcG9VGgxf5VxiRHfzi2kE= github.com/smartcontractkit/chainlink-automation v1.0.4 h1:iyW181JjKHLNMnDleI8umfIfVVlwC7+n5izbLSFgjw8= github.com/smartcontractkit/chainlink-automation v1.0.4/go.mod h1:u4NbPZKJ5XiayfKHD/v3z3iflQWqvtdhj13jVZXj/cM= -github.com/smartcontractkit/chainlink-common v0.2.2-0.20240801092904-114abb088409 h1:rwo/bzqzbhSPBn1CHFfHiQPcMlpBV/hau4TrpJngTJc= -github.com/smartcontractkit/chainlink-common v0.2.2-0.20240801092904-114abb088409/go.mod h1:Jg1sCTsbxg76YByI8ifpFby3FvVqISStHT8ypy9ocmY= +github.com/smartcontractkit/chainlink-common v0.2.2-0.20240805160614-501c4f40b98c h1:3apUsez/6Pkp1ckXzSwIhzPRuWjDGjzMjKapEKi0Fcw= +github.com/smartcontractkit/chainlink-common v0.2.2-0.20240805160614-501c4f40b98c/go.mod h1:Jg1sCTsbxg76YByI8ifpFby3FvVqISStHT8ypy9ocmY= github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240710121324-3ed288aa9b45 h1:NBQLtqk8zsyY4qTJs+NElI3aDFTcAo83JHvqD04EvB0= github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240710121324-3ed288aa9b45/go.mod h1:LV0h7QBQUpoC2UUi6TcUvcIFm1xjP/DtEcqV8+qeLUs= github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240801131703-fd75761c982f h1:I9fTBJpHkeldFplXUy71eLIn6A6GxuR4xrABoUeD+CM= diff --git a/integration-tests/go.mod b/integration-tests/go.mod index d7c1918c927..8f9652099b1 100644 --- a/integration-tests/go.mod +++ b/integration-tests/go.mod @@ -28,7 +28,7 @@ require ( 
github.com/shopspring/decimal v1.4.0 github.com/slack-go/slack v0.12.2 github.com/smartcontractkit/chainlink-automation v1.0.4 - github.com/smartcontractkit/chainlink-common v0.2.2-0.20240801092904-114abb088409 + github.com/smartcontractkit/chainlink-common v0.2.2-0.20240805160614-501c4f40b98c github.com/smartcontractkit/chainlink-testing-framework v1.33.0 github.com/smartcontractkit/chainlink-testing-framework/grafana v0.0.0-20240405215812-5a72bc9af239 github.com/smartcontractkit/chainlink/v2 v2.0.0-00010101000000-000000000000 diff --git a/integration-tests/go.sum b/integration-tests/go.sum index 2854b2d599b..bca92f4a97c 100644 --- a/integration-tests/go.sum +++ b/integration-tests/go.sum @@ -1486,8 +1486,8 @@ github.com/smartcontractkit/chain-selectors v1.0.10 h1:t9kJeE6B6G+hKD0GYR4kGJSCq github.com/smartcontractkit/chain-selectors v1.0.10/go.mod h1:d4Hi+E1zqjy9HqMkjBE5q1vcG9VGgxf5VxiRHfzi2kE= github.com/smartcontractkit/chainlink-automation v1.0.4 h1:iyW181JjKHLNMnDleI8umfIfVVlwC7+n5izbLSFgjw8= github.com/smartcontractkit/chainlink-automation v1.0.4/go.mod h1:u4NbPZKJ5XiayfKHD/v3z3iflQWqvtdhj13jVZXj/cM= -github.com/smartcontractkit/chainlink-common v0.2.2-0.20240801092904-114abb088409 h1:rwo/bzqzbhSPBn1CHFfHiQPcMlpBV/hau4TrpJngTJc= -github.com/smartcontractkit/chainlink-common v0.2.2-0.20240801092904-114abb088409/go.mod h1:Jg1sCTsbxg76YByI8ifpFby3FvVqISStHT8ypy9ocmY= +github.com/smartcontractkit/chainlink-common v0.2.2-0.20240805160614-501c4f40b98c h1:3apUsez/6Pkp1ckXzSwIhzPRuWjDGjzMjKapEKi0Fcw= +github.com/smartcontractkit/chainlink-common v0.2.2-0.20240805160614-501c4f40b98c/go.mod h1:Jg1sCTsbxg76YByI8ifpFby3FvVqISStHT8ypy9ocmY= github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240710121324-3ed288aa9b45 h1:NBQLtqk8zsyY4qTJs+NElI3aDFTcAo83JHvqD04EvB0= github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240710121324-3ed288aa9b45/go.mod h1:LV0h7QBQUpoC2UUi6TcUvcIFm1xjP/DtEcqV8+qeLUs= github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240801131703-fd75761c982f h1:I9fTBJpHkeldFplXUy71eLIn6A6GxuR4xrABoUeD+CM= diff --git a/integration-tests/load/go.mod b/integration-tests/load/go.mod index f0485082ecb..f0554f2c725 100644 --- a/integration-tests/load/go.mod +++ b/integration-tests/load/go.mod @@ -16,7 +16,7 @@ require ( github.com/rs/zerolog v1.31.0 github.com/slack-go/slack v0.12.2 github.com/smartcontractkit/chainlink-automation v1.0.4 - github.com/smartcontractkit/chainlink-common v0.2.2-0.20240801092904-114abb088409 + github.com/smartcontractkit/chainlink-common v0.2.2-0.20240805160614-501c4f40b98c github.com/smartcontractkit/chainlink-testing-framework v1.33.0 github.com/smartcontractkit/chainlink/integration-tests v0.0.0-20240214231432-4ad5eb95178c github.com/smartcontractkit/chainlink/v2 v2.9.0-beta0.0.20240216210048-da02459ddad8 diff --git a/integration-tests/load/go.sum b/integration-tests/load/go.sum index 647a02b9b0e..f31a11d389d 100644 --- a/integration-tests/load/go.sum +++ b/integration-tests/load/go.sum @@ -1468,8 +1468,8 @@ github.com/smartcontractkit/chain-selectors v1.0.10 h1:t9kJeE6B6G+hKD0GYR4kGJSCq github.com/smartcontractkit/chain-selectors v1.0.10/go.mod h1:d4Hi+E1zqjy9HqMkjBE5q1vcG9VGgxf5VxiRHfzi2kE= github.com/smartcontractkit/chainlink-automation v1.0.4 h1:iyW181JjKHLNMnDleI8umfIfVVlwC7+n5izbLSFgjw8= github.com/smartcontractkit/chainlink-automation v1.0.4/go.mod h1:u4NbPZKJ5XiayfKHD/v3z3iflQWqvtdhj13jVZXj/cM= -github.com/smartcontractkit/chainlink-common v0.2.2-0.20240801092904-114abb088409 h1:rwo/bzqzbhSPBn1CHFfHiQPcMlpBV/hau4TrpJngTJc= 
-github.com/smartcontractkit/chainlink-common v0.2.2-0.20240801092904-114abb088409/go.mod h1:Jg1sCTsbxg76YByI8ifpFby3FvVqISStHT8ypy9ocmY= +github.com/smartcontractkit/chainlink-common v0.2.2-0.20240805160614-501c4f40b98c h1:3apUsez/6Pkp1ckXzSwIhzPRuWjDGjzMjKapEKi0Fcw= +github.com/smartcontractkit/chainlink-common v0.2.2-0.20240805160614-501c4f40b98c/go.mod h1:Jg1sCTsbxg76YByI8ifpFby3FvVqISStHT8ypy9ocmY= github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240710121324-3ed288aa9b45 h1:NBQLtqk8zsyY4qTJs+NElI3aDFTcAo83JHvqD04EvB0= github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20240710121324-3ed288aa9b45/go.mod h1:LV0h7QBQUpoC2UUi6TcUvcIFm1xjP/DtEcqV8+qeLUs= github.com/smartcontractkit/chainlink-data-streams v0.0.0-20240801131703-fd75761c982f h1:I9fTBJpHkeldFplXUy71eLIn6A6GxuR4xrABoUeD+CM= From 8b9f2b6b9098e8ec2368773368239106d066e4e3 Mon Sep 17 00:00:00 2001 From: ilija42 <57732589+ilija42@users.noreply.github.com> Date: Tue, 6 Aug 2024 12:52:10 +0200 Subject: [PATCH 4/6] [BCF - 3339] - Codec and CR hashed topics support (#14016) * Add better handling for CR EVM filtering by hashed indexed topics * Add comments for CR event codec usage * Improve err handling in CR init * Add a TODO for CR QueryKey primitive remapping handling * Update codec test to match most recent changes * Add changeset and run solidity prettier * Add contracts changeset for ChainReaderTester contract changes * simplify getNativeAndCheckedTypesForArg FixedBytesTy case --- .changeset/thin-rings-count.md | 5 + contracts/.changeset/itchy-turtles-agree.md | 5 + .../shared/test/helpers/ChainReaderTester.sol | 8 + .../chain_reader_tester.go | 175 +++++++++++++++++- ...rapper-dependency-versions-do-not-edit.txt | 2 +- core/services/relay/evm/chain_reader.go | 10 +- core/services/relay/evm/event_binding.go | 67 ++++--- .../chain_reader_interface_tester.go | 16 +- .../relay/evm/evmtesting/run_tests.go | 37 +++- core/services/relay/evm/types/codec_entry.go | 2 +- .../relay/evm/types/codec_entry_test.go | 28 ++- 11 files changed, 314 insertions(+), 41 deletions(-) create mode 100644 .changeset/thin-rings-count.md create mode 100644 contracts/.changeset/itchy-turtles-agree.md diff --git a/.changeset/thin-rings-count.md b/.changeset/thin-rings-count.md new file mode 100644 index 00000000000..20f4b54311e --- /dev/null +++ b/.changeset/thin-rings-count.md @@ -0,0 +1,5 @@ +--- +"chainlink": minor +--- + +#internal Add evm Chain Reader GetLatestValue support for filtering on indexed topic types that get hashed. diff --git a/contracts/.changeset/itchy-turtles-agree.md b/contracts/.changeset/itchy-turtles-agree.md new file mode 100644 index 00000000000..930ab850d9b --- /dev/null +++ b/contracts/.changeset/itchy-turtles-agree.md @@ -0,0 +1,5 @@ +--- +'@chainlink/contracts': minor +--- + +#internal Add an event with indexed topics that get hashed to Chain Reader Tester contract. 
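For reference, a minimal Go sketch (not taken from this patch) of how a log filter for the new TriggeredWithFourTopicsWithHashed event could be built directly with go-ethereum, outside the Chain Reader API: indexed reference types such as string and uint8[32] are stored in log topics as the keccak256 hash of their ABI encoding, so the filter value has to be hashed the same way. The contract address and field value below are placeholders, not values from this repository.

package main

import (
	"fmt"
	"math/big"

	ethereum "github.com/ethereum/go-ethereum"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// Topic 0 is the keccak256 hash of the canonical event signature.
	eventSig := crypto.Keccak256Hash([]byte("TriggeredWithFourTopicsWithHashed(string,uint8[32],bytes32)"))

	// An indexed string is not stored in the log as-is; its topic is the
	// keccak256 hash of its UTF-8 bytes, so the filter value is hashed too.
	field1Topic := crypto.Keccak256Hash([]byte("some-field1-value")) // placeholder value

	query := ethereum.FilterQuery{
		FromBlock: big.NewInt(0),
		// Placeholder address; a real query would use the deployed ChainReaderTester address.
		Addresses: []common.Address{common.HexToAddress("0x0000000000000000000000000000000000000000")},
		Topics: [][]common.Hash{
			{eventSig},    // topic 0: event signature hash
			{field1Topic}, // topic 1: hashed indexed string
		},
	}

	fmt.Printf("topic0=%s topic1=%s addresses=%v\n", eventSig.Hex(), field1Topic.Hex(), query.Addresses)
}

The same rule would apply to the indexed uint8[32] field: its topic is the keccak256 hash of the ABI encoding of the array (32 elements, each padded to 32 bytes), which is why a reader filtering on such topics must hash query values exactly as the ABI spec prescribes.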
diff --git a/contracts/src/v0.8/shared/test/helpers/ChainReaderTester.sol b/contracts/src/v0.8/shared/test/helpers/ChainReaderTester.sol index 58a4b9a25c5..709d00cc382 100644 --- a/contracts/src/v0.8/shared/test/helpers/ChainReaderTester.sol +++ b/contracts/src/v0.8/shared/test/helpers/ChainReaderTester.sol @@ -40,6 +40,9 @@ contract ChainReaderTester { // First topic is event hash event TriggeredWithFourTopics(int32 indexed field1, int32 indexed field2, int32 indexed field3); + // first topic is event hash, second and third topics get hashed before getting stored + event TriggeredWithFourTopicsWithHashed(string indexed field1, uint8[32] indexed field2, bytes32 indexed field3); + TestStruct[] private s_seen; uint64[] private s_arr; uint64 private s_value; @@ -125,4 +128,9 @@ contract ChainReaderTester { function triggerWithFourTopics(int32 field1, int32 field2, int32 field3) public { emit TriggeredWithFourTopics(field1, field2, field3); } + + // first topic is event hash, second and third topics get hashed before getting stored + function triggerWithFourTopicsWithHashed(string memory field1, uint8[32] memory field2, bytes32 field3) public { + emit TriggeredWithFourTopicsWithHashed(field1, field2, field3); + } } diff --git a/core/gethwrappers/generated/chain_reader_tester/chain_reader_tester.go b/core/gethwrappers/generated/chain_reader_tester/chain_reader_tester.go index 751df822696..c59a6f0f0d1 100644 --- a/core/gethwrappers/generated/chain_reader_tester/chain_reader_tester.go +++ b/core/gethwrappers/generated/chain_reader_tester/chain_reader_tester.go @@ -52,8 +52,8 @@ type TestStruct struct { } var ChainReaderTesterMetaData = &bind.MetaData{ - ABI: "[{\"inputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"int32\",\"name\":\"field\",\"type\":\"int32\"},{\"indexed\":false,\"internalType\":\"string\",\"name\":\"differentField\",\"type\":\"string\"},{\"indexed\":false,\"internalType\":\"uint8\",\"name\":\"oracleId\",\"type\":\"uint8\"},{\"indexed\":false,\"internalType\":\"uint8[32]\",\"name\":\"oracleIds\",\"type\":\"uint8[32]\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"Account\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"Accounts\",\"type\":\"address[]\"},{\"indexed\":false,\"internalType\":\"int192\",\"name\":\"bigField\",\"type\":\"int192\"},{\"components\":[{\"internalType\":\"bytes2\",\"name\":\"FixedBytes\",\"type\":\"bytes2\"},{\"components\":[{\"internalType\":\"int64\",\"name\":\"IntVal\",\"type\":\"int64\"},{\"internalType\":\"string\",\"name\":\"S\",\"type\":\"string\"}],\"internalType\":\"structInnerTestStruct\",\"name\":\"Inner\",\"type\":\"tuple\"}],\"indexed\":false,\"internalType\":\"structMidLevelTestStruct\",\"name\":\"nestedStruct\",\"type\":\"tuple\"}],\"name\":\"Triggered\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"string\",\"name\":\"fieldHash\",\"type\":\"string\"},{\"indexed\":false,\"internalType\":\"string\",\"name\":\"field\",\"type\":\"string\"}],\"name\":\"TriggeredEventWithDynamicTopic\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"int32\",\"name\":\"field1\",\"type\":\"int32\"},{\"indexed\":true,\"internalType\":\"int32\",\"name\":\"field2\",\"type\":\"int32\"},{\"indexed\":true,\"internalType\":\"int32\",\"name\":\"field3\",\"type\":\"int32\"}],\"name\":\"TriggeredWithFourTopics\",\"type\":\"event\"},{\"inputs\":[{\"intern
alType\":\"int32\",\"name\":\"field\",\"type\":\"int32\"},{\"internalType\":\"string\",\"name\":\"differentField\",\"type\":\"string\"},{\"internalType\":\"uint8\",\"name\":\"oracleId\",\"type\":\"uint8\"},{\"internalType\":\"uint8[32]\",\"name\":\"oracleIds\",\"type\":\"uint8[32]\"},{\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"},{\"internalType\":\"address[]\",\"name\":\"accounts\",\"type\":\"address[]\"},{\"internalType\":\"int192\",\"name\":\"bigField\",\"type\":\"int192\"},{\"components\":[{\"internalType\":\"bytes2\",\"name\":\"FixedBytes\",\"type\":\"bytes2\"},{\"components\":[{\"internalType\":\"int64\",\"name\":\"IntVal\",\"type\":\"int64\"},{\"internalType\":\"string\",\"name\":\"S\",\"type\":\"string\"}],\"internalType\":\"structInnerTestStruct\",\"name\":\"Inner\",\"type\":\"tuple\"}],\"internalType\":\"structMidLevelTestStruct\",\"name\":\"nestedStruct\",\"type\":\"tuple\"}],\"name\":\"addTestStruct\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getAlterablePrimitiveValue\",\"outputs\":[{\"internalType\":\"uint64\",\"name\":\"\",\"type\":\"uint64\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getDifferentPrimitiveValue\",\"outputs\":[{\"internalType\":\"uint64\",\"name\":\"\",\"type\":\"uint64\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"i\",\"type\":\"uint256\"}],\"name\":\"getElementAtIndex\",\"outputs\":[{\"components\":[{\"internalType\":\"int32\",\"name\":\"Field\",\"type\":\"int32\"},{\"internalType\":\"string\",\"name\":\"DifferentField\",\"type\":\"string\"},{\"internalType\":\"uint8\",\"name\":\"OracleId\",\"type\":\"uint8\"},{\"internalType\":\"uint8[32]\",\"name\":\"OracleIds\",\"type\":\"uint8[32]\"},{\"internalType\":\"address\",\"name\":\"Account\",\"type\":\"address\"},{\"internalType\":\"address[]\",\"name\":\"Accounts\",\"type\":\"address[]\"},{\"internalType\":\"int192\",\"name\":\"BigField\",\"type\":\"int192\"},{\"components\":[{\"internalType\":\"bytes2\",\"name\":\"FixedBytes\",\"type\":\"bytes2\"},{\"components\":[{\"internalType\":\"int64\",\"name\":\"IntVal\",\"type\":\"int64\"},{\"internalType\":\"string\",\"name\":\"S\",\"type\":\"string\"}],\"internalType\":\"structInnerTestStruct\",\"name\":\"Inner\",\"type\":\"tuple\"}],\"internalType\":\"structMidLevelTestStruct\",\"name\":\"NestedStruct\",\"type\":\"tuple\"}],\"internalType\":\"structTestStruct\",\"name\":\"\",\"type\":\"tuple\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getPrimitiveValue\",\"outputs\":[{\"internalType\":\"uint64\",\"name\":\"\",\"type\":\"uint64\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getSliceValue\",\"outputs\":[{\"internalType\":\"uint64[]\",\"name\":\"\",\"type\":\"uint64[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"int32\",\"name\":\"field\",\"type\":\"int32\"},{\"internalType\":\"string\",\"name\":\"differentField\",\"type\":\"string\"},{\"internalType\":\"uint8\",\"name\":\"oracleId\",\"type\":\"uint8\"},{\"internalType\":\"uint8[32]\",\"name\":\"oracleIds\",\"type\":\"uint8[32]\"},{\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"},{\"internalType\":\"address[]\",\"name\":\"accounts\",\"type\":\"address[]\"},{\"internalType\":\"int192\",\"name\":\"bigField\",\"type\":\"int192\"},{\"components\":[{\"internalType\":\"bytes2\",\"name\":\"FixedBytes\",\"typ
e\":\"bytes2\"},{\"components\":[{\"internalType\":\"int64\",\"name\":\"IntVal\",\"type\":\"int64\"},{\"internalType\":\"string\",\"name\":\"S\",\"type\":\"string\"}],\"internalType\":\"structInnerTestStruct\",\"name\":\"Inner\",\"type\":\"tuple\"}],\"internalType\":\"structMidLevelTestStruct\",\"name\":\"nestedStruct\",\"type\":\"tuple\"}],\"name\":\"returnSeen\",\"outputs\":[{\"components\":[{\"internalType\":\"int32\",\"name\":\"Field\",\"type\":\"int32\"},{\"internalType\":\"string\",\"name\":\"DifferentField\",\"type\":\"string\"},{\"internalType\":\"uint8\",\"name\":\"OracleId\",\"type\":\"uint8\"},{\"internalType\":\"uint8[32]\",\"name\":\"OracleIds\",\"type\":\"uint8[32]\"},{\"internalType\":\"address\",\"name\":\"Account\",\"type\":\"address\"},{\"internalType\":\"address[]\",\"name\":\"Accounts\",\"type\":\"address[]\"},{\"internalType\":\"int192\",\"name\":\"BigField\",\"type\":\"int192\"},{\"components\":[{\"internalType\":\"bytes2\",\"name\":\"FixedBytes\",\"type\":\"bytes2\"},{\"components\":[{\"internalType\":\"int64\",\"name\":\"IntVal\",\"type\":\"int64\"},{\"internalType\":\"string\",\"name\":\"S\",\"type\":\"string\"}],\"internalType\":\"structInnerTestStruct\",\"name\":\"Inner\",\"type\":\"tuple\"}],\"internalType\":\"structMidLevelTestStruct\",\"name\":\"NestedStruct\",\"type\":\"tuple\"}],\"internalType\":\"structTestStruct\",\"name\":\"\",\"type\":\"tuple\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"value\",\"type\":\"uint64\"}],\"name\":\"setAlterablePrimitiveValue\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"int32\",\"name\":\"field\",\"type\":\"int32\"},{\"internalType\":\"string\",\"name\":\"differentField\",\"type\":\"string\"},{\"internalType\":\"uint8\",\"name\":\"oracleId\",\"type\":\"uint8\"},{\"internalType\":\"uint8[32]\",\"name\":\"oracleIds\",\"type\":\"uint8[32]\"},{\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"},{\"internalType\":\"address[]\",\"name\":\"accounts\",\"type\":\"address[]\"},{\"internalType\":\"int192\",\"name\":\"bigField\",\"type\":\"int192\"},{\"components\":[{\"internalType\":\"bytes2\",\"name\":\"FixedBytes\",\"type\":\"bytes2\"},{\"components\":[{\"internalType\":\"int64\",\"name\":\"IntVal\",\"type\":\"int64\"},{\"internalType\":\"string\",\"name\":\"S\",\"type\":\"string\"}],\"internalType\":\"structInnerTestStruct\",\"name\":\"Inner\",\"type\":\"tuple\"}],\"internalType\":\"structMidLevelTestStruct\",\"name\":\"nestedStruct\",\"type\":\"tuple\"}],\"name\":\"triggerEvent\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"string\",\"name\":\"field\",\"type\":\"string\"}],\"name\":\"triggerEventWithDynamicTopic\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"int32\",\"name\":\"field1\",\"type\":\"int32\"},{\"internalType\":\"int32\",\"name\":\"field2\",\"type\":\"int32\"},{\"internalType\":\"int32\",\"name\":\"field3\",\"type\":\"int32\"}],\"name\":\"triggerWithFourTopics\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", - Bin: 
"0x608060405234801561001057600080fd5b50600180548082018255600082905260048082047fb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf6908101805460086003958616810261010090810a8088026001600160401b0391820219909416939093179093558654808801909755848704909301805496909516909202900a91820291021990921691909117905561181e806100a96000396000f3fe608060405234801561001057600080fd5b50600436106100c95760003560e01c80637f002d6711610081578063ef4e1ced1161005b578063ef4e1ced146101c0578063f6f871c8146101c7578063fbe9fbf6146101da57600080fd5b80637f002d671461017d578063ab5e0b3814610190578063dbfd7332146101ad57600080fd5b806349eac2ac116100b257806349eac2ac1461010c578063679004a41461011f5780636c9a43b61461013457600080fd5b80632c45576f146100ce5780633272b66c146100f7575b600080fd5b6100e16100dc366004610c2b565b6101ec565b6040516100ee9190610d8a565b60405180910390f35b61010a610105366004610ec9565b6104c7565b005b61010a61011a366004610fde565b61051c565b61012761081f565b6040516100ee91906110d0565b61010a61014236600461111e565b600280547fffffffffffffffffffffffffffffffffffffffffffffffff00000000000000001667ffffffffffffffff92909216919091179055565b61010a61018b366004610fde565b6108ab565b6107c65b60405167ffffffffffffffff90911681526020016100ee565b61010a6101bb36600461114f565b610902565b6003610194565b6100e16101d5366004610fde565b61093f565b60025467ffffffffffffffff16610194565b6101f4610a48565b6000610201600184611192565b81548110610211576102116111cc565b6000918252602091829020604080516101008101909152600a90920201805460030b8252600181018054929391929184019161024c906111fb565b80601f0160208091040260200160405190810160405280929190818152602001828054610278906111fb565b80156102c55780601f1061029a576101008083540402835291602001916102c5565b820191906000526020600020905b8154815290600101906020018083116102a857829003601f168201915b5050509183525050600282015460ff166020808301919091526040805161040081018083529190930192916003850191826000855b825461010083900a900460ff168152602060019283018181049485019490930390920291018084116102fa57505050928452505050600482015473ffffffffffffffffffffffffffffffffffffffff1660208083019190915260058301805460408051828502810185018252828152940193928301828280156103b357602002820191906000526020600020905b815473ffffffffffffffffffffffffffffffffffffffff168152600190910190602001808311610388575b5050509183525050600682015460170b6020808301919091526040805180820182526007808601805460f01b7fffff0000000000000000000000000000000000000000000000000000000000001683528351808501855260088801805490930b81526009880180549590970196939591948683019491939284019190610438906111fb565b80601f0160208091040260200160405190810160405280929190818152602001828054610464906111fb565b80156104b15780601f10610486576101008083540402835291602001916104b1565b820191906000526020600020905b81548152906001019060200180831161049457829003601f168201915b5050509190925250505090525090525092915050565b81816040516104d7929190611248565b60405180910390207f3d969732b1bbbb9f1d7eb9f3f14e4cb50a74d950b3ef916a397b85dfbab93c6783836040516105109291906112a1565b60405180910390a25050565b60006040518061010001604052808c60030b81526020018b8b8080601f01602080910402602001604051908101604052809392919081815260200183838082843760009201919091525050509082525060ff8a166020808301919091526040805161040081810183529190930192918b9183908390808284376000920191909152505050815273ffffffffffffffffffffffffffffffffffffffff8816602080830191909152604080518883028181018401835289825291909301929189918991829190850190849080828437600092019190915250505090825250601785900b602082015260400161060e8461139e565b905281546001808201845560009384526020938490208351600a9093020180547fffffffffffffffffffffffffffffffffffffffffffffff
ffffffffff000000001663ffffffff90931692909217825592820151919290919082019061067490826114f8565b5060408201516002820180547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001660ff90921691909117905560608201516106c29060038301906020610a97565b5060808201516004820180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff90921691909117905560a08201518051610729916005840191602090910190610b2a565b5060c08201516006820180547fffffffffffffffff0000000000000000000000000000000000000000000000001677ffffffffffffffffffffffffffffffffffffffffffffffff90921691909117905560e082015180516007830180547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00001660f09290921c91909117815560208083015180516008860180547fffffffffffffffffffffffffffffffffffffffffffffffff00000000000000001667ffffffffffffffff90921691909117815591810151909190600986019061080c90826114f8565b5050505050505050505050505050505050565b606060018054806020026020016040519081016040528092919081815260200182805480156108a157602002820191906000526020600020906000905b82829054906101000a900467ffffffffffffffff1667ffffffffffffffff168152602001906008019060208260070104928301926001038202915080841161085c5790505b5050505050905090565b8960030b7f7188419dcd8b51877b71766f075f3626586c0ff190e7d056aa65ce9acb649a3d8a8a8a8a8a8a8a8a8a6040516108ee99989796959493929190611757565b60405180910390a250505050505050505050565b8060030b8260030b8460030b7f91c80dc390f3d041b3a04b0099b19634499541ea26972250986ee4b24a12fac560405160405180910390a4505050565b610947610a48565b6040518061010001604052808c60030b81526020018b8b8080601f01602080910402602001604051908101604052809392919081815260200183838082843760009201919091525050509082525060ff8a166020808301919091526040805161040081810183529190930192918b9183908390808284376000920191909152505050815273ffffffffffffffffffffffffffffffffffffffff8816602080830191909152604080518883028181018401835289825291909301929189918991829190850190849080828437600092019190915250505090825250601785900b6020820152604001610a378461139e565b90529b9a5050505050505050505050565b6040805161010081018252600080825260606020830181905292820152908101610a70610ba4565b8152600060208201819052606060408301819052820152608001610a92610bc3565b905290565b600183019183908215610b1a5791602002820160005b83821115610aeb57835183826101000a81548160ff021916908360ff1602179055509260200192600101602081600001049283019260010302610aad565b8015610b185782816101000a81549060ff0219169055600101602081600001049283019260010302610aeb565b505b50610b26929150610c16565b5090565b828054828255906000526020600020908101928215610b1a579160200282015b82811115610b1a57825182547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff909116178255602090920191600190910190610b4a565b6040518061040001604052806020906020820280368337509192915050565b604051806040016040528060007dffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff19168152602001610a926040518060400160405280600060070b8152602001606081525090565b5b80821115610b265760008155600101610c17565b600060208284031215610c3d57600080fd5b5035919050565b6000815180845260005b81811015610c6a57602081850181015186830182015201610c4e565b5060006020828601015260207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f83011685010191505092915050565b8060005b6020808210610cbb5750610cd2565b825160ff1685529384019390910190600101610cac565b50505050565b600081518084526020808501945080840160005b83811015610d1e57815173ffffffffffffffffffffffffffffffffffffffff1687529582019590820190600101610cec565b509495945050505050565b7fffff00000000000000000000000
000000000000000000000000000000000000081511682526000602082015160406020850152805160070b60408501526020810151905060406060850152610d826080850182610c44565b949350505050565b60208152610d9e60208201835160030b9052565b600060208301516104e0806040850152610dbc610500850183610c44565b91506040850151610dd2606086018260ff169052565b506060850151610de56080860182610ca8565b50608085015173ffffffffffffffffffffffffffffffffffffffff1661048085015260a08501517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe085840381016104a0870152610e428483610cd8565b935060c08701519150610e5b6104c087018360170b9052565b60e0870151915080868503018387015250610e768382610d29565b9695505050505050565b60008083601f840112610e9257600080fd5b50813567ffffffffffffffff811115610eaa57600080fd5b602083019150836020828501011115610ec257600080fd5b9250929050565b60008060208385031215610edc57600080fd5b823567ffffffffffffffff811115610ef357600080fd5b610eff85828601610e80565b90969095509350505050565b8035600381900b8114610f1d57600080fd5b919050565b803560ff81168114610f1d57600080fd5b806104008101831015610f4557600080fd5b92915050565b803573ffffffffffffffffffffffffffffffffffffffff81168114610f1d57600080fd5b60008083601f840112610f8157600080fd5b50813567ffffffffffffffff811115610f9957600080fd5b6020830191508360208260051b8501011115610ec257600080fd5b8035601781900b8114610f1d57600080fd5b600060408284031215610fd857600080fd5b50919050565b6000806000806000806000806000806104e08b8d031215610ffe57600080fd5b6110078b610f0b565b995060208b013567ffffffffffffffff8082111561102457600080fd5b6110308e838f01610e80565b909b50995089915061104460408e01610f22565b98506110538e60608f01610f33565b97506110626104608e01610f4b565b96506104808d013591508082111561107957600080fd5b6110858e838f01610f6f565b909650945084915061109a6104a08e01610fb4565b93506104c08d01359150808211156110b157600080fd5b506110be8d828e01610fc6565b9150509295989b9194979a5092959850565b6020808252825182820181905260009190848201906040850190845b8181101561111257835167ffffffffffffffff16835292840192918401916001016110ec565b50909695505050505050565b60006020828403121561113057600080fd5b813567ffffffffffffffff8116811461114857600080fd5b9392505050565b60008060006060848603121561116457600080fd5b61116d84610f0b565b925061117b60208501610f0b565b915061118960408501610f0b565b90509250925092565b81810381811115610f45577f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b600181811c9082168061120f57607f821691505b602082108103610fd8577f4e487b7100000000000000000000000000000000000000000000000000000000600052602260045260246000fd5b8183823760009101908152919050565b8183528181602085013750600060208284010152600060207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f840116840101905092915050565b602081526000610d82602083018486611258565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b6040805190810167ffffffffffffffff81118282101715611307576113076112b5565b60405290565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016810167ffffffffffffffff81118282101715611354576113546112b5565b604052919050565b80357fffff00000000000000000000000000000000000000000000000000000000000081168114610f1d57600080fd5b8035600781900b8114610f1d57600080fd5b6000604082360312156113b057600080fd5b6113b86112e4565b6113c18361135c565b815260208084013567ffffffffffffffff808211156113df57600080fd5b8186019150604082360312156113f457600080fd5b6113fc6112e4565b6114058361138c565b8152838301358281111561141857600080fd5b929092019136601f84011261142c576000
80fd5b82358281111561143e5761143e6112b5565b61146e857fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f8401160161130d565b9250808352368582860101111561148457600080fd5b8085850186850137600090830185015280840191909152918301919091525092915050565b601f8211156114f357600081815260208120601f850160051c810160208610156114d05750805b601f850160051c820191505b818110156114ef578281556001016114dc565b5050505b505050565b815167ffffffffffffffff811115611512576115126112b5565b6115268161152084546111fb565b846114a9565b602080601f83116001811461157957600084156115435750858301515b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff600386901b1c1916600185901b1785556114ef565b6000858152602081207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08616915b828110156115c6578886015182559484019460019091019084016115a7565b508582101561160257878501517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff600388901b60f8161c191681555b5050505050600190811b01905550565b8183526000602080850194508260005b85811015610d1e5773ffffffffffffffffffffffffffffffffffffffff61164883610f4b565b1687529582019590820190600101611622565b7fffff0000000000000000000000000000000000000000000000000000000000006116858261135c565b168252600060208201357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc18336030181126116bf57600080fd5b6040602085015282016116d18161138c565b60070b604085015260208101357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe182360301811261170e57600080fd5b0160208101903567ffffffffffffffff81111561172a57600080fd5b80360382131561173957600080fd5b6040606086015261174e608086018284611258565b95945050505050565b60006104c080835261176c8184018c8e611258565b9050602060ff808c1682860152604085018b60005b848110156117a6578361179383610f22565b1683529184019190840190600101611781565b505050505073ffffffffffffffffffffffffffffffffffffffff88166104408401528281036104608401526117dc818789611612565b90506117ee61048084018660170b9052565b8281036104a0840152611801818561165b565b9c9b50505050505050505050505056fea164736f6c6343000813000a", + ABI: 
"[{\"inputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"int32\",\"name\":\"field\",\"type\":\"int32\"},{\"indexed\":false,\"internalType\":\"string\",\"name\":\"differentField\",\"type\":\"string\"},{\"indexed\":false,\"internalType\":\"uint8\",\"name\":\"oracleId\",\"type\":\"uint8\"},{\"indexed\":false,\"internalType\":\"uint8[32]\",\"name\":\"oracleIds\",\"type\":\"uint8[32]\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"Account\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address[]\",\"name\":\"Accounts\",\"type\":\"address[]\"},{\"indexed\":false,\"internalType\":\"int192\",\"name\":\"bigField\",\"type\":\"int192\"},{\"components\":[{\"internalType\":\"bytes2\",\"name\":\"FixedBytes\",\"type\":\"bytes2\"},{\"components\":[{\"internalType\":\"int64\",\"name\":\"IntVal\",\"type\":\"int64\"},{\"internalType\":\"string\",\"name\":\"S\",\"type\":\"string\"}],\"internalType\":\"structInnerTestStruct\",\"name\":\"Inner\",\"type\":\"tuple\"}],\"indexed\":false,\"internalType\":\"structMidLevelTestStruct\",\"name\":\"nestedStruct\",\"type\":\"tuple\"}],\"name\":\"Triggered\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"string\",\"name\":\"fieldHash\",\"type\":\"string\"},{\"indexed\":false,\"internalType\":\"string\",\"name\":\"field\",\"type\":\"string\"}],\"name\":\"TriggeredEventWithDynamicTopic\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"int32\",\"name\":\"field1\",\"type\":\"int32\"},{\"indexed\":true,\"internalType\":\"int32\",\"name\":\"field2\",\"type\":\"int32\"},{\"indexed\":true,\"internalType\":\"int32\",\"name\":\"field3\",\"type\":\"int32\"}],\"name\":\"TriggeredWithFourTopics\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"string\",\"name\":\"field1\",\"type\":\"string\"},{\"indexed\":true,\"internalType\":\"uint8[32]\",\"name\":\"field2\",\"type\":\"uint8[32]\"},{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"field3\",\"type\":\"bytes32\"}],\"name\":\"TriggeredWithFourTopicsWithHashed\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"int32\",\"name\":\"field\",\"type\":\"int32\"},{\"internalType\":\"string\",\"name\":\"differentField\",\"type\":\"string\"},{\"internalType\":\"uint8\",\"name\":\"oracleId\",\"type\":\"uint8\"},{\"internalType\":\"uint8[32]\",\"name\":\"oracleIds\",\"type\":\"uint8[32]\"},{\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"},{\"internalType\":\"address[]\",\"name\":\"accounts\",\"type\":\"address[]\"},{\"internalType\":\"int192\",\"name\":\"bigField\",\"type\":\"int192\"},{\"components\":[{\"internalType\":\"bytes2\",\"name\":\"FixedBytes\",\"type\":\"bytes2\"},{\"components\":[{\"internalType\":\"int64\",\"name\":\"IntVal\",\"type\":\"int64\"},{\"internalType\":\"string\",\"name\":\"S\",\"type\":\"string\"}],\"internalType\":\"structInnerTestStruct\",\"name\":\"Inner\",\"type\":\"tuple\"}],\"internalType\":\"structMidLevelTestStruct\",\"name\":\"nestedStruct\",\"type\":\"tuple\"}],\"name\":\"addTestStruct\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getAlterablePrimitiveValue\",\"outputs\":[{\"internalType\":\"uint64\",\"name\":\"\",\"type\":\"uint64\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getDifferentPrimitiveValue\",\"outputs\":[{\"internalType\":\"uint64\",\"
name\":\"\",\"type\":\"uint64\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"i\",\"type\":\"uint256\"}],\"name\":\"getElementAtIndex\",\"outputs\":[{\"components\":[{\"internalType\":\"int32\",\"name\":\"Field\",\"type\":\"int32\"},{\"internalType\":\"string\",\"name\":\"DifferentField\",\"type\":\"string\"},{\"internalType\":\"uint8\",\"name\":\"OracleId\",\"type\":\"uint8\"},{\"internalType\":\"uint8[32]\",\"name\":\"OracleIds\",\"type\":\"uint8[32]\"},{\"internalType\":\"address\",\"name\":\"Account\",\"type\":\"address\"},{\"internalType\":\"address[]\",\"name\":\"Accounts\",\"type\":\"address[]\"},{\"internalType\":\"int192\",\"name\":\"BigField\",\"type\":\"int192\"},{\"components\":[{\"internalType\":\"bytes2\",\"name\":\"FixedBytes\",\"type\":\"bytes2\"},{\"components\":[{\"internalType\":\"int64\",\"name\":\"IntVal\",\"type\":\"int64\"},{\"internalType\":\"string\",\"name\":\"S\",\"type\":\"string\"}],\"internalType\":\"structInnerTestStruct\",\"name\":\"Inner\",\"type\":\"tuple\"}],\"internalType\":\"structMidLevelTestStruct\",\"name\":\"NestedStruct\",\"type\":\"tuple\"}],\"internalType\":\"structTestStruct\",\"name\":\"\",\"type\":\"tuple\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getPrimitiveValue\",\"outputs\":[{\"internalType\":\"uint64\",\"name\":\"\",\"type\":\"uint64\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getSliceValue\",\"outputs\":[{\"internalType\":\"uint64[]\",\"name\":\"\",\"type\":\"uint64[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"int32\",\"name\":\"field\",\"type\":\"int32\"},{\"internalType\":\"string\",\"name\":\"differentField\",\"type\":\"string\"},{\"internalType\":\"uint8\",\"name\":\"oracleId\",\"type\":\"uint8\"},{\"internalType\":\"uint8[32]\",\"name\":\"oracleIds\",\"type\":\"uint8[32]\"},{\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"},{\"internalType\":\"address[]\",\"name\":\"accounts\",\"type\":\"address[]\"},{\"internalType\":\"int192\",\"name\":\"bigField\",\"type\":\"int192\"},{\"components\":[{\"internalType\":\"bytes2\",\"name\":\"FixedBytes\",\"type\":\"bytes2\"},{\"components\":[{\"internalType\":\"int64\",\"name\":\"IntVal\",\"type\":\"int64\"},{\"internalType\":\"string\",\"name\":\"S\",\"type\":\"string\"}],\"internalType\":\"structInnerTestStruct\",\"name\":\"Inner\",\"type\":\"tuple\"}],\"internalType\":\"structMidLevelTestStruct\",\"name\":\"nestedStruct\",\"type\":\"tuple\"}],\"name\":\"returnSeen\",\"outputs\":[{\"components\":[{\"internalType\":\"int32\",\"name\":\"Field\",\"type\":\"int32\"},{\"internalType\":\"string\",\"name\":\"DifferentField\",\"type\":\"string\"},{\"internalType\":\"uint8\",\"name\":\"OracleId\",\"type\":\"uint8\"},{\"internalType\":\"uint8[32]\",\"name\":\"OracleIds\",\"type\":\"uint8[32]\"},{\"internalType\":\"address\",\"name\":\"Account\",\"type\":\"address\"},{\"internalType\":\"address[]\",\"name\":\"Accounts\",\"type\":\"address[]\"},{\"internalType\":\"int192\",\"name\":\"BigField\",\"type\":\"int192\"},{\"components\":[{\"internalType\":\"bytes2\",\"name\":\"FixedBytes\",\"type\":\"bytes2\"},{\"components\":[{\"internalType\":\"int64\",\"name\":\"IntVal\",\"type\":\"int64\"},{\"internalType\":\"string\",\"name\":\"S\",\"type\":\"string\"}],\"internalType\":\"structInnerTestStruct\",\"name\":\"Inner\",\"type\":\"tuple\"}],\"internalType\":\"structMidLevelTestStruct\",\"name\":\"NestedStruct\"
,\"type\":\"tuple\"}],\"internalType\":\"structTestStruct\",\"name\":\"\",\"type\":\"tuple\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"value\",\"type\":\"uint64\"}],\"name\":\"setAlterablePrimitiveValue\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"int32\",\"name\":\"field\",\"type\":\"int32\"},{\"internalType\":\"string\",\"name\":\"differentField\",\"type\":\"string\"},{\"internalType\":\"uint8\",\"name\":\"oracleId\",\"type\":\"uint8\"},{\"internalType\":\"uint8[32]\",\"name\":\"oracleIds\",\"type\":\"uint8[32]\"},{\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"},{\"internalType\":\"address[]\",\"name\":\"accounts\",\"type\":\"address[]\"},{\"internalType\":\"int192\",\"name\":\"bigField\",\"type\":\"int192\"},{\"components\":[{\"internalType\":\"bytes2\",\"name\":\"FixedBytes\",\"type\":\"bytes2\"},{\"components\":[{\"internalType\":\"int64\",\"name\":\"IntVal\",\"type\":\"int64\"},{\"internalType\":\"string\",\"name\":\"S\",\"type\":\"string\"}],\"internalType\":\"structInnerTestStruct\",\"name\":\"Inner\",\"type\":\"tuple\"}],\"internalType\":\"structMidLevelTestStruct\",\"name\":\"nestedStruct\",\"type\":\"tuple\"}],\"name\":\"triggerEvent\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"string\",\"name\":\"field\",\"type\":\"string\"}],\"name\":\"triggerEventWithDynamicTopic\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"int32\",\"name\":\"field1\",\"type\":\"int32\"},{\"internalType\":\"int32\",\"name\":\"field2\",\"type\":\"int32\"},{\"internalType\":\"int32\",\"name\":\"field3\",\"type\":\"int32\"}],\"name\":\"triggerWithFourTopics\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"string\",\"name\":\"field1\",\"type\":\"string\"},{\"internalType\":\"uint8[32]\",\"name\":\"field2\",\"type\":\"uint8[32]\"},{\"internalType\":\"bytes32\",\"name\":\"field3\",\"type\":\"bytes32\"}],\"name\":\"triggerWithFourTopicsWithHashed\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: 
"0x608060405234801561001057600080fd5b50600180548082018255600082905260048082047fb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf6908101805460086003958616810261010090810a8088026001600160401b0391820219909416939093179093558654808801909755848704909301805496909516909202900a91820291021990921691909117905561199c806100a96000396000f3fe608060405234801561001057600080fd5b50600436106100d45760003560e01c8063a90e199811610081578063ef4e1ced1161005b578063ef4e1ced146101de578063f6f871c8146101e5578063fbe9fbf6146101f857600080fd5b8063a90e19981461019b578063ab5e0b38146101ae578063dbfd7332146101cb57600080fd5b8063679004a4116100b2578063679004a41461012a5780636c9a43b61461013f5780637f002d671461018857600080fd5b80632c45576f146100d95780633272b66c1461010257806349eac2ac14610117575b600080fd5b6100ec6100e7366004610ca3565b61020a565b6040516100f99190610e0c565b60405180910390f35b610115610110366004610f4b565b6104e5565b005b610115610125366004611060565b61053a565b61013261083d565b6040516100f99190611152565b61011561014d3660046111a0565b600280547fffffffffffffffffffffffffffffffffffffffffffffffff00000000000000001667ffffffffffffffff92909216919091179055565b610115610196366004611060565b6108c9565b6101156101a93660046112d4565b610920565b6107c65b60405167ffffffffffffffff90911681526020016100f9565b6101156101d9366004611389565b61097a565b60036101b2565b6100ec6101f3366004611060565b6109b7565b60025467ffffffffffffffff166101b2565b610212610ac0565b600061021f6001846113cc565b8154811061022f5761022f611406565b6000918252602091829020604080516101008101909152600a90920201805460030b8252600181018054929391929184019161026a90611435565b80601f016020809104026020016040519081016040528092919081815260200182805461029690611435565b80156102e35780601f106102b8576101008083540402835291602001916102e3565b820191906000526020600020905b8154815290600101906020018083116102c657829003601f168201915b5050509183525050600282015460ff166020808301919091526040805161040081018083529190930192916003850191826000855b825461010083900a900460ff1681526020600192830181810494850194909303909202910180841161031857505050928452505050600482015473ffffffffffffffffffffffffffffffffffffffff1660208083019190915260058301805460408051828502810185018252828152940193928301828280156103d157602002820191906000526020600020905b815473ffffffffffffffffffffffffffffffffffffffff1681526001909101906020018083116103a6575b5050509183525050600682015460170b6020808301919091526040805180820182526007808601805460f01b7fffff0000000000000000000000000000000000000000000000000000000000001683528351808501855260088801805490930b8152600988018054959097019693959194868301949193928401919061045690611435565b80601f016020809104026020016040519081016040528092919081815260200182805461048290611435565b80156104cf5780601f106104a4576101008083540402835291602001916104cf565b820191906000526020600020905b8154815290600101906020018083116104b257829003601f168201915b5050509190925250505090525090525092915050565b81816040516104f5929190611482565b60405180910390207f3d969732b1bbbb9f1d7eb9f3f14e4cb50a74d950b3ef916a397b85dfbab93c67838360405161052e9291906114db565b60405180910390a25050565b60006040518061010001604052808c60030b81526020018b8b8080601f01602080910402602001604051908101604052809392919081815260200183838082843760009201919091525050509082525060ff8a166020808301919091526040805161040081810183529190930192918b9183908390808284376000920191909152505050815273ffffffffffffffffffffffffffffffffffffffff8816602080830191909152604080518883028181018401835289825291909301929189918991829190850190849080828437600092019190915250505090825250601785900b602082015260400161062c84611531565b905281546001808201845560009384526020938490208351600a
9093020180547fffffffffffffffffffffffffffffffffffffffffffffffffffffffff000000001663ffffffff909316929092178255928201519192909190820190610692908261161e565b5060408201516002820180547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001660ff90921691909117905560608201516106e09060038301906020610b0f565b5060808201516004820180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff90921691909117905560a08201518051610747916005840191602090910190610ba2565b5060c08201516006820180547fffffffffffffffff0000000000000000000000000000000000000000000000001677ffffffffffffffffffffffffffffffffffffffffffffffff90921691909117905560e082015180516007830180547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00001660f09290921c91909117815560208083015180516008860180547fffffffffffffffffffffffffffffffffffffffffffffffff00000000000000001667ffffffffffffffff90921691909117815591810151909190600986019061082a908261161e565b5050505050505050505050505050505050565b606060018054806020026020016040519081016040528092919081815260200182805480156108bf57602002820191906000526020600020906000905b82829054906101000a900467ffffffffffffffff1667ffffffffffffffff168152602001906008019060208260070104928301926001038202915080841161087a5790505b5050505050905090565b8960030b7f7188419dcd8b51877b71766f075f3626586c0ff190e7d056aa65ce9acb649a3d8a8a8a8a8a8a8a8a8a60405161090c9998979695949392919061187d565b60405180910390a250505050505050505050565b808260405161092f9190611937565b6040518091039020846040516109459190611973565b604051908190038120907f7220e4dbe4e9d0ed5f71acd022bc89c26748ac6784f2c548bc17bb8e52af34b090600090a4505050565b8060030b8260030b8460030b7f91c80dc390f3d041b3a04b0099b19634499541ea26972250986ee4b24a12fac560405160405180910390a4505050565b6109bf610ac0565b6040518061010001604052808c60030b81526020018b8b8080601f01602080910402602001604051908101604052809392919081815260200183838082843760009201919091525050509082525060ff8a166020808301919091526040805161040081810183529190930192918b9183908390808284376000920191909152505050815273ffffffffffffffffffffffffffffffffffffffff8816602080830191909152604080518883028181018401835289825291909301929189918991829190850190849080828437600092019190915250505090825250601785900b6020820152604001610aaf84611531565b90529b9a5050505050505050505050565b6040805161010081018252600080825260606020830181905292820152908101610ae8610c1c565b8152600060208201819052606060408301819052820152608001610b0a610c3b565b905290565b600183019183908215610b925791602002820160005b83821115610b6357835183826101000a81548160ff021916908360ff1602179055509260200192600101602081600001049283019260010302610b25565b8015610b905782816101000a81549060ff0219169055600101602081600001049283019260010302610b63565b505b50610b9e929150610c8e565b5090565b828054828255906000526020600020908101928215610b92579160200282015b82811115610b9257825182547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff909116178255602090920191600190910190610bc2565b6040518061040001604052806020906020820280368337509192915050565b604051806040016040528060007dffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff19168152602001610b0a6040518060400160405280600060070b8152602001606081525090565b5b80821115610b9e5760008155600101610c8f565b600060208284031215610cb557600080fd5b5035919050565b60005b83811015610cd7578181015183820152602001610cbf565b50506000910152565b60008151808452610cf8816020860160208601610cbc565b601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160200192915050565b8060005b6020808210610d3d575
0610d54565b825160ff1685529384019390910190600101610d2e565b50505050565b600081518084526020808501945080840160005b83811015610da057815173ffffffffffffffffffffffffffffffffffffffff1687529582019590820190600101610d6e565b509495945050505050565b7fffff00000000000000000000000000000000000000000000000000000000000081511682526000602082015160406020850152805160070b60408501526020810151905060406060850152610e046080850182610ce0565b949350505050565b60208152610e2060208201835160030b9052565b600060208301516104e0806040850152610e3e610500850183610ce0565b91506040850151610e54606086018260ff169052565b506060850151610e676080860182610d2a565b50608085015173ffffffffffffffffffffffffffffffffffffffff1661048085015260a08501517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe085840381016104a0870152610ec48483610d5a565b935060c08701519150610edd6104c087018360170b9052565b60e0870151915080868503018387015250610ef88382610dab565b9695505050505050565b60008083601f840112610f1457600080fd5b50813567ffffffffffffffff811115610f2c57600080fd5b602083019150836020828501011115610f4457600080fd5b9250929050565b60008060208385031215610f5e57600080fd5b823567ffffffffffffffff811115610f7557600080fd5b610f8185828601610f02565b90969095509350505050565b8035600381900b8114610f9f57600080fd5b919050565b803560ff81168114610f9f57600080fd5b806104008101831015610fc757600080fd5b92915050565b803573ffffffffffffffffffffffffffffffffffffffff81168114610f9f57600080fd5b60008083601f84011261100357600080fd5b50813567ffffffffffffffff81111561101b57600080fd5b6020830191508360208260051b8501011115610f4457600080fd5b8035601781900b8114610f9f57600080fd5b60006040828403121561105a57600080fd5b50919050565b6000806000806000806000806000806104e08b8d03121561108057600080fd5b6110898b610f8d565b995060208b013567ffffffffffffffff808211156110a657600080fd5b6110b28e838f01610f02565b909b5099508991506110c660408e01610fa4565b98506110d58e60608f01610fb5565b97506110e46104608e01610fcd565b96506104808d01359150808211156110fb57600080fd5b6111078e838f01610ff1565b909650945084915061111c6104a08e01611036565b93506104c08d013591508082111561113357600080fd5b506111408d828e01611048565b9150509295989b9194979a5092959850565b6020808252825182820181905260009190848201906040850190845b8181101561119457835167ffffffffffffffff168352928401929184019160010161116e565b50909695505050505050565b6000602082840312156111b257600080fd5b813567ffffffffffffffff811681146111ca57600080fd5b9392505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b6040805190810167ffffffffffffffff81118282101715611223576112236111d1565b60405290565b600082601f83011261123a57600080fd5b813567ffffffffffffffff80821115611255576112556111d1565b604051601f83017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0908116603f0116810190828211818310171561129b5761129b6111d1565b816040528381528660208588010111156112b457600080fd5b836020870160208301376000602085830101528094505050505092915050565b600080600061044084860312156112ea57600080fd5b833567ffffffffffffffff8082111561130257600080fd5b61130e87838801611229565b94506020915086603f87011261132357600080fd5b6040516104008101818110838211171561133f5761133f6111d1565b60405290508061042087018881111561135757600080fd5b8388015b818110156113795761136c81610fa4565b845292840192840161135b565b5095989097509435955050505050565b60008060006060848603121561139e57600080fd5b6113a784610f8d565b92506113b560208501610f8d565b91506113c360408501610f8d565b90509250925092565b81810381811115610fc7577f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b7f4e487b71000000000000000000000000000000000000000000000000000000006000
52603260045260246000fd5b600181811c9082168061144957607f821691505b60208210810361105a577f4e487b7100000000000000000000000000000000000000000000000000000000600052602260045260246000fd5b8183823760009101908152919050565b8183528181602085013750600060208284010152600060207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f840116840101905092915050565b602081526000610e04602083018486611492565b80357fffff00000000000000000000000000000000000000000000000000000000000081168114610f9f57600080fd5b8035600781900b8114610f9f57600080fd5b60006040823603121561154357600080fd5b61154b611200565b611554836114ef565b8152602083013567ffffffffffffffff8082111561157157600080fd5b81850191506040823603121561158657600080fd5b61158e611200565b6115978361151f565b81526020830135828111156115ab57600080fd5b6115b736828601611229565b60208301525080602085015250505080915050919050565b601f82111561161957600081815260208120601f850160051c810160208610156115f65750805b601f850160051c820191505b8181101561161557828155600101611602565b5050505b505050565b815167ffffffffffffffff811115611638576116386111d1565b61164c816116468454611435565b846115cf565b602080601f83116001811461169f57600084156116695750858301515b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff600386901b1c1916600185901b178555611615565b6000858152602081207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08616915b828110156116ec578886015182559484019460019091019084016116cd565b508582101561172857878501517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff600388901b60f8161c191681555b5050505050600190811b01905550565b8183526000602080850194508260005b85811015610da05773ffffffffffffffffffffffffffffffffffffffff61176e83610fcd565b1687529582019590820190600101611748565b7fffff0000000000000000000000000000000000000000000000000000000000006117ab826114ef565b168252600060208201357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc18336030181126117e557600080fd5b6040602085015282016117f78161151f565b60070b604085015260208101357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe182360301811261183457600080fd5b0160208101903567ffffffffffffffff81111561185057600080fd5b80360382131561185f57600080fd5b60406060860152611874608086018284611492565b95945050505050565b60006104c08083526118928184018c8e611492565b9050602060ff808c1682860152604085018b60005b848110156118cc57836118b983610fa4565b16835291840191908401906001016118a7565b505050505073ffffffffffffffffffffffffffffffffffffffff8816610440840152828103610460840152611902818789611738565b905061191461048084018660170b9052565b8281036104a08401526119278185611781565b9c9b505050505050505050505050565b60008183825b602080821061194c5750611963565b825160ff168452928301929091019060010161193d565b5050506104008201905092915050565b60008251611985818460208701610cbc565b919091019291505056fea164736f6c6343000813000a", } var ChainReaderTesterABI = ChainReaderTesterMetaData.ABI @@ -384,6 +384,18 @@ func (_ChainReaderTester *ChainReaderTesterTransactorSession) TriggerWithFourTop return _ChainReaderTester.Contract.TriggerWithFourTopics(&_ChainReaderTester.TransactOpts, field1, field2, field3) } +func (_ChainReaderTester *ChainReaderTesterTransactor) TriggerWithFourTopicsWithHashed(opts *bind.TransactOpts, field1 string, field2 [32]uint8, field3 [32]byte) (*types.Transaction, error) { + return _ChainReaderTester.contract.Transact(opts, "triggerWithFourTopicsWithHashed", field1, field2, field3) +} + +func (_ChainReaderTester *ChainReaderTesterSession) TriggerWithFourTopicsWithHashed(field1 string, field2 [32]uint8, field3 [32]byte) (*types.Transaction, error) { + return 
_ChainReaderTester.Contract.TriggerWithFourTopicsWithHashed(&_ChainReaderTester.TransactOpts, field1, field2, field3) +} + +func (_ChainReaderTester *ChainReaderTesterTransactorSession) TriggerWithFourTopicsWithHashed(field1 string, field2 [32]uint8, field3 [32]byte) (*types.Transaction, error) { + return _ChainReaderTester.Contract.TriggerWithFourTopicsWithHashed(&_ChainReaderTester.TransactOpts, field1, field2, field3) +} + type ChainReaderTesterTriggeredIterator struct { Event *ChainReaderTesterTriggered @@ -791,6 +803,151 @@ func (_ChainReaderTester *ChainReaderTesterFilterer) ParseTriggeredWithFourTopic return event, nil } +type ChainReaderTesterTriggeredWithFourTopicsWithHashedIterator struct { + Event *ChainReaderTesterTriggeredWithFourTopicsWithHashed + + contract *bind.BoundContract + event string + + logs chan types.Log + sub ethereum.Subscription + done bool + fail error +} + +func (it *ChainReaderTesterTriggeredWithFourTopicsWithHashedIterator) Next() bool { + + if it.fail != nil { + return false + } + + if it.done { + select { + case log := <-it.logs: + it.Event = new(ChainReaderTesterTriggeredWithFourTopicsWithHashed) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + + select { + case log := <-it.logs: + it.Event = new(ChainReaderTesterTriggeredWithFourTopicsWithHashed) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +func (it *ChainReaderTesterTriggeredWithFourTopicsWithHashedIterator) Error() error { + return it.fail +} + +func (it *ChainReaderTesterTriggeredWithFourTopicsWithHashedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +type ChainReaderTesterTriggeredWithFourTopicsWithHashed struct { + Field1 common.Hash + Field2 [32]uint8 + Field3 [32]byte + Raw types.Log +} + +func (_ChainReaderTester *ChainReaderTesterFilterer) FilterTriggeredWithFourTopicsWithHashed(opts *bind.FilterOpts, field1 []string, field2 [][32]uint8, field3 [][32]byte) (*ChainReaderTesterTriggeredWithFourTopicsWithHashedIterator, error) { + + var field1Rule []interface{} + for _, field1Item := range field1 { + field1Rule = append(field1Rule, field1Item) + } + var field2Rule []interface{} + for _, field2Item := range field2 { + field2Rule = append(field2Rule, field2Item) + } + var field3Rule []interface{} + for _, field3Item := range field3 { + field3Rule = append(field3Rule, field3Item) + } + + logs, sub, err := _ChainReaderTester.contract.FilterLogs(opts, "TriggeredWithFourTopicsWithHashed", field1Rule, field2Rule, field3Rule) + if err != nil { + return nil, err + } + return &ChainReaderTesterTriggeredWithFourTopicsWithHashedIterator{contract: _ChainReaderTester.contract, event: "TriggeredWithFourTopicsWithHashed", logs: logs, sub: sub}, nil +} + +func (_ChainReaderTester *ChainReaderTesterFilterer) WatchTriggeredWithFourTopicsWithHashed(opts *bind.WatchOpts, sink chan<- *ChainReaderTesterTriggeredWithFourTopicsWithHashed, field1 []string, field2 [][32]uint8, field3 [][32]byte) (event.Subscription, error) { + + var field1Rule []interface{} + for _, field1Item := range field1 { + field1Rule = append(field1Rule, field1Item) + } + var field2Rule []interface{} + for _, field2Item := range field2 { + field2Rule = append(field2Rule, field2Item) + } + var field3Rule 
[]interface{} + for _, field3Item := range field3 { + field3Rule = append(field3Rule, field3Item) + } + + logs, sub, err := _ChainReaderTester.contract.WatchLogs(opts, "TriggeredWithFourTopicsWithHashed", field1Rule, field2Rule, field3Rule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + + event := new(ChainReaderTesterTriggeredWithFourTopicsWithHashed) + if err := _ChainReaderTester.contract.UnpackLog(event, "TriggeredWithFourTopicsWithHashed", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +func (_ChainReaderTester *ChainReaderTesterFilterer) ParseTriggeredWithFourTopicsWithHashed(log types.Log) (*ChainReaderTesterTriggeredWithFourTopicsWithHashed, error) { + event := new(ChainReaderTesterTriggeredWithFourTopicsWithHashed) + if err := _ChainReaderTester.contract.UnpackLog(event, "TriggeredWithFourTopicsWithHashed", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + func (_ChainReaderTester *ChainReaderTester) ParseLog(log types.Log) (generated.AbigenLog, error) { switch log.Topics[0] { case _ChainReaderTester.abi.Events["Triggered"].ID: @@ -799,6 +956,8 @@ func (_ChainReaderTester *ChainReaderTester) ParseLog(log types.Log) (generated. return _ChainReaderTester.ParseTriggeredEventWithDynamicTopic(log) case _ChainReaderTester.abi.Events["TriggeredWithFourTopics"].ID: return _ChainReaderTester.ParseTriggeredWithFourTopics(log) + case _ChainReaderTester.abi.Events["TriggeredWithFourTopicsWithHashed"].ID: + return _ChainReaderTester.ParseTriggeredWithFourTopicsWithHashed(log) default: return nil, fmt.Errorf("abigen wrapper received unknown log topic: %v", log.Topics[0]) @@ -817,6 +976,10 @@ func (ChainReaderTesterTriggeredWithFourTopics) Topic() common.Hash { return common.HexToHash("0x91c80dc390f3d041b3a04b0099b19634499541ea26972250986ee4b24a12fac5") } +func (ChainReaderTesterTriggeredWithFourTopicsWithHashed) Topic() common.Hash { + return common.HexToHash("0x7220e4dbe4e9d0ed5f71acd022bc89c26748ac6784f2c548bc17bb8e52af34b0") +} + func (_ChainReaderTester *ChainReaderTester) Address() common.Address { return _ChainReaderTester.address } @@ -844,6 +1007,8 @@ type ChainReaderTesterInterface interface { TriggerWithFourTopics(opts *bind.TransactOpts, field1 int32, field2 int32, field3 int32) (*types.Transaction, error) + TriggerWithFourTopicsWithHashed(opts *bind.TransactOpts, field1 string, field2 [32]uint8, field3 [32]byte) (*types.Transaction, error) + FilterTriggered(opts *bind.FilterOpts, field []int32) (*ChainReaderTesterTriggeredIterator, error) WatchTriggered(opts *bind.WatchOpts, sink chan<- *ChainReaderTesterTriggered, field []int32) (event.Subscription, error) @@ -862,6 +1027,12 @@ type ChainReaderTesterInterface interface { ParseTriggeredWithFourTopics(log types.Log) (*ChainReaderTesterTriggeredWithFourTopics, error) + FilterTriggeredWithFourTopicsWithHashed(opts *bind.FilterOpts, field1 []string, field2 [][32]uint8, field3 [][32]byte) (*ChainReaderTesterTriggeredWithFourTopicsWithHashedIterator, error) + + WatchTriggeredWithFourTopicsWithHashed(opts *bind.WatchOpts, sink chan<- *ChainReaderTesterTriggeredWithFourTopicsWithHashed, field1 []string, field2 [][32]uint8, field3 [][32]byte) (event.Subscription, error) + + 
ParseTriggeredWithFourTopicsWithHashed(log types.Log) (*ChainReaderTesterTriggeredWithFourTopicsWithHashed, error) + ParseLog(log types.Log) (generated.AbigenLog, error) Address() common.Address diff --git a/core/gethwrappers/generation/generated-wrapper-dependency-versions-do-not-edit.txt b/core/gethwrappers/generation/generated-wrapper-dependency-versions-do-not-edit.txt index 41c270d61c0..3299989c582 100644 --- a/core/gethwrappers/generation/generated-wrapper-dependency-versions-do-not-edit.txt +++ b/core/gethwrappers/generation/generated-wrapper-dependency-versions-do-not-edit.txt @@ -24,7 +24,7 @@ batch_vrf_coordinator_v2: ../../contracts/solc/v0.8.6/BatchVRFCoordinatorV2/Batc batch_vrf_coordinator_v2plus: ../../contracts/solc/v0.8.19/BatchVRFCoordinatorV2Plus/BatchVRFCoordinatorV2Plus.abi ../../contracts/solc/v0.8.19/BatchVRFCoordinatorV2Plus/BatchVRFCoordinatorV2Plus.bin f13715b38b5b9084b08bffa571fb1c8ef686001535902e1255052f074b31ad4e blockhash_store: ../../contracts/solc/v0.8.19/BlockhashStore/BlockhashStore.abi ../../contracts/solc/v0.8.19/BlockhashStore/BlockhashStore.bin 31b118f9577240c8834c35f8b5a1440e82a6ca8aea702970de2601824b6ab0e1 chain_module_base: ../../contracts/solc/v0.8.19/ChainModuleBase/ChainModuleBase.abi ../../contracts/solc/v0.8.19/ChainModuleBase/ChainModuleBase.bin 39dfce79330e921e5c169051b11c6e5ea15cd4db5a7b09c06aabbe9658148915 -chain_reader_tester: ../../contracts/solc/v0.8.19/ChainReaderTester/ChainReaderTester.abi ../../contracts/solc/v0.8.19/ChainReaderTester/ChainReaderTester.bin b3718dad488f54de97d124221d96b867c81e11210084a1fad379cb8385d37ffe +chain_reader_tester: ../../contracts/solc/v0.8.19/ChainReaderTester/ChainReaderTester.abi ../../contracts/solc/v0.8.19/ChainReaderTester/ChainReaderTester.bin b207f9e6bf71e445a2664a602677011b87b80bf95c6352fd7869f1a9ddb08a5b chain_specific_util_helper: ../../contracts/solc/v0.8.6/ChainSpecificUtilHelper/ChainSpecificUtilHelper.abi ../../contracts/solc/v0.8.6/ChainSpecificUtilHelper/ChainSpecificUtilHelper.bin 66eb30b0717fefe05672df5ec863c0b9a5a654623c4757307a2726d8f31e26b1 counter: ../../contracts/solc/v0.8.6/Counter/Counter.abi ../../contracts/solc/v0.8.6/Counter/Counter.bin 6ca06e000e8423573ffa0bdfda749d88236ab3da2a4cbb4a868c706da90488c9 cron_upkeep_factory_wrapper: ../../contracts/solc/v0.8.6/CronUpkeepFactory/CronUpkeepFactory.abi - dacb0f8cdf54ae9d2781c5e720fc314b32ed5e58eddccff512c75d6067292cd7 diff --git a/core/services/relay/evm/chain_reader.go b/core/services/relay/evm/chain_reader.go index d84c2f00a9c..205fcbbcf07 100644 --- a/core/services/relay/evm/chain_reader.go +++ b/core/services/relay/evm/chain_reader.go @@ -128,6 +128,10 @@ func (cr *chainReader) init(chainContractReaders map[string]types.ChainContractR return err } } + + if cr.bindings.contractBindings[contractName] == nil { + return fmt.Errorf("%w: no read bindings added for contract: %s", commontypes.ErrInvalidConfig, contractName) + } cr.bindings.contractBindings[contractName].pollingFilter = chainContractReader.PollingFilter.ToLPFilter(eventSigsForContractFilter) } return nil @@ -259,7 +263,7 @@ func (cr *chainReader) addEvent(contractName, eventName string, a abi.ABI, chain return err } - // Encoder def's codec won't be used to encode, only for its type as input for GetLatestValue + // Encoder defs codec won't be used for encoding, but for storing caller filtering params which won't be hashed. 
if err := cr.addEncoderDef(contractName, eventName, filterArgs, nil, chainReaderDefinition.InputModifications); err != nil { return err } @@ -327,9 +331,11 @@ func (cr *chainReader) addQueryingReadBindings(contractName string, genericTopic } } +// getEventInput returns codec entry for expected incoming event params and the modifier to be applied to the params. func (cr *chainReader) getEventInput(def types.ChainReaderDefinition, contractName, eventName string) ( types.CodecEntry, codec.Modifier, error) { inputInfo := cr.parsed.EncoderDefs[WrapItemType(contractName, eventName, true)] + // TODO can this be simplified? Isn't this same as inputInfo.Modifier()? BCI-3909 inMod, err := def.InputModifications.ToModifier(DecoderHooks...) if err != nil { return nil, nil, err @@ -378,6 +384,8 @@ func (cr *chainReader) addDecoderDef(contractName, itemType string, outputs abi. return output.Init() } +// setupEventInput returns abi args where indexed flag is set to false because we expect caller to filter with params that aren't hashed. +// codecEntry has expected onchain types set, for e.g. indexed topics of type string or uint8[32] array are expected as common.Hash onchain. func setupEventInput(event abi.Event, inputFields []string) ([]abi.Argument, types.CodecEntry, map[string]bool) { topicFieldDefs := map[string]bool{} for _, value := range inputFields { diff --git a/core/services/relay/evm/event_binding.go b/core/services/relay/evm/event_binding.go index acfb1aa6300..97ddc99a107 100644 --- a/core/services/relay/evm/event_binding.go +++ b/core/services/relay/evm/event_binding.go @@ -9,6 +9,7 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" "github.com/google/uuid" "github.com/smartcontractkit/chainlink-common/pkg/codec" @@ -209,11 +210,13 @@ func (e *eventBinding) getLatestValueWithFilters( return err } + // convert caller chain agnostic params types to types representing onchain abi types, for e.g. bytes32. checkedParams, err := e.inputModifier.TransformToOnChain(offChain, "" /* unused */) if err != nil { return err } + // convert onchain params to native types similarly to generated abi wrappers, for e.g. fixed bytes32 abi type to [32]uint8. nativeParams, err := e.inputInfo.ToNative(reflect.ValueOf(checkedParams)) if err != nil { return err @@ -252,6 +255,8 @@ func (e *eventBinding) getLatestValueWithFilters( return e.decodeLog(ctx, logToUse, into) } +// convertToOffChainType creates a struct based on contract abi with applied codec modifiers. +// Created type shouldn't have hashed types for indexed topics since incoming params wouldn't be hashed. func (e *eventBinding) convertToOffChainType(params any) (any, error) { offChain, err := e.codec.CreateType(WrapItemType(e.contractName, e.eventName, true), true) if err != nil { @@ -287,43 +292,35 @@ func matchesRemainingFilters(log *logpoller.Log, filters []common.Hash) bool { return true } -func (e *eventBinding) encodeParams(item reflect.Value) ([]common.Hash, error) { - for item.Kind() == reflect.Pointer { - item = reflect.Indirect(item) +// encodeParams accepts nativeParams and encodes them to match onchain topics. 
+func (e *eventBinding) encodeParams(nativeParams reflect.Value) ([]common.Hash, error) { + for nativeParams.Kind() == reflect.Pointer { + nativeParams = reflect.Indirect(nativeParams) } - var topics []any - switch item.Kind() { + var params []any + switch nativeParams.Kind() { case reflect.Array, reflect.Slice: - native, err := representArray(item, e.inputInfo) + native, err := representArray(nativeParams, e.inputInfo) if err != nil { return nil, err } - topics = []any{native} + params = []any{native} case reflect.Struct, reflect.Map: var err error - if topics, err = unrollItem(item, e.inputInfo); err != nil { + if params, err = unrollItem(nativeParams, e.inputInfo); err != nil { return nil, err } default: - return nil, fmt.Errorf("%w: cannot encode kind %v", commontypes.ErrInvalidType, item.Kind()) + return nil, fmt.Errorf("%w: cannot encode kind %v", commontypes.ErrInvalidType, nativeParams.Kind()) } - // abi params allow you to Pack a pointers, but MakeTopics doesn't work with pointers. - if err := e.derefTopics(topics); err != nil { + // abi params allow you to Pack a pointers, but makeTopics doesn't work with pointers. + if err := e.derefTopics(params); err != nil { return nil, err } - hashes, err := abi.MakeTopics(topics) - if err != nil { - return nil, wrapInternalErr(err) - } - - if len(hashes) != 1 { - return nil, fmt.Errorf("%w: expected 1 filter set, got %d", commontypes.ErrInternal, len(hashes)) - } - - return hashes[0], nil + return e.makeTopics(params) } func (e *eventBinding) derefTopics(topics []any) error { @@ -340,11 +337,38 @@ func (e *eventBinding) derefTopics(topics []any) error { return nil } +// makeTopics encodes and hashes params filtering values to match onchain indexed topics. +func (e *eventBinding) makeTopics(params []any) ([]common.Hash, error) { + // make topic value for non-fixed bytes array manually because geth MakeTopics doesn't support it + for i, topic := range params { + if abiArg := e.inputInfo.Args()[i]; abiArg.Type.T == abi.ArrayTy && (abiArg.Type.Elem != nil && abiArg.Type.Elem.T == abi.UintTy) { + packed, err := abi.Arguments{abiArg}.Pack(topic) + if err != nil { + return nil, err + } + params[i] = crypto.Keccak256Hash(packed) + } + } + + hashes, err := abi.MakeTopics(params) + if err != nil { + return nil, wrapInternalErr(err) + } + + if len(hashes) != 1 { + return nil, fmt.Errorf("%w: expected 1 filter set, got %d", commontypes.ErrInternal, len(hashes)) + } + + return hashes[0], nil +} + func (e *eventBinding) decodeLog(ctx context.Context, log *logpoller.Log, into any) error { + // decode non indexed topics and apply output modifiers if err := e.codec.Decode(ctx, log.Data, into, WrapItemType(e.contractName, e.eventName, false)); err != nil { return err } + // decode indexed topics which is rarely useful since most indexed topic types get Keccak256 hashed and should be just used for log filtering. 
topics := make([]common.Hash, len(e.codecTopicInfo.Args())) if len(log.Topics) < len(topics)+1 { return fmt.Errorf("%w: not enough topics to decode", commontypes.ErrInvalidType) @@ -436,6 +460,7 @@ func (e *eventBinding) remapExpression(key string, expression query.Expression) // remap chain agnostic primitives to chain specific func (e *eventBinding) remapPrimitive(key string, expression query.Expression) (query.Expression, error) { switch primitive := expression.Primitive.(type) { + // TODO comparator primitive should undergo codec transformations and do hashed types handling similarly to how GetLatestValue handles it BCI-3910 case *primitives.Comparator: if val, ok := e.eventDataWords[primitive.Name]; ok { return logpoller.NewEventByWordFilter(e.hash, val, primitive.ValueComparators), nil diff --git a/core/services/relay/evm/evmtesting/chain_reader_interface_tester.go b/core/services/relay/evm/evmtesting/chain_reader_interface_tester.go index 4474f054dbc..7812ab202b1 100644 --- a/core/services/relay/evm/evmtesting/chain_reader_interface_tester.go +++ b/core/services/relay/evm/evmtesting/chain_reader_interface_tester.go @@ -32,9 +32,10 @@ import ( ) const ( - triggerWithDynamicTopic = "TriggeredEventWithDynamicTopic" - triggerWithAllTopics = "TriggeredWithFourTopics" - finalityDepth = 4 + triggerWithDynamicTopic = "TriggeredEventWithDynamicTopic" + triggerWithAllTopics = "TriggeredWithFourTopics" + triggerWithAllTopicsWithHashed = "TriggeredWithFourTopicsWithHashed" + finalityDepth = 4 ) type EVMChainReaderInterfaceTesterHelper[T TestingT[T]] interface { @@ -96,7 +97,7 @@ func (it *EVMChainReaderInterfaceTester[T]) Setup(t T) { AnyContractName: { ContractABI: chain_reader_tester.ChainReaderTesterMetaData.ABI, ContractPollingFilter: types.ContractPollingFilter{ - GenericEventNames: []string{EventName, EventWithFilterName}, + GenericEventNames: []string{EventName, EventWithFilterName, triggerWithAllTopicsWithHashed}, }, Configs: map[string]*types.ChainReaderDefinition{ MethodTakingLatestParamsReturningTestStruct: &methodTakingLatestParamsReturningTestStructConfig, @@ -145,6 +146,13 @@ func (it *EVMChainReaderInterfaceTester[T]) Setup(t T) { // These float values can map to different finality concepts across chains. ConfidenceConfirmations: map[string]int{"0.0": int(evmtypes.Unconfirmed), "1.0": int(evmtypes.Finalized)}, }, + triggerWithAllTopicsWithHashed: { + ChainSpecificName: triggerWithAllTopicsWithHashed, + ReadType: types.Event, + EventDefinitions: &types.EventDefinitions{ + InputFields: []string{"Field1", "Field2", "Field3"}, + }, + }, MethodReturningSeenStruct: { ChainSpecificName: "returnSeen", InputModifications: codec.ModifiersConfig{ diff --git a/core/services/relay/evm/evmtesting/run_tests.go b/core/services/relay/evm/evmtesting/run_tests.go index f958c055ca7..caa24e8ae2c 100644 --- a/core/services/relay/evm/evmtesting/run_tests.go +++ b/core/services/relay/evm/evmtesting/run_tests.go @@ -12,10 +12,9 @@ import ( clcommontypes "github.com/smartcontractkit/chainlink-common/pkg/types" "github.com/smartcontractkit/chainlink-common/pkg/types/query/primitives" + "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm" . "github.com/smartcontractkit/chainlink-common/pkg/types/interfacetests" //nolint common practice to import test mods with . 
-
-	"github.com/smartcontractkit/chainlink/v2/core/services/relay/evm"
 )
 
 func RunChainReaderEvmTests[T TestingT[T]](t T, it *EVMChainReaderInterfaceTester[T]) {
@@ -74,6 +73,31 @@ func RunChainReaderEvmTests[T TestingT[T]](t T, it *EVMChainReaderInterfaceTeste
 		assert.Equal(t, int32(3), latest.Field3)
 	})
 
+	t.Run("Filtering can be done on indexed topics that get hashed", func(t T) {
+		it.Setup(t)
+		it.dirtyContracts = true
+		triggerFourTopicsWithHashed(t, it, "1", [32]uint8{2}, [32]byte{5})
+		triggerFourTopicsWithHashed(t, it, "2", [32]uint8{2}, [32]byte{3})
+		triggerFourTopicsWithHashed(t, it, "1", [32]uint8{3}, [32]byte{3})
+
+		ctx := it.Helper.Context(t)
+		cr := it.GetChainReader(t)
+		require.NoError(t, cr.Bind(ctx, it.GetBindings(t)))
+		var latest struct {
+			Field3 [32]byte
+		}
+		params := struct {
+			Field1 string
+			Field2 [32]uint8
+			Field3 [32]byte
+		}{Field1: "1", Field2: [32]uint8{2}, Field3: [32]byte{5}}
+
+		time.Sleep(it.MaxWaitTimeForEvents())
+		require.NoError(t, cr.GetLatestValue(ctx, AnyContractName, triggerWithAllTopicsWithHashed, primitives.Unconfirmed, params, &latest))
+		// only checking the Field3 topic makes sense since it isn't hashed; to check the other fields we'd have to replicate Solidity's encoding and hashing
+		assert.Equal(t, [32]uint8{5}, latest.Field3)
+	})
+
 	t.Run("Bind returns error on missing contract at address", func(t T) {
 		it.Setup(t)
@@ -95,3 +119,11 @@ func triggerFourTopics[T TestingT[T]](t T, it *EVMChainReaderInterfaceTester[T],
 	it.IncNonce()
 	it.AwaitTx(t, tx)
 }
+
+func triggerFourTopicsWithHashed[T TestingT[T]](t T, it *EVMChainReaderInterfaceTester[T], i1 string, i2 [32]uint8, i3 [32]byte) {
+	tx, err := it.contractTesters[it.address].ChainReaderTesterTransactor.TriggerWithFourTopicsWithHashed(it.GetAuthWithGasSet(t), i1, i2, i3)
+	require.NoError(t, err)
+	it.Helper.Commit()
+	it.IncNonce()
+	it.AwaitTx(t, tx)
+}
diff --git a/core/services/relay/evm/types/codec_entry.go b/core/services/relay/evm/types/codec_entry.go
index 38242c43a2d..9a8103cf7f9 100644
--- a/core/services/relay/evm/types/codec_entry.go
+++ b/core/services/relay/evm/types/codec_entry.go
@@ -200,7 +200,7 @@ func getNativeAndCheckedTypesForArg(arg *abi.Argument) (reflect.Type, reflect.Ty
 			return reflect.TypeOf(common.Hash{}), reflect.TypeOf(common.Hash{}), nil
 		}
 		fallthrough
-	case abi.SliceTy, abi.TupleTy, abi.FixedBytesTy, abi.FixedPointTy, abi.FunctionTy:
+	case abi.SliceTy, abi.TupleTy, abi.FixedPointTy, abi.FunctionTy: // https://github.com/ethereum/go-ethereum/blob/release/1.12/accounts/abi/topics.go#L78
 		return nil, nil, fmt.Errorf("%w: unsupported indexed type: %v", commontypes.ErrInvalidConfig, arg.Type)
 	default:
diff --git a/core/services/relay/evm/types/codec_entry_test.go b/core/services/relay/evm/types/codec_entry_test.go
index 06b08fcecf2..64e0998716a 100644
--- a/core/services/relay/evm/types/codec_entry_test.go
+++ b/core/services/relay/evm/types/codec_entry_test.go
@@ -273,17 +273,27 @@ func TestCodecEntry(t *testing.T) {
 		assertHaveSameStructureAndNames(t, iNative.Type(), entry.CheckedType())
 	})
 
-	t.Run("Indexed non basic types change to hash", func(t *testing.T) {
-		anyType, err := abi.NewType("string", "", []abi.ArgumentMarshaling{})
+	t.Run("Indexed string and bytes array change to hash", func(t *testing.T) {
+		stringType, err := abi.NewType("string", "", []abi.ArgumentMarshaling{})
 		require.NoError(t, err)
-		entry := NewCodecEntry(abi.Arguments{{Name: "Name", Type: anyType, Indexed: true}}, nil, nil)
-		require.NoError(t, entry.Init())
-		nativeField,
ok := entry.CheckedType().FieldByName("Name") - require.True(t, ok) - assert.Equal(t, reflect.TypeOf(&common.Hash{}), nativeField.Type) - native, err := entry.ToNative(reflect.New(entry.CheckedType())) + arrayType, err := abi.NewType("uint8[32]", "", []abi.ArgumentMarshaling{}) require.NoError(t, err) - assertHaveSameStructureAndNames(t, native.Type().Elem(), entry.CheckedType()) + + abiArgs := abi.Arguments{ + {Name: "String", Type: stringType, Indexed: true}, + {Name: "Array", Type: arrayType, Indexed: true}, + } + + for i := 0; i < len(abiArgs); i++ { + entry := NewCodecEntry(abi.Arguments{abiArgs[i]}, nil, nil) + require.NoError(t, entry.Init()) + nativeField, ok := entry.CheckedType().FieldByName(abiArgs[i].Name) + require.True(t, ok) + assert.Equal(t, reflect.TypeOf(&common.Hash{}), nativeField.Type) + native, err := entry.ToNative(reflect.New(entry.CheckedType())) + require.NoError(t, err) + assertHaveSameStructureAndNames(t, native.Type().Elem(), entry.CheckedType()) + } }) t.Run("Too many indexed items returns an error", func(t *testing.T) { From 55e7c8b5055c975665a59199d5eda9fa21801a07 Mon Sep 17 00:00:00 2001 From: "Abdelrahman Soliman (Boda)" <2677789+asoliman92@users.noreply.github.com> Date: Tue, 6 Aug 2024 15:15:37 +0400 Subject: [PATCH 5/6] [CCIP-Merge] OCR2 plugins [CCIP-2942] (#14043) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Copy over core/services/ocr2/plugins/ccip from ccip repo (#14024) This is first part in merging offchain code from ccip repo (https://github.com/smartcontractkit/ccip) into chainlink Maintaining history across repos for specific direcotires was complicated so we chose to copy the directory right away. ----------------- Co-authored-by: Abdelrahman Soliman (Boda) <2677789+asoliman92@users.noreply.github.com> Co-authored-by: Agustina Aldasoro Co-authored-by: Amir Y <83904651+amirylm@users.noreply.github.com> Co-authored-by: André Vitor de Lima Matos Co-authored-by: AnieeG Co-authored-by: Anindita Ghosh <88458927+AnieeG@users.noreply.github.com> Co-authored-by: Chunkai Yang Co-authored-by: Connor Stein Co-authored-by: Evaldas LatoÅ¡kinas <34982762+elatoskinas@users.noreply.github.com> Co-authored-by: Jean Arnaud Co-authored-by: Justin Kaseman Co-authored-by: Makram Co-authored-by: Makram Kamaleddine Co-authored-by: Mateusz Sekara Co-authored-by: Matt Yang Co-authored-by: Patrick Co-authored-by: Rens Rooimans Co-authored-by: Roman Kashitsyn Co-authored-by: Roman Kashitsyn Co-authored-by: Ryan Stout Co-authored-by: Will Winder Co-authored-by: connorwstein Co-authored-by: dimitris Co-authored-by: dimkouv Co-authored-by: nogo <0xnogo@gmail.com> Co-authored-by: nogo <110664798+0xnogo@users.noreply.github.com> Co-authored-by: valerii-kabisov-cll <172247313+valerii-kabisov-cll@users.noreply.github.com> * [CCIP-2942] OCR2 plugins merge fixes [CCIP-2942] (#14026) * Add status checker original commit: https://github.com/smartcontractkit/ccip/commit/451984ad0469c7d685e305dba0a0d94eb0c9a053 * Add CCIP to types.go * Add Unsafe_SetConnectionsManager to feeds service for testing * Add CCIP feature to core.toml * Rebuilding config * Wiring up relayer and ocr2 delegates - this commit touches shared code ! 
* Adding mockery configuration for ccip specific code * Setting CCIP feature flag to true - it's no longer used anywhere --------- Co-authored-by: Mateusz Sekara * VRF-1138: Make CTF tests to reuse existing VRF Wrapper (#13854) * VRF-1138: Make CTF tests to reuse existing VRF Wrapper * VRF-1138: remove old code * VRF-1138: remove comments * VRF-1138: refactoring * VRF-1138: pr comments * VRF-1138: pr comments * VRF-1138: fixing lint issues * VRF-1138: PR comments * changes to support deterministic message hash in the remote target (#13935) * common version update to head of develop (#14030) * Add changeset --------- Co-authored-by: Agustina Aldasoro Co-authored-by: Amir Y <83904651+amirylm@users.noreply.github.com> Co-authored-by: André Vitor de Lima Matos Co-authored-by: AnieeG Co-authored-by: Anindita Ghosh <88458927+AnieeG@users.noreply.github.com> Co-authored-by: Chunkai Yang Co-authored-by: Connor Stein Co-authored-by: Evaldas LatoÅ¡kinas <34982762+elatoskinas@users.noreply.github.com> Co-authored-by: Jean Arnaud Co-authored-by: Justin Kaseman Co-authored-by: Makram Co-authored-by: Mateusz Sekara Co-authored-by: Matt Yang Co-authored-by: Patrick Co-authored-by: Rens Rooimans Co-authored-by: Roman Kashitsyn Co-authored-by: Roman Kashitsyn Co-authored-by: Ryan Stout Co-authored-by: Will Winder Co-authored-by: dimitris Co-authored-by: nogo <0xnogo@gmail.com> Co-authored-by: nogo <110664798+0xnogo@users.noreply.github.com> Co-authored-by: valerii-kabisov-cll <172247313+valerii-kabisov-cll@users.noreply.github.com> Co-authored-by: Ilja Pavlovs <5300706+iljapavlovs@users.noreply.github.com> Co-authored-by: Matthew Pendrey --- .changeset/young-mice-invent.md | 5 + .mockery.yaml | 93 +- core/config/docs/core.toml | 2 + core/config/toml/types.go | 4 + core/scripts/go.mod | 2 +- core/scripts/go.sum | 4 +- core/services/chainlink/config_test.go | 2 + .../testdata/config-empty-effective.toml | 1 + .../chainlink/testdata/config-full.toml | 1 + .../config-multi-chain-effective.toml | 1 + core/services/feeds/mocks/service.go | 33 + core/services/feeds/service.go | 14 + core/services/ocr2/delegate.go | 401 +++- core/services/ocr2/plugins/ccip/LICENSE.md | 55 + .../plugins/ccip/abihelpers/abi_helpers.go | 187 ++ .../ccip/abihelpers/abi_helpers_test.go | 147 ++ .../ocr2/plugins/ccip/ccipcommit/factory.go | 150 ++ .../plugins/ccip/ccipcommit/factory_test.go | 100 + .../plugins/ccip/ccipcommit/initializers.go | 241 +++ .../ocr2/plugins/ccip/ccipcommit/ocr2.go | 753 +++++++ .../ocr2/plugins/ccip/ccipcommit/ocr2_test.go | 1861 +++++++++++++++++ .../ocr2/plugins/ccip/ccipexec/batching.go | 540 +++++ .../plugins/ccip/ccipexec/batching_test.go | 910 ++++++++ .../ocr2/plugins/ccip/ccipexec/factory.go | 164 ++ .../plugins/ccip/ccipexec/factory_test.go | 67 + .../ocr2/plugins/ccip/ccipexec/gashelpers.go | 83 + .../plugins/ccip/ccipexec/gashelpers_test.go | 179 ++ .../ocr2/plugins/ccip/ccipexec/helpers.go | 53 + .../plugins/ccip/ccipexec/helpers_test.go | 96 + .../ocr2/plugins/ccip/ccipexec/inflight.go | 82 + .../plugins/ccip/ccipexec/inflight_test.go | 42 + .../plugins/ccip/ccipexec/initializers.go | 228 ++ .../ocr2/plugins/ccip/ccipexec/ocr2.go | 845 ++++++++ .../ocr2/plugins/ccip/ccipexec/ocr2_test.go | 1421 +++++++++++++ .../plugins/ccip/clo_ccip_integration_test.go | 137 ++ .../ocr2/plugins/ccip/config/chain_config.go | 48 + .../plugins/ccip/config/chain_config_test.go | 135 ++ .../ocr2/plugins/ccip/config/config.go | 152 ++ .../ocr2/plugins/ccip/config/config_test.go | 234 +++ 
.../plugins/ccip/config/offchain_config.go | 26 + .../plugins/ccip/config/type_and_version.go | 73 + .../ocr2/plugins/ccip/exportinternal.go | 135 ++ .../plugins/ccip/integration_legacy_test.go | 599 ++++++ .../ocr2/plugins/ccip/integration_test.go | 644 ++++++ .../plugins/ccip/internal/cache/autosync.go | 141 ++ .../ccip/internal/cache/autosync_test.go | 128 ++ .../ccip/internal/cache/chain_health.go | 273 +++ .../ccip/internal/cache/chain_health_test.go | 303 +++ .../ccip/internal/cache/commit_roots.go | 243 +++ .../ccip/internal/cache/commit_roots_test.go | 297 +++ .../internal/cache/commit_roots_unit_test.go | 212 ++ .../ocr2/plugins/ccip/internal/cache/lazy.go | 20 + .../plugins/ccip/internal/cache/lazy_test.go | 71 + .../internal/cache/mocks/chain_health_mock.go | 183 ++ .../internal/cache/observed_chain_health.go | 70 + .../cache/observed_chain_health_test.go | 62 + .../ocr2/plugins/ccip/internal/cache/once.go | 38 + .../plugins/ccip/internal/cache/once_test.go | 83 + .../plugins/ccip/internal/ccipcalc/addr.go | 44 + .../plugins/ccip/internal/ccipcalc/calc.go | 69 + .../ccip/internal/ccipcalc/calc_test.go | 220 ++ .../ccip/internal/ccipcommon/shortcuts.go | 140 ++ .../internal/ccipcommon/shortcuts_test.go | 196 ++ .../mocks/token_pool_batched_reader_mock.go | 142 ++ .../batchreader/token_pool_batch_reader.go | 192 ++ .../token_pool_batch_reader_test.go | 86 + .../mocks/price_registry_mock.go | 97 + .../ccipdata/ccipdataprovider/provider.go | 40 + .../internal/ccipdata/commit_store_reader.go | 81 + .../ccipdata/commit_store_reader_test.go | 423 ++++ .../internal/ccipdata/factory/commit_store.go | 121 ++ .../ccipdata/factory/commit_store_test.go | 37 + .../ccip/internal/ccipdata/factory/offramp.go | 125 ++ .../internal/ccipdata/factory/offramp_test.go | 44 + .../ccip/internal/ccipdata/factory/onramp.go | 88 + .../internal/ccipdata/factory/onramp_test.go | 45 + .../ccipdata/factory/price_registry.go | 82 + .../ccipdata/factory/price_registry_test.go | 46 + .../ccipdata/factory/versionfinder.go | 44 + .../mocks/commit_store_reader_mock.go | 985 +++++++++ .../ccipdata/mocks/offramp_reader_mock.go | 949 +++++++++ .../ccipdata/mocks/onramp_reader_mock.go | 480 +++++ .../mocks/price_registry_reader_mock.go | 498 +++++ .../ccipdata/mocks/token_pool_reader_mock.go | 127 ++ .../ccipdata/mocks/usdc_reader_mock.go | 97 + .../ccip/internal/ccipdata/offramp_reader.go | 13 + .../internal/ccipdata/offramp_reader_test.go | 416 ++++ .../ccip/internal/ccipdata/onramp_reader.go | 21 + .../internal/ccipdata/onramp_reader_test.go | 479 +++++ .../ccipdata/price_registry_reader.go | 14 + .../ccipdata/price_registry_reader_test.go | 296 +++ .../plugins/ccip/internal/ccipdata/reader.go | 78 + .../ccip/internal/ccipdata/reader_test.go | 72 + .../ccip/internal/ccipdata/retry_config.go | 9 + .../ccip/internal/ccipdata/test_utils.go | 36 + .../internal/ccipdata/token_pool_reader.go | 10 + .../ccip/internal/ccipdata/usdc_reader.go | 169 ++ .../ccipdata/usdc_reader_internal_test.go | 178 ++ .../internal/ccipdata/v1_0_0/commit_store.go | 456 ++++ .../ccipdata/v1_0_0/commit_store_test.go | 49 + .../ccip/internal/ccipdata/v1_0_0/hasher.go | 85 + .../internal/ccipdata/v1_0_0/hasher_test.go | 84 + .../ccip/internal/ccipdata/v1_0_0/offramp.go | 689 ++++++ .../ccipdata/v1_0_0/offramp_reader_test.go | 38 + .../v1_0_0/offramp_reader_unit_test.go | 231 ++ .../internal/ccipdata/v1_0_0/offramp_test.go | 232 ++ .../ccip/internal/ccipdata/v1_0_0/onramp.go | 240 +++ .../ccipdata/v1_0_0/price_registry.go | 310 +++ 
.../internal/ccipdata/v1_0_0/test_helpers.go | 90 + .../ccip/internal/ccipdata/v1_1_0/onramp.go | 70 + .../internal/ccipdata/v1_2_0/commit_store.go | 469 +++++ .../ccipdata/v1_2_0/commit_store_test.go | 224 ++ .../ccip/internal/ccipdata/v1_2_0/hasher.go | 101 + .../internal/ccipdata/v1_2_0/hasher_test.go | 78 + .../ccip/internal/ccipdata/v1_2_0/offramp.go | 340 +++ .../ccipdata/v1_2_0/offramp_reader_test.go | 38 + .../v1_2_0/offramp_reader_unit_test.go | 36 + .../internal/ccipdata/v1_2_0/offramp_test.go | 173 ++ .../ccip/internal/ccipdata/v1_2_0/onramp.go | 255 +++ .../internal/ccipdata/v1_2_0/onramp_test.go | 57 + .../ccipdata/v1_2_0/price_registry.go | 68 + .../internal/ccipdata/v1_2_0/test_helpers.go | 48 + .../internal/ccipdata/v1_2_0/token_pool.go | 48 + .../ccipdata/v1_2_0/token_pool_test.go | 24 + .../internal/ccipdata/v1_4_0/token_pool.go | 48 + .../ccipdata/v1_4_0/token_pool_test.go | 24 + .../internal/ccipdata/v1_5_0/commit_store.go | 59 + .../ccip/internal/ccipdata/v1_5_0/hasher.go | 101 + .../internal/ccipdata/v1_5_0/hasher_test.go | 78 + .../ccip/internal/ccipdata/v1_5_0/offramp.go | 199 ++ .../internal/ccipdata/v1_5_0/offramp_test.go | 1 + .../ccip/internal/ccipdata/v1_5_0/onramp.go | 259 +++ .../internal/ccipdata/v1_5_0/onramp_test.go | 210 ++ .../ccipdb/mocks/price_service_mock.go | 250 +++ .../ccip/internal/ccipdb/price_service.go | 400 ++++ .../internal/ccipdb/price_service_test.go | 755 +++++++ .../ccip/internal/logpollerutil/filters.go | 73 + .../internal/logpollerutil/filters_test.go | 156 ++ .../internal/observability/commit_store.go | 75 + .../ccip/internal/observability/metrics.go | 75 + .../internal/observability/metrics_test.go | 87 + .../ccip/internal/observability/offramp.go | 69 + .../ccip/internal/observability/onramp.go | 63 + .../observability/onramp_observed_test.go | 155 ++ .../internal/observability/price_registry.go | 64 + .../internal/oraclelib/backfilled_oracle.go | 218 ++ .../oraclelib/backfilled_oracle_test.go | 56 + .../plugins/ccip/internal/parseutil/bigint.go | 44 + .../ccip/internal/parseutil/bigint_test.go | 42 + .../plugins/ccip/internal/pricegetter/evm.go | 239 +++ .../ccip/internal/pricegetter/evm_test.go | 546 +++++ .../plugins/ccip/internal/pricegetter/mock.go | 211 ++ .../ccip/internal/pricegetter/pipeline.go | 114 + .../internal/pricegetter/pipeline_test.go | 178 ++ .../ccip/internal/pricegetter/pricegetter.go | 7 + .../ocr2/plugins/ccip/internal/rpclib/evm.go | 337 +++ .../plugins/ccip/internal/rpclib/evm_test.go | 223 ++ .../internal/rpclib/rpclibmocks/evm_mock.go | 97 + core/services/ocr2/plugins/ccip/metrics.go | 99 + .../ocr2/plugins/ccip/metrics_test.go | 47 + .../ocr2/plugins/ccip/observations.go | 149 ++ .../ocr2/plugins/ccip/observations_test.go | 305 +++ .../ocr2/plugins/ccip/pkg/leafer/leafer.go | 61 + .../plugins/ccip/prices/da_price_estimator.go | 176 ++ .../ccip/prices/da_price_estimator_test.go | 440 ++++ .../ccip/prices/exec_price_estimator.go | 65 + .../ccip/prices/exec_price_estimator_test.go | 351 ++++ .../ccip/prices/gas_price_estimator.go | 59 + .../prices/gas_price_estimator_commit_mock.go | 269 +++ .../prices/gas_price_estimator_exec_mock.go | 274 +++ .../ccip/prices/gas_price_estimator_mock.go | 331 +++ .../ocr2/plugins/ccip/proxycommitstore.go | 135 ++ .../ccip/testhelpers/ccip_contracts.go | 1580 ++++++++++++++ .../ocr2/plugins/ccip/testhelpers/config.go | 73 + .../ccip/testhelpers/integration/chainlink.go | 1035 +++++++++ .../ccip/testhelpers/integration/jobspec.go | 334 +++ 
.../ocr2/plugins/ccip/testhelpers/offramp.go | 119 ++ .../ccip/testhelpers/simulated_backend.go | 75 + .../plugins/ccip/testhelpers/structfields.go | 44 + .../testhelpers_1_4_0/ccip_contracts_1_4_0.go | 1585 ++++++++++++++ .../testhelpers_1_4_0/chainlink.go | 1045 +++++++++ .../testhelpers_1_4_0/config_1_4_0.go | 73 + .../ocr2/plugins/ccip/tokendata/bgworker.go | 213 ++ .../plugins/ccip/tokendata/bgworker_test.go | 188 ++ .../ccip/tokendata/http/http_client.go | 48 + .../tokendata/http/observed_http_client.go | 69 + .../observability/usdc_client_test.go | 151 ++ .../ocr2/plugins/ccip/tokendata/reader.go | 19 + .../plugins/ccip/tokendata/reader_mock.go | 143 ++ .../ocr2/plugins/ccip/tokendata/usdc/usdc.go | 339 +++ .../ccip/tokendata/usdc/usdc_blackbox_test.go | 119 ++ .../plugins/ccip/tokendata/usdc/usdc_test.go | 423 ++++ .../ocr2/plugins/ccip/transactions.rlp | Bin 0 -> 115794 bytes .../plugins/ccip/transmitter/transmitter.go | 143 ++ .../ccip/transmitter/transmitter_test.go | 282 +++ core/services/ocr2/plugins/ccip/vars.go | 14 + core/services/ocr2/validate/validate.go | 59 +- core/services/relay/evm/ccip.go | 205 ++ core/services/relay/evm/ccip_test.go | 18 + core/services/relay/evm/commit_provider.go | 309 +++ core/services/relay/evm/evm.go | 227 +- core/services/relay/evm/exec_provider.go | 391 ++++ .../mocks/ccip_transaction_status_checker.go | 104 + .../evm/statuschecker/txm_status_checker.go | 54 + .../statuschecker/txm_status_checker_test.go | 103 + core/services/synchronization/common.go | 2 + .../testdata/config-empty-effective.toml | 1 + core/web/resolver/testdata/config-full.toml | 1 + .../config-multi-chain-effective.toml | 1 + docs/CONFIG.md | 7 + go.mod | 8 +- go.sum | 9 +- integration-tests/go.mod | 2 +- integration-tests/go.sum | 4 +- integration-tests/load/go.mod | 2 +- integration-tests/load/go.sum | 4 +- testdata/scripts/node/validate/default.txtar | 1 + .../disk-based-logging-disabled.txtar | 1 + .../validate/disk-based-logging-no-dir.txtar | 1 + .../node/validate/disk-based-logging.txtar | 1 + .../node/validate/invalid-ocr-p2p.txtar | 1 + testdata/scripts/node/validate/invalid.txtar | 1 + testdata/scripts/node/validate/valid.txtar | 1 + testdata/scripts/node/validate/warnings.txtar | 1 + 224 files changed, 42778 insertions(+), 25 deletions(-) create mode 100644 .changeset/young-mice-invent.md create mode 100644 core/services/ocr2/plugins/ccip/LICENSE.md create mode 100644 core/services/ocr2/plugins/ccip/abihelpers/abi_helpers.go create mode 100644 core/services/ocr2/plugins/ccip/abihelpers/abi_helpers_test.go create mode 100644 core/services/ocr2/plugins/ccip/ccipcommit/factory.go create mode 100644 core/services/ocr2/plugins/ccip/ccipcommit/factory_test.go create mode 100644 core/services/ocr2/plugins/ccip/ccipcommit/initializers.go create mode 100644 core/services/ocr2/plugins/ccip/ccipcommit/ocr2.go create mode 100644 core/services/ocr2/plugins/ccip/ccipcommit/ocr2_test.go create mode 100644 core/services/ocr2/plugins/ccip/ccipexec/batching.go create mode 100644 core/services/ocr2/plugins/ccip/ccipexec/batching_test.go create mode 100644 core/services/ocr2/plugins/ccip/ccipexec/factory.go create mode 100644 core/services/ocr2/plugins/ccip/ccipexec/factory_test.go create mode 100644 core/services/ocr2/plugins/ccip/ccipexec/gashelpers.go create mode 100644 core/services/ocr2/plugins/ccip/ccipexec/gashelpers_test.go create mode 100644 core/services/ocr2/plugins/ccip/ccipexec/helpers.go create mode 100644 core/services/ocr2/plugins/ccip/ccipexec/helpers_test.go 
create mode 100644 core/services/ocr2/plugins/ccip/ccipexec/inflight.go create mode 100644 core/services/ocr2/plugins/ccip/ccipexec/inflight_test.go create mode 100644 core/services/ocr2/plugins/ccip/ccipexec/initializers.go create mode 100644 core/services/ocr2/plugins/ccip/ccipexec/ocr2.go create mode 100644 core/services/ocr2/plugins/ccip/ccipexec/ocr2_test.go create mode 100644 core/services/ocr2/plugins/ccip/clo_ccip_integration_test.go create mode 100644 core/services/ocr2/plugins/ccip/config/chain_config.go create mode 100644 core/services/ocr2/plugins/ccip/config/chain_config_test.go create mode 100644 core/services/ocr2/plugins/ccip/config/config.go create mode 100644 core/services/ocr2/plugins/ccip/config/config_test.go create mode 100644 core/services/ocr2/plugins/ccip/config/offchain_config.go create mode 100644 core/services/ocr2/plugins/ccip/config/type_and_version.go create mode 100644 core/services/ocr2/plugins/ccip/exportinternal.go create mode 100644 core/services/ocr2/plugins/ccip/integration_legacy_test.go create mode 100644 core/services/ocr2/plugins/ccip/integration_test.go create mode 100644 core/services/ocr2/plugins/ccip/internal/cache/autosync.go create mode 100644 core/services/ocr2/plugins/ccip/internal/cache/autosync_test.go create mode 100644 core/services/ocr2/plugins/ccip/internal/cache/chain_health.go create mode 100644 core/services/ocr2/plugins/ccip/internal/cache/chain_health_test.go create mode 100644 core/services/ocr2/plugins/ccip/internal/cache/commit_roots.go create mode 100644 core/services/ocr2/plugins/ccip/internal/cache/commit_roots_test.go create mode 100644 core/services/ocr2/plugins/ccip/internal/cache/commit_roots_unit_test.go create mode 100644 core/services/ocr2/plugins/ccip/internal/cache/lazy.go create mode 100644 core/services/ocr2/plugins/ccip/internal/cache/lazy_test.go create mode 100644 core/services/ocr2/plugins/ccip/internal/cache/mocks/chain_health_mock.go create mode 100644 core/services/ocr2/plugins/ccip/internal/cache/observed_chain_health.go create mode 100644 core/services/ocr2/plugins/ccip/internal/cache/observed_chain_health_test.go create mode 100644 core/services/ocr2/plugins/ccip/internal/cache/once.go create mode 100644 core/services/ocr2/plugins/ccip/internal/cache/once_test.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipcalc/addr.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipcalc/calc.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipcalc/calc_test.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipcommon/shortcuts.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipcommon/shortcuts_test.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/batchreader/mocks/token_pool_batched_reader_mock.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/batchreader/token_pool_batch_reader.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/batchreader/token_pool_batch_reader_test.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/ccipdataprovider/mocks/price_registry_mock.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/ccipdataprovider/provider.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/commit_store_reader.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/commit_store_reader_test.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/factory/commit_store.go create mode 100644 
core/services/ocr2/plugins/ccip/internal/ccipdata/factory/commit_store_test.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/factory/offramp.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/factory/offramp_test.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/factory/onramp.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/factory/onramp_test.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/factory/price_registry.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/factory/price_registry_test.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/factory/versionfinder.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/mocks/commit_store_reader_mock.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/mocks/offramp_reader_mock.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/mocks/onramp_reader_mock.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/mocks/price_registry_reader_mock.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/mocks/token_pool_reader_mock.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/mocks/usdc_reader_mock.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/offramp_reader.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/offramp_reader_test.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/onramp_reader.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/onramp_reader_test.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/price_registry_reader.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/price_registry_reader_test.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/reader.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/reader_test.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/retry_config.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/test_utils.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/token_pool_reader.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/usdc_reader.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/usdc_reader_internal_test.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/commit_store.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/commit_store_test.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/hasher.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/hasher_test.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/offramp.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/offramp_reader_test.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/offramp_reader_unit_test.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/offramp_test.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/onramp.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/price_registry.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/test_helpers.go create mode 100644 
core/services/ocr2/plugins/ccip/internal/ccipdata/v1_1_0/onramp.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/commit_store.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/commit_store_test.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/hasher.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/hasher_test.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/offramp.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/offramp_reader_test.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/offramp_reader_unit_test.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/offramp_test.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/onramp.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/onramp_test.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/price_registry.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/test_helpers.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/token_pool.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/token_pool_test.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/v1_4_0/token_pool.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/v1_4_0/token_pool_test.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/v1_5_0/commit_store.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/v1_5_0/hasher.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/v1_5_0/hasher_test.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/v1_5_0/offramp.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/v1_5_0/offramp_test.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/v1_5_0/onramp.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdata/v1_5_0/onramp_test.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdb/mocks/price_service_mock.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdb/price_service.go create mode 100644 core/services/ocr2/plugins/ccip/internal/ccipdb/price_service_test.go create mode 100644 core/services/ocr2/plugins/ccip/internal/logpollerutil/filters.go create mode 100644 core/services/ocr2/plugins/ccip/internal/logpollerutil/filters_test.go create mode 100644 core/services/ocr2/plugins/ccip/internal/observability/commit_store.go create mode 100644 core/services/ocr2/plugins/ccip/internal/observability/metrics.go create mode 100644 core/services/ocr2/plugins/ccip/internal/observability/metrics_test.go create mode 100644 core/services/ocr2/plugins/ccip/internal/observability/offramp.go create mode 100644 core/services/ocr2/plugins/ccip/internal/observability/onramp.go create mode 100644 core/services/ocr2/plugins/ccip/internal/observability/onramp_observed_test.go create mode 100644 core/services/ocr2/plugins/ccip/internal/observability/price_registry.go create mode 100644 core/services/ocr2/plugins/ccip/internal/oraclelib/backfilled_oracle.go create mode 100644 core/services/ocr2/plugins/ccip/internal/oraclelib/backfilled_oracle_test.go create mode 100644 core/services/ocr2/plugins/ccip/internal/parseutil/bigint.go create mode 100644 
core/services/ocr2/plugins/ccip/internal/parseutil/bigint_test.go create mode 100644 core/services/ocr2/plugins/ccip/internal/pricegetter/evm.go create mode 100644 core/services/ocr2/plugins/ccip/internal/pricegetter/evm_test.go create mode 100644 core/services/ocr2/plugins/ccip/internal/pricegetter/mock.go create mode 100644 core/services/ocr2/plugins/ccip/internal/pricegetter/pipeline.go create mode 100644 core/services/ocr2/plugins/ccip/internal/pricegetter/pipeline_test.go create mode 100644 core/services/ocr2/plugins/ccip/internal/pricegetter/pricegetter.go create mode 100644 core/services/ocr2/plugins/ccip/internal/rpclib/evm.go create mode 100644 core/services/ocr2/plugins/ccip/internal/rpclib/evm_test.go create mode 100644 core/services/ocr2/plugins/ccip/internal/rpclib/rpclibmocks/evm_mock.go create mode 100644 core/services/ocr2/plugins/ccip/metrics.go create mode 100644 core/services/ocr2/plugins/ccip/metrics_test.go create mode 100644 core/services/ocr2/plugins/ccip/observations.go create mode 100644 core/services/ocr2/plugins/ccip/observations_test.go create mode 100644 core/services/ocr2/plugins/ccip/pkg/leafer/leafer.go create mode 100644 core/services/ocr2/plugins/ccip/prices/da_price_estimator.go create mode 100644 core/services/ocr2/plugins/ccip/prices/da_price_estimator_test.go create mode 100644 core/services/ocr2/plugins/ccip/prices/exec_price_estimator.go create mode 100644 core/services/ocr2/plugins/ccip/prices/exec_price_estimator_test.go create mode 100644 core/services/ocr2/plugins/ccip/prices/gas_price_estimator.go create mode 100644 core/services/ocr2/plugins/ccip/prices/gas_price_estimator_commit_mock.go create mode 100644 core/services/ocr2/plugins/ccip/prices/gas_price_estimator_exec_mock.go create mode 100644 core/services/ocr2/plugins/ccip/prices/gas_price_estimator_mock.go create mode 100644 core/services/ocr2/plugins/ccip/proxycommitstore.go create mode 100644 core/services/ocr2/plugins/ccip/testhelpers/ccip_contracts.go create mode 100644 core/services/ocr2/plugins/ccip/testhelpers/config.go create mode 100644 core/services/ocr2/plugins/ccip/testhelpers/integration/chainlink.go create mode 100644 core/services/ocr2/plugins/ccip/testhelpers/integration/jobspec.go create mode 100644 core/services/ocr2/plugins/ccip/testhelpers/offramp.go create mode 100644 core/services/ocr2/plugins/ccip/testhelpers/simulated_backend.go create mode 100644 core/services/ocr2/plugins/ccip/testhelpers/structfields.go create mode 100644 core/services/ocr2/plugins/ccip/testhelpers/testhelpers_1_4_0/ccip_contracts_1_4_0.go create mode 100644 core/services/ocr2/plugins/ccip/testhelpers/testhelpers_1_4_0/chainlink.go create mode 100644 core/services/ocr2/plugins/ccip/testhelpers/testhelpers_1_4_0/config_1_4_0.go create mode 100644 core/services/ocr2/plugins/ccip/tokendata/bgworker.go create mode 100644 core/services/ocr2/plugins/ccip/tokendata/bgworker_test.go create mode 100644 core/services/ocr2/plugins/ccip/tokendata/http/http_client.go create mode 100644 core/services/ocr2/plugins/ccip/tokendata/http/observed_http_client.go create mode 100644 core/services/ocr2/plugins/ccip/tokendata/observability/usdc_client_test.go create mode 100644 core/services/ocr2/plugins/ccip/tokendata/reader.go create mode 100644 core/services/ocr2/plugins/ccip/tokendata/reader_mock.go create mode 100644 core/services/ocr2/plugins/ccip/tokendata/usdc/usdc.go create mode 100644 core/services/ocr2/plugins/ccip/tokendata/usdc/usdc_blackbox_test.go create mode 100644 
core/services/ocr2/plugins/ccip/tokendata/usdc/usdc_test.go create mode 100644 core/services/ocr2/plugins/ccip/transactions.rlp create mode 100644 core/services/ocr2/plugins/ccip/transmitter/transmitter.go create mode 100644 core/services/ocr2/plugins/ccip/transmitter/transmitter_test.go create mode 100644 core/services/ocr2/plugins/ccip/vars.go create mode 100644 core/services/relay/evm/ccip.go create mode 100644 core/services/relay/evm/ccip_test.go create mode 100644 core/services/relay/evm/commit_provider.go create mode 100644 core/services/relay/evm/exec_provider.go create mode 100644 core/services/relay/evm/statuschecker/mocks/ccip_transaction_status_checker.go create mode 100644 core/services/relay/evm/statuschecker/txm_status_checker.go create mode 100644 core/services/relay/evm/statuschecker/txm_status_checker_test.go diff --git a/.changeset/young-mice-invent.md b/.changeset/young-mice-invent.md new file mode 100644 index 00000000000..ba9c67198aa --- /dev/null +++ b/.changeset/young-mice-invent.md @@ -0,0 +1,5 @@ +--- +"chainlink": minor +--- + +Added CCIP plugins code from https://github.com/smartcontractkit/ccip/ #added diff --git a/.mockery.yaml b/.mockery.yaml index 77d2145a461..8fab61a5b9d 100644 --- a/.mockery.yaml +++ b/.mockery.yaml @@ -457,4 +457,95 @@ packages: filename: optimism_dispute_game_factory_interface.go outpkg: mock_optimism_dispute_game_factory interfaces: - OptimismDisputeGameFactoryInterface: + OptimismDisputeGameFactoryInterface: + github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/cache: + config: + filename: chain_health_mock.go + interfaces: + ChainHealthcheck: + github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata: + interfaces: + CommitStoreReader: + config: + filename: commit_store_reader_mock.go + OffRampReader: + config: + filename: offramp_reader_mock.go + OnRampReader: + config: + filename: onramp_reader_mock.go + PriceRegistryReader: + config: + filename: price_registry_reader_mock.go + TokenPoolReader: + config: + filename: token_pool_reader_mock.go + USDCReader: + config: + filename: usdc_reader_mock.go + github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/batchreader: + config: + filename: token_pool_batched_reader_mock.go + interfaces: + TokenPoolBatchedReader: + github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/ccipdataprovider: + config: + filename: price_registry_mock.go + interfaces: + PriceRegistry: + github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdb: + config: + filename: price_service_mock.go + interfaces: + PriceService: + github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/pricegetter: + config: + filename: mock.go + dir: "{{ .InterfaceDir }}/" + outpkg: pricegetter + interfaces: + PriceGetter: + config: + mockname: "Mock{{ .InterfaceName }}" + github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/statuschecker: + interfaces: + CCIPTransactionStatusChecker: + github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/rpclib: + config: + outpkg: rpclibmocks + interfaces: + BatchCaller: + config: + filename: batch_caller.go + dir: core/services/relay/evm/rpclibmocks + EvmBatchCaller: + config: + filename: evm_mock.go + dir: "{{ .InterfaceDir }}/rpclibmocks" + + github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/prices: + config: + dir: "{{ .InterfaceDir }}/" + outpkg: 
prices + interfaces: + GasPriceEstimatorCommit: + config: + mockname: "Mock{{ .InterfaceName }}" + filename: gas_price_estimator_commit_mock.go + GasPriceEstimatorExec: + config: + mockname: "Mock{{ .InterfaceName }}" + filename: gas_price_estimator_exec_mock.go + GasPriceEstimator: + config: + mockname: "Mock{{ .InterfaceName }}" + filename: gas_price_estimator_mock.go + github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/tokendata: + config: + filename: reader_mock.go + dir: "{{ .InterfaceDir }}/" + outpkg: tokendata + interfaces: + Reader: + config: + mockname: "Mock{{ .InterfaceName }}" \ No newline at end of file diff --git a/core/config/docs/core.toml b/core/config/docs/core.toml index d1b922cf291..d0960779c6c 100644 --- a/core/config/docs/core.toml +++ b/core/config/docs/core.toml @@ -13,6 +13,8 @@ FeedsManager = true # Default LogPoller = false # Default # UICSAKeys enables CSA Keys in the UI. UICSAKeys = false # Default +# CCIP enables the CCIP service. +CCIP = true # Default [Database] # DefaultIdleInTxSessionTimeout is the maximum time allowed for a transaction to be open and idle before timing out. See Postgres `idle_in_transaction_session_timeout` for more details. diff --git a/core/config/toml/types.go b/core/config/toml/types.go index f827f086225..0c91ddd81a9 100644 --- a/core/config/toml/types.go +++ b/core/config/toml/types.go @@ -303,6 +303,7 @@ type Feature struct { FeedsManager *bool LogPoller *bool UICSAKeys *bool + CCIP *bool } func (f *Feature) setFrom(f2 *Feature) { @@ -315,6 +316,9 @@ func (f *Feature) setFrom(f2 *Feature) { if v := f2.UICSAKeys; v != nil { f.UICSAKeys = v } + if v := f2.CCIP; v != nil { + f.CCIP = v + } } type Database struct { diff --git a/core/scripts/go.mod b/core/scripts/go.mod index fe4ee2c9748..0b7f510bcd8 100644 --- a/core/scripts/go.mod +++ b/core/scripts/go.mod @@ -60,7 +60,7 @@ require ( github.com/VictoriaMetrics/fastcache v1.12.1 // indirect github.com/XSAM/otelsql v0.27.0 // indirect github.com/armon/go-metrics v0.4.1 // indirect - github.com/avast/retry-go/v4 v4.5.1 // indirect + github.com/avast/retry-go/v4 v4.6.0 // indirect github.com/bahlo/generic-list-go v0.2.0 // indirect github.com/benbjohnson/clock v1.3.5 // indirect github.com/beorn7/perks v1.0.1 // indirect diff --git a/core/scripts/go.sum b/core/scripts/go.sum index 76eaf615279..6abc303888f 100644 --- a/core/scripts/go.sum +++ b/core/scripts/go.sum @@ -147,8 +147,8 @@ github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmV github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/avast/retry-go/v4 v4.5.1 h1:AxIx0HGi4VZ3I02jr78j5lZ3M6x1E0Ivxa6b0pUUh7o= -github.com/avast/retry-go/v4 v4.5.1/go.mod h1:/sipNsvNB3RRuT5iNcb6h73nw3IBmXJ/H3XrCQYSOpc= +github.com/avast/retry-go/v4 v4.6.0 h1:K9xNA+KeB8HHc2aWFuLb25Offp+0iVRXEvFx8IinRJA= +github.com/avast/retry-go/v4 v4.6.0/go.mod h1:gvWlPhBVsvBbLkVGDg/KwvBv0bEkCOLRRSHKIr2PyOE= github.com/aws/aws-sdk-go v1.22.1/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.23.20/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.45.25 h1:c4fLlh5sLdK2DCRTY1z0hyuJZU4ygxX8m1FswL6/nF4= diff --git a/core/services/chainlink/config_test.go b/core/services/chainlink/config_test.go index 
9b40e4dfce4..0038be8a979 100644 --- a/core/services/chainlink/config_test.go +++ b/core/services/chainlink/config_test.go @@ -261,6 +261,7 @@ func TestConfig_Marshal(t *testing.T) { FeedsManager: ptr(true), LogPoller: ptr(true), UICSAKeys: ptr(true), + CCIP: ptr(true), } full.Database = toml.Database{ DefaultIdleInTxSessionTimeout: commoncfg.MustNewDuration(time.Minute), @@ -771,6 +772,7 @@ Headers = ['Authorization: token', 'X-SomeOther-Header: value with spaces | and FeedsManager = true LogPoller = true UICSAKeys = true +CCIP = true `}, {"Database", Config{Core: toml.Core{Database: full.Database}}, `[Database] DefaultIdleInTxSessionTimeout = '1m0s' diff --git a/core/services/chainlink/testdata/config-empty-effective.toml b/core/services/chainlink/testdata/config-empty-effective.toml index 1bad3fd91c6..f1325d824ea 100644 --- a/core/services/chainlink/testdata/config-empty-effective.toml +++ b/core/services/chainlink/testdata/config-empty-effective.toml @@ -6,6 +6,7 @@ ShutdownGracePeriod = '5s' FeedsManager = true LogPoller = false UICSAKeys = false +CCIP = true [Database] DefaultIdleInTxSessionTimeout = '1h0m0s' diff --git a/core/services/chainlink/testdata/config-full.toml b/core/services/chainlink/testdata/config-full.toml index 21d68c23ada..d752398f039 100644 --- a/core/services/chainlink/testdata/config-full.toml +++ b/core/services/chainlink/testdata/config-full.toml @@ -6,6 +6,7 @@ ShutdownGracePeriod = '10s' FeedsManager = true LogPoller = true UICSAKeys = true +CCIP = true [Database] DefaultIdleInTxSessionTimeout = '1m0s' diff --git a/core/services/chainlink/testdata/config-multi-chain-effective.toml b/core/services/chainlink/testdata/config-multi-chain-effective.toml index c56e755d360..12427650f42 100644 --- a/core/services/chainlink/testdata/config-multi-chain-effective.toml +++ b/core/services/chainlink/testdata/config-multi-chain-effective.toml @@ -6,6 +6,7 @@ ShutdownGracePeriod = '5s' FeedsManager = true LogPoller = false UICSAKeys = false +CCIP = true [Database] DefaultIdleInTxSessionTimeout = '1h0m0s' diff --git a/core/services/feeds/mocks/service.go b/core/services/feeds/mocks/service.go index a660420759e..d37c327850d 100644 --- a/core/services/feeds/mocks/service.go +++ b/core/services/feeds/mocks/service.go @@ -1403,6 +1403,39 @@ func (_c *Service_SyncNodeInfo_Call) RunAndReturn(run func(context.Context, int6 return _c } +// Unsafe_SetConnectionsManager provides a mock function with given fields: _a0 +func (_m *Service) Unsafe_SetConnectionsManager(_a0 feeds.ConnectionsManager) { + _m.Called(_a0) +} + +// Service_Unsafe_SetConnectionsManager_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Unsafe_SetConnectionsManager' +type Service_Unsafe_SetConnectionsManager_Call struct { + *mock.Call +} + +// Unsafe_SetConnectionsManager is a helper method to define mock.On call +// - _a0 feeds.ConnectionsManager +func (_e *Service_Expecter) Unsafe_SetConnectionsManager(_a0 interface{}) *Service_Unsafe_SetConnectionsManager_Call { + return &Service_Unsafe_SetConnectionsManager_Call{Call: _e.mock.On("Unsafe_SetConnectionsManager", _a0)} +} + +func (_c *Service_Unsafe_SetConnectionsManager_Call) Run(run func(_a0 feeds.ConnectionsManager)) *Service_Unsafe_SetConnectionsManager_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(feeds.ConnectionsManager)) + }) + return _c +} + +func (_c *Service_Unsafe_SetConnectionsManager_Call) Return() *Service_Unsafe_SetConnectionsManager_Call { + _c.Call.Return() + return _c +} + +func (_c 
*Service_Unsafe_SetConnectionsManager_Call) RunAndReturn(run func(feeds.ConnectionsManager)) *Service_Unsafe_SetConnectionsManager_Call { + _c.Call.Return(run) + return _c +} + // UpdateChainConfig provides a mock function with given fields: ctx, cfg func (_m *Service) UpdateChainConfig(ctx context.Context, cfg feeds.ChainConfig) (int64, error) { ret := _m.Called(ctx, cfg) diff --git a/core/services/feeds/service.go b/core/services/feeds/service.go index b11b2b0167a..1733d4a7582 100644 --- a/core/services/feeds/service.go +++ b/core/services/feeds/service.go @@ -107,6 +107,9 @@ type Service interface { ListSpecsByJobProposalIDs(ctx context.Context, ids []int64) ([]JobProposalSpec, error) RejectSpec(ctx context.Context, id int64) error UpdateSpecDefinition(ctx context.Context, id int64, spec string) error + + // Unsafe_SetConnectionsManager Only for testing + Unsafe_SetConnectionsManager(ConnectionsManager) } type service struct { @@ -1105,6 +1108,16 @@ func (s *service) observeJobProposalCounts(ctx context.Context) error { return nil } +// Unsafe_SetConnectionsManager sets the ConnectionsManager on the service. +// +// We need to be able to inject a mock for the client to facilitate integration +// tests. +// +// ONLY TO BE USED FOR TESTING. +func (s *service) Unsafe_SetConnectionsManager(connMgr ConnectionsManager) { + s.connMgr = connMgr +} + // findExistingJobForOCR2 looks for existing job for OCR2 func findExistingJobForOCR2(ctx context.Context, j *job.Job, tx job.ORM) (int32, error) { var contractID string @@ -1501,5 +1514,6 @@ func (ns NullService) IsJobManaged(ctx context.Context, jobID int64) (bool, erro func (ns NullService) UpdateSpecDefinition(ctx context.Context, id int64, spec string) error { return ErrFeedsManagerDisabled } +func (ns NullService) Unsafe_SetConnectionsManager(_ ConnectionsManager) {} //revive:enable diff --git a/core/services/ocr2/delegate.go b/core/services/ocr2/delegate.go index db0f4e9725e..5c44825ca2a 100644 --- a/core/services/ocr2/delegate.go +++ b/core/services/ocr2/delegate.go @@ -6,18 +6,25 @@ import ( "encoding/json" "fmt" "log" + "strconv" "time" + "gopkg.in/guregu/null.v4" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + "github.com/smartcontractkit/chainlink-common/pkg/types/core" + + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config" + "github.com/ethereum/go-ethereum/common" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" - "google.golang.org/grpc" - "gopkg.in/guregu/null.v4" - + chainselectors "github.com/smartcontractkit/chain-selectors" "github.com/smartcontractkit/libocr/commontypes" libocr2 "github.com/smartcontractkit/libocr/offchainreporting2plus" "github.com/smartcontractkit/libocr/offchainreporting2plus/ocr3types" ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + "google.golang.org/grpc" ocr2keepers20 "github.com/smartcontractkit/chainlink-automation/pkg/v2" ocr2keepers20config "github.com/smartcontractkit/chainlink-automation/pkg/v2/config" @@ -26,13 +33,11 @@ import ( ocr2keepers20runner "github.com/smartcontractkit/chainlink-automation/pkg/v2/runner" ocr2keepers21config "github.com/smartcontractkit/chainlink-automation/pkg/v3/config" ocr2keepers21 "github.com/smartcontractkit/chainlink-automation/pkg/v3/plugin" - "github.com/smartcontractkit/chainlink-common/pkg/loop" "github.com/smartcontractkit/chainlink-common/pkg/loop/reportingplugins" "github.com/smartcontractkit/chainlink-common/pkg/loop/reportingplugins/ocr3" 
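On the CCIP delegate wiring added below: a job spec identifies only the destination chain, so the source chain has to be derived from the offramp's static config, whose SourceChainSelector is a CCIP chain selector rather than an EVM chain ID. chainselectors.ChainIdFromSelector maps the selector back to a chain ID, which is then formatted as a decimal string to look up the source relayer. A minimal sketch of that conversion (the selector value is an assumed example for Ethereum mainnet, not taken from this patch):

```go
package main

import (
	"fmt"
	"strconv"

	chainselectors "github.com/smartcontractkit/chain-selectors"
)

func main() {
	// A CCIP offramp reports its source chain as a chain selector, not a chain ID.
	// The value below is an assumed example selector.
	var sourceChainSelector uint64 = 5009297550715157269

	chainID, err := chainselectors.ChainIdFromSelector(sourceChainSelector)
	if err != nil {
		// unknown selector: provider construction would be aborted, as in the delegate code below
		panic(err)
	}

	// Relayers are looked up by chain ID as a decimal string.
	relayChainID := strconv.FormatUint(chainID, 10)
	fmt.Printf("selector %d -> chain ID %s\n", sourceChainSelector, relayChainID)
}
```

The same selector-to-ID conversion is used by both ccipCommitGetSrcProvider and ccipExecGetSrcProvider before calling RelayGetter.Get with the resulting RelayID.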
"github.com/smartcontractkit/chainlink-common/pkg/sqlutil" "github.com/smartcontractkit/chainlink-common/pkg/types" - "github.com/smartcontractkit/chainlink-common/pkg/types/core" llotypes "github.com/smartcontractkit/chainlink-common/pkg/types/llo" "github.com/smartcontractkit/chainlink-common/pkg/utils/mailbox" datastreamsllo "github.com/smartcontractkit/chainlink-data-streams/llo" @@ -47,12 +52,15 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/services/keystore/chaintype" "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/ocr2key" "github.com/smartcontractkit/chainlink/v2/core/services/llo" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/ccipcommit" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/ccipexec" "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/functions" "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/generic" lloconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/llo/config" "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/median" "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/mercury" "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/autotelemetry21" ocr2keeper21core "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core" "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/validate" @@ -68,6 +76,8 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/services/synchronization" "github.com/smartcontractkit/chainlink/v2/core/services/telemetry" "github.com/smartcontractkit/chainlink/v2/plugins" + + ccipconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config" ) type ErrJobSpecNoRelayer struct { @@ -284,6 +294,7 @@ func (d *Delegate) cleanupEVM(ctx context.Context, jb job.Job, relayID types.Rel // an inconsistent state. This assumes UnregisterFilter will return nil if the filter wasn't found // at all (no rows deleted). spec := jb.OCR2OracleSpec + transmitterID := spec.TransmitterID.String chain, err := d.legacyChains.Get(relayID.ChainID) if err != nil { d.lggr.Errorw("cleanupEVM: failed to get chain id", "chainId", relayID.ChainID, "err", err) @@ -305,6 +316,51 @@ func (d *Delegate) cleanupEVM(ctx context.Context, jb job.Job, relayID types.Rel d.lggr.Errorw("failed to derive ocr2keeper filter names from spec", "err", err, "spec", spec) } filters = append(filters, filters21...) 
+ case types.CCIPCommit: + // Write PluginConfig bytes to send source/dest relayer provider + info outside of top level rargs/pargs over the wire + var pluginJobSpecConfig ccipconfig.CommitPluginJobSpecConfig + err = json.Unmarshal(spec.PluginConfig.Bytes(), &pluginJobSpecConfig) + if err != nil { + return err + } + + dstProvider, err2 := d.ccipCommitGetDstProvider(ctx, jb, pluginJobSpecConfig, transmitterID) + if err2 != nil { + return err + } + + srcProvider, _, err2 := d.ccipCommitGetSrcProvider(ctx, jb, pluginJobSpecConfig, transmitterID, dstProvider) + if err2 != nil { + return err + } + err2 = ccipcommit.UnregisterCommitPluginLpFilters(srcProvider, dstProvider) + if err2 != nil { + d.lggr.Errorw("failed to unregister ccip commit plugin filters", "err", err2, "spec", spec) + } + return nil + case types.CCIPExecution: + // PROVIDER BASED ARG CONSTRUCTION + // Write PluginConfig bytes to send source/dest relayer provider + info outside of top level rargs/pargs over the wire + var pluginJobSpecConfig ccipconfig.ExecPluginJobSpecConfig + err = json.Unmarshal(spec.PluginConfig.Bytes(), &pluginJobSpecConfig) + if err != nil { + return err + } + + dstProvider, err2 := d.ccipExecGetDstProvider(ctx, jb, pluginJobSpecConfig, transmitterID) + if err2 != nil { + return err + } + + srcProvider, _, err2 := d.ccipExecGetSrcProvider(ctx, jb, pluginJobSpecConfig, transmitterID, dstProvider) + if err2 != nil { + return err + } + err2 = ccipexec.UnregisterExecPluginLpFilters(srcProvider, dstProvider) + if err2 != nil { + d.lggr.Errorw("failed to unregister ccip exec plugin filters", "err", err2, "spec", spec) + } + return nil default: return nil } @@ -448,6 +504,10 @@ func (d *Delegate) ServicesForSpec(ctx context.Context, jb job.Job) ([]job.Servi return d.newServicesGenericPlugin(ctx, lggr, jb, bootstrapPeers, kb, ocrDB, lc, d.capabilitiesRegistry, kvStore) + case types.CCIPCommit: + return d.newServicesCCIPCommit(ctx, lggr, jb, bootstrapPeers, kb, ocrDB, lc, transmitterID) + case types.CCIPExecution: + return d.newServicesCCIPExecution(ctx, lggr, jb, bootstrapPeers, kb, ocrDB, lc, transmitterID) default: return nil, errors.Errorf("plugin type %s not supported", spec.PluginType) } @@ -1498,6 +1558,337 @@ func (d *Delegate) newServicesOCR2Functions( return append([]job.ServiceCtx{functionsProvider, thresholdProvider, s4Provider, ocrLogger}, functionsServices...), nil } +func (d *Delegate) newServicesCCIPCommit(ctx context.Context, lggr logger.SugaredLogger, jb job.Job, bootstrapPeers []commontypes.BootstrapperLocator, kb ocr2key.KeyBundle, ocrDB *db, lc ocrtypes.LocalConfig, transmitterID string) ([]job.ServiceCtx, error) { + spec := jb.OCR2OracleSpec + if spec.Relay != relay.NetworkEVM { + return nil, fmt.Errorf("non evm chains are not supported for CCIP commit") + } + dstRid, err := spec.RelayID() + if err != nil { + return nil, ErrJobSpecNoRelayer{Err: err, PluginName: string(spec.PluginType)} + } + + logError := func(msg string) { + lggr.ErrorIf(d.jobORM.RecordError(context.Background(), jb.ID, msg), "unable to record error") + } + + // Write PluginConfig bytes to send source/dest relayer provider + info outside of top level rargs/pargs over the wire + var pluginJobSpecConfig ccipconfig.CommitPluginJobSpecConfig + err = json.Unmarshal(spec.PluginConfig.Bytes(), &pluginJobSpecConfig) + if err != nil { + return nil, err + } + + dstChainID, err := strconv.ParseInt(dstRid.ChainID, 10, 64) + if err != nil { + return nil, err + } + + dstProvider, err := d.ccipCommitGetDstProvider(ctx, jb, 
pluginJobSpecConfig, transmitterID) + if err != nil { + return nil, err + } + + srcProvider, srcChainID, err := d.ccipCommitGetSrcProvider(ctx, jb, pluginJobSpecConfig, transmitterID, dstProvider) + if err != nil { + return nil, err + } + + oracleArgsNoPlugin := libocr2.OCR2OracleArgs{ + BinaryNetworkEndpointFactory: d.peerWrapper.Peer2, + V2Bootstrappers: bootstrapPeers, + ContractTransmitter: dstProvider.ContractTransmitter(), + ContractConfigTracker: dstProvider.ContractConfigTracker(), + Database: ocrDB, + LocalConfig: lc, + MonitoringEndpoint: d.monitoringEndpointGen.GenMonitoringEndpoint( + dstRid.Network, + dstRid.ChainID, + spec.ContractID, + synchronization.OCR2CCIPCommit, + ), + OffchainConfigDigester: dstProvider.OffchainConfigDigester(), + OffchainKeyring: kb, + OnchainKeyring: kb, + MetricsRegisterer: prometheus.WrapRegistererWith(map[string]string{"job_name": jb.Name.ValueOrZero()}, prometheus.DefaultRegisterer), + } + + return ccipcommit.NewCommitServices(ctx, d.ds, srcProvider, dstProvider, d.legacyChains, jb, lggr, d.pipelineRunner, oracleArgsNoPlugin, d.isNewlyCreatedJob, int64(srcChainID), dstChainID, logError) +} + +func newCCIPCommitPluginBytes(isSourceProvider bool, sourceStartBlock uint64, destStartBlock uint64) config.CommitPluginConfig { + return config.CommitPluginConfig{ + IsSourceProvider: isSourceProvider, + SourceStartBlock: sourceStartBlock, + DestStartBlock: destStartBlock, + } +} + +func (d *Delegate) ccipCommitGetDstProvider(ctx context.Context, jb job.Job, pluginJobSpecConfig ccipconfig.CommitPluginJobSpecConfig, transmitterID string) (types.CCIPCommitProvider, error) { + spec := jb.OCR2OracleSpec + if spec.Relay != relay.NetworkEVM { + return nil, fmt.Errorf("non evm chains are not supported for CCIP commit") + } + + dstRid, err := spec.RelayID() + if err != nil { + return nil, ErrJobSpecNoRelayer{Err: err, PluginName: string(spec.PluginType)} + } + + // Write PluginConfig bytes to send source/dest relayer provider + info outside of top level rargs/pargs over the wire + dstConfigBytes, err := newCCIPCommitPluginBytes(false, pluginJobSpecConfig.SourceStartBlock, pluginJobSpecConfig.DestStartBlock).Encode() + if err != nil { + return nil, err + } + + // Get provider from dest chain + dstRelayer, err := d.RelayGetter.Get(dstRid) + if err != nil { + return nil, err + } + + provider, err := dstRelayer.NewPluginProvider(ctx, + types.RelayArgs{ + ContractID: spec.ContractID, + RelayConfig: spec.RelayConfig.Bytes(), + ProviderType: string(types.CCIPCommit), + }, + types.PluginArgs{ + TransmitterID: transmitterID, + PluginConfig: dstConfigBytes, + }) + if err != nil { + return nil, fmt.Errorf("unable to create ccip commit provider: %w", err) + } + dstProvider, ok := provider.(types.CCIPCommitProvider) + if !ok { + return nil, fmt.Errorf("could not coerce PluginProvider to CCIPCommitProvider") + } + + return dstProvider, nil +} + +func (d *Delegate) ccipCommitGetSrcProvider(ctx context.Context, jb job.Job, pluginJobSpecConfig ccipconfig.CommitPluginJobSpecConfig, transmitterID string, dstProvider types.CCIPCommitProvider) (srcProvider types.CCIPCommitProvider, srcChainID uint64, err error) { + spec := jb.OCR2OracleSpec + srcConfigBytes, err := newCCIPCommitPluginBytes(true, pluginJobSpecConfig.SourceStartBlock, pluginJobSpecConfig.DestStartBlock).Encode() + if err != nil { + return nil, 0, err + } + // Use OffRampReader to get src chain ID and fetch the src relayer + + var pluginConfig ccipconfig.CommitPluginJobSpecConfig + err = 
json.Unmarshal(spec.PluginConfig.Bytes(), &pluginConfig) + if err != nil { + return nil, 0, err + } + offRampAddress := pluginConfig.OffRamp + offRampReader, err := dstProvider.NewOffRampReader(ctx, offRampAddress) + if err != nil { + return nil, 0, fmt.Errorf("create offRampReader: %w", err) + } + + offRampConfig, err := offRampReader.GetStaticConfig(ctx) + if err != nil { + return nil, 0, fmt.Errorf("get offRamp static config: %w", err) + } + + srcChainID, err = chainselectors.ChainIdFromSelector(offRampConfig.SourceChainSelector) + if err != nil { + return nil, 0, err + } + srcChainIDstr := strconv.FormatUint(srcChainID, 10) + + // Get provider from source chain + srcRelayer, err := d.RelayGetter.Get(types.RelayID{Network: spec.Relay, ChainID: srcChainIDstr}) + if err != nil { + return nil, 0, err + } + provider, err := srcRelayer.NewPluginProvider(ctx, + types.RelayArgs{ + ContractID: "", // Contract address only valid for dst chain + RelayConfig: spec.RelayConfig.Bytes(), + ProviderType: string(types.CCIPCommit), + }, + types.PluginArgs{ + TransmitterID: transmitterID, + PluginConfig: srcConfigBytes, + }) + if err != nil { + return nil, 0, fmt.Errorf("srcRelayer.NewPluginProvider: %w", err) + } + srcProvider, ok := provider.(types.CCIPCommitProvider) + if !ok { + return nil, 0, fmt.Errorf("could not coerce PluginProvider to CCIPCommitProvider") + } + + return +} + +func (d *Delegate) newServicesCCIPExecution(ctx context.Context, lggr logger.SugaredLogger, jb job.Job, bootstrapPeers []commontypes.BootstrapperLocator, kb ocr2key.KeyBundle, ocrDB *db, lc ocrtypes.LocalConfig, transmitterID string) ([]job.ServiceCtx, error) { + spec := jb.OCR2OracleSpec + if spec.Relay != relay.NetworkEVM { + return nil, fmt.Errorf("non evm chains are not supported for CCIP execution") + } + dstRid, err := spec.RelayID() + + if err != nil { + return nil, ErrJobSpecNoRelayer{Err: err, PluginName: string(spec.PluginType)} + } + + logError := func(msg string) { + lggr.ErrorIf(d.jobORM.RecordError(context.Background(), jb.ID, msg), "unable to record error") + } + + // PROVIDER BASED ARG CONSTRUCTION + // Write PluginConfig bytes to send source/dest relayer provider + info outside of top level rargs/pargs over the wire + var pluginJobSpecConfig ccipconfig.ExecPluginJobSpecConfig + err = json.Unmarshal(spec.PluginConfig.Bytes(), &pluginJobSpecConfig) + if err != nil { + return nil, err + } + + dstChainID, err := strconv.ParseInt(dstRid.ChainID, 10, 64) + if err != nil { + return nil, err + } + + dstProvider, err := d.ccipExecGetDstProvider(ctx, jb, pluginJobSpecConfig, transmitterID) + if err != nil { + return nil, err + } + + srcProvider, srcChainID, err := d.ccipExecGetSrcProvider(ctx, jb, pluginJobSpecConfig, transmitterID, dstProvider) + if err != nil { + return nil, err + } + + oracleArgsNoPlugin2 := libocr2.OCR2OracleArgs{ + BinaryNetworkEndpointFactory: d.peerWrapper.Peer2, + V2Bootstrappers: bootstrapPeers, + ContractTransmitter: dstProvider.ContractTransmitter(), + ContractConfigTracker: dstProvider.ContractConfigTracker(), + Database: ocrDB, + LocalConfig: lc, + MonitoringEndpoint: d.monitoringEndpointGen.GenMonitoringEndpoint( + dstRid.Network, + dstRid.ChainID, + spec.ContractID, + synchronization.OCR2CCIPExec, + ), + OffchainConfigDigester: dstProvider.OffchainConfigDigester(), + OffchainKeyring: kb, + OnchainKeyring: kb, + MetricsRegisterer: prometheus.WrapRegistererWith(map[string]string{"job_name": jb.Name.ValueOrZero()}, prometheus.DefaultRegisterer), + } + + return 
ccipexec.NewExecServices(ctx, lggr, jb, srcProvider, dstProvider, int64(srcChainID), dstChainID, d.isNewlyCreatedJob, oracleArgsNoPlugin2, logError) +} + +func (d *Delegate) ccipExecGetDstProvider(ctx context.Context, jb job.Job, pluginJobSpecConfig ccipconfig.ExecPluginJobSpecConfig, transmitterID string) (types.CCIPExecProvider, error) { + spec := jb.OCR2OracleSpec + if spec.Relay != relay.NetworkEVM { + return nil, fmt.Errorf("non evm chains are not supported for CCIP execution") + } + dstRid, err := spec.RelayID() + + if err != nil { + return nil, ErrJobSpecNoRelayer{Err: err, PluginName: string(spec.PluginType)} + } + + // PROVIDER BASED ARG CONSTRUCTION + // Write PluginConfig bytes to send source/dest relayer provider + info outside of top level rargs/pargs over the wire + dstConfigBytes, err := newExecPluginConfig(false, pluginJobSpecConfig.SourceStartBlock, pluginJobSpecConfig.DestStartBlock, pluginJobSpecConfig.USDCConfig, string(jb.ID)).Encode() + if err != nil { + return nil, err + } + + // Get provider from dest chain + dstRelayer, err := d.RelayGetter.Get(dstRid) + if err != nil { + return nil, err + } + provider, err := dstRelayer.NewPluginProvider(ctx, + types.RelayArgs{ + ContractID: spec.ContractID, + RelayConfig: spec.RelayConfig.Bytes(), + ProviderType: string(types.CCIPExecution), + }, + types.PluginArgs{ + TransmitterID: transmitterID, + PluginConfig: dstConfigBytes, + }) + if err != nil { + return nil, fmt.Errorf("NewPluginProvider failed on dstRelayer: %w", err) + } + dstProvider, ok := provider.(types.CCIPExecProvider) + if !ok { + return nil, fmt.Errorf("could not coerce PluginProvider to CCIPExecProvider") + } + + return dstProvider, nil +} + +func (d *Delegate) ccipExecGetSrcProvider(ctx context.Context, jb job.Job, pluginJobSpecConfig ccipconfig.ExecPluginJobSpecConfig, transmitterID string, dstProvider types.CCIPExecProvider) (srcProvider types.CCIPExecProvider, srcChainID uint64, err error) { + spec := jb.OCR2OracleSpec + srcConfigBytes, err := newExecPluginConfig(true, pluginJobSpecConfig.SourceStartBlock, pluginJobSpecConfig.DestStartBlock, pluginJobSpecConfig.USDCConfig, string(jb.ID)).Encode() + if err != nil { + return nil, 0, err + } + + // Use OffRampReader to get src chain ID and fetch the src relayer + offRampAddress := cciptypes.Address(common.HexToAddress(spec.ContractID).String()) + offRampReader, err := dstProvider.NewOffRampReader(ctx, offRampAddress) + if err != nil { + return nil, 0, fmt.Errorf("create offRampReader: %w", err) + } + + offRampConfig, err := offRampReader.GetStaticConfig(ctx) + if err != nil { + return nil, 0, fmt.Errorf("get offRamp static config: %w", err) + } + + srcChainID, err = chainselectors.ChainIdFromSelector(offRampConfig.SourceChainSelector) + if err != nil { + return nil, 0, err + } + srcChainIDstr := strconv.FormatUint(srcChainID, 10) + + // Get provider from source chain + srcRelayer, err := d.RelayGetter.Get(types.RelayID{Network: spec.Relay, ChainID: srcChainIDstr}) + if err != nil { + return nil, 0, fmt.Errorf("failed to get relayer: %w", err) + } + provider, err := srcRelayer.NewPluginProvider(ctx, + types.RelayArgs{ + ContractID: "", + RelayConfig: spec.RelayConfig.Bytes(), + ProviderType: string(types.CCIPExecution), + }, + types.PluginArgs{ + TransmitterID: transmitterID, + PluginConfig: srcConfigBytes, + }) + if err != nil { + return nil, 0, err + } + srcProvider, ok := provider.(types.CCIPExecProvider) + if !ok { + return nil, 0, fmt.Errorf("could not coerce PluginProvider to CCIPExecProvider: %w", err) + 
} + + return +} + +func newExecPluginConfig(isSourceProvider bool, srcStartBlock uint64, dstStartBlock uint64, usdcConfig ccipconfig.USDCConfig, jobID string) config.ExecPluginConfig { + return config.ExecPluginConfig{ + IsSourceProvider: isSourceProvider, + SourceStartBlock: srcStartBlock, + DestStartBlock: dstStartBlock, + USDCConfig: usdcConfig, + JobID: jobID, + } +} + // errorLog implements [loop.ErrorLog] type errorLog struct { jobID int32 diff --git a/core/services/ocr2/plugins/ccip/LICENSE.md b/core/services/ocr2/plugins/ccip/LICENSE.md new file mode 100644 index 00000000000..b127e1a823a --- /dev/null +++ b/core/services/ocr2/plugins/ccip/LICENSE.md @@ -0,0 +1,55 @@ +Business Source License 1.1 + +License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. +"Business Source License" is a trademark of MariaDB Corporation Ab. + +----------------------------------------------------------------------------- + +Parameters + +Licensor: SmartContract Chainlink Limited SEZC + +Licensed Work: Cross-Chain Interoperability Protocol v1.4 +The Licensed Work is (c) 2023 SmartContract Chainlink Limited SEZC + +Additional Use Grant: Any uses listed and defined at [v1.4-CCIP-License-grants](../../../../../contracts/src/v0.8/ccip/v1.4-CCIP-License-grants) + +Change Date: May 23, 2027 + +Change License: MIT + +----------------------------------------------------------------------------- + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative works, redistribute, and make non-production use of the Licensed Work. The Licensor may make an Additional Use Grant, above, permitting limited production use. + +Effective on the Change Date, or the fourth anniversary of the first publicly available distribution of a specific version of the Licensed Work under this License, whichever comes first, the Licensor hereby grants you rights under the terms of the Change License, and the rights granted in the paragraph above terminate. + +If your use of the Licensed Work does not comply with the requirements currently in effect as described in this License, you must purchase a commercial license from the Licensor, its affiliated entities, or authorized resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works of the Licensed Work, are subject to this License. This License applies separately for each version of the Licensed Work and the Change Date may vary for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy of the Licensed Work. If you receive the Licensed Work in original or modified form from a third party, the terms and conditions set forth in this License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically terminate your rights under this License for the current and all other versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of Licensor or its affiliates (provided that you may use a trademark or logo of Licensor as expressly required by this License). + +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON AN "AS IS" BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND TITLE. 
+ +MariaDB hereby grants you permission to use this License’s text to license your works, and to refer to it using the trademark "Business Source License", as long as you comply with the Covenants of Licensor below. + +----------------------------------------------------------------------------- + +Covenants of Licensor + +In consideration of the right to use this License’s text and the "Business Source License" name and trademark, Licensor covenants to MariaDB, and to all other recipients of the licensed work to be provided by Licensor: + +1. To specify as the Change License the GPL Version 2.0 or any later version, or a license that is compatible with GPL Version 2.0 or a later version, where "compatible" means that software provided under the Change License can be included in a program with software provided under GPL Version 2.0 or a later version. Licensor may specify additional Change Licenses without limitation. + +2. To either: (a) specify an additional grant of rights to use that does not impose any additional restriction on the right granted in this License, as the Additional Use Grant; or (b) insert the text "None". + +3. To specify a Change Date. + +4. Not to modify this License in any other way. \ No newline at end of file diff --git a/core/services/ocr2/plugins/ccip/abihelpers/abi_helpers.go b/core/services/ocr2/plugins/ccip/abihelpers/abi_helpers.go new file mode 100644 index 00000000000..d0ad5642d94 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/abihelpers/abi_helpers.go @@ -0,0 +1,187 @@ +package abihelpers + +import ( + "encoding/binary" + "fmt" + "math/big" + "strings" + "sync" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + "github.com/smartcontractkit/libocr/gethwrappers2/ocr2aggregator" +) + +func MustGetEventID(name string, abi2 abi.ABI) common.Hash { + event, ok := abi2.Events[name] + if !ok { + panic(fmt.Sprintf("missing event %s", name)) + } + return event.ID +} + +func MustGetEventInputs(name string, abi2 abi.ABI) abi.Arguments { + m, ok := abi2.Events[name] + if !ok { + panic(fmt.Sprintf("missing event %s", name)) + } + return m.Inputs +} + +func MustGetMethodInputs(name string, abi2 abi.ABI) abi.Arguments { + m, ok := abi2.Methods[name] + if !ok { + panic(fmt.Sprintf("missing method %s", name)) + } + return m.Inputs +} + +func MustParseABI(abiStr string) abi.ABI { + abiParsed, err := abi.JSON(strings.NewReader(abiStr)) + if err != nil { + panic(err) + } + return abiParsed +} + +// ProofFlagsToBits transforms a list of boolean proof flags to a *big.Int +// encoded number. +func ProofFlagsToBits(proofFlags []bool) *big.Int { + encodedFlags := big.NewInt(0) + for i := 0; i < len(proofFlags); i++ { + if proofFlags[i] { + encodedFlags.SetBit(encodedFlags, i, 1) + } + } + return encodedFlags +} + +type AbiDefined interface { + AbiString() string +} + +type AbiDefinedValid interface { + AbiDefined + Validate() error +} + +func ABIEncode(abiStr string, values ...interface{}) ([]byte, error) { + inAbi, err := getABI(abiStr, ENCODE) + if err != nil { + return nil, err + } + res, err := inAbi.Pack("method", values...) 
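+	// Pack prepends the 4-byte selector of the synthetic "method" wrapper;
+	// it is sliced off below so only the raw ABI-encoded arguments are returned.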
+ if err != nil { + return nil, err + } + return res[4:], nil +} + +func ABIDecode(abiStr string, data []byte) ([]interface{}, error) { + inAbi, err := getABI(abiStr, DECODE) + if err != nil { + return nil, err + } + return inAbi.Unpack("method", data) +} + +func EncodeAbiStruct[T AbiDefined](decoded T) ([]byte, error) { + return ABIEncode(decoded.AbiString(), decoded) +} + +func EncodeAddress(address common.Address) ([]byte, error) { + return ABIEncode(`[{"type":"address"}]`, address) +} + +func DecodeAbiStruct[T AbiDefinedValid](encoded []byte) (T, error) { + var empty T + + decoded, err := ABIDecode(empty.AbiString(), encoded) + if err != nil { + return empty, err + } + + converted := abi.ConvertType(decoded[0], &empty) + if casted, ok := converted.(*T); ok { + return *casted, (*casted).Validate() + } + return empty, fmt.Errorf("can't cast from %T to %T", converted, empty) +} + +func EvmWord(i uint64) common.Hash { + b := make([]byte, 8) + binary.BigEndian.PutUint64(b, i) + return common.BigToHash(big.NewInt(0).SetBytes(b)) +} + +func DecodeOCR2Config(encoded []byte) (*ocr2aggregator.OCR2AggregatorConfigSet, error) { + unpacked := new(ocr2aggregator.OCR2AggregatorConfigSet) + abiPointer, err := ocr2aggregator.OCR2AggregatorMetaData.GetAbi() + if err != nil { + return unpacked, err + } + defaultABI := *abiPointer + err = defaultABI.UnpackIntoInterface(unpacked, "ConfigSet", encoded) + if err != nil { + return unpacked, errors.Wrap(err, "failed to unpack log data") + } + return unpacked, nil +} + +// create const encode and decode +const ( + ENCODE = iota + DECODE +) + +type abiCache struct { + cache map[string]*abi.ABI + mu *sync.RWMutex +} + +func newAbiCache() *abiCache { + return &abiCache{ + cache: make(map[string]*abi.ABI), + mu: &sync.RWMutex{}, + } +} + +// Global cache for ABIs to avoid parsing the same ABI multiple times +// As the module is already a helper module and not a service, we can keep the cache global +// It's private to the package and can't be accessed from outside +var myAbiCache = newAbiCache() + +// This Function is used to get the ABI from the cache or create a new one and cache it for later use +// operationType is used to differentiate between encoding and decoding +// encoding uses a definition with `inputs` and decoding uses a definition with `outputs` (check inDef) +func getABI(abiStr string, operationType uint8) (*abi.ABI, error) { + var operationStr string + switch operationType { + case ENCODE: + operationStr = "inputs" + case DECODE: + operationStr = "outputs" + default: + return nil, fmt.Errorf("invalid operation type") + } + + inDef := fmt.Sprintf(`[{ "name" : "method", "type": "function", "%s": %s}]`, operationStr, abiStr) + + myAbiCache.mu.RLock() + if cachedAbi, found := myAbiCache.cache[inDef]; found { + myAbiCache.mu.RUnlock() // unlocking before returning + return cachedAbi, nil + } + myAbiCache.mu.RUnlock() + + res, err := abi.JSON(strings.NewReader(inDef)) + if err != nil { + return nil, err + } + + myAbiCache.mu.Lock() + defer myAbiCache.mu.Unlock() + myAbiCache.cache[inDef] = &res + return &res, nil +} diff --git a/core/services/ocr2/plugins/ccip/abihelpers/abi_helpers_test.go b/core/services/ocr2/plugins/ccip/abihelpers/abi_helpers_test.go new file mode 100644 index 00000000000..4890aeb1188 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/abihelpers/abi_helpers_test.go @@ -0,0 +1,147 @@ +package abihelpers + +import ( + "bytes" + "fmt" + "math" + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + 
"github.com/stretchr/testify/assert" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils" +) + +func TestProofFlagToBits(t *testing.T) { + genFlags := func(indexesSet []int, size int) []bool { + bools := make([]bool, size) + for _, indexSet := range indexesSet { + bools[indexSet] = true + } + return bools + } + tt := []struct { + flags []bool + expected *big.Int + }{ + { + []bool{true, false, true}, + big.NewInt(5), + }, + { + []bool{true, true, false}, // Note the bits are reversed, slightly easier to implement. + big.NewInt(3), + }, + { + []bool{false, true, true}, + big.NewInt(6), + }, + { + []bool{false, false, false}, + big.NewInt(0), + }, + { + []bool{true, true, true}, + big.NewInt(7), + }, + { + genFlags([]int{266}, 300), + big.NewInt(0).SetBit(big.NewInt(0), 266, 1), + }, + } + for _, tc := range tt { + tc := tc + a := ProofFlagsToBits(tc.flags) + assert.Equal(t, tc.expected.String(), a.String()) + } +} + +func TestEvmWord(t *testing.T) { + testCases := []struct { + inp uint64 + exp common.Hash + }{ + {inp: 1, exp: common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001")}, + {inp: math.MaxUint64, exp: common.HexToHash("0x000000000000000000000000000000000000000000000000ffffffffffffffff")}, + } + + for _, tc := range testCases { + t.Run(fmt.Sprintf("test %d", tc.inp), func(t *testing.T) { + h := EvmWord(tc.inp) + assert.Equal(t, tc.exp, h) + }) + } +} + +func TestABIEncodeDecode(t *testing.T) { + abiStr := `[{"components": [{"name":"int1","type":"int256"},{"name":"int2","type":"int256"}], "type":"tuple"}]` + values := []interface{}{struct { + Int1 *big.Int `json:"int1"` + Int2 *big.Int `json:"int2"` + }{big.NewInt(10), big.NewInt(12)}} + + // First encoding, should call the underlying utils.ABIEncode + encoded, err := ABIEncode(abiStr, values...) + assert.NoError(t, err) + assert.NotNil(t, encoded) + + // Second encoding, should retrieve from cache + // we're just testing here that it returns same result + encodedAgain, err := ABIEncode(abiStr, values...) + + assert.NoError(t, err) + assert.True(t, bytes.Equal(encoded, encodedAgain)) + + // Should be able to decode it back to the original values + decoded, err := ABIDecode(abiStr, encoded) + assert.NoError(t, err) + assert.Equal(t, decoded, values) +} + +func BenchmarkComparisonEncode(b *testing.B) { + abiStr := `[{"components": [{"name":"int1","type":"int256"},{"name":"int2","type":"int256"}], "type":"tuple"}]` + values := []interface{}{struct { + Int1 *big.Int `json:"int1"` + Int2 *big.Int `json:"int2"` + }{big.NewInt(10), big.NewInt(12)}} + + b.Run("WithoutCache", func(b *testing.B) { + for i := 0; i < b.N; i++ { + _, _ = utils.ABIEncode(abiStr, values...) + } + }) + + // Warm up the cache + _, _ = ABIEncode(abiStr, values...) + + b.Run("WithCache", func(b *testing.B) { + for i := 0; i < b.N; i++ { + _, _ = ABIEncode(abiStr, values...) + } + }) +} + +func BenchmarkComparisonDecode(b *testing.B) { + abiStr := `[{"components": [{"name":"int1","type":"int256"},{"name":"int2","type":"int256"}], "type":"tuple"}]` + values := []interface{}{struct { + Int1 *big.Int `json:"int1"` + Int2 *big.Int `json:"int2"` + }{big.NewInt(10), big.NewInt(12)}} + data, _ := utils.ABIEncode(abiStr, values...) 
+ + b.Run("WithoutCache", func(b *testing.B) { + for i := 0; i < b.N; i++ { + _, _ = utils.ABIDecode(abiStr, data) + } + }) + + // Warm up the cache + _, _ = ABIDecode(abiStr, data) + + b.Run("WithCache", func(b *testing.B) { + for i := 0; i < b.N; i++ { + _, _ = ABIDecode(abiStr, data) + } + }) +} diff --git a/core/services/ocr2/plugins/ccip/ccipcommit/factory.go b/core/services/ocr2/plugins/ccip/ccipcommit/factory.go new file mode 100644 index 00000000000..648f62a23a2 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/ccipcommit/factory.go @@ -0,0 +1,150 @@ +package ccipcommit + +import ( + "context" + "fmt" + "sync" + + "github.com/ethereum/go-ethereum/common" + "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcommon" + + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata" +) + +type CommitReportingPluginFactory struct { + // Configuration derived from the job spec which does not change + // between plugin instances (ie between SetConfigs onchain) + config CommitPluginStaticConfig + + // Dynamic readers + readersMu *sync.Mutex + destPriceRegReader ccipdata.PriceRegistryReader + destPriceRegAddr common.Address +} + +// NewCommitReportingPluginFactory return a new CommitReportingPluginFactory. +func NewCommitReportingPluginFactory(config CommitPluginStaticConfig) *CommitReportingPluginFactory { + return &CommitReportingPluginFactory{ + config: config, + readersMu: &sync.Mutex{}, + + // the fields below are initially empty and populated on demand + destPriceRegReader: nil, + destPriceRegAddr: common.Address{}, + } +} + +func (rf *CommitReportingPluginFactory) UpdateDynamicReaders(ctx context.Context, newPriceRegAddr common.Address) error { + rf.readersMu.Lock() + defer rf.readersMu.Unlock() + // TODO: Investigate use of Close() to cleanup. + // TODO: a true price registry upgrade on an existing lane may want some kind of start block in its config? Right now we + // essentially assume that plugins don't care about historical price reg logs. 
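+	// The address check and the reader swap below are performed while holding readersMu
+	// (locked at the top of this method), so the two fields are always updated together.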
+ if rf.destPriceRegAddr == newPriceRegAddr { + // No-op + return nil + } + // Close old reader if present and open new reader if address changed + if rf.destPriceRegReader != nil { + if err := rf.destPriceRegReader.Close(); err != nil { + return err + } + } + + destPriceRegistryReader, err := rf.config.priceRegistryProvider.NewPriceRegistryReader(context.Background(), cciptypes.Address(newPriceRegAddr.String())) + if err != nil { + return fmt.Errorf("init dynamic price registry: %w", err) + } + rf.destPriceRegReader = destPriceRegistryReader + rf.destPriceRegAddr = newPriceRegAddr + return nil +} + +type reportingPluginAndInfo struct { + plugin types.ReportingPlugin + pluginInfo types.ReportingPluginInfo +} + +// NewReportingPlugin registers a new ReportingPlugin +func (rf *CommitReportingPluginFactory) NewReportingPlugin(config types.ReportingPluginConfig) (types.ReportingPlugin, types.ReportingPluginInfo, error) { + initialRetryDelay := rf.config.newReportingPluginRetryConfig.InitialDelay + maxDelay := rf.config.newReportingPluginRetryConfig.MaxDelay + + pluginAndInfo, err := ccipcommon.RetryUntilSuccess(rf.NewReportingPluginFn(config), initialRetryDelay, maxDelay) + if err != nil { + return nil, types.ReportingPluginInfo{}, err + } + return pluginAndInfo.plugin, pluginAndInfo.pluginInfo, err +} + +// NewReportingPluginFn implements the NewReportingPlugin logic. It is defined as a function so that it can easily be +// retried via RetryUntilSuccess. NewReportingPlugin must return successfully in order for the Commit plugin to +// function, hence why we can only keep retrying it until it succeeds. +func (rf *CommitReportingPluginFactory) NewReportingPluginFn(config types.ReportingPluginConfig) func() (reportingPluginAndInfo, error) { + return func() (reportingPluginAndInfo, error) { + ctx := context.Background() // todo: consider adding some timeout + + destPriceReg, err := rf.config.commitStore.ChangeConfig(ctx, config.OnchainConfig, config.OffchainConfig) + if err != nil { + return reportingPluginAndInfo{}, err + } + + priceRegEvmAddr, err := ccipcalc.GenericAddrToEvm(destPriceReg) + if err != nil { + return reportingPluginAndInfo{}, err + } + if err = rf.UpdateDynamicReaders(ctx, priceRegEvmAddr); err != nil { + return reportingPluginAndInfo{}, err + } + + pluginOffChainConfig, err := rf.config.commitStore.OffchainConfig(ctx) + if err != nil { + return reportingPluginAndInfo{}, err + } + + gasPriceEstimator, err := rf.config.commitStore.GasPriceEstimator(ctx) + if err != nil { + return reportingPluginAndInfo{}, err + } + + err = rf.config.priceService.UpdateDynamicConfig(ctx, gasPriceEstimator, rf.destPriceRegReader) + if err != nil { + return reportingPluginAndInfo{}, err + } + + lggr := rf.config.lggr.Named("CommitReportingPlugin") + plugin := &CommitReportingPlugin{ + sourceChainSelector: rf.config.sourceChainSelector, + sourceNative: rf.config.sourceNative, + onRampReader: rf.config.onRampReader, + destChainSelector: rf.config.destChainSelector, + commitStoreReader: rf.config.commitStore, + F: config.F, + lggr: lggr, + destPriceRegistryReader: rf.destPriceRegReader, + offRampReader: rf.config.offRamp, + gasPriceEstimator: gasPriceEstimator, + offchainConfig: pluginOffChainConfig, + metricsCollector: rf.config.metricsCollector, + chainHealthcheck: rf.config.chainHealthcheck, + priceService: rf.config.priceService, + } + + pluginInfo := types.ReportingPluginInfo{ + Name: "CCIPCommit", + UniqueReports: false, // See comment in CommitStore constructor. 
+ Limits: types.ReportingPluginLimits{ + MaxQueryLength: ccip.MaxQueryLength, + MaxObservationLength: ccip.MaxObservationLength, + MaxReportLength: MaxCommitReportLength, + }, + } + + return reportingPluginAndInfo{plugin, pluginInfo}, nil + } +} diff --git a/core/services/ocr2/plugins/ccip/ccipcommit/factory_test.go b/core/services/ocr2/plugins/ccip/ccipcommit/factory_test.go new file mode 100644 index 00000000000..825026bd17e --- /dev/null +++ b/core/services/ocr2/plugins/ccip/ccipcommit/factory_test.go @@ -0,0 +1,100 @@ +package ccipcommit + +import ( + "errors" + "testing" + "time" + + "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + + "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata" + ccipdataprovidermocks "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/ccipdataprovider/mocks" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/mocks" + dbMocks "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdb/mocks" +) + +// Assert that NewReportingPlugin keeps retrying until it succeeds. +// +// NewReportingPlugin makes several calls (e.g. CommitStoreReader.ChangeConfig) that can fail. We use mocks to cause the +// first call to each of these functions to fail, then all subsequent calls succeed. We assert that NewReportingPlugin +// retries a sufficient number of times to get through the transient errors and eventually succeed. +func TestNewReportingPluginRetriesUntilSuccess(t *testing.T) { + commitConfig := CommitPluginStaticConfig{} + + // For this unit test, ensure that there is no delay between retries + commitConfig.newReportingPluginRetryConfig = ccipdata.RetryConfig{ + InitialDelay: 0 * time.Nanosecond, + MaxDelay: 0 * time.Nanosecond, + } + + // Set up the OffRampReader mock + mockCommitStore := new(mocks.CommitStoreReader) + + // The first call is set to return an error, the following calls return a nil error + mockCommitStore. + On("ChangeConfig", mock.Anything, mock.Anything, mock.Anything). + Return(ccip.Address(""), errors.New("")). + Once() + mockCommitStore. + On("ChangeConfig", mock.Anything, mock.Anything, mock.Anything). + Return(ccip.Address("0x7c6e4F0BDe29f83BC394B75a7f313B7E5DbD2d77"), nil). + Times(5) + + mockCommitStore. + On("OffchainConfig", mock.Anything). + Return(ccip.CommitOffchainConfig{}, errors.New("")). + Once() + mockCommitStore. + On("OffchainConfig", mock.Anything). + Return(ccip.CommitOffchainConfig{}, nil). + Times(3) + + mockCommitStore. + On("GasPriceEstimator", mock.Anything). + Return(nil, errors.New("")). + Once() + mockCommitStore. + On("GasPriceEstimator", mock.Anything). + Return(nil, nil). + Times(2) + + commitConfig.commitStore = mockCommitStore + + mockPriceService := new(dbMocks.PriceService) + + mockPriceService. + On("UpdateDynamicConfig", mock.Anything, mock.Anything, mock.Anything). + Return(errors.New("")). + Once() + mockPriceService. + On("UpdateDynamicConfig", mock.Anything, mock.Anything, mock.Anything). + Return(nil) + + commitConfig.priceService = mockPriceService + + priceRegistryProvider := new(ccipdataprovidermocks.PriceRegistry) + priceRegistryProvider. + On("NewPriceRegistryReader", mock.Anything, mock.Anything). + Return(nil, errors.New("")). 
+ Once() + priceRegistryProvider. + On("NewPriceRegistryReader", mock.Anything, mock.Anything). + Return(nil, nil). + Once() + commitConfig.priceRegistryProvider = priceRegistryProvider + + commitConfig.lggr, _ = logger.NewLogger() + + factory := NewCommitReportingPluginFactory(commitConfig) + reportingConfig := types.ReportingPluginConfig{} + reportingConfig.OnchainConfig = []byte{1, 2, 3} + reportingConfig.OffchainConfig = []byte{1, 2, 3} + + // Assert that NewReportingPlugin succeeds despite many transient internal failures (mocked out above) + _, _, err := factory.NewReportingPlugin(reportingConfig) + assert.Equal(t, nil, err) +} diff --git a/core/services/ocr2/plugins/ccip/ccipcommit/initializers.go b/core/services/ocr2/plugins/ccip/ccipcommit/initializers.go new file mode 100644 index 00000000000..e964896ab93 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/ccipcommit/initializers.go @@ -0,0 +1,241 @@ +package ccipcommit + +import ( + "context" + "encoding/json" + "fmt" + "math/big" + "strings" + "time" + + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/pricegetter" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/rpclib" + + "github.com/Masterminds/semver/v3" + "github.com/ethereum/go-ethereum/common" + libocr2 "github.com/smartcontractkit/libocr/offchainreporting2plus" + "go.uber.org/multierr" + + "github.com/smartcontractkit/chainlink-common/pkg/sqlutil" + + commonlogger "github.com/smartcontractkit/chainlink-common/pkg/logger" + commontypes "github.com/smartcontractkit/chainlink-common/pkg/types" + + cciporm "github.com/smartcontractkit/chainlink/v2/core/services/ccip" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/cache" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc" + db "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdb" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/txmgr" + "github.com/smartcontractkit/chainlink/v2/core/chains/legacyevm" + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/job" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip" + ccipconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/factory" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/observability" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/oraclelib" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/promwrapper" + "github.com/smartcontractkit/chainlink/v2/core/services/pipeline" +) + +var defaultNewReportingPluginRetryConfig = ccipdata.RetryConfig{InitialDelay: time.Second, MaxDelay: 5 * time.Minute} + +func NewCommitServices(ctx context.Context, ds sqlutil.DataSource, srcProvider commontypes.CCIPCommitProvider, dstProvider commontypes.CCIPCommitProvider, chainSet legacyevm.LegacyChainContainer, jb job.Job, lggr logger.Logger, pr pipeline.Runner, argsNoPlugin libocr2.OCR2OracleArgs, new bool, sourceChainID int64, destChainID int64, logError func(string)) ([]job.ServiceCtx, error) { + spec := jb.OCR2OracleSpec + + var pluginConfig ccipconfig.CommitPluginJobSpecConfig + err := 
json.Unmarshal(spec.PluginConfig.Bytes(), &pluginConfig) + if err != nil { + return nil, err + } + + commitStoreAddress := common.HexToAddress(spec.ContractID) + + // commit store contract doesn't exist on the source chain, but we have an implementation of it + // to get access to a gas estimator on the source chain + srcCommitStore, err := srcProvider.NewCommitStoreReader(ctx, ccipcalc.EvmAddrToGeneric(commitStoreAddress)) + if err != nil { + return nil, err + } + + dstCommitStore, err := dstProvider.NewCommitStoreReader(ctx, ccipcalc.EvmAddrToGeneric(commitStoreAddress)) + if err != nil { + return nil, err + } + + var commitStoreReader ccipdata.CommitStoreReader + commitStoreReader = ccip.NewProviderProxyCommitStoreReader(srcCommitStore, dstCommitStore) + commitLggr := lggr.Named("CCIPCommit").With("sourceChain", sourceChainID, "destChain", destChainID) + + var priceGetter pricegetter.PriceGetter + withPipeline := strings.Trim(pluginConfig.TokenPricesUSDPipeline, "\n\t ") != "" + if withPipeline { + priceGetter, err = pricegetter.NewPipelineGetter(pluginConfig.TokenPricesUSDPipeline, pr, jb.ID, jb.ExternalJobID, jb.Name.ValueOrZero(), lggr) + if err != nil { + return nil, fmt.Errorf("creating pipeline price getter: %w", err) + } + } else { + // Use dynamic price getter. + if pluginConfig.PriceGetterConfig == nil { + return nil, fmt.Errorf("priceGetterConfig is nil") + } + + // Build price getter clients for all chains specified in the aggregator configurations. + // Some lanes (e.g. Wemix/Kroma) requires other clients than source and destination, since they use feeds from other chains. + priceGetterClients := map[uint64]pricegetter.DynamicPriceGetterClient{} + for _, aggCfg := range pluginConfig.PriceGetterConfig.AggregatorPrices { + chainID := aggCfg.ChainID + // Retrieve the chain. 
+ chain, _, err2 := ccipconfig.GetChainByChainID(chainSet, chainID) + if err2 != nil { + return nil, fmt.Errorf("retrieving chain for chainID %d: %w", chainID, err2) + } + caller := rpclib.NewDynamicLimitedBatchCaller( + lggr, + chain.Client(), + rpclib.DefaultRpcBatchSizeLimit, + rpclib.DefaultRpcBatchBackOffMultiplier, + rpclib.DefaultMaxParallelRpcCalls, + ) + priceGetterClients[chainID] = pricegetter.NewDynamicPriceGetterClient(caller) + } + + priceGetter, err = pricegetter.NewDynamicPriceGetter(*pluginConfig.PriceGetterConfig, priceGetterClients) + if err != nil { + return nil, fmt.Errorf("creating dynamic price getter: %w", err) + } + } + + offRampReader, err := dstProvider.NewOffRampReader(ctx, pluginConfig.OffRamp) + if err != nil { + return nil, err + } + + staticConfig, err := commitStoreReader.GetCommitStoreStaticConfig(ctx) + if err != nil { + return nil, err + } + onRampAddress := staticConfig.OnRamp + + onRampReader, err := srcProvider.NewOnRampReader(ctx, onRampAddress, staticConfig.SourceChainSelector, staticConfig.ChainSelector) + if err != nil { + return nil, err + } + + onRampRouterAddr, err := onRampReader.RouterAddress(ctx) + if err != nil { + return nil, err + } + sourceNative, err := srcProvider.SourceNativeToken(ctx, onRampRouterAddr) + if err != nil { + return nil, err + } + // Prom wrappers + onRampReader = observability.NewObservedOnRampReader(onRampReader, sourceChainID, ccip.CommitPluginLabel) + commitStoreReader = observability.NewObservedCommitStoreReader(commitStoreReader, destChainID, ccip.CommitPluginLabel) + offRampReader = observability.NewObservedOffRampReader(offRampReader, destChainID, ccip.CommitPluginLabel) + metricsCollector := ccip.NewPluginMetricsCollector(ccip.CommitPluginLabel, sourceChainID, destChainID) + + chainHealthCheck := cache.NewObservedChainHealthCheck( + cache.NewChainHealthcheck( + // Adding more details to Logger to make healthcheck logs more informative + // It's safe because healthcheck logs only in case of unhealthy state + lggr.With( + "onramp", onRampAddress, + "commitStore", commitStoreAddress, + "offramp", pluginConfig.OffRamp, + ), + onRampReader, + commitStoreReader, + ), + ccip.CommitPluginLabel, + sourceChainID, // assuming this is the chain id? + destChainID, + onRampAddress, + ) + + orm, err := cciporm.NewORM(ds) + if err != nil { + return nil, err + } + + priceService := db.NewPriceService( + lggr, + orm, + jb.ID, + staticConfig.ChainSelector, + staticConfig.SourceChainSelector, + sourceNative, + priceGetter, + offRampReader, + ) + + wrappedPluginFactory := NewCommitReportingPluginFactory(CommitPluginStaticConfig{ + lggr: lggr, + newReportingPluginRetryConfig: defaultNewReportingPluginRetryConfig, + onRampReader: onRampReader, + sourceChainSelector: staticConfig.SourceChainSelector, + sourceNative: sourceNative, + offRamp: offRampReader, + commitStore: commitStoreReader, + destChainSelector: staticConfig.ChainSelector, + priceRegistryProvider: ccip.NewChainAgnosticPriceRegistry(dstProvider), + metricsCollector: metricsCollector, + chainHealthcheck: chainHealthCheck, + priceService: priceService, + }) + argsNoPlugin.ReportingPluginFactory = promwrapper.NewPromFactory(wrappedPluginFactory, "CCIPCommit", jb.OCR2OracleSpec.Relay, big.NewInt(0).SetInt64(destChainID)) + argsNoPlugin.Logger = commonlogger.NewOCRWrapper(commitLggr, true, logError) + oracle, err := libocr2.NewOracle(argsNoPlugin) + if err != nil { + return nil, err + } + // If this is a brand-new job, then we make use of the start blocks. 
If not then we're rebooting and log poller will pick up where we left off. + if new { + return []job.ServiceCtx{ + oraclelib.NewChainAgnosticBackFilledOracle( + lggr, + srcProvider, + dstProvider, + job.NewServiceAdapter(oracle), + ), + chainHealthCheck, + priceService, + }, nil + } + return []job.ServiceCtx{ + job.NewServiceAdapter(oracle), + chainHealthCheck, + priceService, + }, nil +} + +func CommitReportToEthTxMeta(typ ccipconfig.ContractType, ver semver.Version) (func(report []byte) (*txmgr.TxMeta, error), error) { + return factory.CommitReportToEthTxMeta(typ, ver) +} + +// UnregisterCommitPluginLpFilters unregisters all the registered filters for both source and dest chains. +// NOTE: The transaction MUST be used here for CLO's monster tx to function as expected +// https://github.com/smartcontractkit/ccip/blob/68e2197472fb017dd4e5630d21e7878d58bc2a44/core/services/feeds/service.go#L716 +// TODO once that transaction is broken up, we should be able to simply rely on oracle.Close() to cleanup the filters. +// Until then we have to deterministically reload the readers from the spec (and thus their filters) and close them. +func UnregisterCommitPluginLpFilters(srcProvider commontypes.CCIPCommitProvider, dstProvider commontypes.CCIPCommitProvider) error { + unregisterFuncs := []func() error{ + func() error { + return srcProvider.Close() + }, + func() error { + return dstProvider.Close() + }, + } + + var multiErr error + for _, fn := range unregisterFuncs { + if err := fn(); err != nil { + multiErr = multierr.Append(multiErr, err) + } + } + return multiErr +} diff --git a/core/services/ocr2/plugins/ccip/ccipcommit/ocr2.go b/core/services/ocr2/plugins/ccip/ccipcommit/ocr2.go new file mode 100644 index 00000000000..2f0fc4e7956 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/ccipcommit/ocr2.go @@ -0,0 +1,753 @@ +package ccipcommit + +import ( + "context" + "encoding/hex" + "fmt" + "math/big" + "sort" + "time" + + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/pkg/errors" + + "github.com/smartcontractkit/chainlink-common/pkg/hashutil" + "github.com/smartcontractkit/chainlink-common/pkg/merklemulti" + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + + "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/cache" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/ccipdataprovider" + db "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdb" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/prices" +) + +const ( + // only dynamic field in CommitReport is tokens PriceUpdates, and we don't expect to need to update thousands of tokens in a single tx + MaxCommitReportLength = 10_000 + // Maximum inflight seq number range before we consider reports to be failing to get included entirely + // and restart from the chain's minSeqNum. Want to set it high to allow for large throughput, + // but low enough to minimize wasted revert cost. 
+ MaxInflightSeqNumGap = 500 + // OnRampMessagesScanLimit is used to limit number of onramp messages scanned in each Observation. + // Single CommitRoot can contain up to merklemulti.MaxNumberTreeLeaves, so we scan twice that to be safe and still don't hurt DB performance. + OnRampMessagesScanLimit = merklemulti.MaxNumberTreeLeaves * 2 +) + +var ( + _ types.ReportingPluginFactory = &CommitReportingPluginFactory{} + _ types.ReportingPlugin = &CommitReportingPlugin{} +) + +type update struct { + timestamp time.Time + value *big.Int +} + +type CommitPluginStaticConfig struct { + lggr logger.Logger + newReportingPluginRetryConfig ccipdata.RetryConfig + // Source + onRampReader ccipdata.OnRampReader + sourceChainSelector uint64 + sourceNative cciptypes.Address + // Dest + offRamp ccipdata.OffRampReader + commitStore ccipdata.CommitStoreReader + destChainSelector uint64 + priceRegistryProvider ccipdataprovider.PriceRegistry + // Offchain + metricsCollector ccip.PluginMetricsCollector + chainHealthcheck cache.ChainHealthcheck + priceService db.PriceService +} + +type CommitReportingPlugin struct { + lggr logger.Logger + // Source + onRampReader ccipdata.OnRampReader + sourceChainSelector uint64 + sourceNative cciptypes.Address + gasPriceEstimator prices.GasPriceEstimatorCommit + // Dest + destChainSelector uint64 + commitStoreReader ccipdata.CommitStoreReader + destPriceRegistryReader ccipdata.PriceRegistryReader + offchainConfig cciptypes.CommitOffchainConfig + offRampReader ccipdata.OffRampReader + F int + // Offchain + metricsCollector ccip.PluginMetricsCollector + // State + chainHealthcheck cache.ChainHealthcheck + // DB + priceService db.PriceService +} + +// Query is not used by the CCIP Commit plugin. +func (r *CommitReportingPlugin) Query(context.Context, types.ReportTimestamp) (types.Query, error) { + return types.Query{}, nil +} + +// Observation calculates the sequence number interval ready to be committed and +// the token and gas price updates required. A valid report could contain a merkle +// root and price updates. Price updates should never contain nil values, otherwise +// the observation will be considered invalid and rejected. +func (r *CommitReportingPlugin) Observation(ctx context.Context, epochAndRound types.ReportTimestamp, _ types.Query) (types.Observation, error) { + lggr := r.lggr.Named("CommitObservation") + if healthy, err := r.chainHealthcheck.IsHealthy(ctx); err != nil { + return nil, err + } else if !healthy { + return nil, ccip.ErrChainIsNotHealthy + } + + // Will return 0,0 if no messages are found. This is a valid case as the report could + // still contain fee updates. + minSeqNr, maxSeqNr, messageIDs, err := r.calculateMinMaxSequenceNumbers(ctx, lggr) + if err != nil { + return nil, err + } + + // Fetches multi-lane gasPricesUSD and tokenPricesUSD for the same dest chain + gasPricesUSD, sourceGasPriceUSD, tokenPricesUSD, err := r.observePriceUpdates(ctx) + if err != nil { + return nil, err + } + + lggr.Infow("Observation", + "minSeqNr", minSeqNr, + "maxSeqNr", maxSeqNr, + "gasPricesUSD", gasPricesUSD, + "tokenPricesUSD", tokenPricesUSD, + "epochAndRound", epochAndRound, + "messageIDs", messageIDs, + ) + r.metricsCollector.NumberOfMessagesBasedOnInterval(ccip.Observation, minSeqNr, maxSeqNr) + + // Even if all values are empty we still want to communicate our observation + // with the other nodes, therefore, we always return the observed values. 
+ return ccip.CommitObservation{ + Interval: cciptypes.CommitStoreInterval{ + Min: minSeqNr, + Max: maxSeqNr, + }, + TokenPricesUSD: tokenPricesUSD, + SourceGasPriceUSD: sourceGasPriceUSD, + SourceGasPriceUSDPerChain: gasPricesUSD, + }.Marshal() +} + +// observePriceUpdates fetches latest gas and token prices from DB as long as price reporting is not disabled. +// The prices are aggregated for all lanes for the same destination chain. +func (r *CommitReportingPlugin) observePriceUpdates( + ctx context.Context, +) (gasPricesUSD map[uint64]*big.Int, sourceGasPriceUSD *big.Int, tokenPricesUSD map[cciptypes.Address]*big.Int, err error) { + // Do not observe prices if price reporting is disabled. Price reporting will be disabled for lanes that are not leader lanes. + if r.offchainConfig.PriceReportingDisabled { + r.lggr.Infow("Price reporting disabled, skipping gas and token price reads") + return map[uint64]*big.Int{}, nil, map[cciptypes.Address]*big.Int{}, nil + } + + // Fetches multi-lane gas prices and token prices, for the given dest chain + gasPricesUSD, tokenPricesUSD, err = r.priceService.GetGasAndTokenPrices(ctx, r.destChainSelector) + if err != nil { + return nil, nil, nil, fmt.Errorf("failed to get prices from PriceService: %w", err) + } + + // Set prices to empty maps if nil to be friendlier to JSON encoding + if gasPricesUSD == nil { + gasPricesUSD = map[uint64]*big.Int{} + } + if tokenPricesUSD == nil { + tokenPricesUSD = map[cciptypes.Address]*big.Int{} + } + + // For backwards compatibility with the older release during phased rollout, set the default gas price on this lane + sourceGasPriceUSD = gasPricesUSD[r.sourceChainSelector] + + return gasPricesUSD, sourceGasPriceUSD, tokenPricesUSD, nil +} + +func (r *CommitReportingPlugin) calculateMinMaxSequenceNumbers(ctx context.Context, lggr logger.Logger) (uint64, uint64, []cciptypes.Hash, error) { + nextSeqNum, err := r.commitStoreReader.GetExpectedNextSequenceNumber(ctx) + if err != nil { + return 0, 0, []cciptypes.Hash{}, err + } + + msgRequests, err := r.onRampReader.GetSendRequestsBetweenSeqNums(ctx, nextSeqNum, nextSeqNum+OnRampMessagesScanLimit, true) + if err != nil { + return 0, 0, []cciptypes.Hash{}, err + } + if len(msgRequests) == 0 { + lggr.Infow("No new requests", "nextSeqNum", nextSeqNum) + return 0, 0, []cciptypes.Hash{}, nil + } + + messageIDs := make([]cciptypes.Hash, 0, len(msgRequests)) + seqNrs := make([]uint64, 0, len(msgRequests)) + for _, msgReq := range msgRequests { + seqNrs = append(seqNrs, msgReq.SequenceNumber) + messageIDs = append(messageIDs, msgReq.MessageID) + } + + minSeqNr := seqNrs[0] + maxSeqNr := seqNrs[len(seqNrs)-1] + if minSeqNr != nextSeqNum { + // Still report the observation as even partial reports have value e.g. all nodes are + // missing a single, different log each, they would still be able to produce a valid report. + lggr.Warnf("Missing sequence number range [%d-%d]", nextSeqNum, minSeqNr) + } + if !ccipcalc.ContiguousReqs(lggr, minSeqNr, maxSeqNr, seqNrs) { + return 0, 0, []cciptypes.Hash{}, errors.New("unexpected gap in seq nums") + } + return minSeqNr, maxSeqNr, messageIDs, nil +} + +// Gets the latest token price updates based on logs within the heartbeat +// The updates returned by this function are guaranteed to not contain nil values. 
+func (r *CommitReportingPlugin) getLatestTokenPriceUpdates(ctx context.Context, now time.Time) (map[cciptypes.Address]update, error) { + tokenPriceUpdates, err := r.destPriceRegistryReader.GetTokenPriceUpdatesCreatedAfter( + ctx, + now.Add(-r.offchainConfig.TokenPriceHeartBeat), + 0, + ) + if err != nil { + return nil, err + } + + latestUpdates := make(map[cciptypes.Address]update) + for _, tokenUpdate := range tokenPriceUpdates { + priceUpdate := tokenUpdate.TokenPriceUpdate + // Ordered by ascending timestamps + timestamp := time.Unix(priceUpdate.TimestampUnixSec.Int64(), 0) + if priceUpdate.Value != nil && !timestamp.Before(latestUpdates[priceUpdate.Token].timestamp) { + latestUpdates[priceUpdate.Token] = update{ + timestamp: timestamp, + value: priceUpdate.Value, + } + } + } + + return latestUpdates, nil +} + +// getLatestGasPriceUpdate returns the latest gas price updates based on logs within the heartbeat. +// If an update is found, it is not expected to contain a nil value. +func (r *CommitReportingPlugin) getLatestGasPriceUpdate(ctx context.Context, now time.Time) (map[uint64]update, error) { + gasPriceUpdates, err := r.destPriceRegistryReader.GetAllGasPriceUpdatesCreatedAfter( + ctx, + now.Add(-r.offchainConfig.GasPriceHeartBeat), + 0, + ) + + if err != nil { + return nil, err + } + + latestUpdates := make(map[uint64]update) + for _, gasUpdate := range gasPriceUpdates { + priceUpdate := gasUpdate.GasPriceUpdate + // Ordered by ascending timestamps + timestamp := time.Unix(priceUpdate.TimestampUnixSec.Int64(), 0) + if priceUpdate.Value != nil && !timestamp.Before(latestUpdates[priceUpdate.DestChainSelector].timestamp) { + latestUpdates[priceUpdate.DestChainSelector] = update{ + timestamp: timestamp, + value: priceUpdate.Value, + } + } + } + + r.lggr.Infow("Latest gas price from log poller", "latestUpdates", latestUpdates) + return latestUpdates, nil +} + +func (r *CommitReportingPlugin) Report(ctx context.Context, epochAndRound types.ReportTimestamp, _ types.Query, observations []types.AttributedObservation) (bool, types.Report, error) { + now := time.Now() + lggr := r.lggr.Named("CommitReport") + if healthy, err := r.chainHealthcheck.IsHealthy(ctx); err != nil { + return false, nil, err + } else if !healthy { + return false, nil, ccip.ErrChainIsNotHealthy + } + + parsableObservations := ccip.GetParsableObservations[ccip.CommitObservation](lggr, observations) + + intervals, gasPriceObs, tokenPriceObs, err := extractObservationData(lggr, r.F, r.sourceChainSelector, parsableObservations) + if err != nil { + return false, nil, err + } + + agreedInterval, err := calculateIntervalConsensus(intervals, r.F, merklemulti.MaxNumberTreeLeaves) + if err != nil { + return false, nil, err + } + + gasPrices, tokenPrices, err := r.selectPriceUpdates(ctx, now, gasPriceObs, tokenPriceObs) + if err != nil { + return false, nil, err + } + // If there are no fee updates and the interval is zero there is no report to produce. 
+ if agreedInterval.Max == 0 && len(gasPrices) == 0 && len(tokenPrices) == 0 { + lggr.Infow("Empty report, skipping") + return false, nil, nil + } + + report, err := r.buildReport(ctx, lggr, agreedInterval, gasPrices, tokenPrices) + if err != nil { + return false, nil, err + } + encodedReport, err := r.commitStoreReader.EncodeCommitReport(ctx, report) + if err != nil { + return false, nil, err + } + r.metricsCollector.SequenceNumber(ccip.Report, report.Interval.Max) + r.metricsCollector.NumberOfMessagesBasedOnInterval(ccip.Report, report.Interval.Min, report.Interval.Max) + lggr.Infow("Report", + "merkleRoot", hex.EncodeToString(report.MerkleRoot[:]), + "minSeqNr", report.Interval.Min, + "maxSeqNr", report.Interval.Max, + "gasPriceUpdates", report.GasPrices, + "tokenPriceUpdates", report.TokenPrices, + "epochAndRound", epochAndRound, + ) + return true, encodedReport, nil +} + +// calculateIntervalConsensus compresses a set of intervals into one interval +// taking into account f which is the maximum number of faults across the whole DON. +// OCR itself won't call Report unless there are 2*f+1 observations +// https://github.com/smartcontractkit/libocr/blob/master/offchainreporting2/internal/protocol/report_generation_follower.go#L415 +// and f of those observations may be either unparseable or adversarially set values. That means +// we'll either have f+1 parsed honest values here, 2f+1 parsed values with f adversarial values or somewhere +// in between. +// rangeLimit is the maximum range of the interval. If the interval is larger than this, it will be truncated. Zero means no limit. +func calculateIntervalConsensus(intervals []cciptypes.CommitStoreInterval, f int, rangeLimit uint64) (cciptypes.CommitStoreInterval, error) { + // To understand min/max selection here, we need to consider an adversary that controls f values + // and is intentionally trying to stall the protocol or influence the value returned. For simplicity + // consider f=1 and n=4 nodes. In that case adversary may try to bias the min or max high/low. + // We could end up (2f+1=3) with sorted_mins=[1,1,1e9] or [-1e9,1,1] as examples. Selecting + // sorted_mins[f] ensures: + // - At least one honest node has seen this value, so adversary cannot bias the value lower which + // would cause reverts + // - If an honest oracle reports sorted_min[f] which happens to be stale i.e. that oracle + // has a delayed view of the chain, then the report will revert onchain but still succeed upon retry + // - We minimize the risk of naturally hitting the error condition minSeqNum > maxSeqNum due to oracles + // delayed views of the chain (would be an issue with taking sorted_mins[-f]) + sort.Slice(intervals, func(i, j int) bool { + return intervals[i].Min < intervals[j].Min + }) + minSeqNum := intervals[f].Min + + // The only way a report could have a minSeqNum of 0 is when there are no messages to report + // and the report is potentially still valid for gas fee updates. + if minSeqNum == 0 { + return cciptypes.CommitStoreInterval{Min: 0, Max: 0}, nil + } + // Consider a similar example to the sorted_mins one above except where they are maxes. + // We choose the more "conservative" sorted_maxes[f] so: + // - We are ensured that at least one honest oracle has seen the max, so adversary cannot set it lower and + // cause the maxSeqNum < minSeqNum errors + // - If an honest oracle reports sorted_max[f] which happens to be stale i.e. that oracle + // has a delayed view of the source chain, then we simply lose a little bit of throughput. 
+ // - If we were to pick sorted_max[-f] i.e. the maximum honest node view (a more "aggressive" setting in terms of throughput), + // then an adversary can continually send high values e.g. imagine we have observations from all 4 nodes + // [honest 1, honest 1, honest 2, malicious 2], in this case we pick 2, but it's not enough to be able + // to build a report since the first 2 honest nodes are unaware of message 2. + sort.Slice(intervals, func(i, j int) bool { + return intervals[i].Max < intervals[j].Max + }) + maxSeqNum := intervals[f].Max + if maxSeqNum < minSeqNum { + // If the consensus report is invalid for onchain acceptance, we do not vote for it as + // an early termination step. + return cciptypes.CommitStoreInterval{}, errors.New("max seq num smaller than min") + } + + // If the range is too large, truncate it. + if rangeLimit > 0 && maxSeqNum-minSeqNum+1 > rangeLimit { + maxSeqNum = minSeqNum + rangeLimit - 1 + } + + return cciptypes.CommitStoreInterval{ + Min: minSeqNum, + Max: maxSeqNum, + }, nil +} + +// extractObservationData extracts observation fields into their own slices +// and filters out observation data that are invalid +func extractObservationData(lggr logger.Logger, f int, sourceChainSelector uint64, observations []ccip.CommitObservation) (intervals []cciptypes.CommitStoreInterval, gasPrices map[uint64][]*big.Int, tokenPrices map[cciptypes.Address][]*big.Int, err error) { + // We require at least f+1 observations to reach consensus. Checking to ensure there are at least f+1 parsed observations. + if len(observations) <= f { + return nil, nil, nil, fmt.Errorf("not enough observations to form consensus: #obs=%d, f=%d", len(observations), f) + } + + gasPriceObservations := make(map[uint64][]*big.Int) + tokenPriceObservations := make(map[cciptypes.Address][]*big.Int) + for _, obs := range observations { + intervals = append(intervals, obs.Interval) + + for selector, price := range obs.SourceGasPriceUSDPerChain { + if price != nil { + gasPriceObservations[selector] = append(gasPriceObservations[selector], price) + } + } + // During phased rollout, NOPs running old release only report SourceGasPriceUSD. + // An empty `SourceGasPriceUSDPerChain` with a non-nil `SourceGasPriceUSD` can only happen with old release. + if len(obs.SourceGasPriceUSDPerChain) == 0 && obs.SourceGasPriceUSD != nil { + gasPriceObservations[sourceChainSelector] = append(gasPriceObservations[sourceChainSelector], obs.SourceGasPriceUSD) + } + + for token, price := range obs.TokenPricesUSD { + if price != nil { + tokenPriceObservations[token] = append(tokenPriceObservations[token], price) + } + } + } + + // Price is dropped if there are not enough valid observations. With a threshold of 2*(f-1) + 1, we achieve a balance between safety and liveness. + // During phased-rollout where some honest nodes may not have started observing the token yet, it requires 5 malicious node with 1 being the leader to successfully alter price. + // During regular operation, it requires 3 malicious nodes with 1 being the leader to temporarily delay price update for the token. 
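+	// For example, with f = 3 the threshold below is 2*(3-1)+1 = 5 valid observations per price.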
+ priceReportingThreshold := 2*(f-1) + 1 + + gasPrices = make(map[uint64][]*big.Int) + for selector, perChainPriceObservations := range gasPriceObservations { + if len(perChainPriceObservations) < priceReportingThreshold { + lggr.Warnf("Skipping chain with selector %d due to not enough valid observations: #obs=%d, f=%d, threshold=%d", selector, len(perChainPriceObservations), f, priceReportingThreshold) + continue + } + gasPrices[selector] = perChainPriceObservations + } + + tokenPrices = make(map[cciptypes.Address][]*big.Int) + for token, perTokenPriceObservations := range tokenPriceObservations { + if len(perTokenPriceObservations) < priceReportingThreshold { + lggr.Warnf("Skipping token %s due to not enough valid observations: #obs=%d, f=%d, threshold=%d", string(token), len(perTokenPriceObservations), f, priceReportingThreshold) + continue + } + tokenPrices[token] = perTokenPriceObservations + } + + return intervals, gasPrices, tokenPrices, nil +} + +// selectPriceUpdates filters out gas and token price updates that are already inflight +func (r *CommitReportingPlugin) selectPriceUpdates(ctx context.Context, now time.Time, gasPriceObs map[uint64][]*big.Int, tokenPriceObs map[cciptypes.Address][]*big.Int) ([]cciptypes.GasPrice, []cciptypes.TokenPrice, error) { + // If price reporting is disabled, there is no need to select price updates. + if r.offchainConfig.PriceReportingDisabled { + return nil, nil, nil + } + + latestGasPrice, err := r.getLatestGasPriceUpdate(ctx, now) + if err != nil { + return nil, nil, err + } + + latestTokenPrices, err := r.getLatestTokenPriceUpdates(ctx, now) + if err != nil { + return nil, nil, err + } + + return r.calculatePriceUpdates(gasPriceObs, tokenPriceObs, latestGasPrice, latestTokenPrices) +} + +// Note priceUpdates must be deterministic. +// The provided gasPriceObs and tokenPriceObs should not contain nil values. +// The returned latestGasPrice and latestTokenPrices should not contain nil values. +func (r *CommitReportingPlugin) calculatePriceUpdates(gasPriceObs map[uint64][]*big.Int, tokenPriceObs map[cciptypes.Address][]*big.Int, latestGasPrice map[uint64]update, latestTokenPrices map[cciptypes.Address]update) ([]cciptypes.GasPrice, []cciptypes.TokenPrice, error) { + var tokenPriceUpdates []cciptypes.TokenPrice + for token, tokenPriceObservations := range tokenPriceObs { + medianPrice := ccipcalc.BigIntSortedMiddle(tokenPriceObservations) + + latestTokenPrice, exists := latestTokenPrices[token] + if exists { + tokenPriceUpdatedRecently := time.Since(latestTokenPrice.timestamp) < r.offchainConfig.TokenPriceHeartBeat + tokenPriceNotChanged := !ccipcalc.Deviates(medianPrice, latestTokenPrice.value, int64(r.offchainConfig.TokenPriceDeviationPPB)) + if tokenPriceUpdatedRecently && tokenPriceNotChanged { + r.lggr.Debugw("token price was updated recently, skipping the update", + "token", token, "newPrice", medianPrice, "existingPrice", latestTokenPrice.value) + continue // skip the update if we recently had a price update close to the new value + } + } + + tokenPriceUpdates = append(tokenPriceUpdates, cciptypes.TokenPrice{ + Token: token, + Value: medianPrice, + }) + } + + // Determinism required. 
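The skip logic above combines a heartbeat window with a parts-per-billion deviation check (ccipcalc.Deviates and TokenPriceDeviationPPB). The sketch below is a simplified re-implementation of that convention as I read it, roughly |new - old| / old > ppb / 1e9, together with the heartbeat gate; it is not the library function itself.

```go
package main

import (
	"fmt"
	"math/big"
	"time"
)

// deviatesPPB is a simplified stand-in for ccipcalc.Deviates: it reports whether
// |newVal - oldVal| relative to oldVal exceeds the given parts-per-billion threshold.
func deviatesPPB(newVal, oldVal *big.Int, ppb int64) bool {
	diff := new(big.Int).Sub(newVal, oldVal)
	diff.Abs(diff)
	// diff/old > ppb/1e9  <=>  diff*1e9 > ppb*old (avoids division)
	lhs := new(big.Int).Mul(diff, big.NewInt(1e9))
	rhs := new(big.Int).Mul(big.NewInt(ppb), new(big.Int).Abs(oldVal))
	return lhs.Cmp(rhs) > 0
}

// shouldSkipUpdate mirrors the gating above: skip only when the last update is
// both recent (within the heartbeat) and not deviated beyond the threshold.
func shouldSkipUpdate(median, last *big.Int, lastAt time.Time, heartbeat time.Duration, ppb int64) bool {
	recent := time.Since(lastAt) < heartbeat
	return recent && !deviatesPPB(median, last, ppb)
}

func main() {
	last := big.NewInt(100)
	median := big.NewInt(110) // 10% above the last reported price
	// With a 20% (2e8 PPB) threshold and a recent update, the new price is skipped.
	fmt.Println(shouldSkipUpdate(median, last, time.Now().Add(-10*time.Minute), time.Hour, 2e8))
	// With a 5% (5e7 PPB) threshold the same price deviates, so it is reported.
	fmt.Println(shouldSkipUpdate(median, last, time.Now().Add(-10*time.Minute), time.Hour, 5e7))
}
```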
+ sort.Slice(tokenPriceUpdates, func(i, j int) bool { + return tokenPriceUpdates[i].Token < tokenPriceUpdates[j].Token + }) + + var gasPriceUpdate []cciptypes.GasPrice + for chainSelector, gasPriceObservations := range gasPriceObs { + newGasPrice, err := r.gasPriceEstimator.Median(gasPriceObservations) // Compute the median price + if err != nil { + return nil, nil, fmt.Errorf("failed to calculate median gas price for chain selector %d: %w", chainSelector, err) + } + + // Default to updating so that we update if there are no prior updates. + latestGasPrice, exists := latestGasPrice[chainSelector] + if exists && latestGasPrice.value != nil { + gasPriceUpdatedRecently := time.Since(latestGasPrice.timestamp) < r.offchainConfig.GasPriceHeartBeat + gasPriceDeviated, err := r.gasPriceEstimator.Deviates(newGasPrice, latestGasPrice.value) + if err != nil { + return nil, nil, err + } + if gasPriceUpdatedRecently && !gasPriceDeviated { + r.lggr.Debugw("gas price was updated recently and not deviated sufficiently, skipping the update", + "chainSelector", chainSelector, "newPrice", newGasPrice, "existingPrice", latestGasPrice.value) + continue + } + } + + gasPriceUpdate = append(gasPriceUpdate, cciptypes.GasPrice{ + DestChainSelector: chainSelector, + Value: newGasPrice, + }) + } + + sort.Slice(gasPriceUpdate, func(i, j int) bool { + return gasPriceUpdate[i].DestChainSelector < gasPriceUpdate[j].DestChainSelector + }) + + return gasPriceUpdate, tokenPriceUpdates, nil +} + +// buildReport assumes there is at least one message in reqs. +func (r *CommitReportingPlugin) buildReport(ctx context.Context, lggr logger.Logger, interval cciptypes.CommitStoreInterval, gasPrices []cciptypes.GasPrice, tokenPrices []cciptypes.TokenPrice) (cciptypes.CommitStoreReport, error) { + // If no messages are needed only include fee updates + if interval.Min == 0 { + return cciptypes.CommitStoreReport{ + TokenPrices: tokenPrices, + GasPrices: gasPrices, + MerkleRoot: [32]byte{}, + Interval: interval, + }, nil + } + + // Logs are guaranteed to be in order of seq num, since these are finalized logs only + // and the contract's seq num is auto-incrementing. 
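buildReport, continued below, refuses to build a Merkle tree unless the fetched sequence numbers exactly cover [Min, Max]. The following sketch shows that contiguity condition as I understand it; it is a simplified stand-in for ccipcalc.ContiguousReqs, not the real helper.

```go
package main

import "fmt"

// contiguous is a simplified illustration of the check performed by ccipcalc.ContiguousReqs:
// the observed sequence numbers must be exactly min, min+1, ..., max with no gaps.
func contiguous(min, max uint64, seqNrs []uint64) bool {
	if uint64(len(seqNrs)) != max-min+1 {
		return false
	}
	for i, sn := range seqNrs {
		if sn != min+uint64(i) {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(contiguous(54, 56, []uint64{54, 55, 56})) // true: full range, a root can be built
	fmt.Println(contiguous(54, 56, []uint64{54, 56}))     // false: 55 is missing, no report can be built
}
```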
+ sendRequests, err := r.onRampReader.GetSendRequestsBetweenSeqNums(ctx, interval.Min, interval.Max, true) + if err != nil { + return cciptypes.CommitStoreReport{}, err + } + if len(sendRequests) == 0 { + lggr.Warn("No messages found in interval", + "minSeqNr", interval.Min, + "maxSeqNr", interval.Max) + return cciptypes.CommitStoreReport{}, fmt.Errorf("tried building a tree without leaves") + } + + leaves := make([][32]byte, 0, len(sendRequests)) + var seqNrs []uint64 + for _, req := range sendRequests { + leaves = append(leaves, req.Hash) + seqNrs = append(seqNrs, req.SequenceNumber) + } + if !ccipcalc.ContiguousReqs(lggr, interval.Min, interval.Max, seqNrs) { + return cciptypes.CommitStoreReport{}, errors.Errorf("do not have full range [%v, %v] have %v", interval.Min, interval.Max, seqNrs) + } + tree, err := merklemulti.NewTree(hashutil.NewKeccak(), leaves) + if err != nil { + return cciptypes.CommitStoreReport{}, err + } + + return cciptypes.CommitStoreReport{ + GasPrices: gasPrices, + TokenPrices: tokenPrices, + MerkleRoot: tree.Root(), + Interval: interval, + }, nil +} + +func (r *CommitReportingPlugin) ShouldAcceptFinalizedReport(ctx context.Context, reportTimestamp types.ReportTimestamp, report types.Report) (bool, error) { + parsedReport, err := r.commitStoreReader.DecodeCommitReport(ctx, report) + if err != nil { + return false, err + } + lggr := r.lggr.Named("CommitShouldAcceptFinalizedReport").With( + "merkleRoot", parsedReport.MerkleRoot, + "minSeqNum", parsedReport.Interval.Min, + "maxSeqNum", parsedReport.Interval.Max, + "gasPriceUpdates", parsedReport.GasPrices, + "tokenPriceUpdates", parsedReport.TokenPrices, + "reportTimestamp", reportTimestamp, + ) + // Empty report, should not be put on chain + if parsedReport.MerkleRoot == [32]byte{} && len(parsedReport.GasPrices) == 0 && len(parsedReport.TokenPrices) == 0 { + lggr.Warn("Empty report, should not be put on chain") + return false, nil + } + + if healthy, err1 := r.chainHealthcheck.IsHealthy(ctx); err1 != nil { + return false, err1 + } else if !healthy { + return false, ccip.ErrChainIsNotHealthy + } + + if r.isStaleReport(ctx, lggr, parsedReport, reportTimestamp) { + lggr.Infow("Rejecting stale report") + return false, nil + } + + r.metricsCollector.SequenceNumber(ccip.ShouldAccept, parsedReport.Interval.Max) + lggr.Infow("Accepting finalized report", "merkleRoot", hexutil.Encode(parsedReport.MerkleRoot[:])) + return true, nil +} + +// ShouldTransmitAcceptedReport checks if the report is stale, if it is it should not be transmitted. +func (r *CommitReportingPlugin) ShouldTransmitAcceptedReport(ctx context.Context, reportTimestamp types.ReportTimestamp, report types.Report) (bool, error) { + lggr := r.lggr.Named("CommitShouldTransmitAcceptedReport") + parsedReport, err := r.commitStoreReader.DecodeCommitReport(ctx, report) + if err != nil { + return false, err + } + if healthy, err1 := r.chainHealthcheck.IsHealthy(ctx); err1 != nil { + return false, err1 + } else if !healthy { + return false, ccip.ErrChainIsNotHealthy + } + // If report is not stale we transmit. + // When the commitTransmitter enqueues the tx for tx manager, + // we mark it as fulfilled, effectively removing it from the set of inflight messages. + shouldTransmit := !r.isStaleReport(ctx, lggr, parsedReport, reportTimestamp) + + lggr.Infow("ShouldTransmitAcceptedReport", + "shouldTransmit", shouldTransmit, + "reportTimestamp", reportTimestamp) + return shouldTransmit, nil +} + +// isStaleReport checks a report to see if the contents have become stale. 
+// It does so in four ways: +// 1. if there is a merkle root, check if the sequence numbers match up with onchain data +// 2. if there is no merkle root, check if current price's epoch and round is after onchain epoch and round +// 3. if there is a gas price update check to see if the value is different from the last +// reported value +// 4. if there are token prices check to see if the values are different from the last +// reported values. +// +// If there is a merkle root present, staleness is only measured based on the merkle root +// If there is no merkle root but there is a gas update, only this gas update is used for staleness checks. +// If only price updates are included, the price updates are used to check for staleness +// If nothing is included the report is always considered stale. +func (r *CommitReportingPlugin) isStaleReport(ctx context.Context, lggr logger.Logger, report cciptypes.CommitStoreReport, reportTimestamp types.ReportTimestamp) bool { + // If there is a merkle root, ignore all other staleness checks and only check for sequence number staleness + if report.MerkleRoot != [32]byte{} { + return r.isStaleMerkleRoot(ctx, lggr, report.Interval) + } + + hasGasPriceUpdate := len(report.GasPrices) > 0 + hasTokenPriceUpdates := len(report.TokenPrices) > 0 + + // If there is no merkle root, no gas price update and no token price update + // we don't want to write anything on-chain, so we consider this report stale. + if !hasGasPriceUpdate && !hasTokenPriceUpdates { + return true + } + + // We consider a price update as stale when, there isn't an update or there is an update that is stale. + gasPriceStale := !hasGasPriceUpdate || r.isStaleGasPrice(ctx, lggr, report.GasPrices) + tokenPricesStale := !hasTokenPriceUpdates || r.isStaleTokenPrices(ctx, lggr, report.TokenPrices) + + if gasPriceStale && tokenPricesStale { + return true + } + + // If report only has price update, check if its epoch and round lags behind the latest onchain + lastPriceEpochAndRound, err := r.commitStoreReader.GetLatestPriceEpochAndRound(ctx) + if err != nil { + // Assume it's a transient issue getting the last report and try again on the next round + return true + } + + thisEpochAndRound := ccipcalc.MergeEpochAndRound(reportTimestamp.Epoch, reportTimestamp.Round) + return lastPriceEpochAndRound >= thisEpochAndRound +} + +func (r *CommitReportingPlugin) isStaleMerkleRoot(ctx context.Context, lggr logger.Logger, reportInterval cciptypes.CommitStoreInterval) bool { + nextSeqNum, err := r.commitStoreReader.GetExpectedNextSequenceNumber(ctx) + if err != nil { + // Assume it's a transient issue getting the last report and try again on the next round + return true + } + + // The report is not stale and correct only if nextSeqNum == reportInterval.Min. + // Mark it stale if the condition isn't met. 
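The precedence described in the isStaleReport comment (merkle root first, then price updates, then the epoch/round comparison) can be condensed into a small decision helper. The sketch below is a simplified restatement of that flow for illustration, not the plugin's actual method.

```go
package main

import "fmt"

// staleReport restates the staleness precedence described above: a merkle root
// short-circuits everything; otherwise the report is stale if both the gas and
// token updates are stale, or if it does not advance past the last on-chain
// price epoch/round.
func staleReport(hasRoot, rootStale, gasStale, tokenStale bool, lastEpochRound, thisEpochRound uint64) bool {
	if hasRoot {
		return rootStale // only the sequence-number check matters
	}
	if gasStale && tokenStale {
		return true // nothing worth writing on chain
	}
	// Price-only report: stale if it does not advance past the last on-chain price epoch/round.
	return lastEpochRound >= thisEpochRound
}

func main() {
	// A report with a valid (non-stale) root is accepted regardless of price staleness.
	fmt.Println(staleReport(true, false, true, true, 10, 5)) // false
	// A price-only report that lags the last on-chain epoch/round is stale.
	fmt.Println(staleReport(false, false, false, true, 10, 5)) // true
}
```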
+ if nextSeqNum != reportInterval.Min { + lggr.Infow("The report is stale because of sequence number mismatch with the commit store interval min value", + "nextSeqNum", nextSeqNum, "reportIntervalMin", reportInterval.Min) + return true + } + + lggr.Infow("Report root is not stale", "nextSeqNum", nextSeqNum, "reportIntervalMin", reportInterval.Min) + + // If a report has root and valid sequence number, the report should be submitted, regardless of price staleness + return false +} + +func (r *CommitReportingPlugin) isStaleGasPrice(ctx context.Context, lggr logger.Logger, gasPriceUpdates []cciptypes.GasPrice) bool { + latestGasPrice, err := r.getLatestGasPriceUpdate(ctx, time.Now()) + if err != nil { + lggr.Errorw("Gas price is stale because getLatestGasPriceUpdate failed", "err", err) + return true + } + + for _, gasPriceUpdate := range gasPriceUpdates { + latestUpdate, exists := latestGasPrice[gasPriceUpdate.DestChainSelector] + if !exists || latestUpdate.value == nil { + lggr.Infow("Found non-stale gas price", "chainSelector", gasPriceUpdate.DestChainSelector, "gasPriceUSd", gasPriceUpdate.Value) + return false + } + + gasPriceDeviated, err := r.gasPriceEstimator.Deviates(gasPriceUpdate.Value, latestUpdate.value) + if err != nil { + lggr.Errorw("Gas price is stale because deviation check failed", "err", err) + return true + } + + if gasPriceDeviated { + lggr.Infow("Found non-stale gas price", "chainSelector", gasPriceUpdate.DestChainSelector, "gasPriceUSd", gasPriceUpdate.Value, "latestUpdate", latestUpdate.value) + return false + } + lggr.Infow("Gas price is stale", "chainSelector", gasPriceUpdate.DestChainSelector, "gasPriceUSd", gasPriceUpdate.Value, "latestGasPrice", latestUpdate.value) + } + + lggr.Infow("All gas prices are stale") + return true +} + +func (r *CommitReportingPlugin) isStaleTokenPrices(ctx context.Context, lggr logger.Logger, priceUpdates []cciptypes.TokenPrice) bool { + // getting the last price updates without including inflight is like querying + // current prices onchain, but uses logpoller's data to save on the RPC requests + latestTokenPriceUpdates, err := r.getLatestTokenPriceUpdates(ctx, time.Now()) + if err != nil { + return true + } + + for _, tokenUpdate := range priceUpdates { + latestUpdate, ok := latestTokenPriceUpdates[tokenUpdate.Token] + priceEqual := ok && !ccipcalc.Deviates(tokenUpdate.Value, latestUpdate.value, int64(r.offchainConfig.TokenPriceDeviationPPB)) + + if !priceEqual { + lggr.Infow("Found non-stale token price", "token", tokenUpdate.Token, "usdPerToken", tokenUpdate.Value, "latestUpdate", latestUpdate.value) + return false + } + lggr.Infow("Token price is stale", "latestTokenPrice", latestUpdate.value, "usdPerToken", tokenUpdate.Value, "token", tokenUpdate.Token) + } + + lggr.Infow("All token prices are stale") + return true +} + +func (r *CommitReportingPlugin) Close() error { + return nil +} diff --git a/core/services/ocr2/plugins/ccip/ccipcommit/ocr2_test.go b/core/services/ocr2/plugins/ccip/ccipcommit/ocr2_test.go new file mode 100644 index 00000000000..6cf7e4bec72 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/ccipcommit/ocr2_test.go @@ -0,0 +1,1861 @@ +package ccipcommit + +import ( + "context" + "encoding/json" + "fmt" + "math/big" + "math/rand" + "slices" + "sort" + "testing" + "time" + + "github.com/Masterminds/semver/v3" + "github.com/leanovate/gopter" + "github.com/leanovate/gopter/gen" + "github.com/leanovate/gopter/prop" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" 
+ "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/chainlink-common/pkg/utils/tests" + + "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + + "github.com/smartcontractkit/chainlink-common/pkg/config" + "github.com/smartcontractkit/chainlink-common/pkg/hashutil" + "github.com/smartcontractkit/chainlink-common/pkg/merklemulti" + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas/mocks" + mocks2 "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller/mocks" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/commit_store" + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers" + ccipconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/cache" + ccipcachemocks "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/cache/mocks" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/factory" + ccipdatamocks "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/mocks" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0" + + ccipdbmocks "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdb/mocks" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/prices" +) + +func TestCommitReportingPlugin_Observation(t *testing.T) { + sourceNativeTokenAddr := ccipcalc.HexToAddress("1000") + destChainSelector := uint64(1) + sourceChainSelector := uint64(2) + + bridgedTokens := []cciptypes.Address{ + ccipcalc.HexToAddress("2000"), + ccipcalc.HexToAddress("3000"), + } + + // Token price of 1e18 token amount in 1e18 USD precision + expectedTokenPrice := map[cciptypes.Address]*big.Int{ + bridgedTokens[0]: big.NewInt(1e10), + bridgedTokens[1]: big.NewInt(2e18), + } + + testCases := []struct { + name string + epochAndRound types.ReportTimestamp + commitStorePaused bool + sourceChainCursed bool + commitStoreSeqNum uint64 + gasPrices map[uint64]*big.Int + tokenPrices map[cciptypes.Address]*big.Int + sendReqs []cciptypes.EVM2EVMMessageWithTxMeta + priceReportingDisabled bool + + expErr bool + expObs ccip.CommitObservation + }{ + { + name: "base report", + commitStoreSeqNum: 54, + gasPrices: map[uint64]*big.Int{ + sourceChainSelector: big.NewInt(2e18), + }, + tokenPrices: expectedTokenPrice, + sendReqs: []cciptypes.EVM2EVMMessageWithTxMeta{ + {EVM2EVMMessage: cciptypes.EVM2EVMMessage{SequenceNumber: 54}}, + {EVM2EVMMessage: cciptypes.EVM2EVMMessage{SequenceNumber: 55}}, + }, + expObs: ccip.CommitObservation{ + TokenPricesUSD: expectedTokenPrice, + SourceGasPriceUSD: big.NewInt(2e18), + SourceGasPriceUSDPerChain: map[uint64]*big.Int{ + sourceChainSelector: big.NewInt(2e18), + }, + Interval: cciptypes.CommitStoreInterval{ + Min: 54, + Max: 55, + 
}, + }, + }, + { + name: "base report with multi-chain gas prices", + commitStoreSeqNum: 54, + gasPrices: map[uint64]*big.Int{ + sourceChainSelector + 1: big.NewInt(2e18), + sourceChainSelector + 2: big.NewInt(3e18), + }, + tokenPrices: expectedTokenPrice, + sendReqs: []cciptypes.EVM2EVMMessageWithTxMeta{ + {EVM2EVMMessage: cciptypes.EVM2EVMMessage{SequenceNumber: 54}}, + {EVM2EVMMessage: cciptypes.EVM2EVMMessage{SequenceNumber: 55}}, + }, + expObs: ccip.CommitObservation{ + TokenPricesUSD: expectedTokenPrice, + SourceGasPriceUSD: nil, + SourceGasPriceUSDPerChain: map[uint64]*big.Int{ + sourceChainSelector + 1: big.NewInt(2e18), + sourceChainSelector + 2: big.NewInt(3e18), + }, + Interval: cciptypes.CommitStoreInterval{ + Min: 54, + Max: 55, + }, + }, + }, + { + name: "base report with price reporting disabled", + commitStoreSeqNum: 54, + gasPrices: map[uint64]*big.Int{ + sourceChainSelector: big.NewInt(2e18), + }, + tokenPrices: expectedTokenPrice, + sendReqs: []cciptypes.EVM2EVMMessageWithTxMeta{ + {EVM2EVMMessage: cciptypes.EVM2EVMMessage{SequenceNumber: 54}}, + {EVM2EVMMessage: cciptypes.EVM2EVMMessage{SequenceNumber: 55}}, + }, + priceReportingDisabled: true, + expObs: ccip.CommitObservation{ + TokenPricesUSD: map[cciptypes.Address]*big.Int{}, + SourceGasPriceUSD: nil, + SourceGasPriceUSDPerChain: map[uint64]*big.Int{}, + Interval: cciptypes.CommitStoreInterval{ + Min: 54, + Max: 55, + }, + }, + }, + { + name: "commit store is down", + commitStorePaused: true, + sourceChainCursed: false, + expErr: true, + }, + { + name: "source chain is cursed", + commitStorePaused: false, + sourceChainCursed: true, + expErr: true, + }, + } + + ctx := testutils.Context(t) + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + commitStoreReader := ccipdatamocks.NewCommitStoreReader(t) + commitStoreReader.On("IsDown", ctx).Return(tc.commitStorePaused, nil) + commitStoreReader.On("IsDestChainHealthy", ctx).Return(true, nil) + if !tc.commitStorePaused && !tc.sourceChainCursed { + commitStoreReader.On("GetExpectedNextSequenceNumber", ctx).Return(tc.commitStoreSeqNum, nil) + } + + onRampReader := ccipdatamocks.NewOnRampReader(t) + onRampReader.On("IsSourceChainHealthy", ctx).Return(true, nil) + onRampReader.On("IsSourceCursed", ctx).Return(tc.sourceChainCursed, nil) + if len(tc.sendReqs) > 0 { + onRampReader.On("GetSendRequestsBetweenSeqNums", ctx, tc.commitStoreSeqNum, tc.commitStoreSeqNum+OnRampMessagesScanLimit, true). 
+ Return(tc.sendReqs, nil) + } + + mockPriceService := ccipdbmocks.NewPriceService(t) + mockPriceService.On("GetGasAndTokenPrices", ctx, destChainSelector).Return( + tc.gasPrices, + tc.tokenPrices, + nil, + ).Maybe() + + p := &CommitReportingPlugin{} + p.lggr = logger.TestLogger(t) + p.commitStoreReader = commitStoreReader + p.onRampReader = onRampReader + p.sourceNative = sourceNativeTokenAddr + p.metricsCollector = ccip.NoopMetricsCollector + p.chainHealthcheck = cache.NewChainHealthcheck(p.lggr, onRampReader, commitStoreReader) + p.priceService = mockPriceService + p.destChainSelector = destChainSelector + p.sourceChainSelector = sourceChainSelector + p.offchainConfig = cciptypes.CommitOffchainConfig{ + PriceReportingDisabled: tc.priceReportingDisabled, + } + + obs, err := p.Observation(ctx, tc.epochAndRound, types.Query{}) + + if tc.expErr { + assert.Error(t, err) + return + } + assert.NoError(t, err) + + if tc.expObs.TokenPricesUSD != nil { + // field ordering in mapping is not guaranteed, if TokenPricesUSD exists, unmarshal to compare mapping + var obsStuct ccip.CommitObservation + err = json.Unmarshal(obs, &obsStuct) + assert.NoError(t, err) + + assert.Equal(t, tc.expObs, obsStuct) + } else { + // if TokenPricesUSD is nil, compare the bytes directly, marshal then unmarshal turns nil map to empty + expObsBytes, err := tc.expObs.Marshal() + assert.NoError(t, err) + assert.Equal(t, expObsBytes, []byte(obs)) + } + }) + } +} + +func TestCommitReportingPlugin_Report(t *testing.T) { + ctx := testutils.Context(t) + sourceChainSelector := uint64(rand.Int()) + var gasPrice = big.NewInt(1) + var gasPrice2 = big.NewInt(2) + gasPriceHeartBeat := *config.MustNewDuration(time.Hour) + + t.Run("not enough observations", func(t *testing.T) { + p := &CommitReportingPlugin{} + p.lggr = logger.TestLogger(t) + p.F = 1 + + chainHealthcheck := ccipcachemocks.NewChainHealthcheck(t) + chainHealthcheck.On("IsHealthy", ctx).Return(true, nil).Maybe() + p.chainHealthcheck = chainHealthcheck + + o := ccip.CommitObservation{Interval: cciptypes.CommitStoreInterval{Min: 1, Max: 1}, SourceGasPriceUSD: big.NewInt(0)} + obs, err := o.Marshal() + assert.NoError(t, err) + + aos := []types.AttributedObservation{{Observation: obs}} + + gotSomeReport, gotReport, err := p.Report(ctx, types.ReportTimestamp{}, types.Query{}, aos) + assert.False(t, gotSomeReport) + assert.Nil(t, gotReport) + assert.Error(t, err) + }) + + testCases := []struct { + name string + observations []ccip.CommitObservation + f int + gasPriceUpdates []cciptypes.GasPriceUpdateWithTxMeta + tokenDecimals map[cciptypes.Address]uint8 + tokenPriceUpdates []cciptypes.TokenPriceUpdateWithTxMeta + sendRequests []cciptypes.EVM2EVMMessageWithTxMeta + expCommitReport *cciptypes.CommitStoreReport + expSeqNumRange cciptypes.CommitStoreInterval + expErr bool + }{ + { + name: "base", + observations: []ccip.CommitObservation{ + {Interval: cciptypes.CommitStoreInterval{Min: 1, Max: 1}, SourceGasPriceUSDPerChain: map[uint64]*big.Int{sourceChainSelector: gasPrice}}, + {Interval: cciptypes.CommitStoreInterval{Min: 1, Max: 1}, SourceGasPriceUSDPerChain: map[uint64]*big.Int{sourceChainSelector: gasPrice}}, + }, + f: 1, + sendRequests: []cciptypes.EVM2EVMMessageWithTxMeta{ + { + EVM2EVMMessage: cciptypes.EVM2EVMMessage{ + SequenceNumber: 1, + }, + }, + }, + gasPriceUpdates: []cciptypes.GasPriceUpdateWithTxMeta{ + { + GasPriceUpdate: cciptypes.GasPriceUpdate{ + GasPrice: cciptypes.GasPrice{ + DestChainSelector: sourceChainSelector, + Value: big.NewInt(1), + }, + 
TimestampUnixSec: big.NewInt(time.Now().Add(-2 * gasPriceHeartBeat.Duration()).Unix()), + }, + }, + }, + expSeqNumRange: cciptypes.CommitStoreInterval{Min: 1, Max: 1}, + expCommitReport: &cciptypes.CommitStoreReport{ + MerkleRoot: [32]byte{}, + Interval: cciptypes.CommitStoreInterval{Min: 1, Max: 1}, + TokenPrices: nil, + GasPrices: []cciptypes.GasPrice{{DestChainSelector: sourceChainSelector, Value: gasPrice}}, + }, + expErr: false, + }, + { + name: "observations with mix gas price formats", + observations: []ccip.CommitObservation{ + { + Interval: cciptypes.CommitStoreInterval{Min: 1, Max: 1}, + SourceGasPriceUSDPerChain: map[uint64]*big.Int{ + sourceChainSelector: gasPrice, + sourceChainSelector + 1: gasPrice2, + sourceChainSelector + 2: gasPrice2, + }, + }, + { + Interval: cciptypes.CommitStoreInterval{Min: 1, Max: 1}, + SourceGasPriceUSDPerChain: map[uint64]*big.Int{ + sourceChainSelector: gasPrice, + sourceChainSelector + 1: gasPrice2, + sourceChainSelector + 2: gasPrice2, + }, + }, + { + Interval: cciptypes.CommitStoreInterval{Min: 1, Max: 1}, + SourceGasPriceUSDPerChain: map[uint64]*big.Int{ + sourceChainSelector: gasPrice, + sourceChainSelector + 1: gasPrice2, + }, + }, + { + Interval: cciptypes.CommitStoreInterval{Min: 1, Max: 1}, + SourceGasPriceUSD: gasPrice, + }, + }, + f: 2, + sendRequests: []cciptypes.EVM2EVMMessageWithTxMeta{ + { + EVM2EVMMessage: cciptypes.EVM2EVMMessage{ + SequenceNumber: 1, + }, + }, + }, + gasPriceUpdates: []cciptypes.GasPriceUpdateWithTxMeta{ + { + GasPriceUpdate: cciptypes.GasPriceUpdate{ + GasPrice: cciptypes.GasPrice{ + DestChainSelector: sourceChainSelector, + Value: big.NewInt(1), + }, + TimestampUnixSec: big.NewInt(time.Now().Add(-2 * gasPriceHeartBeat.Duration()).Unix()), + }, + }, + }, + expSeqNumRange: cciptypes.CommitStoreInterval{Min: 1, Max: 1}, + expCommitReport: &cciptypes.CommitStoreReport{ + MerkleRoot: [32]byte{}, + Interval: cciptypes.CommitStoreInterval{Min: 1, Max: 1}, + TokenPrices: nil, + GasPrices: []cciptypes.GasPrice{ + {DestChainSelector: sourceChainSelector, Value: gasPrice}, + {DestChainSelector: sourceChainSelector + 1, Value: gasPrice2}, + }, + }, + expErr: false, + }, + { + name: "empty", + observations: []ccip.CommitObservation{ + {Interval: cciptypes.CommitStoreInterval{Min: 0, Max: 0}, SourceGasPriceUSD: big.NewInt(0)}, + {Interval: cciptypes.CommitStoreInterval{Min: 0, Max: 0}, SourceGasPriceUSD: big.NewInt(0)}, + }, + gasPriceUpdates: []cciptypes.GasPriceUpdateWithTxMeta{ + { + GasPriceUpdate: cciptypes.GasPriceUpdate{ + GasPrice: cciptypes.GasPrice{ + DestChainSelector: sourceChainSelector, + Value: big.NewInt(0), + }, + TimestampUnixSec: big.NewInt(time.Now().Add(-gasPriceHeartBeat.Duration() / 2).Unix()), + }, + }, + }, + f: 1, + expErr: false, + }, + { + name: "no leaves", + observations: []ccip.CommitObservation{ + {Interval: cciptypes.CommitStoreInterval{Min: 2, Max: 2}, SourceGasPriceUSD: big.NewInt(0)}, + {Interval: cciptypes.CommitStoreInterval{Min: 2, Max: 2}, SourceGasPriceUSD: big.NewInt(0)}, + }, + f: 1, + sendRequests: []cciptypes.EVM2EVMMessageWithTxMeta{{}}, + expSeqNumRange: cciptypes.CommitStoreInterval{Min: 2, Max: 2}, + expErr: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + destPriceRegistryReader := ccipdatamocks.NewPriceRegistryReader(t) + destPriceRegistryReader.On("GetAllGasPriceUpdatesCreatedAfter", ctx, mock.Anything, 0).Return(tc.gasPriceUpdates, nil) + destPriceRegistryReader.On("GetTokenPriceUpdatesCreatedAfter", ctx, mock.Anything, 
0).Return(tc.tokenPriceUpdates, nil) + + onRampReader := ccipdatamocks.NewOnRampReader(t) + if len(tc.sendRequests) > 0 { + onRampReader.On("GetSendRequestsBetweenSeqNums", ctx, tc.expSeqNumRange.Min, tc.expSeqNumRange.Max, true).Return(tc.sendRequests, nil) + } + + evmEstimator := mocks.NewEvmFeeEstimator(t) + evmEstimator.On("L1Oracle").Return(nil) + gasPriceEstimator := prices.NewDAGasPriceEstimator(evmEstimator, nil, 2e9, 2e9) // 200% deviation + + var destTokens []cciptypes.Address + for tk := range tc.tokenDecimals { + destTokens = append(destTokens, tk) + } + sort.Slice(destTokens, func(i, j int) bool { + return destTokens[i] < destTokens[j] + }) + var destDecimals []uint8 + for _, token := range destTokens { + destDecimals = append(destDecimals, tc.tokenDecimals[token]) + } + + destPriceRegistryReader.On("GetTokensDecimals", ctx, mock.MatchedBy(func(tokens []cciptypes.Address) bool { + for _, token := range tokens { + if !slices.Contains(destTokens, token) { + return false + } + } + return true + })).Return(destDecimals, nil).Maybe() + + lp := mocks2.NewLogPoller(t) + commitStoreReader, err := v1_2_0.NewCommitStore(logger.TestLogger(t), utils.RandomAddress(), nil, lp) + assert.NoError(t, err) + + healthCheck := ccipcachemocks.NewChainHealthcheck(t) + healthCheck.On("IsHealthy", ctx).Return(true, nil) + + p := &CommitReportingPlugin{} + p.lggr = logger.TestLogger(t) + p.destPriceRegistryReader = destPriceRegistryReader + p.onRampReader = onRampReader + p.sourceChainSelector = sourceChainSelector + p.gasPriceEstimator = gasPriceEstimator + p.offchainConfig.GasPriceHeartBeat = gasPriceHeartBeat.Duration() + p.commitStoreReader = commitStoreReader + p.F = tc.f + p.metricsCollector = ccip.NoopMetricsCollector + p.chainHealthcheck = healthCheck + + aos := make([]types.AttributedObservation, 0, len(tc.observations)) + for _, o := range tc.observations { + obs, err2 := o.Marshal() + assert.NoError(t, err2) + aos = append(aos, types.AttributedObservation{Observation: obs}) + } + + gotSomeReport, gotReport, err := p.Report(ctx, types.ReportTimestamp{}, types.Query{}, aos) + if tc.expErr { + assert.Error(t, err) + return + } + + assert.NoError(t, err) + + if tc.expCommitReport != nil { + assert.True(t, gotSomeReport) + encodedExpectedReport, err := encodeCommitReport(*tc.expCommitReport) + assert.NoError(t, err) + assert.Equal(t, types.Report(encodedExpectedReport), gotReport) + } + }) + } +} + +func TestCommitReportingPlugin_ShouldAcceptFinalizedReport(t *testing.T) { + ctx := testutils.Context(t) + + newPlugin := func() *CommitReportingPlugin { + p := &CommitReportingPlugin{} + p.lggr = logger.TestLogger(t) + p.metricsCollector = ccip.NoopMetricsCollector + return p + } + + t.Run("report cannot be decoded leads to error", func(t *testing.T) { + p := newPlugin() + + encodedReport := []byte("whatever") + + commitStoreReader := ccipdatamocks.NewCommitStoreReader(t) + p.commitStoreReader = commitStoreReader + commitStoreReader.On("DecodeCommitReport", mock.Anything, encodedReport). 
+ Return(cciptypes.CommitStoreReport{}, errors.New("unable to decode report")) + + _, err := p.ShouldAcceptFinalizedReport(ctx, types.ReportTimestamp{}, encodedReport) + assert.Error(t, err) + }) + + t.Run("empty report should not be accepted", func(t *testing.T) { + p := newPlugin() + + report := cciptypes.CommitStoreReport{} + + commitStoreReader := ccipdatamocks.NewCommitStoreReader(t) + p.commitStoreReader = commitStoreReader + commitStoreReader.On("DecodeCommitReport", mock.Anything, mock.Anything).Return(report, nil) + + chainHealthCheck := ccipcachemocks.NewChainHealthcheck(t) + chainHealthCheck.On("IsHealthy", ctx).Return(true, nil).Maybe() + p.chainHealthcheck = chainHealthCheck + + encodedReport, err := encodeCommitReport(report) + assert.NoError(t, err) + shouldAccept, err := p.ShouldAcceptFinalizedReport(ctx, types.ReportTimestamp{}, encodedReport) + assert.NoError(t, err) + assert.False(t, shouldAccept) + }) + + t.Run("stale report should not be accepted", func(t *testing.T) { + onChainSeqNum := uint64(100) + + commitStoreReader := ccipdatamocks.NewCommitStoreReader(t) + p := newPlugin() + + p.commitStoreReader = commitStoreReader + + report := cciptypes.CommitStoreReport{ + GasPrices: []cciptypes.GasPrice{{Value: big.NewInt(int64(rand.Int()))}}, + MerkleRoot: [32]byte{123}, // this report is considered non-empty since it has a merkle root + } + + commitStoreReader.On("DecodeCommitReport", mock.Anything, mock.Anything).Return(report, nil) + commitStoreReader.On("GetExpectedNextSequenceNumber", mock.Anything).Return(onChainSeqNum, nil) + + chainHealthCheck := ccipcachemocks.NewChainHealthcheck(t) + chainHealthCheck.On("IsHealthy", ctx).Return(true, nil) + p.chainHealthcheck = chainHealthCheck + + // stale since report interval is behind on chain seq num + report.Interval = cciptypes.CommitStoreInterval{Min: onChainSeqNum - 2, Max: onChainSeqNum + 10} + encodedReport, err := encodeCommitReport(report) + assert.NoError(t, err) + + shouldAccept, err := p.ShouldAcceptFinalizedReport(ctx, types.ReportTimestamp{}, encodedReport) + assert.NoError(t, err) + assert.False(t, shouldAccept) + }) + + t.Run("non-stale report should be accepted", func(t *testing.T) { + onChainSeqNum := uint64(100) + + p := newPlugin() + + priceRegistryReader := ccipdatamocks.NewPriceRegistryReader(t) + p.destPriceRegistryReader = priceRegistryReader + + p.lggr = logger.TestLogger(t) + commitStoreReader := ccipdatamocks.NewCommitStoreReader(t) + p.commitStoreReader = commitStoreReader + + report := cciptypes.CommitStoreReport{ + Interval: cciptypes.CommitStoreInterval{ + Min: onChainSeqNum, + Max: onChainSeqNum + 10, + }, + TokenPrices: []cciptypes.TokenPrice{ + { + Token: cciptypes.Address(utils.RandomAddress().String()), + Value: big.NewInt(int64(rand.Int())), + }, + }, + GasPrices: []cciptypes.GasPrice{ + { + DestChainSelector: rand.Uint64(), + Value: big.NewInt(int64(rand.Int())), + }, + }, + MerkleRoot: [32]byte{123}, + } + commitStoreReader.On("DecodeCommitReport", mock.Anything, mock.Anything).Return(report, nil) + commitStoreReader.On("GetExpectedNextSequenceNumber", mock.Anything).Return(onChainSeqNum, nil) + + // non-stale since report interval is not behind on-chain seq num + report.Interval = cciptypes.CommitStoreInterval{Min: onChainSeqNum, Max: onChainSeqNum + 10} + encodedReport, err := encodeCommitReport(report) + assert.NoError(t, err) + + chainHealthCheck := ccipcachemocks.NewChainHealthcheck(t) + chainHealthCheck.On("IsHealthy", ctx).Return(true, nil) + p.chainHealthcheck = chainHealthCheck 
+ + shouldAccept, err := p.ShouldAcceptFinalizedReport(ctx, types.ReportTimestamp{}, encodedReport) + assert.NoError(t, err) + assert.True(t, shouldAccept) + }) +} + +func TestCommitReportingPlugin_ShouldTransmitAcceptedReport(t *testing.T) { + report := cciptypes.CommitStoreReport{ + TokenPrices: []cciptypes.TokenPrice{ + {Token: cciptypes.Address(utils.RandomAddress().String()), Value: big.NewInt(9e18)}, + }, + GasPrices: []cciptypes.GasPrice{ + { + + DestChainSelector: rand.Uint64(), + Value: big.NewInt(2000e9), + }, + }, + MerkleRoot: [32]byte{123}, + } + + ctx := testutils.Context(t) + p := &CommitReportingPlugin{} + commitStoreReader := ccipdatamocks.NewCommitStoreReader(t) + onChainSeqNum := uint64(100) + commitStoreReader.On("GetExpectedNextSequenceNumber", mock.Anything).Return(onChainSeqNum, nil) + p.commitStoreReader = commitStoreReader + p.lggr = logger.TestLogger(t) + + chainHealthCheck := ccipcachemocks.NewChainHealthcheck(t) + chainHealthCheck.On("IsHealthy", ctx).Return(true, nil).Maybe() + p.chainHealthcheck = chainHealthCheck + + t.Run("should transmit when report is not stale", func(t *testing.T) { + // not-stale since report interval is not behind on chain seq num + report.Interval = cciptypes.CommitStoreInterval{Min: onChainSeqNum, Max: onChainSeqNum + 10} + encodedReport, err := encodeCommitReport(report) + assert.NoError(t, err) + commitStoreReader.On("DecodeCommitReport", mock.Anything, encodedReport).Return(report, nil).Once() + shouldTransmit, err := p.ShouldTransmitAcceptedReport(ctx, types.ReportTimestamp{}, encodedReport) + assert.NoError(t, err) + assert.True(t, shouldTransmit) + }) + + t.Run("should not transmit when report is stale", func(t *testing.T) { + // stale since report interval is behind on chain seq num + report.Interval = cciptypes.CommitStoreInterval{Min: onChainSeqNum - 2, Max: onChainSeqNum + 10} + encodedReport, err := encodeCommitReport(report) + assert.NoError(t, err) + commitStoreReader.On("DecodeCommitReport", mock.Anything, encodedReport).Return(report, nil).Once() + shouldTransmit, err := p.ShouldTransmitAcceptedReport(ctx, types.ReportTimestamp{}, encodedReport) + assert.NoError(t, err) + assert.False(t, shouldTransmit) + }) + + t.Run("error when report cannot be decoded", func(t *testing.T) { + reportBytes := []byte("whatever") + commitStoreReader.On("DecodeCommitReport", mock.Anything, reportBytes). 
+ Return(cciptypes.CommitStoreReport{}, errors.New("decode error")).Once() + _, err := p.ShouldTransmitAcceptedReport(ctx, types.ReportTimestamp{}, reportBytes) + assert.Error(t, err) + }) +} + +func TestCommitReportingPlugin_observePriceUpdates(t *testing.T) { + destChainSelector := uint64(12345) + sourceChainSelector := uint64(67890) + + token1 := ccipcalc.HexToAddress("0x123") + token2 := ccipcalc.HexToAddress("0x234") + + gasPrices := map[uint64]*big.Int{ + sourceChainSelector: big.NewInt(1e18), + } + tokenPrices := map[cciptypes.Address]*big.Int{ + token1: big.NewInt(2e18), + token2: big.NewInt(3e18), + } + + testCases := []struct { + name string + psGasPricesResult map[uint64]*big.Int + psTokenPricesResult map[cciptypes.Address]*big.Int + PriceReportingDisabled bool + + expectedGasPrice map[uint64]*big.Int + expectedTokenPrices map[cciptypes.Address]*big.Int + + psError bool + expectedErr bool + }{ + { + name: "ORM called successfully", + psGasPricesResult: gasPrices, + psTokenPricesResult: tokenPrices, + expectedGasPrice: gasPrices, + expectedTokenPrices: tokenPrices, + }, + { + name: "price reporting disabled", + psGasPricesResult: gasPrices, + psTokenPricesResult: tokenPrices, + PriceReportingDisabled: true, + expectedGasPrice: map[uint64]*big.Int{}, + expectedTokenPrices: map[cciptypes.Address]*big.Int{}, + psError: false, + expectedErr: false, + }, + { + name: "price service error", + psGasPricesResult: map[uint64]*big.Int{}, + psTokenPricesResult: map[cciptypes.Address]*big.Int{}, + expectedGasPrice: nil, + expectedTokenPrices: nil, + psError: true, + expectedErr: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ctx := tests.Context(t) + + mockPriceService := ccipdbmocks.NewPriceService(t) + var psError error + if tc.psError { + psError = fmt.Errorf("price service error") + } + mockPriceService.On("GetGasAndTokenPrices", ctx, destChainSelector).Return( + tc.psGasPricesResult, + tc.psTokenPricesResult, + psError, + ).Maybe() + + p := &CommitReportingPlugin{ + lggr: logger.TestLogger(t), + destChainSelector: destChainSelector, + sourceChainSelector: sourceChainSelector, + priceService: mockPriceService, + offchainConfig: cciptypes.CommitOffchainConfig{ + PriceReportingDisabled: tc.PriceReportingDisabled, + }, + } + gasPricesUSD, sourceGasPriceUSD, tokenPricesUSD, err := p.observePriceUpdates(ctx) + if tc.expectedErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, tc.expectedGasPrice, gasPricesUSD) + assert.Equal(t, tc.expectedTokenPrices, tokenPricesUSD) + if tc.expectedGasPrice != nil { + assert.Equal(t, tc.expectedGasPrice[sourceChainSelector], sourceGasPriceUSD) + } + } + }) + } +} + +type CommitObservationLegacy struct { + Interval cciptypes.CommitStoreInterval `json:"interval"` + TokenPricesUSD map[cciptypes.Address]*big.Int `json:"tokensPerFeeCoin"` + SourceGasPriceUSD *big.Int `json:"sourceGasPrice"` +} + +func TestCommitReportingPlugin_extractObservationData(t *testing.T) { + token1 := ccipcalc.HexToAddress("0xa") + token2 := ccipcalc.HexToAddress("0xb") + token1Price := big.NewInt(1) + token2Price := big.NewInt(2) + unsupportedToken := ccipcalc.HexToAddress("0xc") + gasPrice1 := big.NewInt(100) + gasPrice2 := big.NewInt(100) + var sourceChainSelector1 uint64 = 10 + var sourceChainSelector2 uint64 = 20 + + tokenDecimals := make(map[cciptypes.Address]uint8) + tokenDecimals[token1] = 18 + tokenDecimals[token2] = 18 + + validInterval := cciptypes.CommitStoreInterval{Min: 1, Max: 2} + zeroInterval := 
cciptypes.CommitStoreInterval{Min: 0, Max: 0} + + // mix legacy commit observations with new commit observations to ensure they can work together + legacyObsRaw := CommitObservationLegacy{ + Interval: validInterval, + TokenPricesUSD: map[cciptypes.Address]*big.Int{ + token1: token1Price, + token2: token2Price, + }, + SourceGasPriceUSD: gasPrice1, + } + legacyObsBytes, err := json.Marshal(&legacyObsRaw) + assert.NoError(t, err) + + newObsRaw := ccip.CommitObservation{ + Interval: validInterval, + TokenPricesUSD: map[cciptypes.Address]*big.Int{ + token1: token1Price, + token2: token2Price, + }, + SourceGasPriceUSD: gasPrice1, + SourceGasPriceUSDPerChain: map[uint64]*big.Int{ + sourceChainSelector1: gasPrice1, + sourceChainSelector2: gasPrice2, + }, + } + newObsBytes, err := newObsRaw.Marshal() + assert.NoError(t, err) + + lggr := logger.TestLogger(t) + observations := ccip.GetParsableObservations[ccip.CommitObservation](lggr, []types.AttributedObservation{ + {Observation: legacyObsBytes}, + {Observation: newObsBytes}, + }) + assert.Len(t, observations, 2) + legacyObs := observations[0] + newObs := observations[1] + + obWithNilGasPrice := ccip.CommitObservation{ + Interval: zeroInterval, + TokenPricesUSD: map[cciptypes.Address]*big.Int{ + token1: token1Price, + token2: token2Price, + }, + SourceGasPriceUSD: nil, + SourceGasPriceUSDPerChain: map[uint64]*big.Int{}, + } + obWithNilTokenPrice := ccip.CommitObservation{ + Interval: zeroInterval, + TokenPricesUSD: map[cciptypes.Address]*big.Int{ + token1: token1Price, + token2: nil, + }, + SourceGasPriceUSD: gasPrice1, + SourceGasPriceUSDPerChain: map[uint64]*big.Int{ + sourceChainSelector1: gasPrice1, + sourceChainSelector2: gasPrice2, + }, + } + obMissingTokenPrices := ccip.CommitObservation{ + Interval: zeroInterval, + TokenPricesUSD: map[cciptypes.Address]*big.Int{}, + SourceGasPriceUSD: gasPrice1, + SourceGasPriceUSDPerChain: map[uint64]*big.Int{ + sourceChainSelector1: gasPrice1, + sourceChainSelector2: gasPrice2, + }, + } + obWithUnsupportedToken := ccip.CommitObservation{ + Interval: zeroInterval, + TokenPricesUSD: map[cciptypes.Address]*big.Int{ + token1: token1Price, + token2: token2Price, + unsupportedToken: token2Price, + }, + SourceGasPriceUSD: gasPrice1, + SourceGasPriceUSDPerChain: map[uint64]*big.Int{ + sourceChainSelector1: gasPrice1, + sourceChainSelector2: gasPrice2, + }, + } + obEmpty := ccip.CommitObservation{ + Interval: zeroInterval, + TokenPricesUSD: nil, + SourceGasPriceUSD: nil, + SourceGasPriceUSDPerChain: nil, + } + + testCases := []struct { + name string + commitObservations []ccip.CommitObservation + f int + expIntervals []cciptypes.CommitStoreInterval + expGasPriceObs map[uint64][]*big.Int + expTokenPriceObs map[cciptypes.Address][]*big.Int + expError bool + }{ + { + name: "base", + commitObservations: []ccip.CommitObservation{newObs, newObs, newObs}, + f: 2, + expIntervals: []cciptypes.CommitStoreInterval{validInterval, validInterval, validInterval}, + expGasPriceObs: map[uint64][]*big.Int{ + sourceChainSelector1: {gasPrice1, gasPrice1, gasPrice1}, + sourceChainSelector2: {gasPrice2, gasPrice2, gasPrice2}, + }, + expTokenPriceObs: map[cciptypes.Address][]*big.Int{ + token1: {token1Price, token1Price, token1Price}, + token2: {token2Price, token2Price, token2Price}, + }, + expError: false, + }, + { + name: "pass with f=2 and mixed observations", + commitObservations: []ccip.CommitObservation{legacyObs, newObs, legacyObs, newObs, newObs, obWithNilGasPrice}, + f: 2, + expIntervals: 
[]cciptypes.CommitStoreInterval{validInterval, validInterval, validInterval, validInterval, validInterval, zeroInterval}, + expGasPriceObs: map[uint64][]*big.Int{ + sourceChainSelector1: {gasPrice1, gasPrice1, gasPrice1, gasPrice1, gasPrice1}, + sourceChainSelector2: {gasPrice2, gasPrice2, gasPrice2}, + }, + expTokenPriceObs: map[cciptypes.Address][]*big.Int{ + token1: {token1Price, token1Price, token1Price, token1Price, token1Price, token1Price}, + token2: {token2Price, token2Price, token2Price, token2Price, token2Price, token2Price}, + }, + expError: false, + }, + { + name: "pass with f=2 and mixed observations with mostly legacy observations", + commitObservations: []ccip.CommitObservation{legacyObs, legacyObs, legacyObs, legacyObs, newObs}, + f: 2, + expIntervals: []cciptypes.CommitStoreInterval{validInterval, validInterval, validInterval, validInterval, validInterval}, + expGasPriceObs: map[uint64][]*big.Int{ + sourceChainSelector1: {gasPrice1, gasPrice1, gasPrice1, gasPrice1, gasPrice1}, + }, + expTokenPriceObs: map[cciptypes.Address][]*big.Int{ + token1: {token1Price, token1Price, token1Price, token1Price, token1Price}, + token2: {token2Price, token2Price, token2Price, token2Price, token2Price}, + }, + expError: false, + }, + { + name: "tolerate 1 faulty obs with f=2", + commitObservations: []ccip.CommitObservation{legacyObs, newObs, legacyObs, obWithNilGasPrice}, + f: 2, + expIntervals: []cciptypes.CommitStoreInterval{validInterval, validInterval, validInterval, zeroInterval}, + expGasPriceObs: map[uint64][]*big.Int{ + sourceChainSelector1: {gasPrice1, gasPrice1, gasPrice1}, + }, + expTokenPriceObs: map[cciptypes.Address][]*big.Int{ + token1: {token1Price, token1Price, token1Price, token1Price}, + token2: {token2Price, token2Price, token2Price, token2Price}, + }, + expError: false, + }, + { + name: "tolerate 1 nil token price with f=1", + commitObservations: []ccip.CommitObservation{legacyObs, newObs, obWithNilTokenPrice}, + f: 1, + expIntervals: []cciptypes.CommitStoreInterval{validInterval, validInterval, zeroInterval}, + expGasPriceObs: map[uint64][]*big.Int{ + sourceChainSelector1: {gasPrice1, gasPrice1, gasPrice1}, + sourceChainSelector2: {gasPrice2, gasPrice2}, + }, + expTokenPriceObs: map[cciptypes.Address][]*big.Int{ + token1: {token1Price, token1Price, token1Price}, + token2: {token2Price, token2Price}, + }, + expError: false, + }, + { + name: "tolerate 1 missing token prices with f=1", + commitObservations: []ccip.CommitObservation{legacyObs, newObs, obMissingTokenPrices}, + f: 1, + expIntervals: []cciptypes.CommitStoreInterval{validInterval, validInterval, zeroInterval}, + expGasPriceObs: map[uint64][]*big.Int{ + sourceChainSelector1: {gasPrice1, gasPrice1, gasPrice1}, + sourceChainSelector2: {gasPrice2, gasPrice2}, + }, + expTokenPriceObs: map[cciptypes.Address][]*big.Int{ + token1: {token1Price, token1Price}, + token2: {token2Price, token2Price}, + }, + expError: false, + }, + { + name: "tolerate 1 unsupported token with f=2", + commitObservations: []ccip.CommitObservation{legacyObs, newObs, obWithUnsupportedToken}, + f: 2, + expIntervals: []cciptypes.CommitStoreInterval{validInterval, validInterval, zeroInterval}, + expGasPriceObs: map[uint64][]*big.Int{ + sourceChainSelector1: {gasPrice1, gasPrice1, gasPrice1}, + }, + expTokenPriceObs: map[cciptypes.Address][]*big.Int{ + token1: {token1Price, token1Price, token1Price}, + token2: {token2Price, token2Price, token2Price}, + }, + expError: false, + }, + { + name: "tolerate mis-matched token observations with f=2", + 
commitObservations: []ccip.CommitObservation{legacyObs, newObs, obWithNilTokenPrice, obMissingTokenPrices}, + f: 2, + expIntervals: []cciptypes.CommitStoreInterval{validInterval, validInterval, zeroInterval, zeroInterval}, + expGasPriceObs: map[uint64][]*big.Int{ + sourceChainSelector1: {gasPrice1, gasPrice1, gasPrice1, gasPrice1}, + sourceChainSelector2: {gasPrice2, gasPrice2, gasPrice2}, + }, + expTokenPriceObs: map[cciptypes.Address][]*big.Int{ + token1: {token1Price, token1Price, token1Price}, + }, + expError: false, + }, + { + name: "tolerate all tokens filtered out with f=2", + commitObservations: []ccip.CommitObservation{newObs, obMissingTokenPrices, obMissingTokenPrices}, + f: 2, + expIntervals: []cciptypes.CommitStoreInterval{validInterval, zeroInterval, zeroInterval}, + expGasPriceObs: map[uint64][]*big.Int{ + sourceChainSelector1: {gasPrice1, gasPrice1, gasPrice1}, + sourceChainSelector2: {gasPrice2, gasPrice2, gasPrice2}, + }, + expTokenPriceObs: map[cciptypes.Address][]*big.Int{}, + expError: false, + }, + { + name: "not enough observations", + commitObservations: []ccip.CommitObservation{legacyObs, newObs}, + f: 2, + expError: true, + }, + { + name: "too many empty observations", + commitObservations: []ccip.CommitObservation{obWithNilGasPrice, obWithNilTokenPrice, obEmpty, obEmpty, obEmpty}, + f: 2, + expIntervals: []cciptypes.CommitStoreInterval{zeroInterval, zeroInterval, zeroInterval, zeroInterval, zeroInterval}, + expGasPriceObs: map[uint64][]*big.Int{}, + expTokenPriceObs: map[cciptypes.Address][]*big.Int{}, + expError: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + intervals, gasPriceOps, tokenPriceOps, err := extractObservationData(logger.TestLogger(t), tc.f, sourceChainSelector1, tc.commitObservations) + + if tc.expError { + assert.Error(t, err) + return + } + assert.Equal(t, tc.expIntervals, intervals) + assert.Equal(t, tc.expGasPriceObs, gasPriceOps) + assert.Equal(t, tc.expTokenPriceObs, tokenPriceOps) + assert.NoError(t, err) + }) + } +} + +func TestCommitReportingPlugin_calculatePriceUpdates(t *testing.T) { + const defaultSourceChainSelector = 10 // we reuse this value across all test cases + feeToken1 := ccipcalc.HexToAddress("0xa") + feeToken2 := ccipcalc.HexToAddress("0xb") + + val1e18 := func(val int64) *big.Int { return new(big.Int).Mul(big.NewInt(1e18), big.NewInt(val)) } + + testCases := []struct { + name string + commitObservations []ccip.CommitObservation + f int + latestGasPrice map[uint64]update + latestTokenPrices map[cciptypes.Address]update + gasPriceHeartBeat config.Duration + daGasPriceDeviationPPB int64 + execGasPriceDeviationPPB int64 + tokenPriceHeartBeat config.Duration + tokenPriceDeviationPPB uint32 + expTokenUpdates []cciptypes.TokenPrice + expGasUpdates []cciptypes.GasPrice + }{ + { + name: "median", + commitObservations: []ccip.CommitObservation{ + {SourceGasPriceUSDPerChain: map[uint64]*big.Int{defaultSourceChainSelector: big.NewInt(1)}}, + {SourceGasPriceUSDPerChain: map[uint64]*big.Int{defaultSourceChainSelector: big.NewInt(2)}}, + {SourceGasPriceUSDPerChain: map[uint64]*big.Int{defaultSourceChainSelector: big.NewInt(3)}}, + {SourceGasPriceUSDPerChain: map[uint64]*big.Int{defaultSourceChainSelector: big.NewInt(4)}}, + }, + latestGasPrice: map[uint64]update{ + defaultSourceChainSelector: { + timestamp: time.Now().Add(-30 * time.Minute), // recent + value: val1e18(9), // median deviates + }, + }, + f: 2, + expGasUpdates: []cciptypes.GasPrice{{DestChainSelector: defaultSourceChainSelector, 
Value: big.NewInt(3)}}, + }, + { + name: "gas price update skipped because the latest is similar and was updated recently", + commitObservations: []ccip.CommitObservation{ + {SourceGasPriceUSDPerChain: map[uint64]*big.Int{defaultSourceChainSelector: val1e18(11)}}, + {SourceGasPriceUSDPerChain: map[uint64]*big.Int{defaultSourceChainSelector: val1e18(12)}}, + }, + gasPriceHeartBeat: *config.MustNewDuration(time.Hour), + daGasPriceDeviationPPB: 20e7, + execGasPriceDeviationPPB: 20e7, + tokenPriceHeartBeat: *config.MustNewDuration(time.Hour), + tokenPriceDeviationPPB: 20e7, + latestGasPrice: map[uint64]update{ + defaultSourceChainSelector: { + timestamp: time.Now().Add(-30 * time.Minute), // recent + value: val1e18(10), // median deviates + }, + }, + f: 1, + expGasUpdates: nil, + }, + { + name: "gas price update included, the latest is similar but was not updated recently", + commitObservations: []ccip.CommitObservation{ + {SourceGasPriceUSDPerChain: map[uint64]*big.Int{defaultSourceChainSelector: val1e18(10)}}, + {SourceGasPriceUSDPerChain: map[uint64]*big.Int{defaultSourceChainSelector: val1e18(11)}}, + }, + gasPriceHeartBeat: *config.MustNewDuration(time.Hour), + daGasPriceDeviationPPB: 20e7, + execGasPriceDeviationPPB: 20e7, + tokenPriceHeartBeat: *config.MustNewDuration(time.Hour), + tokenPriceDeviationPPB: 20e7, + latestGasPrice: map[uint64]update{ + defaultSourceChainSelector: { + timestamp: time.Now().Add(-90 * time.Minute), // stale + value: val1e18(9), // median deviates + }, + }, + f: 1, + expGasUpdates: []cciptypes.GasPrice{{DestChainSelector: defaultSourceChainSelector, Value: val1e18(11)}}, + }, + { + name: "gas price update deviates from latest", + commitObservations: []ccip.CommitObservation{ + {SourceGasPriceUSDPerChain: map[uint64]*big.Int{defaultSourceChainSelector: val1e18(10)}}, + {SourceGasPriceUSDPerChain: map[uint64]*big.Int{defaultSourceChainSelector: val1e18(20)}}, + {SourceGasPriceUSDPerChain: map[uint64]*big.Int{defaultSourceChainSelector: val1e18(20)}}, + }, + gasPriceHeartBeat: *config.MustNewDuration(time.Hour), + daGasPriceDeviationPPB: 20e7, + execGasPriceDeviationPPB: 20e7, + tokenPriceHeartBeat: *config.MustNewDuration(time.Hour), + tokenPriceDeviationPPB: 20e7, + latestGasPrice: map[uint64]update{ + defaultSourceChainSelector: { + timestamp: time.Now().Add(-30 * time.Minute), // recent + value: val1e18(11), // latest value close to the update + }, + }, + f: 2, + expGasUpdates: []cciptypes.GasPrice{{DestChainSelector: defaultSourceChainSelector, Value: val1e18(20)}}, + }, + { + name: "multichain gas prices", + commitObservations: []ccip.CommitObservation{ + {SourceGasPriceUSDPerChain: map[uint64]*big.Int{defaultSourceChainSelector: val1e18(1)}}, + {SourceGasPriceUSDPerChain: map[uint64]*big.Int{defaultSourceChainSelector + 1: val1e18(11)}}, + {SourceGasPriceUSDPerChain: map[uint64]*big.Int{defaultSourceChainSelector + 2: val1e18(111)}}, + {SourceGasPriceUSDPerChain: map[uint64]*big.Int{defaultSourceChainSelector: val1e18(2)}}, + {SourceGasPriceUSDPerChain: map[uint64]*big.Int{defaultSourceChainSelector + 1: val1e18(22)}}, + {SourceGasPriceUSDPerChain: map[uint64]*big.Int{defaultSourceChainSelector + 2: val1e18(222)}}, + {SourceGasPriceUSDPerChain: map[uint64]*big.Int{defaultSourceChainSelector: val1e18(3)}}, + {SourceGasPriceUSDPerChain: map[uint64]*big.Int{defaultSourceChainSelector + 1: val1e18(33)}}, + {SourceGasPriceUSDPerChain: map[uint64]*big.Int{defaultSourceChainSelector + 2: val1e18(333)}}, + }, + gasPriceHeartBeat: 
*config.MustNewDuration(time.Hour), + daGasPriceDeviationPPB: 20e7, + execGasPriceDeviationPPB: 20e7, + tokenPriceHeartBeat: *config.MustNewDuration(time.Hour), + tokenPriceDeviationPPB: 20e7, + latestGasPrice: map[uint64]update{ + defaultSourceChainSelector: { + timestamp: time.Now().Add(-90 * time.Minute), // stale + value: val1e18(9), // median deviates + }, + defaultSourceChainSelector + 1: { + timestamp: time.Now().Add(-30 * time.Minute), // recent + value: val1e18(20), // median does not deviate + }, + }, + f: 1, + expGasUpdates: []cciptypes.GasPrice{ + {DestChainSelector: defaultSourceChainSelector, Value: val1e18(2)}, + {DestChainSelector: defaultSourceChainSelector + 2, Value: val1e18(222)}, + }, + }, + { + name: "median one token", + commitObservations: []ccip.CommitObservation{ + { + TokenPricesUSD: map[cciptypes.Address]*big.Int{feeToken1: big.NewInt(10)}, + SourceGasPriceUSDPerChain: map[uint64]*big.Int{defaultSourceChainSelector: val1e18(0)}, + }, + { + TokenPricesUSD: map[cciptypes.Address]*big.Int{feeToken1: big.NewInt(12)}, + SourceGasPriceUSDPerChain: map[uint64]*big.Int{defaultSourceChainSelector: val1e18(0)}, + }, + }, + f: 1, + expTokenUpdates: []cciptypes.TokenPrice{ + {Token: feeToken1, Value: big.NewInt(12)}, + }, + // We expect a gas update because no latest + expGasUpdates: []cciptypes.GasPrice{{DestChainSelector: defaultSourceChainSelector, Value: big.NewInt(0)}}, + }, + { + name: "median two tokens", + commitObservations: []ccip.CommitObservation{ + { + TokenPricesUSD: map[cciptypes.Address]*big.Int{feeToken1: big.NewInt(10), feeToken2: big.NewInt(13)}, + SourceGasPriceUSDPerChain: map[uint64]*big.Int{defaultSourceChainSelector: val1e18(0)}, + }, + { + TokenPricesUSD: map[cciptypes.Address]*big.Int{feeToken1: big.NewInt(12), feeToken2: big.NewInt(7)}, + SourceGasPriceUSDPerChain: map[uint64]*big.Int{defaultSourceChainSelector: val1e18(0)}, + }, + }, + f: 1, + expTokenUpdates: []cciptypes.TokenPrice{ + {Token: feeToken1, Value: big.NewInt(12)}, + {Token: feeToken2, Value: big.NewInt(13)}, + }, + // We expect a gas update because no latest + expGasUpdates: []cciptypes.GasPrice{{DestChainSelector: defaultSourceChainSelector, Value: big.NewInt(0)}}, + }, + { + name: "token price update skipped because it is close to the latest", + commitObservations: []ccip.CommitObservation{ + { + TokenPricesUSD: map[cciptypes.Address]*big.Int{feeToken1: val1e18(11)}, + SourceGasPriceUSDPerChain: map[uint64]*big.Int{defaultSourceChainSelector: val1e18(0)}, + }, + { + TokenPricesUSD: map[cciptypes.Address]*big.Int{feeToken1: val1e18(12)}, + SourceGasPriceUSDPerChain: map[uint64]*big.Int{defaultSourceChainSelector: val1e18(0)}, + }, + }, + f: 1, + gasPriceHeartBeat: *config.MustNewDuration(time.Hour), + daGasPriceDeviationPPB: 20e7, + execGasPriceDeviationPPB: 20e7, + tokenPriceHeartBeat: *config.MustNewDuration(time.Hour), + tokenPriceDeviationPPB: 20e7, + latestTokenPrices: map[cciptypes.Address]update{ + feeToken1: { + timestamp: time.Now().Add(-30 * time.Minute), + value: val1e18(10), + }, + }, + // We expect a gas update because no latest + expGasUpdates: []cciptypes.GasPrice{{DestChainSelector: defaultSourceChainSelector, Value: big.NewInt(0)}}, + }, + { + name: "gas price and token price both included because they are not close to the latest", + commitObservations: []ccip.CommitObservation{ + { + TokenPricesUSD: map[cciptypes.Address]*big.Int{feeToken1: val1e18(20)}, + SourceGasPriceUSDPerChain: map[uint64]*big.Int{ + defaultSourceChainSelector: val1e18(10), + 
defaultSourceChainSelector + 1: val1e18(20), + }, + }, + { + TokenPricesUSD: map[cciptypes.Address]*big.Int{feeToken1: val1e18(21)}, + SourceGasPriceUSDPerChain: map[uint64]*big.Int{ + defaultSourceChainSelector: val1e18(11), + defaultSourceChainSelector + 1: val1e18(21), + }, + }, + }, + f: 1, + gasPriceHeartBeat: *config.MustNewDuration(time.Hour), + daGasPriceDeviationPPB: 10e7, + execGasPriceDeviationPPB: 10e7, + tokenPriceHeartBeat: *config.MustNewDuration(time.Hour), + tokenPriceDeviationPPB: 20e7, + latestGasPrice: map[uint64]update{ + defaultSourceChainSelector: { + timestamp: time.Now().Add(-30 * time.Minute), + value: val1e18(9), + }, + defaultSourceChainSelector + 1: { + timestamp: time.Now().Add(-30 * time.Minute), + value: val1e18(9), + }, + }, + latestTokenPrices: map[cciptypes.Address]update{ + feeToken1: { + timestamp: time.Now().Add(-30 * time.Minute), + value: val1e18(9), + }, + }, + expTokenUpdates: []cciptypes.TokenPrice{ + {Token: feeToken1, Value: val1e18(21)}, + }, + expGasUpdates: []cciptypes.GasPrice{ + {DestChainSelector: defaultSourceChainSelector, Value: val1e18(11)}, + {DestChainSelector: defaultSourceChainSelector + 1, Value: val1e18(21)}, + }, + }, + { + name: "gas price and token price both included because they not been updated recently", + commitObservations: []ccip.CommitObservation{ + { + TokenPricesUSD: map[cciptypes.Address]*big.Int{feeToken1: val1e18(20)}, + SourceGasPriceUSDPerChain: map[uint64]*big.Int{ + defaultSourceChainSelector: val1e18(10), + defaultSourceChainSelector + 1: val1e18(20), + }, + }, + { + TokenPricesUSD: map[cciptypes.Address]*big.Int{feeToken1: val1e18(21)}, + SourceGasPriceUSDPerChain: map[uint64]*big.Int{ + defaultSourceChainSelector: val1e18(11), + defaultSourceChainSelector + 1: val1e18(21), + }, + }, + }, + f: 1, + gasPriceHeartBeat: *config.MustNewDuration(time.Hour), + daGasPriceDeviationPPB: 10e7, + execGasPriceDeviationPPB: 10e7, + tokenPriceHeartBeat: *config.MustNewDuration(2 * time.Hour), + tokenPriceDeviationPPB: 20e7, + latestGasPrice: map[uint64]update{ + defaultSourceChainSelector: { + timestamp: time.Now().Add(-90 * time.Minute), + value: val1e18(11), + }, + defaultSourceChainSelector + 1: { + timestamp: time.Now().Add(-90 * time.Minute), + value: val1e18(21), + }, + }, + latestTokenPrices: map[cciptypes.Address]update{ + feeToken1: { + timestamp: time.Now().Add(-4 * time.Hour), + value: val1e18(21), + }, + }, + expTokenUpdates: []cciptypes.TokenPrice{ + {Token: feeToken1, Value: val1e18(21)}, + }, + expGasUpdates: []cciptypes.GasPrice{ + {DestChainSelector: defaultSourceChainSelector, Value: val1e18(11)}, + {DestChainSelector: defaultSourceChainSelector + 1, Value: val1e18(21)}, + }, + }, + { + name: "gas price included because it deviates from latest and token price skipped because it does not deviate", + commitObservations: []ccip.CommitObservation{ + { + TokenPricesUSD: map[cciptypes.Address]*big.Int{feeToken1: val1e18(20)}, + SourceGasPriceUSDPerChain: map[uint64]*big.Int{defaultSourceChainSelector: val1e18(10)}, + }, + { + TokenPricesUSD: map[cciptypes.Address]*big.Int{feeToken1: val1e18(21)}, + SourceGasPriceUSDPerChain: map[uint64]*big.Int{defaultSourceChainSelector: val1e18(11)}, + }, + }, + f: 1, + gasPriceHeartBeat: *config.MustNewDuration(time.Hour), + daGasPriceDeviationPPB: 10e7, + execGasPriceDeviationPPB: 10e7, + tokenPriceHeartBeat: *config.MustNewDuration(2 * time.Hour), + tokenPriceDeviationPPB: 200e7, + latestGasPrice: map[uint64]update{ + defaultSourceChainSelector: { + timestamp: 
time.Now().Add(-90 * time.Minute), + value: val1e18(9), + }, + }, + latestTokenPrices: map[cciptypes.Address]update{ + feeToken1: { + timestamp: time.Now().Add(-30 * time.Minute), + value: val1e18(9), + }, + }, + expGasUpdates: []cciptypes.GasPrice{{DestChainSelector: defaultSourceChainSelector, Value: val1e18(11)}}, + }, + { + name: "gas price skipped because it does not deviate and token price included because it has not been updated recently", + commitObservations: []ccip.CommitObservation{ + { + TokenPricesUSD: map[cciptypes.Address]*big.Int{feeToken1: val1e18(20)}, + SourceGasPriceUSDPerChain: map[uint64]*big.Int{defaultSourceChainSelector: val1e18(10)}, + }, + { + TokenPricesUSD: map[cciptypes.Address]*big.Int{feeToken1: val1e18(21)}, + SourceGasPriceUSDPerChain: map[uint64]*big.Int{defaultSourceChainSelector: val1e18(11)}, + }, + }, + f: 1, + gasPriceHeartBeat: *config.MustNewDuration(time.Hour), + daGasPriceDeviationPPB: 10e7, + execGasPriceDeviationPPB: 10e7, + tokenPriceHeartBeat: *config.MustNewDuration(2 * time.Hour), + tokenPriceDeviationPPB: 20e7, + latestGasPrice: map[uint64]update{ + defaultSourceChainSelector: { + timestamp: time.Now().Add(-30 * time.Minute), + value: val1e18(11), + }, + }, + latestTokenPrices: map[cciptypes.Address]update{ + feeToken1: { + timestamp: time.Now().Add(-4 * time.Hour), + value: val1e18(21), + }, + }, + expTokenUpdates: []cciptypes.TokenPrice{ + {Token: feeToken1, Value: val1e18(21)}, + }, + expGasUpdates: nil, + }, + } + + evmEstimator := mocks.NewEvmFeeEstimator(t) + evmEstimator.On("L1Oracle").Return(nil) + estimatorCSVer, _ := semver.NewVersion("1.2.0") + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + estimator, _ := prices.NewGasPriceEstimatorForCommitPlugin( + *estimatorCSVer, + evmEstimator, + nil, + tc.daGasPriceDeviationPPB, + tc.execGasPriceDeviationPPB, + ) + + r := &CommitReportingPlugin{ + lggr: logger.TestLogger(t), + sourceChainSelector: defaultSourceChainSelector, + offchainConfig: cciptypes.CommitOffchainConfig{ + GasPriceHeartBeat: tc.gasPriceHeartBeat.Duration(), + TokenPriceHeartBeat: tc.tokenPriceHeartBeat.Duration(), + TokenPriceDeviationPPB: tc.tokenPriceDeviationPPB, + }, + gasPriceEstimator: estimator, + F: tc.f, + } + + gasPriceObs := make(map[uint64][]*big.Int) + tokenPriceObs := make(map[cciptypes.Address][]*big.Int) + for _, obs := range tc.commitObservations { + for selector, price := range obs.SourceGasPriceUSDPerChain { + gasPriceObs[selector] = append(gasPriceObs[selector], price) + } + for token, price := range obs.TokenPricesUSD { + tokenPriceObs[token] = append(tokenPriceObs[token], price) + } + } + + gotGas, gotTokens, err := r.calculatePriceUpdates(gasPriceObs, tokenPriceObs, tc.latestGasPrice, tc.latestTokenPrices) + + assert.Equal(t, tc.expGasUpdates, gotGas) + assert.Equal(t, tc.expTokenUpdates, gotTokens) + assert.NoError(t, err) + }) + } +} + +func TestCommitReportingPlugin_isStaleReport(t *testing.T) { + ctx := context.Background() + lggr := logger.TestLogger(t) + merkleRoot1 := utils.Keccak256Fixed([]byte("some merkle root 1")) + + t.Run("empty report", func(t *testing.T) { + commitStoreReader := ccipdatamocks.NewCommitStoreReader(t) + r := &CommitReportingPlugin{commitStoreReader: commitStoreReader} + isStale := r.isStaleReport(ctx, lggr, cciptypes.CommitStoreReport{}, types.ReportTimestamp{}) + assert.True(t, isStale) + }) + + t.Run("merkle root", func(t *testing.T) { + const expNextSeqNum = uint64(9) + commitStoreReader := ccipdatamocks.NewCommitStoreReader(t) + 
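+ // A report carrying a merkle root is treated as fresh only when its interval starts exactly
+ // at the expected next sequence number; the cases below cover equal, greater, smaller and empty intervals.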
commitStoreReader.On("GetExpectedNextSequenceNumber", mock.Anything).Return(expNextSeqNum, nil) + + r := &CommitReportingPlugin{ + commitStoreReader: commitStoreReader, + } + + testCases := map[string]struct { + interval cciptypes.CommitStoreInterval + result bool + }{ + "The nextSeqNumber is equal to the commit store interval Min value": { + interval: cciptypes.CommitStoreInterval{Min: expNextSeqNum, Max: expNextSeqNum + 10}, + result: false, + }, + "The nextSeqNumber is less than the commit store interval Min value": { + interval: cciptypes.CommitStoreInterval{Min: expNextSeqNum + 1, Max: expNextSeqNum + 10}, + result: true, + }, + "The nextSeqNumber is greater than the commit store interval Min value": { + interval: cciptypes.CommitStoreInterval{Min: expNextSeqNum - 1, Max: expNextSeqNum + 10}, + result: true, + }, + "Empty interval": { + interval: cciptypes.CommitStoreInterval{}, + result: true, + }, + } + + for tcName, tc := range testCases { + t.Run(tcName, func(t *testing.T) { + assert.Equal(t, tc.result, r.isStaleReport(ctx, lggr, cciptypes.CommitStoreReport{ + MerkleRoot: merkleRoot1, + Interval: tc.interval, + }, types.ReportTimestamp{})) + }) + } + }) +} + +func TestCommitReportingPlugin_calculateMinMaxSequenceNumbers(t *testing.T) { + testCases := []struct { + name string + commitStoreSeqNum uint64 + msgSeqNums []uint64 + + expQueryMin uint64 // starting seq num that is used in the query to get messages + expMin uint64 + expMax uint64 + expErr bool + }{ + { + name: "happy flow", + commitStoreSeqNum: 9, + msgSeqNums: []uint64{11, 12, 13, 14}, + expQueryMin: 9, + expMin: 11, + expMax: 14, + expErr: false, + }, + { + name: "happy flow 2", + commitStoreSeqNum: 9, + msgSeqNums: []uint64{11, 12, 13, 14}, + expQueryMin: 9, // from commit store + expMin: 11, + expMax: 14, + expErr: false, + }, + { + name: "gap in msg seq nums", + commitStoreSeqNum: 10, + expQueryMin: 10, + msgSeqNums: []uint64{11, 12, 14}, + expErr: true, + }, + { + name: "no new messages", + commitStoreSeqNum: 9, + msgSeqNums: []uint64{}, + expQueryMin: 9, + expMin: 0, + expMax: 0, + expErr: false, + }, + { + name: "unordered seq nums", + commitStoreSeqNum: 9, + msgSeqNums: []uint64{11, 13, 14, 10}, + expQueryMin: 9, + expErr: true, + }, + } + + ctx := testutils.Context(t) + lggr := logger.TestLogger(t) + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + p := &CommitReportingPlugin{} + commitStoreReader := ccipdatamocks.NewCommitStoreReader(t) + commitStoreReader.On("GetExpectedNextSequenceNumber", mock.Anything).Return(tc.commitStoreSeqNum, nil) + p.commitStoreReader = commitStoreReader + + onRampReader := ccipdatamocks.NewOnRampReader(t) + var sendReqs []cciptypes.EVM2EVMMessageWithTxMeta + for _, seqNum := range tc.msgSeqNums { + sendReqs = append(sendReqs, cciptypes.EVM2EVMMessageWithTxMeta{ + EVM2EVMMessage: cciptypes.EVM2EVMMessage{ + SequenceNumber: seqNum, + }, + }) + } + onRampReader.On("GetSendRequestsBetweenSeqNums", ctx, tc.expQueryMin, tc.expQueryMin+OnRampMessagesScanLimit, true).Return(sendReqs, nil) + p.onRampReader = onRampReader + + minSeqNum, maxSeqNum, _, err := p.calculateMinMaxSequenceNumbers(ctx, lggr) + if tc.expErr { + assert.Error(t, err) + return + } + + assert.Equal(t, tc.expMin, minSeqNum) + assert.Equal(t, tc.expMax, maxSeqNum) + }) + } +} + +func TestCommitReportingPlugin_getLatestGasPriceUpdate(t *testing.T) { + now := time.Now() + chainSelector1 := uint64(1234) + chainSelector2 := uint64(5678) + + chain1Value := big.NewInt(1000) + chain2Value := big.NewInt(2000) 
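+ // Each case feeds a series of gas price updates into the mocked price registry and expects
+ // only the most recent update per chain selector to be returned (timestamps compared at second precision).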
+ + testCases := []struct { + name string + priceRegistryUpdates []cciptypes.GasPriceUpdate + expUpdates map[uint64]update + expErr bool + }{ + { + name: "happy path", + priceRegistryUpdates: []cciptypes.GasPriceUpdate{ + { + GasPrice: cciptypes.GasPrice{DestChainSelector: chainSelector1, Value: chain1Value}, + TimestampUnixSec: big.NewInt(now.Unix()), + }, + }, + expUpdates: map[uint64]update{chainSelector1: {timestamp: now, value: chain1Value}}, + expErr: false, + }, + { + name: "happy path multiple updates", + priceRegistryUpdates: []cciptypes.GasPriceUpdate{ + { + GasPrice: cciptypes.GasPrice{DestChainSelector: chainSelector1, Value: big.NewInt(1)}, + TimestampUnixSec: big.NewInt(now.Unix()), + }, + { + GasPrice: cciptypes.GasPrice{DestChainSelector: chainSelector2, Value: big.NewInt(1)}, + TimestampUnixSec: big.NewInt(now.Add(1 * time.Minute).Unix()), + }, + { + GasPrice: cciptypes.GasPrice{DestChainSelector: chainSelector2, Value: chain2Value}, + TimestampUnixSec: big.NewInt(now.Add(2 * time.Minute).Unix()), + }, + { + GasPrice: cciptypes.GasPrice{DestChainSelector: chainSelector1, Value: chain1Value}, + TimestampUnixSec: big.NewInt(now.Add(3 * time.Minute).Unix()), + }, + }, + expUpdates: map[uint64]update{ + chainSelector1: {timestamp: now.Add(3 * time.Minute), value: chain1Value}, + chainSelector2: {timestamp: now.Add(2 * time.Minute), value: chain2Value}, + }, + expErr: false, + }, + } + + ctx := testutils.Context(t) + lggr := logger.TestLogger(t) + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + p := &CommitReportingPlugin{} + p.lggr = lggr + priceReg := ccipdatamocks.NewPriceRegistryReader(t) + p.destPriceRegistryReader = priceReg + + var events []cciptypes.GasPriceUpdateWithTxMeta + for _, update := range tc.priceRegistryUpdates { + events = append(events, cciptypes.GasPriceUpdateWithTxMeta{ + GasPriceUpdate: update, + }) + } + + priceReg.On("GetAllGasPriceUpdatesCreatedAfter", ctx, mock.Anything, 0).Return(events, nil) + + gotUpdates, err := p.getLatestGasPriceUpdate(ctx, now) + if tc.expErr { + assert.Error(t, err) + return + } + assert.NoError(t, err) + assert.Equal(t, len(tc.expUpdates), len(gotUpdates)) + for selector, gotUpdate := range gotUpdates { + assert.Equal(t, tc.expUpdates[selector].timestamp.Truncate(time.Second), gotUpdate.timestamp.Truncate(time.Second)) + assert.Equal(t, tc.expUpdates[selector].value.Uint64(), gotUpdate.value.Uint64()) + } + }) + } +} + +func TestCommitReportingPlugin_getLatestTokenPriceUpdates(t *testing.T) { + now := time.Now() + tk1 := cciptypes.Address(utils.RandomAddress().String()) + tk2 := cciptypes.Address(utils.RandomAddress().String()) + + testCases := []struct { + name string + priceRegistryUpdates []cciptypes.TokenPriceUpdate + expUpdates map[cciptypes.Address]update + expErr bool + }{ + { + name: "happy path", + priceRegistryUpdates: []cciptypes.TokenPriceUpdate{ + { + TokenPrice: cciptypes.TokenPrice{ + Token: tk1, + Value: big.NewInt(1000), + }, + TimestampUnixSec: big.NewInt(now.Add(1 * time.Minute).Unix()), + }, + { + TokenPrice: cciptypes.TokenPrice{ + Token: tk2, + Value: big.NewInt(2000), + }, + TimestampUnixSec: big.NewInt(now.Add(2 * time.Minute).Unix()), + }, + }, + expUpdates: map[cciptypes.Address]update{ + tk1: {timestamp: now.Add(1 * time.Minute), value: big.NewInt(1000)}, + tk2: {timestamp: now.Add(2 * time.Minute), value: big.NewInt(2000)}, + }, + expErr: false, + }, + } + + ctx := testutils.Context(t) + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + p := 
&CommitReportingPlugin{} + + priceReg := ccipdatamocks.NewPriceRegistryReader(t) + p.destPriceRegistryReader = priceReg + + var events []cciptypes.TokenPriceUpdateWithTxMeta + for _, up := range tc.priceRegistryUpdates { + events = append(events, cciptypes.TokenPriceUpdateWithTxMeta{ + TokenPriceUpdate: up, + }) + } + + priceReg.On("GetTokenPriceUpdatesCreatedAfter", ctx, mock.Anything, 0).Return(events, nil) + + updates, err := p.getLatestTokenPriceUpdates(ctx, now) + if tc.expErr { + assert.Error(t, err) + return + } + assert.NoError(t, err) + assert.Equal(t, len(tc.expUpdates), len(updates)) + for k, v := range updates { + assert.Equal(t, tc.expUpdates[k].timestamp.Truncate(time.Second), v.timestamp.Truncate(time.Second)) + assert.Equal(t, tc.expUpdates[k].value.Uint64(), v.value.Uint64()) + } + }) + } +} + +func Test_commitReportSize(t *testing.T) { + testParams := gopter.DefaultTestParameters() + testParams.MinSuccessfulTests = 100 + p := gopter.NewProperties(testParams) + p.Property("bounded commit report size", prop.ForAll(func(root []byte, min, max uint64) bool { + var root32 [32]byte + copy(root32[:], root) + rep, err := encodeCommitReport(cciptypes.CommitStoreReport{ + MerkleRoot: root32, + Interval: cciptypes.CommitStoreInterval{Min: min, Max: max}, + TokenPrices: []cciptypes.TokenPrice{}, + GasPrices: []cciptypes.GasPrice{ + { + DestChainSelector: 1337, + Value: big.NewInt(2000e9), // $2000 per eth * 1gwei = 2000e9 + }, + }, + }) + require.NoError(t, err) + return len(rep) <= MaxCommitReportLength + }, gen.SliceOfN(32, gen.UInt8()), gen.UInt64(), gen.UInt64())) + p.TestingRun(t) +} + +func Test_calculateIntervalConsensus(t *testing.T) { + tests := []struct { + name string + intervals []cciptypes.CommitStoreInterval + rangeLimit uint64 + f int + wantMin uint64 + wantMax uint64 + wantErr bool + }{ + {"no obs", []cciptypes.CommitStoreInterval{{Min: 0, Max: 0}}, 0, 0, 0, 0, false}, + {"basic", []cciptypes.CommitStoreInterval{ + {Min: 9, Max: 14}, + {Min: 10, Max: 12}, + {Min: 10, Max: 14}, + }, 0, 1, 10, 14, false}, + {"min > max", []cciptypes.CommitStoreInterval{ + {Min: 9, Max: 4}, + {Min: 10, Max: 4}, + {Min: 10, Max: 6}, + }, 0, 1, 0, 0, true}, + { + "range limit", []cciptypes.CommitStoreInterval{ + {Min: 10, Max: 100}, + {Min: 1, Max: 1000}, + }, 256, 1, 10, 265, false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := calculateIntervalConsensus(tt.intervals, tt.f, tt.rangeLimit) + if tt.wantErr { + require.Error(t, err) + } else { + require.NoError(t, err) + } + assert.Equal(t, tt.wantMin, got.Min) + assert.Equal(t, tt.wantMax, got.Max) + }) + } +} + +func TestCommitReportToEthTxMeta(t *testing.T) { + mctx := hashutil.NewKeccak() + tree, err := merklemulti.NewTree(mctx, [][32]byte{mctx.Hash([]byte{0xaa})}) + require.NoError(t, err) + + tests := []struct { + name string + min, max uint64 + expectedRange []uint64 + }{ + { + "happy flow", + 1, 10, + []uint64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, + }, + { + "same sequence", + 1, 1, + []uint64{1}, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + report := cciptypes.CommitStoreReport{ + TokenPrices: []cciptypes.TokenPrice{}, + GasPrices: []cciptypes.GasPrice{ + { + DestChainSelector: uint64(1337), + Value: big.NewInt(2000e9), // $2000 per eth * 1gwei = 2000e9 + }, + }, + MerkleRoot: tree.Root(), + Interval: cciptypes.CommitStoreInterval{Min: tc.min, Max: tc.max}, + } + out, err := encodeCommitReport(report) + require.NoError(t, err) + + fn, err := 
factory.CommitReportToEthTxMeta(ccipconfig.CommitStore, *semver.MustParse("1.0.0")) + require.NoError(t, err) + txMeta, err := fn(out) + require.NoError(t, err) + require.NotNil(t, txMeta) + require.EqualValues(t, tc.expectedRange, txMeta.SeqNumbers) + }) + } +} + +// TODO should be removed, tests need to be updated to use the Reader interface. +// encodeCommitReport is only used in tests +func encodeCommitReport(report cciptypes.CommitStoreReport) ([]byte, error) { + commitStoreABI := abihelpers.MustParseABI(commit_store.CommitStoreABI) + return v1_2_0.EncodeCommitReport(abihelpers.MustGetEventInputs(v1_0_0.ReportAccepted, commitStoreABI), report) +} diff --git a/core/services/ocr2/plugins/ccip/ccipexec/batching.go b/core/services/ocr2/plugins/ccip/ccipexec/batching.go new file mode 100644 index 00000000000..b457dd986d4 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/ccipexec/batching.go @@ -0,0 +1,540 @@ +package ccipexec + +import ( + "context" + "fmt" + "math/big" + "time" + + mapset "github.com/deckarep/golang-set/v2" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/pkg/errors" + + "github.com/smartcontractkit/chainlink-common/pkg/hashutil" + "github.com/smartcontractkit/chainlink-common/pkg/merklemulti" + "github.com/smartcontractkit/chainlink-common/pkg/types" + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/prices" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/tokendata" + "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/statuschecker" +) + +type BatchContext struct { + report commitReportWithSendRequests + inflight []InflightInternalExecutionReport + inflightAggregateValue *big.Int + lggr logger.Logger + availableDataLen int + availableGas uint64 + expectedNonces map[cciptypes.Address]uint64 + sendersNonce map[cciptypes.Address]uint64 + sourceTokenPricesUSD map[cciptypes.Address]*big.Int + destTokenPricesUSD map[cciptypes.Address]*big.Int + gasPrice *big.Int + sourceToDestToken map[cciptypes.Address]cciptypes.Address + aggregateTokenLimit *big.Int + tokenDataRemainingDuration time.Duration + tokenDataWorker tokendata.Worker + gasPriceEstimator prices.GasPriceEstimatorExec + destWrappedNative cciptypes.Address + offchainConfig cciptypes.ExecOffchainConfig +} + +type BatchingStrategy interface { + BuildBatch(ctx context.Context, batchCtx *BatchContext) ([]ccip.ObservedMessage, []messageExecStatus) +} + +type BestEffortBatchingStrategy struct{} + +type ZKOverflowBatchingStrategy struct { + statuschecker statuschecker.CCIPTransactionStatusChecker +} + +func NewBatchingStrategy(batchingStrategyID uint32, statusChecker statuschecker.CCIPTransactionStatusChecker) (BatchingStrategy, error) { + var batchingStrategy BatchingStrategy + switch batchingStrategyID { + case 0: + batchingStrategy = &BestEffortBatchingStrategy{} + case 1: + batchingStrategy = &ZKOverflowBatchingStrategy{ + statuschecker: statusChecker, + } + default: + return nil, errors.Errorf("unknown batching strategy ID %d", batchingStrategyID) + } + return batchingStrategy, nil +} + +// BestEffortBatchingStrategy is a batching strategy that tries 
to batch as many messages as possible (up to certain limits). +func (s *BestEffortBatchingStrategy) BuildBatch( + ctx context.Context, + batchCtx *BatchContext, +) ([]ccip.ObservedMessage, []messageExecStatus) { + batchBuilder := newBatchBuildContainer(len(batchCtx.report.sendRequestsWithMeta)) + for _, msg := range batchCtx.report.sendRequestsWithMeta { + msgLggr := batchCtx.lggr.With("messageID", hexutil.Encode(msg.MessageID[:]), "seqNr", msg.SequenceNumber) + status, messageMaxGas, tokenData, msgValue, err := performCommonChecks(ctx, batchCtx, msg, msgLggr) + + if err != nil { + return []ccip.ObservedMessage{}, []messageExecStatus{} + } + + if status.shouldBeSkipped() { + batchBuilder.skip(msg, status) + continue + } + + updateBatchContext(batchCtx, msg, messageMaxGas, msgValue, msgLggr) + batchBuilder.addToBatch(msg, tokenData) + } + return batchBuilder.batch, batchBuilder.statuses +} + +// ZKOverflowBatchingStrategy is a batching strategy for ZK chains overflowing under certain conditions. +// It is a simple batching strategy that only allows one message to be added to the batch. +// TXM is used to perform the ZK check: if the message failed the check, it will be skipped. +func (bs ZKOverflowBatchingStrategy) BuildBatch( + ctx context.Context, + batchCtx *BatchContext, +) ([]ccip.ObservedMessage, []messageExecStatus) { + batchBuilder := newBatchBuildContainer(len(batchCtx.report.sendRequestsWithMeta)) + inflightSeqNums := getInflightSeqNums(batchCtx.inflight) + + for _, msg := range batchCtx.report.sendRequestsWithMeta { + msgId := hexutil.Encode(msg.MessageID[:]) + msgLggr := batchCtx.lggr.With("messageID", msgId, "seqNr", msg.SequenceNumber) + + // Check if msg is inflight + if exists := inflightSeqNums.Contains(msg.SequenceNumber); exists { + // Message is inflight, skip it + msgLggr.Infow("Skipping message - already inflight", "message", msgId) + batchBuilder.skip(msg, SkippedInflight) + continue + } + // Message is not inflight, continue with checks + // Check if the message is overflown using TXM + statuses, count, err := bs.statuschecker.CheckMessageStatus(ctx, msgId) + if err != nil { + batchBuilder.skip(msg, TXMCheckError) + continue + } + + msgLggr.Infow("TXM check result", "statuses", statuses, "count", count) + + if len(statuses) == 0 { + // No status found for message = first time we see it + msgLggr.Infow("No status found for message - proceeding with checks", "message", msgId) + } else { + // Status(es) found for message = check if any of them is final to decide if we should add it to the batch + hasFatalStatus := false + for _, s := range statuses { + if s == types.Fatal { + msgLggr.Infow("Skipping message - found a fatal TXM status", "message", msgId) + batchBuilder.skip(msg, TXMFatalStatus) + hasFatalStatus = true + break + } + } + if hasFatalStatus { + continue + } + msgLggr.Infow("No fatal status found for message - proceeding with checks", "message", msgId) + } + + status, messageMaxGas, tokenData, msgValue, err := performCommonChecks(ctx, batchCtx, msg, msgLggr) + + if err != nil { + return []ccip.ObservedMessage{}, []messageExecStatus{} + } + + if status.shouldBeSkipped() { + batchBuilder.skip(msg, status) + continue + } + + updateBatchContext(batchCtx, msg, messageMaxGas, msgValue, msgLggr) + msgLggr.Infow("Adding message to batch", "message", msgId) + batchBuilder.addToBatch(msg, tokenData) + + // Batch size is limited to 1 for ZK Overflow chains + break + } + return batchBuilder.batch, batchBuilder.statuses +} + +func performCommonChecks( + ctx 
context.Context, + batchCtx *BatchContext, + msg cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta, + msgLggr logger.Logger, +) (messageStatus, uint64, [][]byte, *big.Int, error) { + if msg.Executed { + msgLggr.Infow("Skipping message - already executed") + return AlreadyExecuted, 0, nil, nil, nil + } + + if len(msg.Data) > batchCtx.availableDataLen { + msgLggr.Infow("Skipping message - insufficient remaining batch data length", "msgDataLen", len(msg.Data), "availableBatchDataLen", batchCtx.availableDataLen) + return InsufficientRemainingBatchDataLength, 0, nil, nil, nil + } + + messageMaxGas, err1 := calculateMessageMaxGas( + msg.GasLimit, + len(batchCtx.report.sendRequestsWithMeta), + len(msg.Data), + len(msg.TokenAmounts), + ) + if err1 != nil { + msgLggr.Errorw("Skipping message - message max gas calculation error", "err", err1) + return MessageMaxGasCalcError, 0, nil, nil, nil + } + + // Check sufficient gas in batch + if batchCtx.availableGas < messageMaxGas { + msgLggr.Infow("Skipping message - insufficient remaining batch gas limit", "availableGas", batchCtx.availableGas, "messageMaxGas", messageMaxGas) + return InsufficientRemainingBatchGas, 0, nil, nil, nil + } + + if _, ok := batchCtx.expectedNonces[msg.Sender]; !ok { + nonce, ok1 := batchCtx.sendersNonce[msg.Sender] + if !ok1 { + msgLggr.Errorw("Skipping message - missing nonce", "sender", msg.Sender) + return MissingNonce, 0, nil, nil, nil + } + batchCtx.expectedNonces[msg.Sender] = nonce + 1 + } + + // Check expected nonce is valid for sequenced messages. + // Sequenced messages have non-zero nonces. + if msg.Nonce > 0 && msg.Nonce != batchCtx.expectedNonces[msg.Sender] { + msgLggr.Warnw("Skipping message - invalid nonce", "have", msg.Nonce, "want", batchCtx.expectedNonces[msg.Sender]) + return InvalidNonce, 0, nil, nil, nil + } + + msgValue, err1 := aggregateTokenValue(batchCtx.lggr, batchCtx.destTokenPricesUSD, batchCtx.sourceToDestToken, msg.TokenAmounts) + if err1 != nil { + msgLggr.Errorw("Skipping message - aggregate token value compute error", "err", err1) + return AggregateTokenValueComputeError, 0, nil, nil, nil + } + + // if token limit is smaller than message value skip message + if tokensLeft, hasCapacity := hasEnoughTokens(batchCtx.aggregateTokenLimit, msgValue, batchCtx.inflightAggregateValue); !hasCapacity { + msgLggr.Warnw("Skipping message - aggregate token limit exceeded", "aggregateTokenLimit", tokensLeft.String(), "msgValue", msgValue.String()) + return AggregateTokenLimitExceeded, 0, nil, nil, nil + } + + tokenData, elapsed, err1 := getTokenDataWithTimeout(ctx, msg, batchCtx.tokenDataRemainingDuration, batchCtx.tokenDataWorker) + batchCtx.tokenDataRemainingDuration -= elapsed + if err1 != nil { + if errors.Is(err1, tokendata.ErrNotReady) { + msgLggr.Warnw("Skipping message - token data not ready", "err", err1) + return TokenDataNotReady, 0, nil, nil, nil + } + msgLggr.Errorw("Skipping message - token data fetch error", "err", err1) + return TokenDataFetchError, 0, nil, nil, nil + } + + dstWrappedNativePrice, exists := batchCtx.destTokenPricesUSD[batchCtx.destWrappedNative] + if !exists { + msgLggr.Errorw("Skipping message - token not in destination token prices", "token", batchCtx.destWrappedNative) + return TokenNotInDestTokenPrices, 0, nil, nil, nil + } + + // calculating the source chain fee, dividing by 1e18 for denomination. + // For example: + // FeeToken=link; FeeTokenAmount=1e17 i.e. 
0.1 link, price is 6e18 USD/link (1 USD = 1e18), + // availableFee is 1e17*6e18/1e18 = 6e17 = 0.6 USD + sourceFeeTokenPrice, exists := batchCtx.sourceTokenPricesUSD[msg.FeeToken] + if !exists { + msgLggr.Errorw("Skipping message - token not in source token prices", "token", msg.FeeToken) + return TokenNotInSrcTokenPrices, 0, nil, nil, nil + } + + // Fee boosting + execCostUsd, err1 := batchCtx.gasPriceEstimator.EstimateMsgCostUSD(batchCtx.gasPrice, dstWrappedNativePrice, msg) + if err1 != nil { + msgLggr.Errorw("Failed to estimate message cost USD", "err", err1) + return "", 0, nil, nil, errors.New("failed to estimate message cost USD") + } + + availableFee := big.NewInt(0).Mul(msg.FeeTokenAmount, sourceFeeTokenPrice) + availableFee = availableFee.Div(availableFee, big.NewInt(1e18)) + availableFeeUsd := waitBoostedFee(time.Since(msg.BlockTimestamp), availableFee, batchCtx.offchainConfig.RelativeBoostPerWaitHour) + if availableFeeUsd.Cmp(execCostUsd) < 0 { + msgLggr.Infow( + "Skipping message - insufficient remaining fee", + "availableFeeUsd", availableFeeUsd, + "execCostUsd", execCostUsd, + "sourceBlockTimestamp", msg.BlockTimestamp, + "waitTime", time.Since(msg.BlockTimestamp), + "boost", batchCtx.offchainConfig.RelativeBoostPerWaitHour, + ) + return InsufficientRemainingFee, 0, nil, nil, nil + } + + return SuccesfullyValidated, messageMaxGas, tokenData, msgValue, nil +} + +// getTokenDataWithTimeout gets the token data for the provided message. +// Stops and returns an error if fetching takes longer than the provided timeout. +func getTokenDataWithTimeout( + ctx context.Context, + msg cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta, + timeout time.Duration, + tokenDataWorker tokendata.Worker, +) ([][]byte, time.Duration, error) { + if len(msg.TokenAmounts) == 0 { + return nil, 0, nil + } + + ctxTimeout, cf := context.WithTimeout(ctx, timeout) + defer cf() + tStart := time.Now() + tokenData, err := tokenDataWorker.GetMsgTokenData(ctxTimeout, msg) + tDur := time.Since(tStart) + return tokenData, tDur, err +} + +func getProofData( + ctx context.Context, + sourceReader ccipdata.OnRampReader, + interval cciptypes.CommitStoreInterval, +) (sendReqsInRoot []cciptypes.EVM2EVMMessageWithTxMeta, leaves [][32]byte, tree *merklemulti.Tree[[32]byte], err error) { + // We don't need to double-check if logs are finalized because we already checked that in the Commit phase. 
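+ // The tree is built over the hashes of every send request in the committed interval;
+ // buildExecutionReportForMessages later proves the selected leaves against this tree.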
+ sendReqs, err := sourceReader.GetSendRequestsBetweenSeqNums(ctx, interval.Min, interval.Max, false) + if err != nil { + return nil, nil, nil, err + } + + if err1 := validateSendRequests(sendReqs, interval); err1 != nil { + return nil, nil, nil, err1 + } + + leaves = make([][32]byte, 0, len(sendReqs)) + for _, req := range sendReqs { + leaves = append(leaves, req.Hash) + } + tree, err = merklemulti.NewTree(hashutil.NewKeccak(), leaves) + if err != nil { + return nil, nil, nil, err + } + return sendReqs, leaves, tree, nil +} + +func validateSendRequests(sendReqs []cciptypes.EVM2EVMMessageWithTxMeta, interval cciptypes.CommitStoreInterval) error { + if len(sendReqs) == 0 { + return fmt.Errorf("could not find any requests in the provided interval %v", interval) + } + + gotInterval := cciptypes.CommitStoreInterval{ + Min: sendReqs[0].SequenceNumber, + Max: sendReqs[0].SequenceNumber, + } + + for _, req := range sendReqs[1:] { + if req.SequenceNumber < gotInterval.Min { + gotInterval.Min = req.SequenceNumber + } + if req.SequenceNumber > gotInterval.Max { + gotInterval.Max = req.SequenceNumber + } + } + + if (gotInterval.Min != interval.Min) || (gotInterval.Max != interval.Max) { + return fmt.Errorf("interval %v is not the expected %v", gotInterval, interval) + } + return nil +} + +func getInflightSeqNums(inflight []InflightInternalExecutionReport) mapset.Set[uint64] { + seqNums := mapset.NewSet[uint64]() + for _, report := range inflight { + for _, msg := range report.messages { + seqNums.Add(msg.SequenceNumber) + } + } + return seqNums +} + +func aggregateTokenValue(lggr logger.Logger, destTokenPricesUSD map[cciptypes.Address]*big.Int, sourceToDest map[cciptypes.Address]cciptypes.Address, tokensAndAmount []cciptypes.TokenAmount) (*big.Int, error) { + sum := big.NewInt(0) + for i := 0; i < len(tokensAndAmount); i++ { + price, ok := destTokenPricesUSD[sourceToDest[tokensAndAmount[i].Token]] + if !ok { + // If we don't have a price for the token, we will assume it's worth 0. 
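+ // A token with no destination price contributes nothing to the sum, so the aggregate
+ // token limit check in performCommonChecks sees a correspondingly smaller message value.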
+ lggr.Infof("No price for token %s, assuming 0", tokensAndAmount[i].Token) + continue + } + sum.Add(sum, new(big.Int).Quo(new(big.Int).Mul(price, tokensAndAmount[i].Amount), big.NewInt(1e18))) + } + return sum, nil +} + +func updateBatchContext( + batchCtx *BatchContext, + msg cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta, + messageMaxGas uint64, + msgValue *big.Int, + msgLggr logger.Logger) { + batchCtx.availableGas -= messageMaxGas + batchCtx.availableDataLen -= len(msg.Data) + batchCtx.aggregateTokenLimit.Sub(batchCtx.aggregateTokenLimit, msgValue) + if msg.Nonce > 0 { + batchCtx.expectedNonces[msg.Sender] = msg.Nonce + 1 + } + + msgLggr.Infow( + "Message successfully added to execution batch", + "nonce", msg.Nonce, + "sender", msg.Sender, + "value", msgValue, + "availableAggrTokenLimit", batchCtx.aggregateTokenLimit, + "availableGas", batchCtx.availableGas, + "availableDataLen", batchCtx.availableDataLen, + ) +} + +func hasEnoughTokens(tokenLimit *big.Int, msgValue *big.Int, inflightValue *big.Int) (*big.Int, bool) { + tokensLeft := big.NewInt(0).Sub(tokenLimit, inflightValue) + return tokensLeft, tokensLeft.Cmp(msgValue) >= 0 +} + +func buildExecutionReportForMessages( + msgsInRoot []cciptypes.EVM2EVMMessageWithTxMeta, + tree *merklemulti.Tree[[32]byte], + commitInterval cciptypes.CommitStoreInterval, + observedMessages []ccip.ObservedMessage, +) (cciptypes.ExecReport, error) { + innerIdxs := make([]int, 0, len(observedMessages)) + var messages []cciptypes.EVM2EVMMessage + var offchainTokenData [][][]byte + for _, observedMessage := range observedMessages { + if observedMessage.SeqNr < commitInterval.Min || observedMessage.SeqNr > commitInterval.Max { + // We only return messages from a single root (the root of the first message). + continue + } + innerIdx := int(observedMessage.SeqNr - commitInterval.Min) + if innerIdx >= len(msgsInRoot) || innerIdx < 0 { + return cciptypes.ExecReport{}, fmt.Errorf("invalid inneridx SeqNr=%d IntervalMin=%d msgsInRoot=%d", + observedMessage.SeqNr, commitInterval.Min, len(msgsInRoot)) + } + messages = append(messages, msgsInRoot[innerIdx].EVM2EVMMessage) + offchainTokenData = append(offchainTokenData, observedMessage.TokenData) + innerIdxs = append(innerIdxs, innerIdx) + } + + merkleProof, err := tree.Prove(innerIdxs) + if err != nil { + return cciptypes.ExecReport{}, err + } + + // any capped proof will have length <= this one, so we reuse it to avoid proving inside loop, and update later if changed + return cciptypes.ExecReport{ + Messages: messages, + Proofs: merkleProof.Hashes, + ProofFlagBits: abihelpers.ProofFlagsToBits(merkleProof.SourceFlags), + OffchainTokenData: offchainTokenData, + }, nil +} + +// Validates the given message observations do not exceed the committed sequence numbers +// in the commitStoreReader. +func validateSeqNumbers(serviceCtx context.Context, commitStore ccipdata.CommitStoreReader, observedMessages []ccip.ObservedMessage) error { + nextMin, err := commitStore.GetExpectedNextSequenceNumber(serviceCtx) + if err != nil { + return err + } + // observedMessages are always sorted by SeqNr and never empty, so it's safe to take last element + maxSeqNumInBatch := observedMessages[len(observedMessages)-1].SeqNr + + if maxSeqNumInBatch >= nextMin { + return errors.Errorf("Cannot execute uncommitted seq num. nextMin %v, seqNums %v", nextMin, observedMessages) + } + return nil +} + +// Gets the commit report from the saved logs for a given sequence number. 
+func getCommitReportForSeqNum(ctx context.Context, commitStoreReader ccipdata.CommitStoreReader, seqNum uint64) (cciptypes.CommitStoreReport, error) { + acceptedReports, err := commitStoreReader.GetCommitReportMatchingSeqNum(ctx, seqNum, 0) + if err != nil { + return cciptypes.CommitStoreReport{}, err + } + + if len(acceptedReports) == 0 { + return cciptypes.CommitStoreReport{}, errors.Errorf("seq number not committed") + } + + return acceptedReports[0].CommitStoreReport, nil +} + +type messageStatus string + +const ( + SuccesfullyValidated messageStatus = "successfully_validated" + AlreadyExecuted messageStatus = "already_executed" + SenderAlreadySkipped messageStatus = "sender_already_skipped" + MessageMaxGasCalcError messageStatus = "message_max_gas_calc_error" + InsufficientRemainingBatchDataLength messageStatus = "insufficient_remaining_batch_data_length" + InsufficientRemainingBatchGas messageStatus = "insufficient_remaining_batch_gas" + MissingNonce messageStatus = "missing_nonce" + InvalidNonce messageStatus = "invalid_nonce" + AggregateTokenValueComputeError messageStatus = "aggregate_token_value_compute_error" + AggregateTokenLimitExceeded messageStatus = "aggregate_token_limit_exceeded" + TokenDataNotReady messageStatus = "token_data_not_ready" + TokenDataFetchError messageStatus = "token_data_fetch_error" + TokenNotInDestTokenPrices messageStatus = "token_not_in_dest_token_prices" + TokenNotInSrcTokenPrices messageStatus = "token_not_in_src_token_prices" + InsufficientRemainingFee messageStatus = "insufficient_remaining_fee" + AddedToBatch messageStatus = "added_to_batch" + TXMCheckError messageStatus = "txm_check_error" + TXMFatalStatus messageStatus = "txm_fatal_status" + SkippedInflight messageStatus = "skipped_inflight" +) + +func (m messageStatus) shouldBeSkipped() bool { + return m != SuccesfullyValidated +} + +type messageExecStatus struct { + SeqNr uint64 + MessageId string + Status messageStatus +} + +func newMessageExecState(seqNr uint64, messageId cciptypes.Hash, status messageStatus) messageExecStatus { + return messageExecStatus{ + SeqNr: seqNr, + MessageId: hexutil.Encode(messageId[:]), + Status: status, + } +} + +type batchBuildContainer struct { + batch []ccip.ObservedMessage + statuses []messageExecStatus +} + +func newBatchBuildContainer(capacity int) *batchBuildContainer { + return &batchBuildContainer{ + batch: make([]ccip.ObservedMessage, 0, capacity), + statuses: make([]messageExecStatus, 0, capacity), + } +} + +func (m *batchBuildContainer) skip(msg cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta, status messageStatus) { + m.addState(msg, status) +} + +func (m *batchBuildContainer) addToBatch(msg cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta, tokenData [][]byte) { + m.addState(msg, AddedToBatch) + m.batch = append(m.batch, ccip.NewObservedMessage(msg.SequenceNumber, tokenData)) +} + +func (m *batchBuildContainer) addState(msg cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta, state messageStatus) { + m.statuses = append(m.statuses, newMessageExecState(msg.SequenceNumber, msg.MessageID, state)) +} diff --git a/core/services/ocr2/plugins/ccip/ccipexec/batching_test.go b/core/services/ocr2/plugins/ccip/ccipexec/batching_test.go new file mode 100644 index 00000000000..3647556a6d5 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/ccipexec/batching_test.go @@ -0,0 +1,910 @@ +package ccipexec + +import ( + "bytes" + "context" + "encoding/binary" + "math" + "math/big" + "reflect" + "testing" + "time" + + "github.com/pkg/errors" + 
"github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + + "github.com/smartcontractkit/chainlink-common/pkg/types" + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/prices" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/tokendata" + mockstatuschecker "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/statuschecker/mocks" +) + +type testCase struct { + name string + reqs []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta + inflight []InflightInternalExecutionReport + tokenLimit, destGasPrice, inflightAggregateValue *big.Int + srcPrices, dstPrices map[cciptypes.Address]*big.Int + offRampNoncesBySender map[cciptypes.Address]uint64 + srcToDestTokens map[cciptypes.Address]cciptypes.Address + expectedSeqNrs []ccip.ObservedMessage + expectedStates []messageExecStatus + statuschecker func(m *mockstatuschecker.CCIPTransactionStatusChecker) + skipGasPriceEstimator bool +} + +func Test_NewBatchingStrategy(t *testing.T) { + t.Parallel() + + mockStatusChecker := mockstatuschecker.NewCCIPTransactionStatusChecker(t) + + testCases := []int{0, 1, 2} + + for _, batchingStrategyId := range testCases { + factory, err := NewBatchingStrategy(uint32(batchingStrategyId), mockStatusChecker) + if batchingStrategyId == 2 { + assert.Error(t, err) + } else { + assert.NotNil(t, factory) + assert.NoError(t, err) + } + } +} + +func Test_validateSendRequests(t *testing.T) { + testCases := []struct { + name string + seqNums []uint64 + providedInterval cciptypes.CommitStoreInterval + expErr bool + }{ + { + name: "zero interval no seq nums", + seqNums: nil, + providedInterval: cciptypes.CommitStoreInterval{Min: 0, Max: 0}, + expErr: true, + }, + { + name: "exp 1 seq num got none", + seqNums: nil, + providedInterval: cciptypes.CommitStoreInterval{Min: 1, Max: 1}, + expErr: true, + }, + { + name: "exp 10 seq num got none", + seqNums: nil, + providedInterval: cciptypes.CommitStoreInterval{Min: 1, Max: 10}, + expErr: true, + }, + { + name: "got 1 seq num as expected", + seqNums: []uint64{1}, + providedInterval: cciptypes.CommitStoreInterval{Min: 1, Max: 1}, + expErr: false, + }, + { + name: "got 5 seq num as expected", + seqNums: []uint64{11, 12, 13, 14, 15}, + providedInterval: cciptypes.CommitStoreInterval{Min: 11, Max: 15}, + expErr: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + sendReqs := make([]cciptypes.EVM2EVMMessageWithTxMeta, 0, len(tc.seqNums)) + for _, seqNum := range tc.seqNums { + sendReqs = append(sendReqs, cciptypes.EVM2EVMMessageWithTxMeta{ + EVM2EVMMessage: cciptypes.EVM2EVMMessage{SequenceNumber: seqNum}, + }) + } + err := validateSendRequests(sendReqs, tc.providedInterval) + if tc.expErr { + assert.Error(t, err) + return + } + assert.NoError(t, err) + }) + } +} + +type delayedTokenDataWorker struct { + delay time.Duration + tokendata.Worker +} + +func (m delayedTokenDataWorker) GetMsgTokenData(ctx context.Context, msg cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta) ([][]byte, error) { + time.Sleep(m.delay) + return nil, ctx.Err() +} + +func TestExecutionReportingPlugin_getTokenDataWithCappedLatency(t 
*testing.T) { + testCases := []struct { + name string + allowedWaitingTime time.Duration + workerLatency time.Duration + expErr bool + }{ + { + name: "happy flow", + allowedWaitingTime: 10 * time.Millisecond, + workerLatency: time.Nanosecond, + expErr: false, + }, + { + name: "worker takes long to reply", + allowedWaitingTime: 10 * time.Millisecond, + workerLatency: 20 * time.Millisecond, + expErr: true, + }, + } + + ctx := testutils.Context(t) + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + tokenDataWorker := delayedTokenDataWorker{delay: tc.workerLatency} + + msg := cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{ + EVM2EVMMessage: cciptypes.EVM2EVMMessage{TokenAmounts: make([]cciptypes.TokenAmount, 1)}, + } + + _, _, err := getTokenDataWithTimeout(ctx, msg, tc.allowedWaitingTime, tokenDataWorker) + if tc.expErr { + assert.Error(t, err) + return + } + assert.NoError(t, err) + }) + } +} + +func TestBatchingStrategies(t *testing.T) { + sender1 := ccipcalc.HexToAddress("0xa") + destNative := ccipcalc.HexToAddress("0xb") + srcNative := ccipcalc.HexToAddress("0xc") + + msg1 := createTestMessage(1, sender1, 1, srcNative, big.NewInt(1e9), false, nil) + + msg2 := msg1 + msg2.Executed = true + + msg3 := msg1 + msg3.Executed = true + msg3.Finalized = true + + msg4 := msg1 + msg4.TokenAmounts = []cciptypes.TokenAmount{ + {Token: srcNative, Amount: big.NewInt(100)}, + } + + msg5 := msg4 + msg5.SequenceNumber = msg5.SequenceNumber + 1 + msg5.Nonce = msg5.Nonce + 1 + + zkMsg1 := createTestMessage(1, sender1, 0, srcNative, big.NewInt(1e9), false, nil) + zkMsg2 := createTestMessage(2, sender1, 0, srcNative, big.NewInt(1e9), false, nil) + zkMsg3 := createTestMessage(3, sender1, 0, srcNative, big.NewInt(1e9), false, nil) + zkMsg4 := createTestMessage(4, sender1, 0, srcNative, big.NewInt(1e9), false, nil) + + testCases := []testCase{ + { + name: "single message no tokens", + reqs: []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{msg1}, + inflight: []InflightInternalExecutionReport{}, + inflightAggregateValue: big.NewInt(0), + tokenLimit: big.NewInt(0), + destGasPrice: big.NewInt(10), + srcPrices: map[cciptypes.Address]*big.Int{srcNative: big.NewInt(1)}, + dstPrices: map[cciptypes.Address]*big.Int{destNative: big.NewInt(1)}, + offRampNoncesBySender: map[cciptypes.Address]uint64{sender1: 0}, + expectedSeqNrs: []ccip.ObservedMessage{{SeqNr: uint64(1)}}, + expectedStates: []messageExecStatus{newMessageExecState(msg1.SequenceNumber, msg1.MessageID, AddedToBatch)}, + }, + { + name: "gasPriceEstimator returns error", + reqs: []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{msg1}, + inflight: []InflightInternalExecutionReport{}, + inflightAggregateValue: big.NewInt(0), + tokenLimit: big.NewInt(0), + destGasPrice: big.NewInt(10), + srcPrices: map[cciptypes.Address]*big.Int{srcNative: big.NewInt(1)}, + dstPrices: map[cciptypes.Address]*big.Int{destNative: big.NewInt(1)}, + offRampNoncesBySender: map[cciptypes.Address]uint64{sender1: 0}, + }, + { + name: "executed non finalized messages should be skipped", + reqs: []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{msg2}, + inflight: []InflightInternalExecutionReport{}, + inflightAggregateValue: big.NewInt(0), + tokenLimit: big.NewInt(0), + destGasPrice: big.NewInt(10), + srcPrices: map[cciptypes.Address]*big.Int{srcNative: big.NewInt(1)}, + dstPrices: map[cciptypes.Address]*big.Int{destNative: big.NewInt(1)}, + offRampNoncesBySender: map[cciptypes.Address]uint64{sender1: 0}, + expectedStates: 
[]messageExecStatus{newMessageExecState(msg2.SequenceNumber, msg2.MessageID, AlreadyExecuted)}, + skipGasPriceEstimator: true, + }, + { + name: "finalized executed log", + reqs: []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{msg3}, + inflight: []InflightInternalExecutionReport{}, + inflightAggregateValue: big.NewInt(0), + tokenLimit: big.NewInt(0), + destGasPrice: big.NewInt(10), + srcPrices: map[cciptypes.Address]*big.Int{srcNative: big.NewInt(1)}, + dstPrices: map[cciptypes.Address]*big.Int{destNative: big.NewInt(1)}, + offRampNoncesBySender: map[cciptypes.Address]uint64{sender1: 0}, + expectedStates: []messageExecStatus{newMessageExecState(msg3.SequenceNumber, msg3.MessageID, AlreadyExecuted)}, + skipGasPriceEstimator: true, + }, + { + name: "dst token price does not exist", + reqs: []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{msg1}, + inflight: []InflightInternalExecutionReport{}, + inflightAggregateValue: big.NewInt(0), + tokenLimit: big.NewInt(0), + destGasPrice: big.NewInt(10), + srcPrices: map[cciptypes.Address]*big.Int{srcNative: big.NewInt(1)}, + dstPrices: map[cciptypes.Address]*big.Int{}, + offRampNoncesBySender: map[cciptypes.Address]uint64{sender1: 0}, + expectedStates: []messageExecStatus{newMessageExecState(msg1.SequenceNumber, msg1.MessageID, TokenNotInDestTokenPrices)}, + skipGasPriceEstimator: true, + }, + { + name: "src token price does not exist", + reqs: []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{msg1}, + inflight: []InflightInternalExecutionReport{}, + inflightAggregateValue: big.NewInt(0), + tokenLimit: big.NewInt(0), + destGasPrice: big.NewInt(10), + srcPrices: map[cciptypes.Address]*big.Int{}, + dstPrices: map[cciptypes.Address]*big.Int{destNative: big.NewInt(1)}, + offRampNoncesBySender: map[cciptypes.Address]uint64{sender1: 0}, + expectedStates: []messageExecStatus{newMessageExecState(msg1.SequenceNumber, msg1.MessageID, TokenNotInSrcTokenPrices)}, + skipGasPriceEstimator: true, + }, + { + name: "message with tokens is not executed if limit is reached", + reqs: []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{msg4}, + inflight: []InflightInternalExecutionReport{}, + inflightAggregateValue: big.NewInt(0), + tokenLimit: big.NewInt(99), + destGasPrice: big.NewInt(1), + srcPrices: map[cciptypes.Address]*big.Int{srcNative: big.NewInt(1e18)}, + dstPrices: map[cciptypes.Address]*big.Int{destNative: big.NewInt(1e18)}, + srcToDestTokens: map[cciptypes.Address]cciptypes.Address{ + srcNative: destNative, + }, + offRampNoncesBySender: map[cciptypes.Address]uint64{sender1: 0}, + expectedStates: []messageExecStatus{newMessageExecState(msg4.SequenceNumber, msg4.MessageID, AggregateTokenLimitExceeded)}, + skipGasPriceEstimator: true, + }, + { + name: "message with tokens is not executed if limit is reached when inflight is full", + reqs: []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{msg5}, + inflight: []InflightInternalExecutionReport{{createdAt: time.Now(), messages: []cciptypes.EVM2EVMMessage{msg4.EVM2EVMMessage}}}, + inflightAggregateValue: big.NewInt(100), + tokenLimit: big.NewInt(50), + destGasPrice: big.NewInt(10), + srcPrices: map[cciptypes.Address]*big.Int{srcNative: big.NewInt(1e18)}, + dstPrices: map[cciptypes.Address]*big.Int{destNative: big.NewInt(1e18)}, + srcToDestTokens: map[cciptypes.Address]cciptypes.Address{ + srcNative: destNative, + }, + offRampNoncesBySender: map[cciptypes.Address]uint64{sender1: 1}, + expectedStates: []messageExecStatus{newMessageExecState(msg5.SequenceNumber, msg5.MessageID, AggregateTokenLimitExceeded)}, + 
skipGasPriceEstimator: true, + }, + { + name: "skip when nonce doesn't match chain value", + reqs: []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{msg1}, + inflight: []InflightInternalExecutionReport{}, + inflightAggregateValue: big.NewInt(0), + tokenLimit: big.NewInt(0), + destGasPrice: big.NewInt(10), + srcPrices: map[cciptypes.Address]*big.Int{srcNative: big.NewInt(1)}, + dstPrices: map[cciptypes.Address]*big.Int{destNative: big.NewInt(1)}, + offRampNoncesBySender: map[cciptypes.Address]uint64{sender1: 123}, + expectedStates: []messageExecStatus{newMessageExecState(msg1.SequenceNumber, msg1.MessageID, InvalidNonce)}, + skipGasPriceEstimator: true, + }, + { + name: "skip when nonce not found", + reqs: []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{msg1}, + inflight: []InflightInternalExecutionReport{}, + inflightAggregateValue: big.NewInt(0), + tokenLimit: big.NewInt(0), + destGasPrice: big.NewInt(10), + srcPrices: map[cciptypes.Address]*big.Int{srcNative: big.NewInt(1)}, + dstPrices: map[cciptypes.Address]*big.Int{destNative: big.NewInt(1)}, + offRampNoncesBySender: map[cciptypes.Address]uint64{}, + expectedStates: []messageExecStatus{newMessageExecState(msg1.SequenceNumber, msg1.MessageID, MissingNonce)}, + skipGasPriceEstimator: true, + }, + { + name: "unordered messages", + reqs: []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{ + { + EVM2EVMMessage: cciptypes.EVM2EVMMessage{ + SequenceNumber: 10, + FeeTokenAmount: big.NewInt(1e9), + Sender: sender1, + Nonce: 0, + GasLimit: big.NewInt(1), + Data: bytes.Repeat([]byte{'a'}, 1000), + FeeToken: srcNative, + MessageID: [32]byte{}, + }, + BlockTimestamp: time.Date(2010, 1, 1, 12, 12, 12, 0, time.UTC), + }, + }, + inflight: []InflightInternalExecutionReport{}, + inflightAggregateValue: big.NewInt(0), + tokenLimit: big.NewInt(0), + destGasPrice: big.NewInt(10), + srcPrices: map[cciptypes.Address]*big.Int{srcNative: big.NewInt(1)}, + dstPrices: map[cciptypes.Address]*big.Int{destNative: big.NewInt(1)}, + offRampNoncesBySender: map[cciptypes.Address]uint64{sender1: 0}, + expectedSeqNrs: []ccip.ObservedMessage{{SeqNr: uint64(10)}}, + expectedStates: []messageExecStatus{ + newMessageExecState(10, [32]byte{}, AddedToBatch), + }, + }, + { + name: "unordered messages not blocked by nonce", + reqs: []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{ + { + EVM2EVMMessage: cciptypes.EVM2EVMMessage{ + SequenceNumber: 9, + FeeTokenAmount: big.NewInt(1e9), + Sender: sender1, + Nonce: 5, + GasLimit: big.NewInt(1), + Data: bytes.Repeat([]byte{'a'}, 1000), + FeeToken: srcNative, + MessageID: [32]byte{}, + }, + BlockTimestamp: time.Date(2010, 1, 1, 12, 12, 12, 0, time.UTC), + }, + { + EVM2EVMMessage: cciptypes.EVM2EVMMessage{ + SequenceNumber: 10, + FeeTokenAmount: big.NewInt(1e9), + Sender: sender1, + Nonce: 0, + GasLimit: big.NewInt(1), + Data: bytes.Repeat([]byte{'a'}, 1000), + FeeToken: srcNative, + MessageID: [32]byte{}, + }, + BlockTimestamp: time.Date(2010, 1, 1, 12, 12, 12, 0, time.UTC), + }, + }, + inflight: []InflightInternalExecutionReport{}, + inflightAggregateValue: big.NewInt(0), + tokenLimit: big.NewInt(0), + destGasPrice: big.NewInt(10), + srcPrices: map[cciptypes.Address]*big.Int{srcNative: big.NewInt(1)}, + dstPrices: map[cciptypes.Address]*big.Int{destNative: big.NewInt(1)}, + offRampNoncesBySender: map[cciptypes.Address]uint64{sender1: 3}, + expectedSeqNrs: []ccip.ObservedMessage{{SeqNr: uint64(10)}}, + expectedStates: []messageExecStatus{ + newMessageExecState(9, [32]byte{}, InvalidNonce), + newMessageExecState(10, [32]byte{}, 
AddedToBatch), + }, + }, + } + + bestEffortTestCases := []testCase{ + { + name: "skip when batch gas limit is reached", + reqs: []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{ + { + EVM2EVMMessage: cciptypes.EVM2EVMMessage{ + SequenceNumber: 10, + FeeTokenAmount: big.NewInt(1e9), + Sender: sender1, + Nonce: 1, + GasLimit: big.NewInt(1), + Data: bytes.Repeat([]byte{'a'}, 1000), + FeeToken: srcNative, + MessageID: [32]byte{}, + }, + BlockTimestamp: time.Date(2010, 1, 1, 12, 12, 12, 0, time.UTC), + }, + { + EVM2EVMMessage: cciptypes.EVM2EVMMessage{ + SequenceNumber: 11, + FeeTokenAmount: big.NewInt(1e9), + Sender: sender1, + Nonce: 2, + GasLimit: big.NewInt(math.MaxInt64), + Data: bytes.Repeat([]byte{'a'}, 1000), + FeeToken: srcNative, + MessageID: [32]byte{}, + }, + BlockTimestamp: time.Date(2010, 1, 1, 12, 12, 12, 0, time.UTC), + }, + { + EVM2EVMMessage: cciptypes.EVM2EVMMessage{ + SequenceNumber: 12, + FeeTokenAmount: big.NewInt(1e9), + Sender: sender1, + Nonce: 3, + GasLimit: big.NewInt(1), + Data: bytes.Repeat([]byte{'a'}, 1000), + FeeToken: srcNative, + MessageID: [32]byte{}, + }, + BlockTimestamp: time.Date(2010, 1, 1, 12, 12, 12, 0, time.UTC), + }, + }, + inflight: []InflightInternalExecutionReport{}, + inflightAggregateValue: big.NewInt(0), + tokenLimit: big.NewInt(0), + destGasPrice: big.NewInt(10), + srcPrices: map[cciptypes.Address]*big.Int{srcNative: big.NewInt(1)}, + dstPrices: map[cciptypes.Address]*big.Int{destNative: big.NewInt(1)}, + offRampNoncesBySender: map[cciptypes.Address]uint64{sender1: 0}, + expectedSeqNrs: []ccip.ObservedMessage{{SeqNr: uint64(10)}}, + expectedStates: []messageExecStatus{ + newMessageExecState(10, [32]byte{}, AddedToBatch), + newMessageExecState(11, [32]byte{}, InsufficientRemainingBatchGas), + newMessageExecState(12, [32]byte{}, InvalidNonce), + }, + }, + { + name: "some messages skipped after hitting max batch data len", + reqs: []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{ + { + EVM2EVMMessage: cciptypes.EVM2EVMMessage{ + SequenceNumber: 10, + FeeTokenAmount: big.NewInt(1e9), + Sender: sender1, + Nonce: 1, + GasLimit: big.NewInt(1), + Data: bytes.Repeat([]byte{'a'}, 1000), + FeeToken: srcNative, + MessageID: [32]byte{}, + }, + BlockTimestamp: time.Date(2010, 1, 1, 12, 12, 12, 0, time.UTC), + }, + { + EVM2EVMMessage: cciptypes.EVM2EVMMessage{ + SequenceNumber: 11, + FeeTokenAmount: big.NewInt(1e9), + Sender: sender1, + Nonce: 2, + GasLimit: big.NewInt(1), + Data: bytes.Repeat([]byte{'a'}, MaxDataLenPerBatch-500), // skipped from batch + FeeToken: srcNative, + MessageID: [32]byte{}, + }, + BlockTimestamp: time.Date(2010, 1, 1, 12, 12, 12, 0, time.UTC), + }, + { + EVM2EVMMessage: cciptypes.EVM2EVMMessage{ + SequenceNumber: 12, + FeeTokenAmount: big.NewInt(1e9), + Sender: sender1, + Nonce: 3, + GasLimit: big.NewInt(1), + Data: bytes.Repeat([]byte{'a'}, 1000), + FeeToken: srcNative, + MessageID: [32]byte{}, + }, + BlockTimestamp: time.Date(2010, 1, 1, 12, 12, 12, 0, time.UTC), + }, + }, + inflight: []InflightInternalExecutionReport{}, + inflightAggregateValue: big.NewInt(0), + tokenLimit: big.NewInt(0), + destGasPrice: big.NewInt(10), + srcPrices: map[cciptypes.Address]*big.Int{srcNative: big.NewInt(1)}, + dstPrices: map[cciptypes.Address]*big.Int{destNative: big.NewInt(1)}, + offRampNoncesBySender: map[cciptypes.Address]uint64{sender1: 0}, + expectedSeqNrs: []ccip.ObservedMessage{{SeqNr: uint64(10)}}, + expectedStates: []messageExecStatus{ + newMessageExecState(10, [32]byte{}, AddedToBatch), + newMessageExecState(11, [32]byte{}, 
InsufficientRemainingBatchDataLength), + newMessageExecState(12, [32]byte{}, InvalidNonce), + }, + }, + { + name: "unordered messages then ordered messages", + reqs: []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{ + { + EVM2EVMMessage: cciptypes.EVM2EVMMessage{ + SequenceNumber: 9, + FeeTokenAmount: big.NewInt(1e9), + Sender: sender1, + Nonce: 0, + GasLimit: big.NewInt(1), + Data: bytes.Repeat([]byte{'a'}, 1000), + FeeToken: srcNative, + MessageID: [32]byte{}, + }, + BlockTimestamp: time.Date(2010, 1, 1, 12, 12, 12, 0, time.UTC), + }, + { + EVM2EVMMessage: cciptypes.EVM2EVMMessage{ + SequenceNumber: 10, + FeeTokenAmount: big.NewInt(1e9), + Sender: sender1, + Nonce: 5, + GasLimit: big.NewInt(1), + Data: bytes.Repeat([]byte{'a'}, 1000), + FeeToken: srcNative, + MessageID: [32]byte{}, + }, + BlockTimestamp: time.Date(2010, 1, 1, 12, 12, 12, 0, time.UTC), + }, + }, + inflight: []InflightInternalExecutionReport{}, + inflightAggregateValue: big.NewInt(0), + tokenLimit: big.NewInt(0), + destGasPrice: big.NewInt(10), + srcPrices: map[cciptypes.Address]*big.Int{srcNative: big.NewInt(1)}, + dstPrices: map[cciptypes.Address]*big.Int{destNative: big.NewInt(1)}, + offRampNoncesBySender: map[cciptypes.Address]uint64{sender1: 4}, + expectedSeqNrs: []ccip.ObservedMessage{{SeqNr: uint64(9)}, {SeqNr: uint64(10)}}, + expectedStates: []messageExecStatus{ + newMessageExecState(9, [32]byte{}, AddedToBatch), + newMessageExecState(10, [32]byte{}, AddedToBatch), + }, + }, + } + + specificZkOverflowTestCases := []testCase{ + { + name: "batch size is 1", + reqs: []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{zkMsg1, zkMsg2, zkMsg3}, + inflight: []InflightInternalExecutionReport{}, + inflightAggregateValue: big.NewInt(0), + tokenLimit: big.NewInt(0), + destGasPrice: big.NewInt(10), + srcPrices: map[cciptypes.Address]*big.Int{srcNative: big.NewInt(1)}, + dstPrices: map[cciptypes.Address]*big.Int{destNative: big.NewInt(1)}, + offRampNoncesBySender: map[cciptypes.Address]uint64{sender1: 0}, + expectedSeqNrs: []ccip.ObservedMessage{{SeqNr: zkMsg1.SequenceNumber}}, + expectedStates: []messageExecStatus{ + newMessageExecState(zkMsg1.SequenceNumber, zkMsg1.MessageID, AddedToBatch), + }, + statuschecker: func(m *mockstatuschecker.CCIPTransactionStatusChecker) { + m.Mock = mock.Mock{} // reset mock + m.On("CheckMessageStatus", mock.Anything, mock.Anything).Return([]types.TransactionStatus{}, -1, nil) + }, + }, + { + name: "snooze fatal message and return empty batch", + reqs: []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{zkMsg1}, + inflight: []InflightInternalExecutionReport{}, + inflightAggregateValue: big.NewInt(0), + tokenLimit: big.NewInt(0), + destGasPrice: big.NewInt(10), + srcPrices: map[cciptypes.Address]*big.Int{srcNative: big.NewInt(1)}, + dstPrices: map[cciptypes.Address]*big.Int{destNative: big.NewInt(1)}, + offRampNoncesBySender: map[cciptypes.Address]uint64{sender1: 0}, + expectedStates: []messageExecStatus{ + newMessageExecState(zkMsg1.SequenceNumber, zkMsg1.MessageID, TXMFatalStatus), + }, + statuschecker: func(m *mockstatuschecker.CCIPTransactionStatusChecker) { + m.Mock = mock.Mock{} // reset mock + m.On("CheckMessageStatus", mock.Anything, zkMsg1.MessageID.String()).Return([]types.TransactionStatus{types.Fatal}, 0, nil) + }, + skipGasPriceEstimator: true, + }, + { + name: "snooze fatal message and add next message to batch", + reqs: []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{zkMsg1, zkMsg2}, + inflight: []InflightInternalExecutionReport{}, + inflightAggregateValue: big.NewInt(0), + 
tokenLimit: big.NewInt(0), + destGasPrice: big.NewInt(10), + srcPrices: map[cciptypes.Address]*big.Int{srcNative: big.NewInt(1)}, + dstPrices: map[cciptypes.Address]*big.Int{destNative: big.NewInt(1)}, + offRampNoncesBySender: map[cciptypes.Address]uint64{sender1: 0}, + expectedSeqNrs: []ccip.ObservedMessage{{SeqNr: zkMsg2.SequenceNumber}}, + expectedStates: []messageExecStatus{ + newMessageExecState(zkMsg1.SequenceNumber, zkMsg1.MessageID, TXMFatalStatus), + newMessageExecState(zkMsg2.SequenceNumber, zkMsg2.MessageID, AddedToBatch), + }, + statuschecker: func(m *mockstatuschecker.CCIPTransactionStatusChecker) { + m.Mock = mock.Mock{} // reset mock + m.On("CheckMessageStatus", mock.Anything, zkMsg1.MessageID.String()).Return([]types.TransactionStatus{types.Fatal}, 0, nil) + m.On("CheckMessageStatus", mock.Anything, zkMsg2.MessageID.String()).Return([]types.TransactionStatus{}, -1, nil) + }, + }, + { + name: "all messages are fatal and batch is empty", + reqs: []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{zkMsg1, zkMsg2}, + inflight: []InflightInternalExecutionReport{}, + inflightAggregateValue: big.NewInt(0), + tokenLimit: big.NewInt(0), + destGasPrice: big.NewInt(10), + srcPrices: map[cciptypes.Address]*big.Int{srcNative: big.NewInt(1)}, + dstPrices: map[cciptypes.Address]*big.Int{destNative: big.NewInt(1)}, + offRampNoncesBySender: map[cciptypes.Address]uint64{sender1: 0}, + expectedStates: []messageExecStatus{ + newMessageExecState(zkMsg1.SequenceNumber, zkMsg1.MessageID, TXMFatalStatus), + newMessageExecState(zkMsg2.SequenceNumber, zkMsg2.MessageID, TXMFatalStatus), + }, + statuschecker: func(m *mockstatuschecker.CCIPTransactionStatusChecker) { + m.Mock = mock.Mock{} // reset mock + m.On("CheckMessageStatus", mock.Anything, zkMsg1.MessageID.String()).Return([]types.TransactionStatus{types.Fatal}, 0, nil) + m.On("CheckMessageStatus", mock.Anything, zkMsg2.MessageID.String()).Return([]types.TransactionStatus{types.Fatal}, 0, nil) + }, + skipGasPriceEstimator: true, + }, + { + name: "message batched when unconfirmed or failed", + reqs: []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{zkMsg1, zkMsg2}, + inflight: []InflightInternalExecutionReport{}, + inflightAggregateValue: big.NewInt(0), + tokenLimit: big.NewInt(0), + destGasPrice: big.NewInt(10), + srcPrices: map[cciptypes.Address]*big.Int{srcNative: big.NewInt(1)}, + dstPrices: map[cciptypes.Address]*big.Int{destNative: big.NewInt(1)}, + offRampNoncesBySender: map[cciptypes.Address]uint64{sender1: 0}, + expectedSeqNrs: []ccip.ObservedMessage{{SeqNr: zkMsg1.SequenceNumber}}, + expectedStates: []messageExecStatus{ + newMessageExecState(zkMsg1.SequenceNumber, zkMsg1.MessageID, AddedToBatch), + }, + statuschecker: func(m *mockstatuschecker.CCIPTransactionStatusChecker) { + m.Mock = mock.Mock{} // reset mock + m.On("CheckMessageStatus", mock.Anything, zkMsg1.MessageID.String()).Return([]types.TransactionStatus{types.Unconfirmed, types.Failed}, 1, nil) + }, + }, + { + name: "message snoozed when multiple statuses with fatal", + reqs: []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{zkMsg1, zkMsg2}, + inflight: []InflightInternalExecutionReport{}, + inflightAggregateValue: big.NewInt(0), + tokenLimit: big.NewInt(0), + destGasPrice: big.NewInt(10), + srcPrices: map[cciptypes.Address]*big.Int{srcNative: big.NewInt(1)}, + dstPrices: map[cciptypes.Address]*big.Int{destNative: big.NewInt(1)}, + offRampNoncesBySender: map[cciptypes.Address]uint64{sender1: 0}, + expectedSeqNrs: []ccip.ObservedMessage{{SeqNr: zkMsg2.SequenceNumber}}, + 
expectedStates: []messageExecStatus{ + newMessageExecState(zkMsg1.SequenceNumber, zkMsg1.MessageID, TXMFatalStatus), + newMessageExecState(zkMsg2.SequenceNumber, zkMsg2.MessageID, AddedToBatch), + }, + statuschecker: func(m *mockstatuschecker.CCIPTransactionStatusChecker) { + m.Mock = mock.Mock{} // reset mock + m.On("CheckMessageStatus", mock.Anything, zkMsg1.MessageID.String()).Return([]types.TransactionStatus{types.Unconfirmed, types.Failed, types.Fatal}, 2, nil) + m.On("CheckMessageStatus", mock.Anything, zkMsg2.MessageID.String()).Return([]types.TransactionStatus{}, -1, nil) + }, + }, + { + name: "txm return error for message", + reqs: []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{zkMsg1, zkMsg2}, + inflight: []InflightInternalExecutionReport{}, + inflightAggregateValue: big.NewInt(0), + tokenLimit: big.NewInt(0), + destGasPrice: big.NewInt(10), + srcPrices: map[cciptypes.Address]*big.Int{srcNative: big.NewInt(1)}, + dstPrices: map[cciptypes.Address]*big.Int{destNative: big.NewInt(1)}, + offRampNoncesBySender: map[cciptypes.Address]uint64{sender1: 0}, + expectedSeqNrs: []ccip.ObservedMessage{{SeqNr: zkMsg2.SequenceNumber}}, + expectedStates: []messageExecStatus{ + newMessageExecState(zkMsg1.SequenceNumber, zkMsg1.MessageID, TXMCheckError), + newMessageExecState(zkMsg2.SequenceNumber, zkMsg2.MessageID, AddedToBatch), + }, + statuschecker: func(m *mockstatuschecker.CCIPTransactionStatusChecker) { + m.Mock = mock.Mock{} // reset mock + m.On("CheckMessageStatus", mock.Anything, zkMsg1.MessageID.String()).Return([]types.TransactionStatus{}, -1, errors.New("dummy txm error")) + m.On("CheckMessageStatus", mock.Anything, zkMsg2.MessageID.String()).Return([]types.TransactionStatus{}, -1, nil) + }, + }, + { + name: "snooze message when inflight", + reqs: []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{zkMsg1}, + inflight: createInflight(zkMsg1), + inflightAggregateValue: zkMsg1.FeeTokenAmount, + tokenLimit: big.NewInt(0), + destGasPrice: big.NewInt(10), + srcPrices: map[cciptypes.Address]*big.Int{srcNative: big.NewInt(1)}, + dstPrices: map[cciptypes.Address]*big.Int{destNative: big.NewInt(1)}, + offRampNoncesBySender: map[cciptypes.Address]uint64{sender1: 0}, + expectedStates: []messageExecStatus{ + newMessageExecState(zkMsg1.SequenceNumber, zkMsg1.MessageID, SkippedInflight), + }, + skipGasPriceEstimator: true, + }, + { + name: "snooze when not inflight but txm returns error", + reqs: []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{zkMsg1}, + inflight: []InflightInternalExecutionReport{}, + inflightAggregateValue: big.NewInt(0), + tokenLimit: big.NewInt(0), + destGasPrice: big.NewInt(10), + srcPrices: map[cciptypes.Address]*big.Int{srcNative: big.NewInt(1)}, + dstPrices: map[cciptypes.Address]*big.Int{destNative: big.NewInt(1)}, + offRampNoncesBySender: map[cciptypes.Address]uint64{sender1: 0}, + expectedStates: []messageExecStatus{ + newMessageExecState(zkMsg1.SequenceNumber, zkMsg1.MessageID, TXMCheckError), + }, + statuschecker: func(m *mockstatuschecker.CCIPTransactionStatusChecker) { + m.Mock = mock.Mock{} // reset mock + m.On("CheckMessageStatus", mock.Anything, zkMsg1.MessageID.String()).Return([]types.TransactionStatus{}, -1, errors.New("dummy txm error")) + }, + skipGasPriceEstimator: true, + }, + { + name: "snooze when not inflight but txm returns fatal status", + reqs: []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{zkMsg1}, + inflight: []InflightInternalExecutionReport{}, + inflightAggregateValue: big.NewInt(0), + tokenLimit: big.NewInt(0), + destGasPrice: 
big.NewInt(10), + srcPrices: map[cciptypes.Address]*big.Int{srcNative: big.NewInt(1)}, + dstPrices: map[cciptypes.Address]*big.Int{destNative: big.NewInt(1)}, + offRampNoncesBySender: map[cciptypes.Address]uint64{sender1: 0}, + expectedStates: []messageExecStatus{ + newMessageExecState(zkMsg1.SequenceNumber, zkMsg1.MessageID, TXMFatalStatus), + }, + statuschecker: func(m *mockstatuschecker.CCIPTransactionStatusChecker) { + m.Mock = mock.Mock{} // reset mock + m.On("CheckMessageStatus", mock.Anything, zkMsg1.MessageID.String()).Return([]types.TransactionStatus{types.Unconfirmed, types.Failed, types.Fatal}, 2, nil) + }, + skipGasPriceEstimator: true, + }, + { + name: "snooze messages when inflight but batch valid messages", + reqs: []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{zkMsg1, zkMsg2, zkMsg3, zkMsg4}, + inflight: createInflight(zkMsg1, zkMsg2), + inflightAggregateValue: big.NewInt(0), + tokenLimit: big.NewInt(0), + destGasPrice: big.NewInt(10), + srcPrices: map[cciptypes.Address]*big.Int{srcNative: big.NewInt(1)}, + dstPrices: map[cciptypes.Address]*big.Int{destNative: big.NewInt(1)}, + offRampNoncesBySender: map[cciptypes.Address]uint64{sender1: 0}, + expectedSeqNrs: []ccip.ObservedMessage{{SeqNr: zkMsg3.SequenceNumber}}, + expectedStates: []messageExecStatus{ + newMessageExecState(zkMsg1.SequenceNumber, zkMsg1.MessageID, SkippedInflight), + newMessageExecState(zkMsg2.SequenceNumber, zkMsg2.MessageID, SkippedInflight), + newMessageExecState(zkMsg3.SequenceNumber, zkMsg3.MessageID, AddedToBatch), + }, + statuschecker: func(m *mockstatuschecker.CCIPTransactionStatusChecker) { + m.Mock = mock.Mock{} // reset mock + m.On("CheckMessageStatus", mock.Anything, zkMsg3.MessageID.String()).Return([]types.TransactionStatus{}, -1, nil) + }, + skipGasPriceEstimator: false, + }, + } + + t.Run("BestEffortBatchingStrategy", func(t *testing.T) { + strategy := &BestEffortBatchingStrategy{} + runBatchingStrategyTests(t, strategy, 1_000_000, append(testCases, bestEffortTestCases...)) + }) + + t.Run("ZKOverflowBatchingStrategy", func(t *testing.T) { + mockedStatusChecker := mockstatuschecker.NewCCIPTransactionStatusChecker(t) + strategy := &ZKOverflowBatchingStrategy{ + statuschecker: mockedStatusChecker, + } + runBatchingStrategyTests(t, strategy, 1_000_000, append(testCases, specificZkOverflowTestCases...)) + }) +} + +// Function to set up and run tests for a given batching strategy +func runBatchingStrategyTests(t *testing.T, strategy BatchingStrategy, availableGas uint64, testCases []testCase) { + destNative := ccipcalc.HexToAddress("0xb") + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + lggr := logger.TestLogger(t) + + gasPriceEstimator := prices.NewMockGasPriceEstimatorExec(t) + if !tc.skipGasPriceEstimator { + if tc.expectedSeqNrs != nil { + gasPriceEstimator.On("EstimateMsgCostUSD", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(big.NewInt(0), nil) + } else { + gasPriceEstimator.On("EstimateMsgCostUSD", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(big.NewInt(0), errors.New("error")) + } + } + + // default case for ZKOverflowBatchingStrategy + if strategyType := reflect.TypeOf(strategy); tc.statuschecker == nil && strategyType == reflect.TypeOf(&ZKOverflowBatchingStrategy{}) { + strategy.(*ZKOverflowBatchingStrategy).statuschecker.(*mockstatuschecker.CCIPTransactionStatusChecker).On("CheckMessageStatus", mock.Anything, mock.Anything).Return([]types.TransactionStatus{}, -1, nil) + } + + // Mock calls to TXM + if 
tc.statuschecker != nil { + tc.statuschecker(strategy.(*ZKOverflowBatchingStrategy).statuschecker.(*mockstatuschecker.CCIPTransactionStatusChecker)) + } + + batchContext := &BatchContext{ + report: commitReportWithSendRequests{sendRequestsWithMeta: tc.reqs}, + inflight: tc.inflight, + inflightAggregateValue: tc.inflightAggregateValue, + lggr: lggr, + availableDataLen: MaxDataLenPerBatch, + availableGas: availableGas, + expectedNonces: make(map[cciptypes.Address]uint64), + sendersNonce: tc.offRampNoncesBySender, + sourceTokenPricesUSD: tc.srcPrices, + destTokenPricesUSD: tc.dstPrices, + gasPrice: tc.destGasPrice, + sourceToDestToken: tc.srcToDestTokens, + aggregateTokenLimit: tc.tokenLimit, + tokenDataRemainingDuration: 5 * time.Second, + tokenDataWorker: tokendata.NewBackgroundWorker(map[cciptypes.Address]tokendata.Reader{}, 10, 5*time.Second, time.Hour), + gasPriceEstimator: gasPriceEstimator, + destWrappedNative: destNative, + offchainConfig: cciptypes.ExecOffchainConfig{ + DestOptimisticConfirmations: 1, + BatchGasLimit: 300_000, + RelativeBoostPerWaitHour: 1, + }, + } + + seqNrs, execStates := strategy.BuildBatch(context.Background(), batchContext) + + runAssertions(t, tc, seqNrs, execStates) + }) + } +} + +// Utility function to run common assertions +func runAssertions(t *testing.T, tc testCase, seqNrs []ccip.ObservedMessage, execStates []messageExecStatus) { + if tc.expectedSeqNrs == nil { + assert.Len(t, seqNrs, 0) + } else { + assert.Equal(t, tc.expectedSeqNrs, seqNrs) + } + + if tc.expectedStates == nil { + assert.Len(t, execStates, 0) + } else { + assert.Equal(t, tc.expectedStates, execStates) + } +} + +func createTestMessage(seqNr uint64, sender cciptypes.Address, nonce uint64, feeToken cciptypes.Address, feeAmount *big.Int, executed bool, data []byte) cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta { + return cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{ + EVM2EVMMessage: cciptypes.EVM2EVMMessage{ + SequenceNumber: seqNr, + FeeTokenAmount: feeAmount, + Sender: sender, + Nonce: nonce, + GasLimit: big.NewInt(1), + Strict: false, + Receiver: "", + Data: data, + TokenAmounts: nil, + FeeToken: feeToken, + MessageID: generateMessageIDFromInt(seqNr), + }, + BlockTimestamp: time.Date(2010, 1, 1, 12, 12, 12, 0, time.UTC), + Executed: executed, + } +} + +func generateMessageIDFromInt(input uint64) [32]byte { + var messageID [32]byte + binary.LittleEndian.PutUint32(messageID[:], uint32(input)) + return messageID +} + +func createInflight(msgs ...cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta) []InflightInternalExecutionReport { + reports := make([]InflightInternalExecutionReport, len(msgs)) + + for i, msg := range msgs { + reports[i] = InflightInternalExecutionReport{ + messages: []cciptypes.EVM2EVMMessage{msg.EVM2EVMMessage}, + createdAt: msg.BlockTimestamp, + } + } + + return reports +} diff --git a/core/services/ocr2/plugins/ccip/ccipexec/factory.go b/core/services/ocr2/plugins/ccip/ccipexec/factory.go new file mode 100644 index 00000000000..97caf2e719c --- /dev/null +++ b/core/services/ocr2/plugins/ccip/ccipexec/factory.go @@ -0,0 +1,164 @@ +package ccipexec + +import ( + "context" + "fmt" + "sync" + + "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcommon" + + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip" + 
"github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/cache" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata" +) + +type ExecutionReportingPluginFactory struct { + // Config derived from job specs and does not change between instances. + config ExecutionPluginStaticConfig + + destPriceRegReader ccipdata.PriceRegistryReader + destPriceRegAddr cciptypes.Address + readersMu *sync.Mutex +} + +func NewExecutionReportingPluginFactory(config ExecutionPluginStaticConfig) *ExecutionReportingPluginFactory { + return &ExecutionReportingPluginFactory{ + config: config, + readersMu: &sync.Mutex{}, + + // the fields below are initially empty and populated on demand + destPriceRegReader: nil, + destPriceRegAddr: "", + } +} + +func (rf *ExecutionReportingPluginFactory) UpdateDynamicReaders(ctx context.Context, newPriceRegAddr cciptypes.Address) error { + rf.readersMu.Lock() + defer rf.readersMu.Unlock() + // TODO: Investigate use of Close() to cleanup. + // TODO: a true price registry upgrade on an existing lane may want some kind of start block in its config? Right now we + // essentially assume that plugins don't care about historical price reg logs. + if rf.destPriceRegAddr == newPriceRegAddr { + // No-op + return nil + } + // Close old reader (if present) and open new reader if address changed. + if rf.destPriceRegReader != nil { + if err := rf.destPriceRegReader.Close(); err != nil { + return err + } + } + + destPriceRegistryReader, err := rf.config.priceRegistryProvider.NewPriceRegistryReader(context.Background(), newPriceRegAddr) + if err != nil { + return err + } + rf.destPriceRegReader = destPriceRegistryReader + rf.destPriceRegAddr = newPriceRegAddr + return nil +} + +type reportingPluginAndInfo struct { + plugin types.ReportingPlugin + pluginInfo types.ReportingPluginInfo +} + +// NewReportingPlugin registers a new ReportingPlugin +func (rf *ExecutionReportingPluginFactory) NewReportingPlugin(config types.ReportingPluginConfig) (types.ReportingPlugin, types.ReportingPluginInfo, error) { + initialRetryDelay := rf.config.newReportingPluginRetryConfig.InitialDelay + maxDelay := rf.config.newReportingPluginRetryConfig.MaxDelay + + pluginAndInfo, err := ccipcommon.RetryUntilSuccess(rf.NewReportingPluginFn(config), initialRetryDelay, maxDelay) + if err != nil { + return nil, types.ReportingPluginInfo{}, err + } + return pluginAndInfo.plugin, pluginAndInfo.pluginInfo, err +} + +// NewReportingPluginFn implements the NewReportingPlugin logic. It is defined as a function so that it can easily be +// retried via RetryUntilSuccess. NewReportingPlugin must return successfully in order for the Exec plugin to function, +// hence why we can only keep retrying it until it succeeds. 
+func (rf *ExecutionReportingPluginFactory) NewReportingPluginFn(config types.ReportingPluginConfig) func() (reportingPluginAndInfo, error) { + return func() (reportingPluginAndInfo, error) { + ctx := context.Background() // todo: consider setting a timeout + + destPriceRegistry, destWrappedNative, err := rf.config.offRampReader.ChangeConfig(ctx, config.OnchainConfig, config.OffchainConfig) + if err != nil { + return reportingPluginAndInfo{}, err + } + + // Open dynamic readers + err = rf.UpdateDynamicReaders(ctx, destPriceRegistry) + if err != nil { + return reportingPluginAndInfo{}, err + } + + offchainConfig, err := rf.config.offRampReader.OffchainConfig(ctx) + if err != nil { + return reportingPluginAndInfo{}, fmt.Errorf("get offchain config from offramp: %w", err) + } + + gasPriceEstimator, err := rf.config.offRampReader.GasPriceEstimator(ctx) + if err != nil { + return reportingPluginAndInfo{}, fmt.Errorf("get gas price estimator from offramp: %w", err) + } + + onchainConfig, err := rf.config.offRampReader.OnchainConfig(ctx) + if err != nil { + return reportingPluginAndInfo{}, fmt.Errorf("get onchain config from offramp: %w", err) + } + + batchingStrategy, err := NewBatchingStrategy(offchainConfig.BatchingStrategyID, rf.config.txmStatusChecker) + if err != nil { + return reportingPluginAndInfo{}, fmt.Errorf("get batching strategy: %w", err) + } + + msgVisibilityInterval := offchainConfig.MessageVisibilityInterval.Duration() + if msgVisibilityInterval.Seconds() == 0 { + rf.config.lggr.Info("MessageVisibilityInterval not set, falling back to PermissionLessExecutionThreshold") + msgVisibilityInterval = onchainConfig.PermissionLessExecutionThresholdSeconds + } + rf.config.lggr.Infof("MessageVisibilityInterval set to: %s", msgVisibilityInterval) + + lggr := rf.config.lggr.Named("ExecutionReportingPlugin") + plugin := &ExecutionReportingPlugin{ + F: config.F, + lggr: lggr, + offchainConfig: offchainConfig, + tokenDataWorker: rf.config.tokenDataWorker, + gasPriceEstimator: gasPriceEstimator, + sourcePriceRegistryProvider: rf.config.sourcePriceRegistryProvider, + sourcePriceRegistryLock: sync.RWMutex{}, + sourceWrappedNativeToken: rf.config.sourceWrappedNativeToken, + onRampReader: rf.config.onRampReader, + commitStoreReader: rf.config.commitStoreReader, + destPriceRegistry: rf.destPriceRegReader, + destWrappedNative: destWrappedNative, + onchainConfig: onchainConfig, + offRampReader: rf.config.offRampReader, + tokenPoolBatchedReader: rf.config.tokenPoolBatchedReader, + inflightReports: newInflightExecReportsContainer(offchainConfig.InflightCacheExpiry.Duration()), + commitRootsCache: cache.NewCommitRootsCache(lggr, rf.config.commitStoreReader, msgVisibilityInterval, offchainConfig.RootSnoozeTime.Duration()), + metricsCollector: rf.config.metricsCollector, + chainHealthcheck: rf.config.chainHealthcheck, + batchingStrategy: batchingStrategy, + } + + pluginInfo := types.ReportingPluginInfo{ + Name: "CCIPExecution", + // Setting this to false saves on calldata since OffRamp doesn't require agreement between NOPs + // (OffRamp is only able to execute committed messages). 
+ UniqueReports: false, + Limits: types.ReportingPluginLimits{ + MaxObservationLength: ccip.MaxObservationLength, + MaxReportLength: MaxExecutionReportLength, + }, + } + + return reportingPluginAndInfo{plugin, pluginInfo}, nil + } +} diff --git a/core/services/ocr2/plugins/ccip/ccipexec/factory_test.go b/core/services/ocr2/plugins/ccip/ccipexec/factory_test.go new file mode 100644 index 00000000000..7bbb9be0c69 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/ccipexec/factory_test.go @@ -0,0 +1,67 @@ +package ccipexec + +import ( + "errors" + "testing" + "time" + + "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + + "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata" + ccipdataprovidermocks "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/ccipdataprovider/mocks" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/mocks" +) + +// Assert that NewReportingPlugin keeps retrying until it succeeds. +// +// NewReportingPlugin makes several calls (e.g. OffRampReader.ChangeConfig()) that can fail. We use mocks to cause the +// first call to each of these functions to fail, then all subsequent calls succeed. We assert that NewReportingPlugin +// retries a sufficient number of times to get through the transient errors and eventually succeed. +func TestNewReportingPluginRetriesUntilSuccess(t *testing.T) { + execConfig := ExecutionPluginStaticConfig{} + + // For this unit test, ensure that there is no delay between retries + execConfig.newReportingPluginRetryConfig = ccipdata.RetryConfig{ + InitialDelay: 0 * time.Nanosecond, + MaxDelay: 0 * time.Nanosecond, + } + + // Set up the OffRampReader mock + mockOffRampReader := new(mocks.OffRampReader) + + // The first call is set to return an error, the following calls return a nil error + mockOffRampReader.On("ChangeConfig", mock.Anything, mock.Anything, mock.Anything).Return(ccip.Address(""), ccip.Address(""), errors.New("")).Once() + mockOffRampReader.On("ChangeConfig", mock.Anything, mock.Anything, mock.Anything).Return(ccip.Address("addr1"), ccip.Address("addr2"), nil).Times(5) + + mockOffRampReader.On("OffchainConfig", mock.Anything).Return(ccip.ExecOffchainConfig{}, errors.New("")).Once() + mockOffRampReader.On("OffchainConfig", mock.Anything).Return(ccip.ExecOffchainConfig{}, nil).Times(3) + + mockOffRampReader.On("GasPriceEstimator", mock.Anything).Return(nil, errors.New("")).Once() + mockOffRampReader.On("GasPriceEstimator", mock.Anything).Return(nil, nil).Times(2) + + mockOffRampReader.On("OnchainConfig", mock.Anything).Return(ccip.ExecOnchainConfig{}, errors.New("")).Once() + mockOffRampReader.On("OnchainConfig", mock.Anything).Return(ccip.ExecOnchainConfig{}, nil).Times(1) + + execConfig.offRampReader = mockOffRampReader + + // Set up the PriceRegistry mock + priceRegistryProvider := new(ccipdataprovidermocks.PriceRegistry) + priceRegistryProvider.On("NewPriceRegistryReader", mock.Anything, mock.Anything).Return(nil, errors.New("")).Once() + priceRegistryProvider.On("NewPriceRegistryReader", mock.Anything, mock.Anything).Return(nil, nil).Once() + execConfig.priceRegistryProvider = priceRegistryProvider + + execConfig.lggr, _ = logger.NewLogger() + + factory := NewExecutionReportingPluginFactory(execConfig) + 
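+	// The raw config bytes below are arbitrary: ChangeConfig is mocked above with mock.Anything
+	// matchers, so these values are never actually decoded in this test.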
reportingConfig := types.ReportingPluginConfig{} + reportingConfig.OnchainConfig = []byte{1, 2, 3} + reportingConfig.OffchainConfig = []byte{1, 2, 3} + + // Assert that NewReportingPlugin succeeds despite many transient internal failures (mocked out above) + _, _, err := factory.NewReportingPlugin(reportingConfig) + assert.Equal(t, nil, err) +} diff --git a/core/services/ocr2/plugins/ccip/ccipexec/gashelpers.go b/core/services/ocr2/plugins/ccip/ccipexec/gashelpers.go new file mode 100644 index 00000000000..7e208296c53 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/ccipexec/gashelpers.go @@ -0,0 +1,83 @@ +package ccipexec + +import ( + "math" + "math/big" + "time" +) + +const ( + EvmAddressLengthBytes = 20 + EvmWordBytes = 32 + CalldataGasPerByte = 16 + TokenAdminRegistryWarmupCost = 2_500 + TokenAdminRegistryPoolLookupGas = 100 + // WARM_ACCESS_COST TokenAdminRegistry + 700 + // CALL cost for TokenAdminRegistry + 2_100 // COLD_SLOAD_COST loading the pool address + SupportsInterfaceCheck = 2600 + // because the receiver will be untouched initially + 30_000*3 // supportsInterface of ERC165Checker library performs 3 static-calls of 30k gas each + PerTokenOverheadGas = TokenAdminRegistryPoolLookupGas + + SupportsInterfaceCheck + + 200_000 + // releaseOrMint using callWithExactGas + 50_000 // transfer using callWithExactGas + RateLimiterOverheadGas = 2_100 + // COLD_SLOAD_COST for accessing token bucket + 5_000 // SSTORE_RESET_GAS for updating & decreasing token bucket + ConstantMessagePartBytes = 10 * 32 // A message consists of 10 abi encoded fields 32B each (after encoding) + ExecutionStateProcessingOverheadGas = 2_100 + // COLD_SLOAD_COST for first reading the state + 20_000 + // SSTORE_SET_GAS for writing from 0 (untouched) to non-zero (in-progress) + 100 //# SLOAD_GAS = WARM_STORAGE_READ_COST for rewriting from non-zero (in-progress) to non-zero (success/failure) +) + +// return the size of bytes for msg tokens +func bytesForMsgTokens(numTokens int) int { + // token address (address) + token amount (uint256) + return (EvmAddressLengthBytes + EvmWordBytes) * numTokens +} + +// Offchain: we compute the max overhead gas to determine msg executability. +func overheadGas(dataLength, numTokens int) uint64 { + messageBytes := ConstantMessagePartBytes + + bytesForMsgTokens(numTokens) + + dataLength + + messageCallDataGas := uint64(messageBytes * CalldataGasPerByte) + + // Rate limiter only limits value in tokens. It's not called if there are no + // tokens in the message. The same goes for the admin registry, it's only loaded + // if there are tokens, and it's only loaded once. + rateLimiterOverhead := uint64(0) + adminRegistryOverhead := uint64(0) + if numTokens >= 1 { + rateLimiterOverhead = RateLimiterOverheadGas + adminRegistryOverhead = TokenAdminRegistryWarmupCost + } + + return messageCallDataGas + + ExecutionStateProcessingOverheadGas + + SupportsInterfaceCheck + + adminRegistryOverhead + + rateLimiterOverhead + + PerTokenOverheadGas*uint64(numTokens) +} + +func maxGasOverHeadGas(numMsgs, dataLength, numTokens int) uint64 { + merkleProofBytes := (math.Ceil(math.Log2(float64(numMsgs))))*32 + (1+2)*32 // only ever one outer root hash + merkleGasShare := uint64(merkleProofBytes * CalldataGasPerByte) + + return overheadGas(dataLength, numTokens) + merkleGasShare +} + +// waitBoostedFee boosts the given fee according to the time passed since the msg was sent. 
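+// For instance, with RelativeBoostPerWaitHour = 0.07 a 6 LINK fee that has waited one hour becomes
+// 6 * (1 + 1*0.07) = 6.42 LINK, i.e. a 0.42 LINK boost (the "wait 1h" case in gashelpers_test.go).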
+// RelativeBoostPerWaitHour is used to normalize the time diff, +// it makes our loss taking "smooth" and gives us time to react without a hard deadline. +// At the same time, messages that are slightly underpaid will start going through after waiting for a little bit. +// +// wait_boosted_fee(m) = (1 + (now - m.send_time).hours * RELATIVE_BOOST_PER_WAIT_HOUR) * fee(m) +func waitBoostedFee(waitTime time.Duration, fee *big.Int, relativeBoostPerWaitHour float64) *big.Int { + k := 1.0 + waitTime.Hours()*relativeBoostPerWaitHour + + boostedFee := big.NewFloat(0).Mul(big.NewFloat(k), new(big.Float).SetInt(fee)) + res, _ := boostedFee.Int(nil) + + return res +} diff --git a/core/services/ocr2/plugins/ccip/ccipexec/gashelpers_test.go b/core/services/ocr2/plugins/ccip/ccipexec/gashelpers_test.go new file mode 100644 index 00000000000..15607cc310e --- /dev/null +++ b/core/services/ocr2/plugins/ccip/ccipexec/gashelpers_test.go @@ -0,0 +1,179 @@ +package ccipexec + +import ( + "math/big" + "reflect" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestOverheadGas(t *testing.T) { + // Only Data and TokenAmounts are used from the messages + // And only the length is used so the contents doesn't matter. + tests := []struct { + dataLength int + numberOfTokens int + want uint64 + }{ + { + dataLength: 0, + numberOfTokens: 0, + want: 119920, + }, + { + dataLength: len([]byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}), + numberOfTokens: 1, + want: 475948, + }, + } + + for _, tc := range tests { + got := overheadGas(tc.dataLength, tc.numberOfTokens) + if !reflect.DeepEqual(tc.want, got) { + t.Fatalf("expected: %v, got: %v", tc.want, got) + } + } +} + +func TestMaxGasOverHeadGas(t *testing.T) { + // Only Data and TokenAmounts are used from the messages + // And only the length is used so the contents doesn't matter. 
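+	// The expected values follow from the constants in gashelpers.go, e.g. for 6 messages with no
+	// data and no tokens: overheadGas(0, 0) = 119_920 plus the merkle proof share
+	// (ceil(log2(6))*32 + 3*32) * 16 = 3_072, giving 122_992.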
+ tests := []struct { + numMsgs int + dataLength int + numberOfTokens int + want uint64 + }{ + { + numMsgs: 6, + dataLength: 0, + numberOfTokens: 0, + want: 122992, + }, + { + numMsgs: 3, + dataLength: len([]byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}), + numberOfTokens: 1, + want: 478508, + }, + } + + for _, tc := range tests { + got := maxGasOverHeadGas(tc.numMsgs, tc.dataLength, tc.numberOfTokens) + if !reflect.DeepEqual(tc.want, got) { + t.Fatalf("expected: %v, got: %v", tc.want, got) + } + } +} + +func TestWaitBoostedFee(t *testing.T) { + tests := []struct { + name string + sendTimeDiff time.Duration + fee *big.Int + diff *big.Int + relativeBoostPerWaitHour float64 + }{ + { + "wait 10s", + time.Second * 10, + big.NewInt(6e18), // Fee: 6 LINK + + big.NewInt(1166666666665984), // Boost: 0.01 LINK + 0.07, + }, + { + "wait 5m", + time.Minute * 5, + big.NewInt(6e18), // Fee: 6 LINK + big.NewInt(35e15), // Boost: 0.35 LINK + 0.07, + }, + { + "wait 7m", + time.Minute * 7, + big.NewInt(6e18), // Fee: 6 LINK + big.NewInt(49e15), // Boost: 0.49 LINK + 0.07, + }, + { + "wait 12m", + time.Minute * 12, + big.NewInt(6e18), // Fee: 6 LINK + big.NewInt(84e15), // Boost: 0.84 LINK + 0.07, + }, + { + "wait 25m", + time.Minute * 25, + big.NewInt(6e18), // Fee: 6 LINK + big.NewInt(174999999999998976), // Boost: 1.75 LINK + 0.07, + }, + { + "wait 1h", + time.Hour * 1, + big.NewInt(6e18), // Fee: 6 LINK + big.NewInt(420e15), // Boost: 4.2 LINK + 0.07, + }, + { + "wait 5h", + time.Hour * 5, + big.NewInt(6e18), // Fee: 6 LINK + big.NewInt(2100000000000001024), // Boost: 21LINK + 0.07, + }, + { + "wait 24h", + time.Hour * 24, + big.NewInt(6e18), // Fee: 6 LINK + big.NewInt(0).Mul(big.NewInt(10), big.NewInt(1008e15)), // Boost: 100LINK + 0.07, + }, + { + "high boost wait 10s", + time.Second * 10, + big.NewInt(5e18), + big.NewInt(9722222222222336), // 1e16 + 0.7, + }, + { + "high boost wait 5m", + time.Minute * 5, + big.NewInt(5e18), + big.NewInt(291666666666667008), // 1e18 + 0.7, + }, + { + "high boost wait 25m", + time.Minute * 25, + big.NewInt(5e18), + big.NewInt(1458333333333334016), // 1e19 + 0.7, + }, + { + "high boost wait 5h", + time.Hour * 5, + big.NewInt(5e18), + big.NewInt(0).Mul(big.NewInt(10), big.NewInt(175e16)), // 1e20 + 0.7, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + boosted := waitBoostedFee(tc.sendTimeDiff, tc.fee, tc.relativeBoostPerWaitHour) + diff := big.NewInt(0).Sub(boosted, tc.fee) + assert.Equal(t, diff, tc.diff) + // we check that the actual diff is approximately equals to expected diff, + // as we might get slightly different results locally vs. 
CI therefore normal Equal() would be unstable + //diffUpperLimit := big.NewInt(0).Add(tc.diff, big.NewInt(1e9)) + //diffLowerLimit := big.NewInt(0).Add(tc.diff, big.NewInt(-1e9)) + //require.Equalf(t, -1, diff.Cmp(diffUpperLimit), "actual diff (%s) is larger than expected (%s)", diff.String(), diffUpperLimit.String()) + //require.Equal(t, 1, diff.Cmp(diffLowerLimit), "actual diff (%s) is smaller than expected (%s)", diff.String(), diffLowerLimit.String()) + }) + } +} diff --git a/core/services/ocr2/plugins/ccip/ccipexec/helpers.go b/core/services/ocr2/plugins/ccip/ccipexec/helpers.go new file mode 100644 index 00000000000..46df7d793ba --- /dev/null +++ b/core/services/ocr2/plugins/ccip/ccipexec/helpers.go @@ -0,0 +1,53 @@ +package ccipexec + +import ( + mapset "github.com/deckarep/golang-set/v2" + "github.com/pkg/errors" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" +) + +// helper struct to hold the commitReport and the related send requests +type commitReportWithSendRequests struct { + commitReport cciptypes.CommitStoreReport + sendRequestsWithMeta []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta +} + +func (r *commitReportWithSendRequests) validate() error { + // make sure that number of messages is the expected + if exp := int(r.commitReport.Interval.Max - r.commitReport.Interval.Min + 1); len(r.sendRequestsWithMeta) != exp { + return errors.Errorf( + "unexpected missing sendRequestsWithMeta in committed root %x have %d want %d", r.commitReport.MerkleRoot, len(r.sendRequestsWithMeta), exp) + } + + return nil +} + +// uniqueSenders returns slice of unique senders based on the send requests. Order is preserved based on the order of the send requests (by sequence number). +func (r *commitReportWithSendRequests) uniqueSenders() []cciptypes.Address { + orderedUniqueSenders := make([]cciptypes.Address, 0, len(r.sendRequestsWithMeta)) + visitedSenders := mapset.NewSet[cciptypes.Address]() + + for _, req := range r.sendRequestsWithMeta { + if !visitedSenders.Contains(req.Sender) { + orderedUniqueSenders = append(orderedUniqueSenders, req.Sender) + visitedSenders.Add(req.Sender) + } + } + return orderedUniqueSenders +} + +func (r *commitReportWithSendRequests) allRequestsAreExecutedAndFinalized() bool { + for _, req := range r.sendRequestsWithMeta { + if !req.Executed || !req.Finalized { + return false + } + } + return true +} + +// checks if the send request fits the commit report interval +func (r *commitReportWithSendRequests) sendReqFits(sendReq cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta) bool { + return sendReq.SequenceNumber >= r.commitReport.Interval.Min && + sendReq.SequenceNumber <= r.commitReport.Interval.Max +} diff --git a/core/services/ocr2/plugins/ccip/ccipexec/helpers_test.go b/core/services/ocr2/plugins/ccip/ccipexec/helpers_test.go new file mode 100644 index 00000000000..daa54fd2428 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/ccipexec/helpers_test.go @@ -0,0 +1,96 @@ +package ccipexec + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils" +) + +func Test_CommitReportWithSendRequests_uniqueSenders(t *testing.T) { + messageFn := func(address cciptypes.Address) cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta { + return cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{EVM2EVMMessage: cciptypes.EVM2EVMMessage{Sender: address}} + } + + tests := []struct { + name string + sendRequests 
[]cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta + expUniqueSenders int + expSendersOrder []cciptypes.Address + }{ + { + name: "all unique senders", + sendRequests: []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{ + messageFn(cciptypes.Address(utils.RandomAddress().String())), + messageFn(cciptypes.Address(utils.RandomAddress().String())), + messageFn(cciptypes.Address(utils.RandomAddress().String())), + }, + expUniqueSenders: 3, + }, + { + name: "some senders are the same", + sendRequests: []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{ + messageFn("0x1"), + messageFn("0x2"), + messageFn("0x1"), + messageFn("0x2"), + messageFn("0x3"), + }, + expUniqueSenders: 3, + expSendersOrder: []cciptypes.Address{ + cciptypes.Address("0x1"), + cciptypes.Address("0x2"), + cciptypes.Address("0x3"), + }, + }, + { + name: "all senders are the same", + sendRequests: []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{ + messageFn("0x1"), + messageFn("0x1"), + messageFn("0x1"), + }, + expUniqueSenders: 1, + expSendersOrder: []cciptypes.Address{ + cciptypes.Address("0x1"), + }, + }, + { + name: "order is preserved", + sendRequests: []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{ + messageFn("0x3"), + messageFn("0x1"), + messageFn("0x3"), + messageFn("0x2"), + messageFn("0x2"), + messageFn("0x1"), + }, + expUniqueSenders: 3, + expSendersOrder: []cciptypes.Address{ + cciptypes.Address("0x3"), + cciptypes.Address("0x1"), + cciptypes.Address("0x2"), + }, + }, + { + name: "no senders", + sendRequests: []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{}, + expUniqueSenders: 0, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + rep := commitReportWithSendRequests{sendRequestsWithMeta: tt.sendRequests} + uniqueSenders := rep.uniqueSenders() + + assert.Len(t, uniqueSenders, tt.expUniqueSenders) + if tt.expSendersOrder != nil { + assert.Equal(t, tt.expSendersOrder, uniqueSenders) + } + }) + } +} diff --git a/core/services/ocr2/plugins/ccip/ccipexec/inflight.go b/core/services/ocr2/plugins/ccip/ccipexec/inflight.go new file mode 100644 index 00000000000..c76bfdf7780 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/ccipexec/inflight.go @@ -0,0 +1,82 @@ +package ccipexec + +import ( + "sync" + "time" + + "github.com/pkg/errors" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + "github.com/smartcontractkit/chainlink/v2/core/logger" +) + +// InflightInternalExecutionReport serves the same purpose as InflightCommitReport +// see the comment on that struct for context. +type InflightInternalExecutionReport struct { + createdAt time.Time + messages []cciptypes.EVM2EVMMessage +} + +// inflightExecReportsContainer holds existing inflight reports. +// it provides a thread-safe access as it is called from multiple goroutines, +// e.g. reporting and transmission protocols. 
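+// Reports older than cacheExpiry are dropped by expire(); the expiry is wired from
+// offchainConfig.InflightCacheExpiry when the plugin is built (see factory.go).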
+type inflightExecReportsContainer struct { + locker sync.RWMutex + reports []InflightInternalExecutionReport + + cacheExpiry time.Duration +} + +func newInflightExecReportsContainer(inflightCacheExpiry time.Duration) *inflightExecReportsContainer { + return &inflightExecReportsContainer{ + locker: sync.RWMutex{}, + reports: make([]InflightInternalExecutionReport, 0), + cacheExpiry: inflightCacheExpiry, + } +} + +func (container *inflightExecReportsContainer) getAll() []InflightInternalExecutionReport { + container.locker.RLock() + defer container.locker.RUnlock() + + reports := make([]InflightInternalExecutionReport, len(container.reports)) + copy(reports[:], container.reports[:]) + + return reports +} + +func (container *inflightExecReportsContainer) expire(lggr logger.Logger) { + container.locker.Lock() + defer container.locker.Unlock() + // Reap old inflight txs and check if any messages in the report are inflight. + var stillInFlight []InflightInternalExecutionReport + for _, report := range container.reports { + if time.Since(report.createdAt) > container.cacheExpiry { + // Happy path: inflight report was successfully transmitted onchain, we remove it from inflight and onchain state reflects inflight. + // Sad path: inflight report reverts onchain, we remove it from inflight, onchain state does not reflect the change so we retry. + lggr.Infow("Inflight report expired", "messages", report.messages) + } else { + stillInFlight = append(stillInFlight, report) + } + } + container.reports = stillInFlight +} + +func (container *inflightExecReportsContainer) add(lggr logger.Logger, messages []cciptypes.EVM2EVMMessage) error { + container.locker.Lock() + defer container.locker.Unlock() + + for _, report := range container.reports { + if (len(report.messages) > 0) && (report.messages[0].SequenceNumber == messages[0].SequenceNumber) { + return errors.Errorf("report is already in flight") + } + } + + // Otherwise not already in flight, add it. 
+ lggr.Info("Inflight report added") + container.reports = append(container.reports, InflightInternalExecutionReport{ + createdAt: time.Now(), + messages: messages, + }) + return nil +} diff --git a/core/services/ocr2/plugins/ccip/ccipexec/inflight_test.go b/core/services/ocr2/plugins/ccip/ccipexec/inflight_test.go new file mode 100644 index 00000000000..2a91457ef4f --- /dev/null +++ b/core/services/ocr2/plugins/ccip/ccipexec/inflight_test.go @@ -0,0 +1,42 @@ +package ccipexec + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + "github.com/smartcontractkit/chainlink/v2/core/logger" +) + +func TestInflightReportsContainer_add(t *testing.T) { + lggr := logger.TestLogger(t) + container := newInflightExecReportsContainer(time.Second) + + err := container.add(lggr, []cciptypes.EVM2EVMMessage{ + {SequenceNumber: 1}, {SequenceNumber: 2}, {SequenceNumber: 3}, + }) + require.NoError(t, err) + err = container.add(lggr, []cciptypes.EVM2EVMMessage{ + {SequenceNumber: 1}, + }) + require.Error(t, err) + require.Equal(t, "report is already in flight", err.Error()) + require.Equal(t, 1, len(container.getAll())) +} + +func TestInflightReportsContainer_expire(t *testing.T) { + lggr := logger.TestLogger(t) + container := newInflightExecReportsContainer(time.Second) + + err := container.add(lggr, []cciptypes.EVM2EVMMessage{ + {SequenceNumber: 1}, {SequenceNumber: 2}, {SequenceNumber: 3}, + }) + require.NoError(t, err) + container.reports[0].createdAt = time.Now().Add(-time.Second * 5) + require.Equal(t, 1, len(container.getAll())) + + container.expire(lggr) + require.Equal(t, 0, len(container.getAll())) +} diff --git a/core/services/ocr2/plugins/ccip/ccipexec/initializers.go b/core/services/ocr2/plugins/ccip/ccipexec/initializers.go new file mode 100644 index 00000000000..7826f6058fe --- /dev/null +++ b/core/services/ocr2/plugins/ccip/ccipexec/initializers.go @@ -0,0 +1,228 @@ +package ccipexec + +import ( + "context" + "encoding/json" + "fmt" + "math/big" + "time" + + "github.com/smartcontractkit/chainlink-common/pkg/types" + + "github.com/Masterminds/semver/v3" + "go.uber.org/multierr" + + libocr2 "github.com/smartcontractkit/libocr/offchainreporting2plus" + + commonlogger "github.com/smartcontractkit/chainlink-common/pkg/logger" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/cache" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc" + "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/statuschecker" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/txmgr" + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/job" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip" + ccipconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/factory" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/observability" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/oraclelib" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/tokendata" + 
"github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/promwrapper" +) + +var ( + // tokenDataWorkerTimeout defines 1) The timeout while waiting for a bg call to the token data 3P provider. + // 2) When a client requests token data and does not specify a timeout this value is used as a default. + // 5 seconds is a reasonable value for a timeout. + // At this moment, minimum OCR Delta Round is set to 30s and deltaGrace to 5s. Based on this configuration + // 5s for token data worker timeout is a reasonable default. + tokenDataWorkerTimeout = 5 * time.Second + // tokenDataWorkerNumWorkers is the number of workers that will be processing token data in parallel. + tokenDataWorkerNumWorkers = 5 +) + +var defaultNewReportingPluginRetryConfig = ccipdata.RetryConfig{InitialDelay: time.Second, MaxDelay: 5 * time.Minute} + +func NewExecServices(ctx context.Context, lggr logger.Logger, jb job.Job, srcProvider types.CCIPExecProvider, dstProvider types.CCIPExecProvider, srcChainID int64, dstChainID int64, new bool, argsNoPlugin libocr2.OCR2OracleArgs, logError func(string)) ([]job.ServiceCtx, error) { + if jb.OCR2OracleSpec == nil { + return nil, fmt.Errorf("spec is nil") + } + spec := jb.OCR2OracleSpec + var pluginConfig ccipconfig.ExecPluginJobSpecConfig + err := json.Unmarshal(spec.PluginConfig.Bytes(), &pluginConfig) + if err != nil { + return nil, err + } + + offRampAddress := ccipcalc.HexToAddress(spec.ContractID) + offRampReader, err := dstProvider.NewOffRampReader(ctx, offRampAddress) + if err != nil { + return nil, fmt.Errorf("create offRampReader: %w", err) + } + + offRampConfig, err := offRampReader.GetStaticConfig(ctx) + if err != nil { + return nil, fmt.Errorf("get offRamp static config: %w", err) + } + + srcChainSelector := offRampConfig.SourceChainSelector + dstChainSelector := offRampConfig.ChainSelector + onRampReader, err := srcProvider.NewOnRampReader(ctx, offRampConfig.OnRamp, srcChainSelector, dstChainSelector) + if err != nil { + return nil, fmt.Errorf("create onRampReader: %w", err) + } + + dynamicOnRampConfig, err := onRampReader.GetDynamicConfig(ctx) + if err != nil { + return nil, fmt.Errorf("get onramp dynamic config: %w", err) + } + + sourceWrappedNative, err := srcProvider.SourceNativeToken(ctx, dynamicOnRampConfig.Router) + if err != nil { + return nil, fmt.Errorf("get source wrapped native token: %w", err) + } + + srcCommitStore, err := srcProvider.NewCommitStoreReader(ctx, offRampConfig.CommitStore) + if err != nil { + return nil, fmt.Errorf("could not create src commitStoreReader reader: %w", err) + } + + dstCommitStore, err := dstProvider.NewCommitStoreReader(ctx, offRampConfig.CommitStore) + if err != nil { + return nil, fmt.Errorf("could not create dst commitStoreReader reader: %w", err) + } + + var commitStoreReader ccipdata.CommitStoreReader + commitStoreReader = ccip.NewProviderProxyCommitStoreReader(srcCommitStore, dstCommitStore) + + tokenDataProviders := make(map[cciptypes.Address]tokendata.Reader) + // init usdc token data provider + if pluginConfig.USDCConfig.AttestationAPI != "" { + lggr.Infof("USDC token data provider enabled") + err2 := pluginConfig.USDCConfig.ValidateUSDCConfig() + if err2 != nil { + return nil, err2 + } + + usdcReader, err2 := srcProvider.NewTokenDataReader(ctx, ccip.EvmAddrToGeneric(pluginConfig.USDCConfig.SourceTokenAddress)) + if err2 != nil { + return nil, fmt.Errorf("new usdc reader: %w", err2) + } + tokenDataProviders[cciptypes.Address(pluginConfig.USDCConfig.SourceTokenAddress.String())] = usdcReader + } + + // Prom 
wrappers + onRampReader = observability.NewObservedOnRampReader(onRampReader, srcChainID, ccip.ExecPluginLabel) + commitStoreReader = observability.NewObservedCommitStoreReader(commitStoreReader, dstChainID, ccip.ExecPluginLabel) + offRampReader = observability.NewObservedOffRampReader(offRampReader, dstChainID, ccip.ExecPluginLabel) + metricsCollector := ccip.NewPluginMetricsCollector(ccip.ExecPluginLabel, srcChainID, dstChainID) + + tokenPoolBatchedReader, err := dstProvider.NewTokenPoolBatchedReader(ctx, offRampAddress, srcChainSelector) + if err != nil { + return nil, fmt.Errorf("new token pool batched reader: %w", err) + } + + chainHealthcheck := cache.NewObservedChainHealthCheck( + cache.NewChainHealthcheck( + // Adding more details to Logger to make healthcheck logs more informative + // It's safe because healthcheck logs only in case of unhealthy state + lggr.With( + "onramp", offRampConfig.OnRamp, + "commitStore", offRampConfig.CommitStore, + "offramp", offRampAddress, + ), + onRampReader, + commitStoreReader, + ), + ccip.ExecPluginLabel, + srcChainID, + dstChainID, + offRampConfig.OnRamp, + ) + + tokenBackgroundWorker := tokendata.NewBackgroundWorker( + tokenDataProviders, + tokenDataWorkerNumWorkers, + tokenDataWorkerTimeout, + 2*tokenDataWorkerTimeout, + ) + + wrappedPluginFactory := NewExecutionReportingPluginFactory(ExecutionPluginStaticConfig{ + lggr: lggr, + onRampReader: onRampReader, + commitStoreReader: commitStoreReader, + offRampReader: offRampReader, + sourcePriceRegistryProvider: ccip.NewChainAgnosticPriceRegistry(srcProvider), + sourceWrappedNativeToken: sourceWrappedNative, + destChainSelector: dstChainSelector, + priceRegistryProvider: ccip.NewChainAgnosticPriceRegistry(dstProvider), + tokenPoolBatchedReader: tokenPoolBatchedReader, + tokenDataWorker: tokenBackgroundWorker, + metricsCollector: metricsCollector, + chainHealthcheck: chainHealthcheck, + newReportingPluginRetryConfig: defaultNewReportingPluginRetryConfig, + txmStatusChecker: statuschecker.NewTxmStatusChecker(dstProvider.GetTransactionStatus), + }) + + argsNoPlugin.ReportingPluginFactory = promwrapper.NewPromFactory(wrappedPluginFactory, "CCIPExecution", jb.OCR2OracleSpec.Relay, big.NewInt(0).SetInt64(dstChainID)) + argsNoPlugin.Logger = commonlogger.NewOCRWrapper(lggr, true, logError) + oracle, err := libocr2.NewOracle(argsNoPlugin) + if err != nil { + return nil, err + } + // If this is a brand-new job, then we make use of the start blocks. If not then we're rebooting and log poller will pick up where we left off. + if new { + return []job.ServiceCtx{ + oraclelib.NewChainAgnosticBackFilledOracle( + lggr, + srcProvider, + dstProvider, + job.NewServiceAdapter(oracle), + ), + chainHealthcheck, + tokenBackgroundWorker, + }, nil + } + return []job.ServiceCtx{ + job.NewServiceAdapter(oracle), + chainHealthcheck, + tokenBackgroundWorker, + }, nil +} + +// UnregisterExecPluginLpFilters unregisters all the registered filters for both source and dest chains. +// See comment in UnregisterCommitPluginLpFilters +// It MUST mirror the filters registered in NewExecServices. +// This currently works because the filters registered by the created custom providers when the job is first added +// are stored in the db. Those same filters are unregistered (i.e. deleted from the db) by the newly created providers +// that are passed in from cleanupEVM, as while the providers have no knowledge of each other, they are created +// on the same source and dest relayer. 
+func UnregisterExecPluginLpFilters(srcProvider types.CCIPExecProvider, dstProvider types.CCIPExecProvider) error { + unregisterFuncs := []func() error{ + func() error { + return srcProvider.Close() + }, + func() error { + return dstProvider.Close() + }, + } + + var multiErr error + for _, fn := range unregisterFuncs { + if err := fn(); err != nil { + multiErr = multierr.Append(multiErr, err) + } + } + return multiErr +} + +// ExecReportToEthTxMeta generates a txmgr.EthTxMeta from the given report. +// Only MessageIDs will be populated in the TxMeta. +func ExecReportToEthTxMeta(ctx context.Context, typ ccipconfig.ContractType, ver semver.Version) (func(report []byte) (*txmgr.TxMeta, error), error) { + return factory.ExecReportToEthTxMeta(ctx, typ, ver) +} diff --git a/core/services/ocr2/plugins/ccip/ccipexec/ocr2.go b/core/services/ocr2/plugins/ccip/ccipexec/ocr2.go new file mode 100644 index 00000000000..4a09cf37b45 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/ccipexec/ocr2.go @@ -0,0 +1,845 @@ +package ccipexec + +import ( + "context" + "encoding/hex" + "fmt" + "math/big" + "sort" + "sync" + "time" + + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/pkg/errors" + "golang.org/x/sync/errgroup" + + "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + + "github.com/smartcontractkit/chainlink-common/pkg/hashutil" + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/cache" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcommon" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/batchreader" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/ccipdataprovider" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/prices" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/tokendata" + "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/statuschecker" +) + +const ( + // exec Report should make sure to cap returned payload to this limit + MaxExecutionReportLength = 250_000 + + // MaxDataLenPerBatch limits the total length of msg data that can be in a batch. + MaxDataLenPerBatch = 60_000 + + // MaximumAllowedTokenDataWaitTimePerBatch defines the maximum time that is allowed + // for the plugin to wait for token data to be fetched from external providers per batch. 
+ MaximumAllowedTokenDataWaitTimePerBatch = 2 * time.Second + + // MessagesIterationStep limits number of messages fetched to memory at once when iterating through unexpired CommitRoots + MessagesIterationStep = 1024 +) + +var ( + _ types.ReportingPluginFactory = &ExecutionReportingPluginFactory{} + _ types.ReportingPlugin = &ExecutionReportingPlugin{} +) + +type ExecutionPluginStaticConfig struct { + lggr logger.Logger + onRampReader ccipdata.OnRampReader + offRampReader ccipdata.OffRampReader + commitStoreReader ccipdata.CommitStoreReader + sourcePriceRegistryProvider ccipdataprovider.PriceRegistry + sourceWrappedNativeToken cciptypes.Address + tokenDataWorker tokendata.Worker + destChainSelector uint64 + priceRegistryProvider ccipdataprovider.PriceRegistry // destination price registry provider. + tokenPoolBatchedReader batchreader.TokenPoolBatchedReader + metricsCollector ccip.PluginMetricsCollector + chainHealthcheck cache.ChainHealthcheck + newReportingPluginRetryConfig ccipdata.RetryConfig + txmStatusChecker statuschecker.CCIPTransactionStatusChecker +} + +type ExecutionReportingPlugin struct { + // Misc + F int + lggr logger.Logger + offchainConfig cciptypes.ExecOffchainConfig + tokenDataWorker tokendata.Worker + metricsCollector ccip.PluginMetricsCollector + batchingStrategy BatchingStrategy + + // Source + gasPriceEstimator prices.GasPriceEstimatorExec + sourcePriceRegistry ccipdata.PriceRegistryReader + sourcePriceRegistryProvider ccipdataprovider.PriceRegistry + sourcePriceRegistryLock sync.RWMutex + sourceWrappedNativeToken cciptypes.Address + onRampReader ccipdata.OnRampReader + + // Dest + commitStoreReader ccipdata.CommitStoreReader + destPriceRegistry ccipdata.PriceRegistryReader + destWrappedNative cciptypes.Address + onchainConfig cciptypes.ExecOnchainConfig + offRampReader ccipdata.OffRampReader + tokenPoolBatchedReader batchreader.TokenPoolBatchedReader + + // State + inflightReports *inflightExecReportsContainer + commitRootsCache cache.CommitsRootsCache + chainHealthcheck cache.ChainHealthcheck +} + +func (r *ExecutionReportingPlugin) Query(context.Context, types.ReportTimestamp) (types.Query, error) { + return types.Query{}, nil +} + +func (r *ExecutionReportingPlugin) Observation(ctx context.Context, timestamp types.ReportTimestamp, query types.Query) (types.Observation, error) { + lggr := r.lggr.Named("ExecutionObservation") + if healthy, err := r.chainHealthcheck.IsHealthy(ctx); err != nil { + return nil, err + } else if !healthy { + return nil, ccip.ErrChainIsNotHealthy + } + + // Ensure that the source price registry is synchronized with the onRamp. + if err := r.ensurePriceRegistrySynchronization(ctx); err != nil { + return nil, fmt.Errorf("ensuring price registry synchronization: %w", err) + } + + // Expire any inflight reports. 
+ r.inflightReports.expire(lggr) + inFlight := r.inflightReports.getAll() + + executableObservations, err := r.getExecutableObservations(ctx, lggr, inFlight) + if err != nil { + return nil, err + } + // cap observations which fits MaxObservationLength (after serialized) + capped := sort.Search(len(executableObservations), func(i int) bool { + var encoded []byte + encoded, err = ccip.NewExecutionObservation(executableObservations[:i+1]).Marshal() + if err != nil { + // false makes Search keep looking to the right, always including any "erroring" ObservedMessage and allowing us to detect in the bottom + return false + } + return len(encoded) > ccip.MaxObservationLength + }) + if err != nil { + return nil, err + } + executableObservations = executableObservations[:capped] + r.metricsCollector.NumberOfMessagesProcessed(ccip.Observation, len(executableObservations)) + lggr.Infow("Observation", "executableMessages", executableObservations) + // Note can be empty + return ccip.NewExecutionObservation(executableObservations).Marshal() +} + +func (r *ExecutionReportingPlugin) getExecutableObservations(ctx context.Context, lggr logger.Logger, inflight []InflightInternalExecutionReport) ([]ccip.ObservedMessage, error) { + unexpiredReports, err := r.commitRootsCache.RootsEligibleForExecution(ctx) + if err != nil { + return nil, err + } + r.metricsCollector.UnexpiredCommitRoots(len(unexpiredReports)) + + if len(unexpiredReports) == 0 { + return []ccip.ObservedMessage{}, nil + } + + getExecTokenData := cache.LazyFunction[execTokenData](func() (execTokenData, error) { + return r.prepareTokenExecData(ctx) + }) + + for j := 0; j < len(unexpiredReports); { + unexpiredReportsPart, step := selectReportsToFillBatch(unexpiredReports[j:], MessagesIterationStep) + j += step + + unexpiredReportsWithSendReqs, err := r.getReportsWithSendRequests(ctx, unexpiredReportsPart) + if err != nil { + return nil, err + } + + for _, unexpiredReport := range unexpiredReportsWithSendReqs { + r.tokenDataWorker.AddJobsFromMsgs(ctx, unexpiredReport.sendRequestsWithMeta) + } + + for _, rep := range unexpiredReportsWithSendReqs { + if ctx.Err() != nil { + lggr.Warn("Processing of roots killed by context") + break + } + + merkleRoot := rep.commitReport.MerkleRoot + + rootLggr := lggr.With("root", hexutil.Encode(merkleRoot[:]), + "minSeqNr", rep.commitReport.Interval.Min, + "maxSeqNr", rep.commitReport.Interval.Max, + ) + + if err := rep.validate(); err != nil { + rootLggr.Errorw("Skipping invalid report", "err", err) + continue + } + + // If all messages are already executed and finalized, snooze the root for + // config.PermissionLessExecutionThresholdSeconds so it will never be considered again. 
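+	// (MarkAsExecuted removes the root from the eligible set permanently, in contrast to the temporary Snooze applied further below when no batch can be built.)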
+ if allMsgsExecutedAndFinalized := rep.allRequestsAreExecutedAndFinalized(); allMsgsExecutedAndFinalized { + rootLggr.Infow("Snoozing root forever since there are no executable txs anymore", "root", hex.EncodeToString(merkleRoot[:])) + r.commitRootsCache.MarkAsExecuted(merkleRoot) + continue + } + + blessed, err := r.commitStoreReader.IsBlessed(ctx, merkleRoot) + if err != nil { + return nil, err + } + if !blessed { + rootLggr.Infow("Report is accepted but not blessed") + continue + } + + tokenExecData, err := getExecTokenData() + if err != nil { + return nil, err + } + + batch, msgExecStates := r.buildBatch( + ctx, + inflight, + rootLggr, + rep, + tokenExecData.rateLimiterTokenBucket.Tokens, + tokenExecData.sourceTokenPrices, + tokenExecData.destTokenPrices, + tokenExecData.gasPrice, + tokenExecData.sourceToDestTokens) + if len(batch) != 0 { + lggr.Infow("Execution batch created", "batchSize", len(batch), "messageStates", msgExecStates) + return batch, nil + } + r.commitRootsCache.Snooze(merkleRoot) + } + } + return []ccip.ObservedMessage{}, nil +} + +// Calculates a map that indicates whether a sequence number has already been executed. +// It doesn't matter if the execution succeeded, since we don't retry previous +// attempts even if they failed. Value in the map indicates whether the log is finalized or not. +func (r *ExecutionReportingPlugin) getExecutedSeqNrsInRange(ctx context.Context, min, max uint64) (map[uint64]bool, error) { + stateChanges, err := r.offRampReader.GetExecutionStateChangesBetweenSeqNums( + ctx, + min, + max, + int(r.offchainConfig.DestOptimisticConfirmations), + ) + if err != nil { + return nil, err + } + executedMp := make(map[uint64]bool, len(stateChanges)) + for _, stateChange := range stateChanges { + executedMp[stateChange.SequenceNumber] = stateChange.TxMeta.IsFinalized() + } + return executedMp, nil +} + +// Builds a batch of transactions that can be executed, takes into account +// the available gas, rate limiting, execution state, nonce state, and +// profitability of execution. +func (r *ExecutionReportingPlugin) buildBatch( + ctx context.Context, + inflight []InflightInternalExecutionReport, + lggr logger.Logger, + report commitReportWithSendRequests, + aggregateTokenLimit *big.Int, + sourceTokenPricesUSD map[cciptypes.Address]*big.Int, + destTokenPricesUSD map[cciptypes.Address]*big.Int, + gasPrice *big.Int, + sourceToDestToken map[cciptypes.Address]cciptypes.Address, +) ([]ccip.ObservedMessage, []messageExecStatus) { + // We assume that next observation will start after previous epoch transmission so nonces should be already updated onchain. + // Worst case scenario we will try to process the same message again, and it will be skipped but protocol would progress anyway. + // We don't use inflightCache here to avoid cases in which inflight cache keeps progressing but due to transmission failures + // previous reports are not included onchain. 
That can lead to issues with IncorrectNonce skips, + // because we enforce sequential processing per sender (per sender's nonce ordering is enforced by Offramp contract) + sendersNonce, err := r.offRampReader.ListSenderNonces(ctx, report.uniqueSenders()) + if err != nil { + lggr.Errorw("Fetching senders nonce", "err", err) + return []ccip.ObservedMessage{}, []messageExecStatus{} + } + + inflightAggregateValue, err := getInflightAggregateRateLimit(lggr, inflight, destTokenPricesUSD, sourceToDestToken) + if err != nil { + lggr.Errorw("Unexpected error computing inflight values", "err", err) + return []ccip.ObservedMessage{}, nil + } + + batchCtx := &BatchContext{ + report, + inflight, + inflightAggregateValue, + lggr, + MaxDataLenPerBatch, + uint64(r.offchainConfig.BatchGasLimit), + make(map[cciptypes.Address]uint64), + sendersNonce, + sourceTokenPricesUSD, + destTokenPricesUSD, + gasPrice, + sourceToDestToken, + aggregateTokenLimit, + MaximumAllowedTokenDataWaitTimePerBatch, + r.tokenDataWorker, + r.gasPriceEstimator, + r.destWrappedNative, + r.offchainConfig, + } + + return r.batchingStrategy.BuildBatch(ctx, batchCtx) +} + +func calculateMessageMaxGas(gasLimit *big.Int, numRequests, dataLen, numTokens int) (uint64, error) { + if !gasLimit.IsUint64() { + return 0, fmt.Errorf("gas limit %s cannot be casted to uint64", gasLimit) + } + + gasLimitU64 := gasLimit.Uint64() + gasOverHeadGas := maxGasOverHeadGas(numRequests, dataLen, numTokens) + messageMaxGas := gasLimitU64 + gasOverHeadGas + + if messageMaxGas < gasLimitU64 || messageMaxGas < gasOverHeadGas { + return 0, fmt.Errorf("message max gas overflow, gasLimit=%d gasOverHeadGas=%d", gasLimitU64, gasOverHeadGas) + } + + return messageMaxGas, nil +} + +// getReportsWithSendRequests returns the target reports with populated send requests. +func (r *ExecutionReportingPlugin) getReportsWithSendRequests( + ctx context.Context, + reports []cciptypes.CommitStoreReport, +) ([]commitReportWithSendRequests, error) { + if len(reports) == 0 { + return nil, nil + } + + // find interval from all the reports + intervalMin := reports[0].Interval.Min + intervalMax := reports[0].Interval.Max + for _, report := range reports[1:] { + if report.Interval.Max > intervalMax { + intervalMax = report.Interval.Max + } + if report.Interval.Min < intervalMin { + intervalMin = report.Interval.Min + } + } + + // use errgroup to fetch send request logs and executed sequence numbers in parallel + eg := &errgroup.Group{} + + var sendRequests []cciptypes.EVM2EVMMessageWithTxMeta + eg.Go(func() error { + // We don't need to double-check if logs are finalized because we already checked that in the Commit phase. 
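+		// For that reason the call below does not require the logs to be finalized.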
+ sendReqs, err := r.onRampReader.GetSendRequestsBetweenSeqNums(ctx, intervalMin, intervalMax, false) + if err != nil { + return err + } + sendRequests = sendReqs + return nil + }) + + var executedSeqNums map[uint64]bool + eg.Go(func() error { + // get executed sequence numbers + executedMp, err := r.getExecutedSeqNrsInRange(ctx, intervalMin, intervalMax) + if err != nil { + return err + } + executedSeqNums = executedMp + return nil + }) + + if err := eg.Wait(); err != nil { + return nil, err + } + + reportsWithSendReqs := make([]commitReportWithSendRequests, len(reports)) + for i, report := range reports { + reportsWithSendReqs[i] = commitReportWithSendRequests{ + commitReport: report, + sendRequestsWithMeta: make([]cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta, 0, report.Interval.Max-report.Interval.Min+1), + } + } + + for _, sendReq := range sendRequests { + // if value exists in the map then it's executed + // if value exists, and it's true then it's considered finalized + finalized, executed := executedSeqNums[sendReq.SequenceNumber] + + reqWithMeta := cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{ + EVM2EVMMessage: sendReq.EVM2EVMMessage, + BlockTimestamp: time.UnixMilli(sendReq.BlockTimestampUnixMilli), + Executed: executed, + Finalized: finalized, + LogIndex: uint(sendReq.LogIndex), + TxHash: sendReq.TxHash, + } + + // attach the msg to the appropriate reports + for i := range reportsWithSendReqs { + if reportsWithSendReqs[i].sendReqFits(reqWithMeta) { + reportsWithSendReqs[i].sendRequestsWithMeta = append(reportsWithSendReqs[i].sendRequestsWithMeta, reqWithMeta) + } + } + } + + return reportsWithSendReqs, nil +} + +// Assumes non-empty report. Messages to execute can span more than one report, but are assumed to be in order of increasing +// sequence number. 
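+// The result is capped via a binary search over the encoded size so that it stays within MaxExecutionReportLength.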
+func (r *ExecutionReportingPlugin) buildReport(ctx context.Context, lggr logger.Logger, observedMessages []ccip.ObservedMessage) ([]byte, error) {
+	if err := validateSeqNumbers(ctx, r.commitStoreReader, observedMessages); err != nil {
+		return nil, err
+	}
+	commitReport, err := getCommitReportForSeqNum(ctx, r.commitStoreReader, observedMessages[0].SeqNr)
+	if err != nil {
+		return nil, err
+	}
+	lggr.Infow("Building execution report", "observations", observedMessages, "merkleRoot", hexutil.Encode(commitReport.MerkleRoot[:]), "report", commitReport)
+
+	sendReqsInRoot, _, tree, err := getProofData(ctx, r.onRampReader, commitReport.Interval)
+	if err != nil {
+		return nil, err
+	}
+
+	// cap the number of messages so that the serialized report fits within MaxExecutionReportLength
+	capped := sort.Search(len(observedMessages), func(i int) bool {
+		report, err2 := buildExecutionReportForMessages(sendReqsInRoot, tree, commitReport.Interval, observedMessages[:i+1])
+		if err2 != nil {
+			r.lggr.Errorw("build execution report", "err", err2)
+			return false
+		}
+
+		encoded, err2 := r.offRampReader.EncodeExecutionReport(ctx, report)
+		if err2 != nil {
+			// returning false makes Search keep looking to the right, always including any "erroring" ObservedMessage so the error is surfaced below
+			return false
+		}
+		return len(encoded) > MaxExecutionReportLength
+	})
+
+	execReport, err := buildExecutionReportForMessages(sendReqsInRoot, tree, commitReport.Interval, observedMessages[:capped])
+	if err != nil {
+		return nil, err
+	}
+
+	encodedReport, err := r.offRampReader.EncodeExecutionReport(ctx, execReport)
+	if err != nil {
+		return nil, err
+	}
+
+	if capped < len(observedMessages) {
+		lggr.Warnf(
+			"Capping report to fit MaxExecutionReportLength: msgsCount %d -> %d, bytes %d, bytesLimit %d",
+			len(observedMessages), capped, len(encodedReport), MaxExecutionReportLength,
+		)
+	}
+	// Double-check the report verifies before sending.
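+	// A report whose proofs do not verify against the commit store would revert on-chain, so it is cheaper to reject it here.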
+ valid, err := r.commitStoreReader.VerifyExecutionReport(ctx, execReport) + if err != nil { + return nil, errors.Wrap(err, "unable to verify") + } + if !valid { + return nil, errors.New("root does not verify") + } + if len(execReport.Messages) > 0 { + r.metricsCollector.NumberOfMessagesProcessed(ccip.Report, len(execReport.Messages)) + r.metricsCollector.SequenceNumber(ccip.Report, execReport.Messages[len(execReport.Messages)-1].SequenceNumber) + } + return encodedReport, nil +} + +func (r *ExecutionReportingPlugin) Report(ctx context.Context, timestamp types.ReportTimestamp, query types.Query, observations []types.AttributedObservation) (bool, types.Report, error) { + lggr := r.lggr.Named("ExecutionReport") + if healthy, err := r.chainHealthcheck.IsHealthy(ctx); err != nil { + return false, nil, err + } else if !healthy { + return false, nil, ccip.ErrChainIsNotHealthy + } + parsableObservations := ccip.GetParsableObservations[ccip.ExecutionObservation](lggr, observations) + // Need at least F+1 observations + if len(parsableObservations) <= r.F { + lggr.Warn("Non-empty observations <= F, need at least F+1 to continue") + return false, nil, nil + } + + observedMessages, err := calculateObservedMessagesConsensus(parsableObservations, r.F) + if err != nil { + return false, nil, err + } + if len(observedMessages) == 0 { + return false, nil, nil + } + + report, err := r.buildReport(ctx, lggr, observedMessages) + if err != nil { + return false, nil, err + } + lggr.Infow("Report", "executableObservations", observedMessages) + return true, report, nil +} + +type tallyKey struct { + seqNr uint64 + tokenDataHash [32]byte +} + +type tallyVal struct { + tally int + tokenData [][]byte +} + +func calculateObservedMessagesConsensus(observations []ccip.ExecutionObservation, f int) ([]ccip.ObservedMessage, error) { + tally := make(map[tallyKey]tallyVal) + for _, obs := range observations { + for seqNr, msgData := range obs.Messages { + tokenDataHash, err := hashutil.BytesOfBytesKeccak(msgData.TokenData) + if err != nil { + return nil, fmt.Errorf("bytes of bytes keccak: %w", err) + } + + key := tallyKey{seqNr: seqNr, tokenDataHash: tokenDataHash} + if val, ok := tally[key]; ok { + tally[key] = tallyVal{tally: val.tally + 1, tokenData: msgData.TokenData} + } else { + tally[key] = tallyVal{tally: 1, tokenData: msgData.TokenData} + } + } + } + + // We might have different token data for the same sequence number. + // For that purpose we want to keep the token data with the most occurrences. + seqNumTally := make(map[uint64]tallyVal) + + // order tally keys to make looping over the entries deterministic + tallyKeys := make([]tallyKey, 0, len(tally)) + for key := range tally { + tallyKeys = append(tallyKeys, key) + } + sort.Slice(tallyKeys, func(i, j int) bool { + return hex.EncodeToString(tallyKeys[i].tokenDataHash[:]) < hex.EncodeToString(tallyKeys[j].tokenDataHash[:]) + }) + + for _, key := range tallyKeys { + tallyInfo := tally[key] + existingTally, exists := seqNumTally[key.seqNr] + if tallyInfo.tally > f && (!exists || tallyInfo.tally > existingTally.tally) { + seqNumTally[key.seqNr] = tallyInfo + } + } + + finalSequenceNumbers := make([]ccip.ObservedMessage, 0, len(seqNumTally)) + for seqNr, tallyInfo := range seqNumTally { + finalSequenceNumbers = append(finalSequenceNumbers, ccip.NewObservedMessage(seqNr, tallyInfo.tokenData)) + } + // buildReport expects sorted sequence numbers (tally map is non-deterministic). 
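+	// (Go randomizes map iteration order, so an explicit sort is required for a deterministic result.)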
+ sort.Slice(finalSequenceNumbers, func(i, j int) bool { + return finalSequenceNumbers[i].SeqNr < finalSequenceNumbers[j].SeqNr + }) + return finalSequenceNumbers, nil +} + +func (r *ExecutionReportingPlugin) ShouldAcceptFinalizedReport(ctx context.Context, timestamp types.ReportTimestamp, report types.Report) (bool, error) { + lggr := r.lggr.Named("ShouldAcceptFinalizedReport") + execReport, err := r.offRampReader.DecodeExecutionReport(ctx, report) + if err != nil { + lggr.Errorw("Unable to decode report", "err", err) + return false, err + } + lggr = lggr.With("messageIDs", ccipcommon.GetMessageIDsAsHexString(execReport.Messages)) + + if healthy, err1 := r.chainHealthcheck.IsHealthy(ctx); err1 != nil { + return false, err1 + } else if !healthy { + return false, ccip.ErrChainIsNotHealthy + } + // If the first message is executed already, this execution report is stale, and we do not accept it. + stale, err := r.isStaleReport(ctx, execReport.Messages) + if err != nil { + return false, err + } + if stale { + lggr.Info("Execution report is stale") + return false, nil + } + // Else just assume in flight + if err = r.inflightReports.add(lggr, execReport.Messages); err != nil { + return false, err + } + if len(execReport.Messages) > 0 { + r.metricsCollector.SequenceNumber(ccip.ShouldAccept, execReport.Messages[len(execReport.Messages)-1].SequenceNumber) + } + lggr.Info("Accepting finalized report") + return true, nil +} + +func (r *ExecutionReportingPlugin) ShouldTransmitAcceptedReport(ctx context.Context, timestamp types.ReportTimestamp, report types.Report) (bool, error) { + lggr := r.lggr.Named("ShouldTransmitAcceptedReport") + execReport, err := r.offRampReader.DecodeExecutionReport(ctx, report) + if err != nil { + lggr.Errorw("Unable to decode report", "err", err) + return false, nil + } + lggr = lggr.With("messageIDs", ccipcommon.GetMessageIDsAsHexString(execReport.Messages)) + + if healthy, err1 := r.chainHealthcheck.IsHealthy(ctx); err1 != nil { + return false, err1 + } else if !healthy { + return false, ccip.ErrChainIsNotHealthy + } + // If report is not stale we transmit. + // When the executeTransmitter enqueues the tx for tx manager, + // we mark it as execution_sent, removing it from the set of inflight messages. + stale, err := r.isStaleReport(ctx, execReport.Messages) + if err != nil { + return false, err + } + if stale { + lggr.Info("Execution report is stale") + return false, nil + } + + lggr.Info("Transmitting finalized report") + return true, err +} + +func (r *ExecutionReportingPlugin) isStaleReport(ctx context.Context, messages []cciptypes.EVM2EVMMessage) (bool, error) { + if len(messages) == 0 { + return true, fmt.Errorf("messages are empty") + } + + // If the first message is executed already, this execution report is stale. + // Note the default execution state, including for arbitrary seq number not yet committed + // is ExecutionStateUntouched. 
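+	// Only the terminal states (success or failure) make the report stale; untouched or in-progress messages keep it executable.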
+ msgState, err := r.offRampReader.GetExecutionState(ctx, messages[0].SequenceNumber) + if err != nil { + return true, err + } + if state := cciptypes.MessageExecutionState(msgState); state == cciptypes.ExecutionStateFailure || state == cciptypes.ExecutionStateSuccess { + return true, nil + } + + return false, nil +} + +func (r *ExecutionReportingPlugin) Close() error { + return nil +} + +func getInflightAggregateRateLimit( + lggr logger.Logger, + inflight []InflightInternalExecutionReport, + destTokenPrices map[cciptypes.Address]*big.Int, + sourceToDest map[cciptypes.Address]cciptypes.Address, +) (*big.Int, error) { + inflightAggregateValue := big.NewInt(0) + + for _, rep := range inflight { + for _, message := range rep.messages { + msgValue, err := aggregateTokenValue(lggr, destTokenPrices, sourceToDest, message.TokenAmounts) + if err != nil { + return nil, err + } + inflightAggregateValue.Add(inflightAggregateValue, msgValue) + } + } + return inflightAggregateValue, nil +} + +// getTokensPrices returns token prices of the given price registry, +// price values are USD per 1e18 of smallest token denomination, in base units 1e18 (e.g. 5$ = 5e18 USD per 1e18 units). +// this function is used for price registry of both source and destination chains. +func getTokensPrices(ctx context.Context, priceRegistry ccipdata.PriceRegistryReader, tokens []cciptypes.Address) (map[cciptypes.Address]*big.Int, error) { + tokenPrices := make(map[cciptypes.Address]*big.Int) + + fetchedPrices, err := priceRegistry.GetTokenPrices(ctx, tokens) + if err != nil { + return nil, errors.Wrapf(err, "could not get token prices of %v", tokens) + } + + // price registry should always return a price per token ordered by input tokens + if len(fetchedPrices) != len(tokens) { + return nil, fmt.Errorf("token prices length exp=%d actual=%d", len(tokens), len(fetchedPrices)) + } + + for i, token := range tokens { + // price of a token can never be zero + if fetchedPrices[i].Value.BitLen() == 0 { + priceRegistryAddress, err := priceRegistry.Address(ctx) + if err != nil { + return nil, fmt.Errorf("get price registry address: %w", err) + } + return nil, fmt.Errorf("price of token %s is zero (price registry=%s)", token, priceRegistryAddress) + } + + // price registry should not report different price for the same token + price, exists := tokenPrices[token] + if exists && fetchedPrices[i].Value.Cmp(price) != 0 { + return nil, fmt.Errorf("price registry reported different prices (%s and %s) for the same token %s", + fetchedPrices[i].Value, price, token) + } + + tokenPrices[token] = fetchedPrices[i].Value + } + + return tokenPrices, nil +} + +type execTokenData struct { + rateLimiterTokenBucket cciptypes.TokenBucketRateLimit + sourceTokenPrices map[cciptypes.Address]*big.Int + destTokenPrices map[cciptypes.Address]*big.Int + sourceToDestTokens map[cciptypes.Address]cciptypes.Address + gasPrice *big.Int +} + +// prepareTokenExecData gather all the pre-execution data needed for token execution into a single lazy call. +// This is done to avoid fetching the data multiple times for each message. Additionally, most of the RPC calls +// within that function is cached, so it should be relatively fast and not require any RPC batching. +func (r *ExecutionReportingPlugin) prepareTokenExecData(ctx context.Context) (execTokenData, error) { + // This could result in slightly different values on each call as + // the function returns the allowed amount at the time of the last block. 
+ // Since this will only increase over time, the highest observed value will + // always be the lower bound of what would be available on chain + // since we already account for inflight txs. + rateLimiterTokenBucket, err := r.offRampReader.CurrentRateLimiterState(ctx) + if err != nil { + return execTokenData{}, err + } + + sourceFeeTokens, err := r.sourcePriceRegistry.GetFeeTokens(ctx) + if err != nil { + return execTokenData{}, fmt.Errorf("get source fee tokens: %w", err) + } + sourceTokensPrices, err := getTokensPrices( + ctx, + r.sourcePriceRegistry, + ccipcommon.FlattenUniqueSlice( + sourceFeeTokens, + []cciptypes.Address{r.sourceWrappedNativeToken}, + ), + ) + if err != nil { + return execTokenData{}, err + } + + destFeeTokens, destBridgedTokens, err := ccipcommon.GetDestinationTokens(ctx, r.offRampReader, r.destPriceRegistry) + if err != nil { + return execTokenData{}, fmt.Errorf("get destination tokens: %w", err) + } + destTokenPrices, err := getTokensPrices( + ctx, + r.destPriceRegistry, + ccipcommon.FlattenUniqueSlice( + destFeeTokens, + destBridgedTokens, + []cciptypes.Address{r.destWrappedNative}, + ), + ) + if err != nil { + return execTokenData{}, err + } + + sourceToDestTokens, err := r.offRampReader.GetSourceToDestTokensMapping(ctx) + if err != nil { + return execTokenData{}, err + } + + gasPrice, err := r.gasPriceEstimator.GetGasPrice(ctx) + if err != nil { + return execTokenData{}, err + } + + return execTokenData{ + rateLimiterTokenBucket: rateLimiterTokenBucket, + sourceTokenPrices: sourceTokensPrices, + sourceToDestTokens: sourceToDestTokens, + destTokenPrices: destTokenPrices, + gasPrice: gasPrice, + }, nil +} + +// ensurePriceRegistrySynchronization ensures that the source price registry points to the same as the one configured on the onRamp. +// This is required since the price registry address on the onRamp can change over time. +func (r *ExecutionReportingPlugin) ensurePriceRegistrySynchronization(ctx context.Context) error { + needPriceRegistryUpdate := false + r.sourcePriceRegistryLock.RLock() + priceRegistryAddress, err := r.onRampReader.SourcePriceRegistryAddress(ctx) + if err != nil { + r.sourcePriceRegistryLock.RUnlock() + return fmt.Errorf("getting price registry from onramp: %w", err) + } + + currentPriceRegistryAddress := cciptypes.Address("") + if r.sourcePriceRegistry != nil { + currentPriceRegistryAddress, err = r.sourcePriceRegistry.Address(ctx) + if err != nil { + return fmt.Errorf("get current priceregistry address: %w", err) + } + } + + needPriceRegistryUpdate = r.sourcePriceRegistry == nil || priceRegistryAddress != currentPriceRegistryAddress + r.sourcePriceRegistryLock.RUnlock() + if !needPriceRegistryUpdate { + return nil + } + + // Update the price registry if required. + r.sourcePriceRegistryLock.Lock() + defer r.sourcePriceRegistryLock.Unlock() + + // Price registry address changed or not initialized yet, updating source price registry. + sourcePriceRegistry, err := r.sourcePriceRegistryProvider.NewPriceRegistryReader(ctx, priceRegistryAddress) + if err != nil { + return err + } + oldPriceRegistry := r.sourcePriceRegistry + r.sourcePriceRegistry = sourcePriceRegistry + // Close the old price registry + if oldPriceRegistry != nil { + if err1 := oldPriceRegistry.Close(); err1 != nil { + r.lggr.Warnw("failed to close old price registry", "err", err1) + } + } + return nil +} + +// selectReportsToFillBatch returns the reports to fill the message limit. Single Commit Root contains exactly (Interval.Max - Interval.Min + 1) messages. 
+// We keep adding reports until we reach the message limit. Please see the tests for more examples and edge cases. +// unexpiredReports have to be sorted by Interval.Min. Otherwise, the batching logic will not be efficient, +// because it picks messages and execution states based on the report[0].Interval.Min - report[len-1].Interval.Max range. +// Having unexpiredReports not sorted properly will lead to fetching more messages and execution states to the memory than the messagesLimit provided. +// However, logs from LogPoller are returned ordered by (block_number, log_index), so it should preserve the order of Interval.Min. +// Single CommitRoot can have up to 256 messages, with current MessagesIterationStep of 1024, it means processing 4 CommitRoots at once. +func selectReportsToFillBatch(unexpiredReports []cciptypes.CommitStoreReport, messagesLimit uint64) ([]cciptypes.CommitStoreReport, int) { + currentNumberOfMessages := uint64(0) + nbReports := 0 + for _, report := range unexpiredReports { + reportMsgCount := report.Interval.Max - report.Interval.Min + 1 + if currentNumberOfMessages+reportMsgCount > messagesLimit { + break + } + currentNumberOfMessages += reportMsgCount + nbReports++ + } + return unexpiredReports[:nbReports], nbReports +} diff --git a/core/services/ocr2/plugins/ccip/ccipexec/ocr2_test.go b/core/services/ocr2/plugins/ccip/ccipexec/ocr2_test.go new file mode 100644 index 00000000000..84cb73c6643 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/ccipexec/ocr2_test.go @@ -0,0 +1,1421 @@ +package ccipexec + +import ( + "bytes" + "context" + "encoding/json" + "math" + "math/big" + "reflect" + "sort" + "sync" + "testing" + "time" + + "github.com/cometbft/cometbft/libs/rand" + mapset "github.com/deckarep/golang-set/v2" + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + "github.com/smartcontractkit/libocr/commontypes" + "github.com/smartcontractkit/libocr/offchainreporting2/types" + ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + + lpMocks "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller/mocks" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils" + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/cache" + ccipcachemocks "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/cache/mocks" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/batchreader" + ccipdataprovidermocks "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/ccipdataprovider/mocks" + ccipdatamocks "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/mocks" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/prices" + 
"github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/testhelpers" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/tokendata" + + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" +) + +func TestExecutionReportingPlugin_Observation(t *testing.T) { + testCases := []struct { + name string + commitStorePaused bool + sourceChainCursed bool + inflightReports []InflightInternalExecutionReport + unexpiredReports []cciptypes.CommitStoreReportWithTxMeta + sendRequests []cciptypes.EVM2EVMMessageWithTxMeta + executedSeqNums []uint64 + tokenPoolsMapping map[common.Address]common.Address + blessedRoots map[[32]byte]bool + senderNonce uint64 + rateLimiterState cciptypes.TokenBucketRateLimit + expErr bool + sourceChainHealthy bool + destChainHealthy bool + }{ + { + name: "commit store is down", + commitStorePaused: true, + sourceChainCursed: false, + sourceChainHealthy: true, + destChainHealthy: true, + expErr: true, + }, + { + name: "source chain is cursed", + commitStorePaused: false, + sourceChainCursed: true, + sourceChainHealthy: true, + destChainHealthy: true, + expErr: true, + }, + { + name: "source chain not healthy", + commitStorePaused: false, + sourceChainCursed: false, + sourceChainHealthy: false, + destChainHealthy: true, + expErr: true, + }, + { + name: "dest chain not healthy", + commitStorePaused: false, + sourceChainCursed: false, + sourceChainHealthy: true, + destChainHealthy: false, + expErr: true, + }, + { + name: "happy flow", + commitStorePaused: false, + sourceChainCursed: false, + sourceChainHealthy: true, + destChainHealthy: true, + inflightReports: []InflightInternalExecutionReport{}, + unexpiredReports: []cciptypes.CommitStoreReportWithTxMeta{ + { + CommitStoreReport: cciptypes.CommitStoreReport{ + Interval: cciptypes.CommitStoreInterval{Min: 10, Max: 12}, + MerkleRoot: [32]byte{123}, + }, + }, + }, + blessedRoots: map[[32]byte]bool{ + {123}: true, + }, + rateLimiterState: cciptypes.TokenBucketRateLimit{ + IsEnabled: false, + }, + tokenPoolsMapping: map[common.Address]common.Address{}, + senderNonce: 9, + sendRequests: []cciptypes.EVM2EVMMessageWithTxMeta{ + { + EVM2EVMMessage: cciptypes.EVM2EVMMessage{SequenceNumber: 10, GasLimit: big.NewInt(0)}, + }, + { + EVM2EVMMessage: cciptypes.EVM2EVMMessage{SequenceNumber: 11, GasLimit: big.NewInt(0)}, + }, + { + EVM2EVMMessage: cciptypes.EVM2EVMMessage{SequenceNumber: 12, GasLimit: big.NewInt(0)}, + }, + }, + }, + } + + ctx := testutils.Context(t) + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + p := &ExecutionReportingPlugin{} + p.inflightReports = newInflightExecReportsContainer(time.Minute) + p.inflightReports.reports = tc.inflightReports + p.lggr = logger.TestLogger(t) + p.tokenDataWorker = tokendata.NewBackgroundWorker( + make(map[cciptypes.Address]tokendata.Reader), 10, 5*time.Second, time.Hour) + p.metricsCollector = ccip.NoopMetricsCollector + + commitStoreReader := ccipdatamocks.NewCommitStoreReader(t) + commitStoreReader.On("IsDown", mock.Anything).Return(tc.commitStorePaused, nil).Maybe() + commitStoreReader.On("IsDestChainHealthy", mock.Anything).Return(tc.destChainHealthy, nil).Maybe() + // Blessed roots return true + for root, blessed := range tc.blessedRoots { + commitStoreReader.On("IsBlessed", mock.Anything, root).Return(blessed, nil).Maybe() + } + commitStoreReader.On("GetAcceptedCommitReportsGteTimestamp", ctx, mock.Anything, 0). 
+ Return(tc.unexpiredReports, nil).Maybe() + p.commitStoreReader = commitStoreReader + + var executionEvents []cciptypes.ExecutionStateChangedWithTxMeta + for _, seqNum := range tc.executedSeqNums { + executionEvents = append(executionEvents, cciptypes.ExecutionStateChangedWithTxMeta{ + ExecutionStateChanged: cciptypes.ExecutionStateChanged{SequenceNumber: seqNum}, + }) + } + + offRamp, _ := testhelpers.NewFakeOffRamp(t) + offRamp.SetRateLimiterState(tc.rateLimiterState) + + tokenPoolBatchedReader, err := batchreader.NewEVMTokenPoolBatchedReader(p.lggr, 0, ccipcalc.EvmAddrToGeneric(offRamp.Address()), nil) + assert.NoError(t, err) + p.tokenPoolBatchedReader = tokenPoolBatchedReader + + mockOffRampReader := ccipdatamocks.NewOffRampReader(t) + mockOffRampReader.On("GetExecutionStateChangesBetweenSeqNums", ctx, mock.Anything, mock.Anything, 0). + Return(executionEvents, nil).Maybe() + mockOffRampReader.On("CurrentRateLimiterState", mock.Anything).Return(tc.rateLimiterState, nil).Maybe() + mockOffRampReader.On("Address", ctx).Return(cciptypes.Address(offRamp.Address().String()), nil).Maybe() + senderNonces := map[cciptypes.Address]uint64{ + cciptypes.Address(utils.RandomAddress().String()): tc.senderNonce, + } + mockOffRampReader.On("ListSenderNonces", mock.Anything, mock.Anything).Return(senderNonces, nil).Maybe() + mockOffRampReader.On("GetTokenPoolsRateLimits", ctx, []ccipdata.TokenPoolReader{}). + Return([]cciptypes.TokenBucketRateLimit{}, nil).Maybe() + + mockOffRampReader.On("GetSourceToDestTokensMapping", ctx).Return(nil, nil).Maybe() + mockOffRampReader.On("GetTokens", ctx).Return(cciptypes.OffRampTokens{ + DestinationTokens: []cciptypes.Address{}, + SourceTokens: []cciptypes.Address{}, + }, nil).Maybe() + p.offRampReader = mockOffRampReader + + mockOnRampReader := ccipdatamocks.NewOnRampReader(t) + mockOnRampReader.On("IsSourceCursed", ctx).Return(tc.sourceChainCursed, nil).Maybe() + mockOnRampReader.On("IsSourceChainHealthy", ctx).Return(tc.sourceChainHealthy, nil).Maybe() + mockOnRampReader.On("GetSendRequestsBetweenSeqNums", ctx, mock.Anything, mock.Anything, false). 
+ Return(tc.sendRequests, nil).Maybe() + sourcePriceRegistryAddress := cciptypes.Address(utils.RandomAddress().String()) + mockOnRampReader.On("SourcePriceRegistryAddress", ctx).Return(sourcePriceRegistryAddress, nil).Maybe() + p.onRampReader = mockOnRampReader + + mockGasPriceEstimator := prices.NewMockGasPriceEstimatorExec(t) + mockGasPriceEstimator.On("GetGasPrice", ctx).Return(big.NewInt(1), nil).Maybe() + p.gasPriceEstimator = mockGasPriceEstimator + + destPriceRegReader := ccipdatamocks.NewPriceRegistryReader(t) + destPriceRegReader.On("GetTokenPrices", ctx, mock.Anything).Return( + []cciptypes.TokenPriceUpdate{{TokenPrice: cciptypes.TokenPrice{Token: ccipcalc.HexToAddress("0x1"), Value: big.NewInt(123)}, TimestampUnixSec: big.NewInt(time.Now().Unix())}}, nil).Maybe() + destPriceRegReader.On("Address", ctx).Return(cciptypes.Address(utils.RandomAddress().String()), nil).Maybe() + destPriceRegReader.On("GetFeeTokens", ctx).Return([]cciptypes.Address{}, nil).Maybe() + sourcePriceRegReader := ccipdatamocks.NewPriceRegistryReader(t) + sourcePriceRegReader.On("Address", ctx).Return(sourcePriceRegistryAddress, nil).Maybe() + sourcePriceRegReader.On("GetFeeTokens", ctx).Return([]cciptypes.Address{}, nil).Maybe() + sourcePriceRegReader.On("GetTokenPrices", ctx, mock.Anything).Return( + []cciptypes.TokenPriceUpdate{{TokenPrice: cciptypes.TokenPrice{Token: ccipcalc.HexToAddress("0x1"), Value: big.NewInt(123)}, TimestampUnixSec: big.NewInt(time.Now().Unix())}}, nil).Maybe() + p.destPriceRegistry = destPriceRegReader + + mockOnRampPriceRegistryProvider := ccipdataprovidermocks.NewPriceRegistry(t) + mockOnRampPriceRegistryProvider.On("NewPriceRegistryReader", ctx, sourcePriceRegistryAddress).Return(sourcePriceRegReader, nil).Maybe() + p.sourcePriceRegistryProvider = mockOnRampPriceRegistryProvider + + p.commitRootsCache = cache.NewCommitRootsCache(logger.TestLogger(t), commitStoreReader, time.Minute, time.Minute) + p.chainHealthcheck = cache.NewChainHealthcheck(p.lggr, mockOnRampReader, commitStoreReader) + + bs := &BestEffortBatchingStrategy{} + p.batchingStrategy = bs + + _, err = p.Observation(ctx, types.ReportTimestamp{}, types.Query{}) + if tc.expErr { + assert.Error(t, err) + return + } + assert.NoError(t, err) + }) + } +} + +func TestExecutionReportingPlugin_Report(t *testing.T) { + testCases := []struct { + name string + f int + committedSeqNum uint64 + observations []ccip.ExecutionObservation + + expectingSomeReport bool + expectedReport cciptypes.ExecReport + expectingSomeErr bool + }{ + { + name: "not enough observations to form consensus", + f: 5, + committedSeqNum: 5, + observations: []ccip.ExecutionObservation{ + {Messages: map[uint64]ccip.MsgData{3: {}, 4: {}}}, + {Messages: map[uint64]ccip.MsgData{3: {}, 4: {}}}, + }, + expectingSomeErr: false, + expectingSomeReport: false, + }, + { + name: "zero observations", + f: 0, + committedSeqNum: 5, + observations: []ccip.ExecutionObservation{}, + expectingSomeErr: false, + expectingSomeReport: false, + }, + } + + ctx := testutils.Context(t) + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + p := ExecutionReportingPlugin{} + p.lggr = logger.TestLogger(t) + p.F = tc.f + + p.commitStoreReader = ccipdatamocks.NewCommitStoreReader(t) + chainHealthcheck := ccipcachemocks.NewChainHealthcheck(t) + chainHealthcheck.On("IsHealthy", ctx).Return(true, nil) + p.chainHealthcheck = chainHealthcheck + + observations := make([]types.AttributedObservation, len(tc.observations)) + for i := range observations { + b, err := 
json.Marshal(tc.observations[i]) + assert.NoError(t, err) + observations[i] = types.AttributedObservation{Observation: b, Observer: commontypes.OracleID(i + 1)} + } + + _, _, err := p.Report(ctx, types.ReportTimestamp{}, types.Query{}, observations) + if tc.expectingSomeErr { + assert.Error(t, err) + return + } + assert.NoError(t, err) + }) + } +} + +func TestExecutionReportingPlugin_ShouldAcceptFinalizedReport(t *testing.T) { + msg := cciptypes.EVM2EVMMessage{ + SequenceNumber: 12, + FeeTokenAmount: big.NewInt(1e9), + Sender: cciptypes.Address(utils.RandomAddress().String()), + Nonce: 1, + GasLimit: big.NewInt(1), + Strict: false, + Receiver: cciptypes.Address(utils.RandomAddress().String()), + Data: nil, + TokenAmounts: nil, + FeeToken: cciptypes.Address(utils.RandomAddress().String()), + MessageID: [32]byte{}, + } + report := cciptypes.ExecReport{ + Messages: []cciptypes.EVM2EVMMessage{msg}, + OffchainTokenData: [][][]byte{{}}, + Proofs: [][32]byte{{}}, + ProofFlagBits: big.NewInt(1), + } + + encodedReport := encodeExecutionReport(t, report) + mockOffRampReader := ccipdatamocks.NewOffRampReader(t) + mockOffRampReader.On("DecodeExecutionReport", mock.Anything, encodedReport).Return(report, nil) + + chainHealthcheck := ccipcachemocks.NewChainHealthcheck(t) + chainHealthcheck.On("IsHealthy", mock.Anything).Return(true, nil) + + plugin := ExecutionReportingPlugin{ + offRampReader: mockOffRampReader, + lggr: logger.TestLogger(t), + inflightReports: newInflightExecReportsContainer(1 * time.Hour), + chainHealthcheck: chainHealthcheck, + metricsCollector: ccip.NoopMetricsCollector, + } + + mockedExecState := mockOffRampReader.On("GetExecutionState", mock.Anything, uint64(12)).Return(uint8(cciptypes.ExecutionStateUntouched), nil).Once() + + should, err := plugin.ShouldAcceptFinalizedReport(testutils.Context(t), ocrtypes.ReportTimestamp{}, encodedReport) + require.NoError(t, err) + assert.Equal(t, true, should) + + mockedExecState.Return(uint8(cciptypes.ExecutionStateSuccess), nil).Once() + + should, err = plugin.ShouldAcceptFinalizedReport(testutils.Context(t), ocrtypes.ReportTimestamp{}, encodedReport) + require.NoError(t, err) + assert.Equal(t, false, should) +} + +func TestExecutionReportingPlugin_ShouldTransmitAcceptedReport(t *testing.T) { + msg := cciptypes.EVM2EVMMessage{ + SequenceNumber: 12, + FeeTokenAmount: big.NewInt(1e9), + Sender: cciptypes.Address(utils.RandomAddress().String()), + Nonce: 1, + GasLimit: big.NewInt(1), + Strict: false, + Receiver: cciptypes.Address(utils.RandomAddress().String()), + Data: nil, + TokenAmounts: nil, + FeeToken: cciptypes.Address(utils.RandomAddress().String()), + MessageID: [32]byte{}, + } + report := cciptypes.ExecReport{ + Messages: []cciptypes.EVM2EVMMessage{msg}, + OffchainTokenData: [][][]byte{{}}, + Proofs: [][32]byte{{}}, + ProofFlagBits: big.NewInt(1), + } + encodedReport := encodeExecutionReport(t, report) + + mockCommitStoreReader := ccipdatamocks.NewCommitStoreReader(t) + mockOffRampReader := ccipdatamocks.NewOffRampReader(t) + mockOffRampReader.On("DecodeExecutionReport", mock.Anything, encodedReport).Return(report, nil) + mockedExecState := mockOffRampReader.On("GetExecutionState", mock.Anything, uint64(12)).Return(uint8(cciptypes.ExecutionStateUntouched), nil).Once() + + chainHealthcheck := ccipcachemocks.NewChainHealthcheck(t) + chainHealthcheck.On("IsHealthy", mock.Anything).Return(true, nil) + + plugin := ExecutionReportingPlugin{ + commitStoreReader: mockCommitStoreReader, + offRampReader: mockOffRampReader, + lggr: 
logger.TestLogger(t), + inflightReports: newInflightExecReportsContainer(1 * time.Hour), + chainHealthcheck: chainHealthcheck, + } + + should, err := plugin.ShouldTransmitAcceptedReport(testutils.Context(t), ocrtypes.ReportTimestamp{}, encodedReport) + require.NoError(t, err) + assert.Equal(t, true, should) + + mockedExecState.Return(uint8(cciptypes.ExecutionStateFailure), nil).Once() + should, err = plugin.ShouldTransmitAcceptedReport(testutils.Context(t), ocrtypes.ReportTimestamp{}, encodedReport) + require.NoError(t, err) + assert.Equal(t, false, should) +} + +func TestExecutionReportingPlugin_buildReport(t *testing.T) { + ctx := testutils.Context(t) + + const numMessages = 100 + const tokensPerMessage = 20 + const bytesPerMessage = 1000 + + executionReport := generateExecutionReport(t, numMessages, tokensPerMessage, bytesPerMessage) + encodedReport := encodeExecutionReport(t, executionReport) + // ensure "naive" full report would be bigger than limit + assert.Greater(t, len(encodedReport), MaxExecutionReportLength, "full execution report length") + + observations := make([]ccip.ObservedMessage, len(executionReport.Messages)) + for i, msg := range executionReport.Messages { + observations[i] = ccip.NewObservedMessage(msg.SequenceNumber, executionReport.OffchainTokenData[i]) + } + + // ensure that buildReport should cap the built report to fit in MaxExecutionReportLength + p := &ExecutionReportingPlugin{} + p.lggr = logger.TestLogger(t) + + commitStore := ccipdatamocks.NewCommitStoreReader(t) + commitStore.On("VerifyExecutionReport", mock.Anything, mock.Anything, mock.Anything).Return(true, nil) + commitStore.On("GetExpectedNextSequenceNumber", mock.Anything). + Return(executionReport.Messages[len(executionReport.Messages)-1].SequenceNumber+1, nil) + commitStore.On("GetCommitReportMatchingSeqNum", ctx, observations[0].SeqNr, 0). 
+ Return([]cciptypes.CommitStoreReportWithTxMeta{ + { + CommitStoreReport: cciptypes.CommitStoreReport{ + Interval: cciptypes.CommitStoreInterval{ + Min: observations[0].SeqNr, + Max: observations[len(observations)-1].SeqNr, + }, + }, + }, + }, nil) + p.metricsCollector = ccip.NoopMetricsCollector + p.commitStoreReader = commitStore + + lp := lpMocks.NewLogPoller(t) + offRampReader, err := v1_0_0.NewOffRamp(logger.TestLogger(t), utils.RandomAddress(), nil, lp, nil, nil) + assert.NoError(t, err) + p.offRampReader = offRampReader + + sendReqs := make([]cciptypes.EVM2EVMMessageWithTxMeta, len(observations)) + sourceReader := ccipdatamocks.NewOnRampReader(t) + for i := range observations { + msg := cciptypes.EVM2EVMMessage{ + SourceChainSelector: math.MaxUint64, + SequenceNumber: uint64(i + 1), + FeeTokenAmount: big.NewInt(math.MaxInt64), + Sender: cciptypes.Address(utils.RandomAddress().String()), + Nonce: math.MaxUint64, + GasLimit: big.NewInt(math.MaxInt64), + Strict: false, + Receiver: cciptypes.Address(utils.RandomAddress().String()), + Data: bytes.Repeat([]byte{0}, bytesPerMessage), + TokenAmounts: nil, + FeeToken: cciptypes.Address(utils.RandomAddress().String()), + MessageID: [32]byte{12}, + } + sendReqs[i] = cciptypes.EVM2EVMMessageWithTxMeta{EVM2EVMMessage: msg} + } + sourceReader.On("GetSendRequestsBetweenSeqNums", + ctx, observations[0].SeqNr, observations[len(observations)-1].SeqNr, false).Return(sendReqs, nil) + p.onRampReader = sourceReader + + execReport, err := p.buildReport(ctx, p.lggr, observations) + assert.NoError(t, err) + assert.LessOrEqual(t, len(execReport), MaxExecutionReportLength, "built execution report length") +} + +func TestExecutionReportingPlugin_getReportsWithSendRequests(t *testing.T) { + testCases := []struct { + name string + reports []cciptypes.CommitStoreReport + expQueryMin uint64 // expected min/max used in the query to get ccipevents + expQueryMax uint64 + onchainEvents []cciptypes.EVM2EVMMessageWithTxMeta + destExecutedSeqNums []uint64 + + expReports []commitReportWithSendRequests + expErr bool + }{ + { + name: "no reports", + reports: nil, + expReports: nil, + expErr: false, + }, + { + name: "two reports happy flow", + reports: []cciptypes.CommitStoreReport{ + { + Interval: cciptypes.CommitStoreInterval{Min: 1, Max: 2}, + MerkleRoot: [32]byte{100}, + }, + { + Interval: cciptypes.CommitStoreInterval{Min: 3, Max: 3}, + MerkleRoot: [32]byte{200}, + }, + }, + expQueryMin: 1, + expQueryMax: 3, + onchainEvents: []cciptypes.EVM2EVMMessageWithTxMeta{ + {EVM2EVMMessage: cciptypes.EVM2EVMMessage{SequenceNumber: 1}}, + {EVM2EVMMessage: cciptypes.EVM2EVMMessage{SequenceNumber: 2}}, + {EVM2EVMMessage: cciptypes.EVM2EVMMessage{SequenceNumber: 3}}, + }, + destExecutedSeqNums: []uint64{1}, + expReports: []commitReportWithSendRequests{ + { + commitReport: cciptypes.CommitStoreReport{ + Interval: cciptypes.CommitStoreInterval{Min: 1, Max: 2}, + MerkleRoot: [32]byte{100}, + }, + sendRequestsWithMeta: []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{ + { + EVM2EVMMessage: cciptypes.EVM2EVMMessage{SequenceNumber: 1}, + Executed: true, + Finalized: true, + }, + { + EVM2EVMMessage: cciptypes.EVM2EVMMessage{SequenceNumber: 2}, + Executed: false, + Finalized: false, + }, + }, + }, + { + commitReport: cciptypes.CommitStoreReport{ + Interval: cciptypes.CommitStoreInterval{Min: 3, Max: 3}, + MerkleRoot: [32]byte{200}, + }, + sendRequestsWithMeta: []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{ + { + EVM2EVMMessage: cciptypes.EVM2EVMMessage{SequenceNumber: 3}, + Executed: 
false, + Finalized: false, + }, + }, + }, + }, + expErr: false, + }, + } + + ctx := testutils.Context(t) + lggr := logger.TestLogger(t) + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + p := &ExecutionReportingPlugin{} + p.lggr = lggr + + offRampReader := ccipdatamocks.NewOffRampReader(t) + p.offRampReader = offRampReader + + sourceReader := ccipdatamocks.NewOnRampReader(t) + sourceReader.On("GetSendRequestsBetweenSeqNums", ctx, tc.expQueryMin, tc.expQueryMax, false). + Return(tc.onchainEvents, nil).Maybe() + p.onRampReader = sourceReader + + finalized := make(map[uint64]cciptypes.FinalizedStatus) + for _, r := range tc.expReports { + for _, s := range r.sendRequestsWithMeta { + finalized[s.SequenceNumber] = cciptypes.FinalizedStatusNotFinalized + if s.Finalized { + finalized[s.SequenceNumber] = cciptypes.FinalizedStatusFinalized + } + } + } + + var executedEvents []cciptypes.ExecutionStateChangedWithTxMeta + for _, executedSeqNum := range tc.destExecutedSeqNums { + executedEvents = append(executedEvents, cciptypes.ExecutionStateChangedWithTxMeta{ + ExecutionStateChanged: cciptypes.ExecutionStateChanged{ + SequenceNumber: executedSeqNum, + }, + TxMeta: cciptypes.TxMeta{ + Finalized: finalized[executedSeqNum], + }, + }) + } + offRampReader.On("GetExecutionStateChangesBetweenSeqNums", ctx, tc.expQueryMin, tc.expQueryMax, 0).Return(executedEvents, nil).Maybe() + + populatedReports, err := p.getReportsWithSendRequests(ctx, tc.reports) + if tc.expErr { + assert.Error(t, err) + return + } + assert.NoError(t, err) + assert.Equal(t, len(tc.expReports), len(populatedReports)) + for i, expReport := range tc.expReports { + assert.Equal(t, len(expReport.sendRequestsWithMeta), len(populatedReports[i].sendRequestsWithMeta)) + for j, expReq := range expReport.sendRequestsWithMeta { + assert.Equal(t, expReq.Executed, populatedReports[i].sendRequestsWithMeta[j].Executed) + assert.Equal(t, expReq.Finalized, populatedReports[i].sendRequestsWithMeta[j].Finalized) + assert.Equal(t, expReq.SequenceNumber, populatedReports[i].sendRequestsWithMeta[j].SequenceNumber) + } + } + }) + } +} + +func Test_calculateObservedMessagesConsensus(t *testing.T) { + type args struct { + observations []ccip.ExecutionObservation + f int + } + tests := []struct { + name string + args args + want []ccip.ObservedMessage + }{ + { + name: "no observations", + args: args{ + observations: nil, + f: 0, + }, + want: []ccip.ObservedMessage{}, + }, + { + name: "common path", + args: args{ + observations: []ccip.ExecutionObservation{ + { + Messages: map[uint64]ccip.MsgData{ + 1: {TokenData: [][]byte{{0x1}, {0x1}, {0x1}}}, + 2: {TokenData: [][]byte{{0x2}, {0x2}, {0x2}}}, + }, + }, + { + Messages: map[uint64]ccip.MsgData{ + 1: {TokenData: [][]byte{{0x1}, {0x1}, {0xff}}}, // different token data - should not be picked + 2: {TokenData: [][]byte{{0x2}, {0x2}, {0x2}}}, + 3: {TokenData: [][]byte{{0x3}, {0x3}, {0x3}}}, + }, + }, + { + Messages: map[uint64]ccip.MsgData{ + 1: {TokenData: [][]byte{{0x1}, {0x1}, {0x1}}}, + 2: {TokenData: [][]byte{{0x2}, {0x2}, {0x2}}}, + }, + }, + }, + f: 1, + }, + want: []ccip.ObservedMessage{ + {SeqNr: 1, MsgData: ccip.MsgData{TokenData: [][]byte{{0x1}, {0x1}, {0x1}}}}, + {SeqNr: 2, MsgData: ccip.MsgData{TokenData: [][]byte{{0x2}, {0x2}, {0x2}}}}, + }, + }, + { + name: "similar token data", + args: args{ + observations: []ccip.ExecutionObservation{ + { + Messages: map[uint64]ccip.MsgData{ + 1: {TokenData: [][]byte{{0x1}, {0x1}, {0x1}}}, + }, + }, + { + Messages: map[uint64]ccip.MsgData{ + 1: 
{TokenData: [][]byte{{0x1}, {0x1, 0x1}}}, + }, + }, + { + Messages: map[uint64]ccip.MsgData{ + 1: {TokenData: [][]byte{{0x1}, {0x1, 0x1}}}, + }, + }, + }, + f: 1, + }, + want: []ccip.ObservedMessage{ + {SeqNr: 1, MsgData: ccip.MsgData{TokenData: [][]byte{{0x1}, {0x1, 0x1}}}}, + }, + }, + { + name: "results should be deterministic", + args: args{ + observations: []ccip.ExecutionObservation{ + {Messages: map[uint64]ccip.MsgData{1: {TokenData: [][]byte{{0x2}}}}}, + {Messages: map[uint64]ccip.MsgData{1: {TokenData: [][]byte{{0x2}}}}}, + {Messages: map[uint64]ccip.MsgData{1: {TokenData: [][]byte{{0x1}}}}}, + {Messages: map[uint64]ccip.MsgData{1: {TokenData: [][]byte{{0x3}}}}}, + {Messages: map[uint64]ccip.MsgData{1: {TokenData: [][]byte{{0x3}}}}}, + {Messages: map[uint64]ccip.MsgData{1: {TokenData: [][]byte{{0x1}}}}}, + }, + f: 1, + }, + want: []ccip.ObservedMessage{ + {SeqNr: 1, MsgData: ccip.MsgData{TokenData: [][]byte{{0x3}}}}, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + res, err := calculateObservedMessagesConsensus( + tt.args.observations, + tt.args.f, + ) + assert.NoError(t, err) + sort.Slice(res, func(i, j int) bool { + return res[i].SeqNr < res[j].SeqNr + }) + assert.Equalf(t, tt.want, res, "calculateObservedMessagesConsensus(%v, %v)", tt.args.observations, tt.args.f) + }) + } +} + +func Test_getTokensPrices(t *testing.T) { + tk1 := ccipcalc.HexToAddress("1") + tk2 := ccipcalc.HexToAddress("2") + tk3 := ccipcalc.HexToAddress("3") + + testCases := []struct { + name string + feeTokens []cciptypes.Address + tokens []cciptypes.Address + retPrices []cciptypes.TokenPriceUpdate + expPrices map[cciptypes.Address]*big.Int + expErr bool + }{ + { + name: "base", + feeTokens: []cciptypes.Address{tk1, tk2}, + tokens: []cciptypes.Address{tk3}, + retPrices: []cciptypes.TokenPriceUpdate{ + {TokenPrice: cciptypes.TokenPrice{Value: big.NewInt(10)}}, + {TokenPrice: cciptypes.TokenPrice{Value: big.NewInt(20)}}, + {TokenPrice: cciptypes.TokenPrice{Value: big.NewInt(30)}}, + }, + expPrices: map[cciptypes.Address]*big.Int{ + tk1: big.NewInt(10), + tk2: big.NewInt(20), + tk3: big.NewInt(30), + }, + expErr: false, + }, + { + name: "token is both fee token and normal token", + feeTokens: []cciptypes.Address{tk1, tk2}, + tokens: []cciptypes.Address{tk3, tk1}, + retPrices: []cciptypes.TokenPriceUpdate{ + {TokenPrice: cciptypes.TokenPrice{Value: big.NewInt(10)}}, + {TokenPrice: cciptypes.TokenPrice{Value: big.NewInt(20)}}, + {TokenPrice: cciptypes.TokenPrice{Value: big.NewInt(30)}}, + {TokenPrice: cciptypes.TokenPrice{Value: big.NewInt(10)}}, + }, + expPrices: map[cciptypes.Address]*big.Int{ + tk1: big.NewInt(10), + tk2: big.NewInt(20), + tk3: big.NewInt(30), + }, + expErr: false, + }, + { + name: "token is both fee token and normal token and price registry gave different price", + feeTokens: []cciptypes.Address{tk1, tk2}, + tokens: []cciptypes.Address{tk3, tk1}, + retPrices: []cciptypes.TokenPriceUpdate{ + {TokenPrice: cciptypes.TokenPrice{Value: big.NewInt(10)}}, + {TokenPrice: cciptypes.TokenPrice{Value: big.NewInt(20)}}, + {TokenPrice: cciptypes.TokenPrice{Value: big.NewInt(30)}}, + {TokenPrice: cciptypes.TokenPrice{Value: big.NewInt(1000)}}, + }, + expErr: true, + }, + { + name: "contract returns less prices than requested", + feeTokens: []cciptypes.Address{tk1, tk2}, + tokens: []cciptypes.Address{tk3}, + retPrices: []cciptypes.TokenPriceUpdate{ + {TokenPrice: cciptypes.TokenPrice{Value: big.NewInt(10)}}, + {TokenPrice: cciptypes.TokenPrice{Value: big.NewInt(20)}}, + }, 
+ expErr: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + priceReg := ccipdatamocks.NewPriceRegistryReader(t) + priceReg.On("GetTokenPrices", mock.Anything, mock.Anything).Return(tc.retPrices, nil) + priceReg.On("Address", mock.Anything).Return(cciptypes.Address(utils.RandomAddress().String()), nil).Maybe() + + tokenPrices, err := getTokensPrices(context.Background(), priceReg, append(tc.feeTokens, tc.tokens...)) + if tc.expErr { + assert.Error(t, err) + return + } + + assert.NoError(t, err) + for tk, price := range tc.expPrices { + assert.Equal(t, price, tokenPrices[tk]) + } + }) + } +} + +func Test_calculateMessageMaxGas(t *testing.T) { + type args struct { + gasLimit *big.Int + numRequests int + dataLen int + numTokens int + } + tests := []struct { + name string + args args + want uint64 + wantErr bool + }{ + { + name: "base", + args: args{gasLimit: big.NewInt(1000), numRequests: 5, dataLen: 5, numTokens: 2}, + want: 826_336, + wantErr: false, + }, + { + name: "large", + args: args{gasLimit: big.NewInt(1000), numRequests: 1000, dataLen: 1000, numTokens: 1000}, + want: 346_485_176, + wantErr: false, + }, + { + name: "gas limit overflow", + args: args{gasLimit: big.NewInt(0).Mul(big.NewInt(math.MaxInt64), big.NewInt(math.MaxInt64))}, + want: 36_391_540, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := calculateMessageMaxGas(tt.args.gasLimit, tt.args.numRequests, tt.args.dataLen, tt.args.numTokens) + if tt.wantErr { + assert.Error(t, err) + return + } + assert.NoError(t, err) + assert.Equalf(t, tt.want, got, "calculateMessageMaxGas(%v, %v, %v, %v)", tt.args.gasLimit, tt.args.numRequests, tt.args.dataLen, tt.args.numTokens) + }) + } +} + +func Test_inflightAggregates(t *testing.T) { + const n = 10 + addrs := make([]cciptypes.Address, n) + tokenAddrs := make([]cciptypes.Address, n) + for i := range addrs { + addrs[i] = cciptypes.Address(utils.RandomAddress().String()) + tokenAddrs[i] = cciptypes.Address(utils.RandomAddress().String()) + } + lggr := logger.TestLogger(t) + + testCases := []struct { + name string + inflight []InflightInternalExecutionReport + destTokenPrices map[cciptypes.Address]*big.Int + sourceToDest map[cciptypes.Address]cciptypes.Address + + expInflightSeqNrs mapset.Set[uint64] + expInflightAggrVal *big.Int + expMaxInflightSenderNonces map[cciptypes.Address]uint64 + expInflightTokenAmounts map[cciptypes.Address]*big.Int + expErr bool + }{ + { + name: "base", + inflight: []InflightInternalExecutionReport{ + { + messages: []cciptypes.EVM2EVMMessage{ + { + Sender: addrs[0], + SequenceNumber: 100, + Nonce: 2, + TokenAmounts: []cciptypes.TokenAmount{ + {Token: tokenAddrs[0], Amount: big.NewInt(1e18)}, + {Token: tokenAddrs[0], Amount: big.NewInt(2e18)}, + }, + }, + { + Sender: addrs[0], + SequenceNumber: 106, + Nonce: 4, + TokenAmounts: []cciptypes.TokenAmount{ + {Token: tokenAddrs[0], Amount: big.NewInt(1e18)}, + {Token: tokenAddrs[0], Amount: big.NewInt(5e18)}, + {Token: tokenAddrs[2], Amount: big.NewInt(5e18)}, + }, + }, + }, + }, + }, + destTokenPrices: map[cciptypes.Address]*big.Int{ + tokenAddrs[1]: big.NewInt(1000), + tokenAddrs[3]: big.NewInt(500), + }, + sourceToDest: map[cciptypes.Address]cciptypes.Address{ + tokenAddrs[0]: tokenAddrs[1], + tokenAddrs[2]: tokenAddrs[3], + }, + expInflightSeqNrs: mapset.NewSet[uint64](100, 106), + expInflightAggrVal: big.NewInt(9*1000 + 5*500), + expMaxInflightSenderNonces: map[cciptypes.Address]uint64{ + addrs[0]: 4, + }, + 
expInflightTokenAmounts: map[cciptypes.Address]*big.Int{ + tokenAddrs[0]: big.NewInt(9e18), + tokenAddrs[2]: big.NewInt(5e18), + }, + expErr: false, + }, + { + name: "missing price should be 0", + inflight: []InflightInternalExecutionReport{ + { + messages: []cciptypes.EVM2EVMMessage{ + { + Sender: addrs[0], + SequenceNumber: 100, + Nonce: 2, + TokenAmounts: []cciptypes.TokenAmount{ + {Token: tokenAddrs[0], Amount: big.NewInt(1e18)}, + }, + }, + }, + }, + }, + destTokenPrices: map[cciptypes.Address]*big.Int{ + tokenAddrs[3]: big.NewInt(500), + }, + sourceToDest: map[cciptypes.Address]cciptypes.Address{ + tokenAddrs[2]: tokenAddrs[3], + }, + expInflightAggrVal: big.NewInt(0), + expErr: false, + }, + { + name: "nothing inflight", + inflight: []InflightInternalExecutionReport{}, + expInflightSeqNrs: mapset.NewSet[uint64](), + expInflightAggrVal: big.NewInt(0), + expMaxInflightSenderNonces: map[cciptypes.Address]uint64{}, + expInflightTokenAmounts: map[cciptypes.Address]*big.Int{}, + expErr: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + inflightAggrVal, err := getInflightAggregateRateLimit( + lggr, + tc.inflight, + tc.destTokenPrices, + tc.sourceToDest, + ) + + if tc.expErr { + assert.Error(t, err) + return + } + assert.NoError(t, err) + assert.True(t, reflect.DeepEqual(tc.expInflightAggrVal, inflightAggrVal)) + }) + } +} + +func Test_commitReportWithSendRequests_validate(t *testing.T) { + testCases := []struct { + name string + reportInterval cciptypes.CommitStoreInterval + numReqs int + expValid bool + }{ + { + name: "valid report", + reportInterval: cciptypes.CommitStoreInterval{Min: 10, Max: 20}, + numReqs: 11, + expValid: true, + }, + { + name: "report with one request", + reportInterval: cciptypes.CommitStoreInterval{Min: 1234, Max: 1234}, + numReqs: 1, + expValid: true, + }, + { + name: "request is missing", + reportInterval: cciptypes.CommitStoreInterval{Min: 1234, Max: 1234}, + numReqs: 0, + expValid: false, + }, + { + name: "requests are missing", + reportInterval: cciptypes.CommitStoreInterval{Min: 1, Max: 10}, + numReqs: 5, + expValid: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + rep := commitReportWithSendRequests{ + commitReport: cciptypes.CommitStoreReport{ + Interval: tc.reportInterval, + }, + sendRequestsWithMeta: make([]cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta, tc.numReqs), + } + err := rep.validate() + isValid := err == nil + assert.Equal(t, tc.expValid, isValid) + }) + } +} + +func Test_commitReportWithSendRequests_allRequestsAreExecutedAndFinalized(t *testing.T) { + testCases := []struct { + name string + reqs []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta + expRes bool + }{ + { + name: "all requests executed and finalized", + reqs: []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{ + {Executed: true, Finalized: true}, + {Executed: true, Finalized: true}, + {Executed: true, Finalized: true}, + }, + expRes: true, + }, + { + name: "true when there are zero requests", + reqs: []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{}, + expRes: true, + }, + { + name: "some request not executed", + reqs: []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{ + {Executed: true, Finalized: true}, + {Executed: true, Finalized: true}, + {Executed: false, Finalized: true}, + }, + expRes: false, + }, + { + name: "some request not finalized", + reqs: []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{ + {Executed: true, Finalized: true}, + {Executed: true, Finalized: true}, + {Executed: 
true, Finalized: false}, + }, + expRes: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + rep := commitReportWithSendRequests{sendRequestsWithMeta: tc.reqs} + res := rep.allRequestsAreExecutedAndFinalized() + assert.Equal(t, tc.expRes, res) + }) + } +} + +func Test_commitReportWithSendRequests_sendReqFits(t *testing.T) { + testCases := []struct { + name string + req cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta + report cciptypes.CommitStoreReport + expRes bool + }{ + { + name: "all requests executed and finalized", + req: cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{ + EVM2EVMMessage: cciptypes.EVM2EVMMessage{SequenceNumber: 1}, + }, + report: cciptypes.CommitStoreReport{ + Interval: cciptypes.CommitStoreInterval{Min: 1, Max: 10}, + }, + expRes: true, + }, + { + name: "all requests executed and finalized", + req: cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{ + EVM2EVMMessage: cciptypes.EVM2EVMMessage{SequenceNumber: 10}, + }, + report: cciptypes.CommitStoreReport{ + Interval: cciptypes.CommitStoreInterval{Min: 1, Max: 10}, + }, + expRes: true, + }, + { + name: "all requests executed and finalized", + req: cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{ + EVM2EVMMessage: cciptypes.EVM2EVMMessage{SequenceNumber: 11}, + }, + report: cciptypes.CommitStoreReport{ + Interval: cciptypes.CommitStoreInterval{Min: 1, Max: 10}, + }, + expRes: false, + }, + { + name: "all requests executed and finalized", + req: cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{ + EVM2EVMMessage: cciptypes.EVM2EVMMessage{SequenceNumber: 10}, + }, + report: cciptypes.CommitStoreReport{ + Interval: cciptypes.CommitStoreInterval{Min: 10, Max: 10}, + }, + expRes: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + r := &commitReportWithSendRequests{commitReport: tc.report} + assert.Equal(t, tc.expRes, r.sendReqFits(tc.req)) + }) + } +} + +// generateExecutionReport generates an execution report that can be used in tests +func generateExecutionReport(t *testing.T, numMsgs, tokensPerMsg, bytesPerMsg int) cciptypes.ExecReport { + messages := make([]cciptypes.EVM2EVMMessage, numMsgs) + + randAddr := func() cciptypes.Address { + return cciptypes.Address(utils.RandomAddress().String()) + } + + offChainTokenData := make([][][]byte, numMsgs) + for i := range messages { + tokenAmounts := make([]cciptypes.TokenAmount, tokensPerMsg) + for j := range tokenAmounts { + tokenAmounts[j] = cciptypes.TokenAmount{ + Token: randAddr(), + Amount: big.NewInt(math.MaxInt64), + } + } + + messages[i] = cciptypes.EVM2EVMMessage{ + SourceChainSelector: rand.Uint64(), + SequenceNumber: uint64(i + 1), + FeeTokenAmount: big.NewInt(rand.Int64()), + Sender: randAddr(), + Nonce: rand.Uint64(), + GasLimit: big.NewInt(rand.Int64()), + Strict: false, + Receiver: randAddr(), + Data: bytes.Repeat([]byte{1}, bytesPerMsg), + TokenAmounts: tokenAmounts, + FeeToken: randAddr(), + MessageID: utils.RandomBytes32(), + } + + data := []byte(`{"foo": "bar"}`) + offChainTokenData[i] = [][]byte{data, data, data} + } + + return cciptypes.ExecReport{ + Messages: messages, + OffchainTokenData: offChainTokenData, + Proofs: make([][32]byte, numMsgs), + ProofFlagBits: big.NewInt(rand.Int64()), + } +} + +func Test_selectReportsToFillBatch(t *testing.T) { + tests := []struct { + name string + messagesLimit uint64 // maximum number of messages that can be included in a batch. + expectedBatches int // expected number of batches. + expectedReports int // expected number of selected reports. 
+ }{ + { + name: "pick all at once when messages limit is high", + messagesLimit: 5000, + expectedBatches: 1, + expectedReports: 10, + }, + { + name: "pick none when messages limit is below commit report size", + messagesLimit: 199, + expectedBatches: 0, + expectedReports: 0, + }, + { + name: "pick exactly the number in each report", + messagesLimit: 200, + expectedBatches: 10, + expectedReports: 10, + }, + { + name: "messages limit larger than individual reports", + messagesLimit: 300, + expectedBatches: 10, + expectedReports: 10, + }, + { + name: "messages limit larger than several reports", + messagesLimit: 650, + expectedBatches: 4, + expectedReports: 10, + }, + { + name: "default limit", + messagesLimit: 1024, + expectedBatches: 2, + expectedReports: 10, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + nbCommitStoreReports := 10 + nbMsgPerRoot := 200 + + var reports []cciptypes.CommitStoreReport + for i := 0; i < nbCommitStoreReports; i++ { + reports = append(reports, cciptypes.CommitStoreReport{Interval: cciptypes.CommitStoreInterval{Min: uint64(i * nbMsgPerRoot), Max: uint64((i+1)*nbMsgPerRoot - 1)}}) + } + + var unexpiredReportsBatches [][]cciptypes.CommitStoreReport + for i := 0; i < len(reports); { + unexpiredReports, step := selectReportsToFillBatch(reports[i:], tt.messagesLimit) + if step == 0 { + break + } + unexpiredReportsBatches = append(unexpiredReportsBatches, unexpiredReports) + i += step + } + assert.Len(t, unexpiredReportsBatches, tt.expectedBatches) + + var flatten []cciptypes.CommitStoreReport + for _, r := range unexpiredReportsBatches { + flatten = append(flatten, r...) + } + assert.Equal(t, tt.expectedReports, len(flatten)) + if tt.expectedBatches > 0 { + assert.Equal(t, reports, flatten) + } else { + assert.Empty(t, flatten) + } + }) + } +} + +func Test_prepareTokenExecData(t *testing.T) { + ctx := testutils.Context(t) + + weth := cciptypes.Address(utils.RandomAddress().String()) + wavax := cciptypes.Address(utils.RandomAddress().String()) + link := cciptypes.Address(utils.RandomAddress().String()) + usdc := cciptypes.Address(utils.RandomAddress().String()) + + wethPriceUpdate := cciptypes.TokenPriceUpdate{TokenPrice: cciptypes.TokenPrice{Token: weth, Value: big.NewInt(2e18)}} + wavaxPriceUpdate := cciptypes.TokenPriceUpdate{TokenPrice: cciptypes.TokenPrice{Token: wavax, Value: big.NewInt(3e18)}} + linkPriceUpdate := cciptypes.TokenPriceUpdate{TokenPrice: cciptypes.TokenPrice{Token: link, Value: big.NewInt(4e18)}} + usdcPriceUpdate := cciptypes.TokenPriceUpdate{TokenPrice: cciptypes.TokenPrice{Token: usdc, Value: big.NewInt(5e18)}} + + tokenPrices := map[cciptypes.Address]cciptypes.TokenPriceUpdate{weth: wethPriceUpdate, wavax: wavaxPriceUpdate, link: linkPriceUpdate, usdc: usdcPriceUpdate} + + tests := []struct { + name string + sourceFeeTokens []cciptypes.Address + sourceFeeTokensErr error + destTokens []cciptypes.Address + destTokensErr error + destFeeTokens []cciptypes.Address + destFeeTokensErr error + sourcePrices []cciptypes.TokenPriceUpdate + destPrices []cciptypes.TokenPriceUpdate + }{ + { + name: "only native token", + sourcePrices: []cciptypes.TokenPriceUpdate{wethPriceUpdate}, + destPrices: []cciptypes.TokenPriceUpdate{wavaxPriceUpdate}, + }, + { + name: "additional dest fee token", + destFeeTokens: []cciptypes.Address{link}, + sourcePrices: []cciptypes.TokenPriceUpdate{wethPriceUpdate}, + destPrices: []cciptypes.TokenPriceUpdate{linkPriceUpdate, wavaxPriceUpdate}, + }, + { + name: "dest tokens", + destTokens: 
[]cciptypes.Address{link, usdc}, + sourcePrices: []cciptypes.TokenPriceUpdate{wethPriceUpdate}, + destPrices: []cciptypes.TokenPriceUpdate{linkPriceUpdate, usdcPriceUpdate, wavaxPriceUpdate}, + }, + { + name: "source fee tokens", + sourceFeeTokens: []cciptypes.Address{usdc}, + sourcePrices: []cciptypes.TokenPriceUpdate{usdcPriceUpdate, wethPriceUpdate}, + destPrices: []cciptypes.TokenPriceUpdate{wavaxPriceUpdate}, + }, + { + name: "source, dest and fee tokens", + sourceFeeTokens: []cciptypes.Address{usdc}, + destTokens: []cciptypes.Address{link}, + destFeeTokens: []cciptypes.Address{usdc}, + sourcePrices: []cciptypes.TokenPriceUpdate{usdcPriceUpdate, wethPriceUpdate}, + destPrices: []cciptypes.TokenPriceUpdate{usdcPriceUpdate, linkPriceUpdate, wavaxPriceUpdate}, + }, + { + name: "source, dest and fee tokens with duplicates", + sourceFeeTokens: []cciptypes.Address{link, weth}, + destTokens: []cciptypes.Address{link, wavax}, + destFeeTokens: []cciptypes.Address{link, wavax}, + sourcePrices: []cciptypes.TokenPriceUpdate{linkPriceUpdate, wethPriceUpdate}, + destPrices: []cciptypes.TokenPriceUpdate{linkPriceUpdate, wavaxPriceUpdate}, + }, + { + name: "everything fails when source fails", + sourceFeeTokensErr: errors.New("source error"), + }, + { + name: "everything fails when dest fee fails", + destFeeTokensErr: errors.New("dest fee error"), + }, + { + name: "everything fails when dest fails", + destTokensErr: errors.New("dest error"), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + onrampReader := ccipdatamocks.NewOnRampReader(t) + offrampReader := ccipdatamocks.NewOffRampReader(t) + sourcePriceRegistry := ccipdatamocks.NewPriceRegistryReader(t) + destPriceRegistry := ccipdatamocks.NewPriceRegistryReader(t) + gasPriceEstimator := prices.NewMockGasPriceEstimatorExec(t) + sourcePriceRegistryProvider := ccipdataprovidermocks.NewPriceRegistry(t) + + sourcePriceRegistryAddress := cciptypes.Address(utils.RandomAddress().String()) + onrampReader.On("SourcePriceRegistryAddress", ctx).Return(sourcePriceRegistryAddress, nil).Maybe() + offrampReader.On("CurrentRateLimiterState", ctx).Return(cciptypes.TokenBucketRateLimit{}, nil).Maybe() + offrampReader.On("GetSourceToDestTokensMapping", ctx).Return(map[cciptypes.Address]cciptypes.Address{}, nil).Maybe() + gasPriceEstimator.On("GetGasPrice", ctx).Return(big.NewInt(1e9), nil).Maybe() + + offrampReader.On("GetTokens", ctx).Return(cciptypes.OffRampTokens{DestinationTokens: tt.destTokens}, tt.destTokensErr).Maybe() + sourcePriceRegistry.On("Address", mock.Anything).Return(sourcePriceRegistryAddress, nil).Maybe() + sourcePriceRegistry.On("GetFeeTokens", ctx).Return(tt.sourceFeeTokens, tt.sourceFeeTokensErr).Maybe() + sourcePriceRegistry.On("GetTokenPrices", ctx, mock.Anything).Return(tt.sourcePrices, nil).Maybe() + destPriceRegistry.On("GetFeeTokens", ctx).Return(tt.destFeeTokens, tt.destFeeTokensErr).Maybe() + destPriceRegistry.On("GetTokenPrices", ctx, mock.Anything).Return(tt.destPrices, nil).Maybe() + + sourcePriceRegistryProvider.On("NewPriceRegistryReader", ctx, sourcePriceRegistryAddress).Return(sourcePriceRegistry, nil).Maybe() + + reportingPlugin := ExecutionReportingPlugin{ + onRampReader: onrampReader, + offRampReader: offrampReader, + sourcePriceRegistry: sourcePriceRegistry, + sourcePriceRegistryProvider: sourcePriceRegistryProvider, + destPriceRegistry: destPriceRegistry, + gasPriceEstimator: gasPriceEstimator, + sourceWrappedNativeToken: weth, + destWrappedNative: wavax, + } + + tokenData, err := 
reportingPlugin.prepareTokenExecData(ctx) + if tt.destFeeTokensErr != nil || tt.sourceFeeTokensErr != nil || tt.destTokensErr != nil { + require.Error(t, err) + return + } + + require.NoError(t, err) + assert.Len(t, tokenData.sourceTokenPrices, len(tt.sourcePrices)) + assert.Len(t, tokenData.destTokenPrices, len(tt.destPrices)) + + for token, price := range tokenData.sourceTokenPrices { + assert.Equal(t, tokenPrices[token].Value, price) + } + + for token, price := range tokenData.destTokenPrices { + assert.Equal(t, tokenPrices[token].Value, price) + } + }) + } +} + +func encodeExecutionReport(t *testing.T, report cciptypes.ExecReport) []byte { + reader, err := v1_2_0.NewOffRamp(logger.TestLogger(t), utils.RandomAddress(), nil, nil, nil, nil) + require.NoError(t, err) + ctx := testutils.Context(t) + encodedReport, err := reader.EncodeExecutionReport(ctx, report) + require.NoError(t, err) + return encodedReport +} + +// Verify the price registry update mechanism in case of configuration change on the source onRamp. +func TestExecutionReportingPlugin_ensurePriceRegistrySynchronization(t *testing.T) { + p := &ExecutionReportingPlugin{} + p.lggr = logger.TestLogger(t) + p.sourcePriceRegistryLock = sync.RWMutex{} + + sourcePriceRegistryAddress1 := cciptypes.Address(utils.RandomAddress().String()) + sourcePriceRegistryAddress2 := cciptypes.Address(utils.RandomAddress().String()) + + mockPriceRegistryReader1 := ccipdatamocks.NewPriceRegistryReader(t) + mockPriceRegistryReader2 := ccipdatamocks.NewPriceRegistryReader(t) + mockPriceRegistryReader1.On("Address", mock.Anything).Return(sourcePriceRegistryAddress1, nil) + mockPriceRegistryReader2.On("Address", mock.Anything).Return(sourcePriceRegistryAddress2, nil).Maybe() + mockPriceRegistryReader1.On("Close", mock.Anything).Return(nil) + mockPriceRegistryReader2.On("Close", mock.Anything).Return(nil).Maybe() + + mockSourcePriceRegistryProvider := ccipdataprovidermocks.NewPriceRegistry(t) + mockSourcePriceRegistryProvider.On("NewPriceRegistryReader", mock.Anything, sourcePriceRegistryAddress1).Return(mockPriceRegistryReader1, nil) + mockSourcePriceRegistryProvider.On("NewPriceRegistryReader", mock.Anything, sourcePriceRegistryAddress2).Return(mockPriceRegistryReader2, nil) + p.sourcePriceRegistryProvider = mockSourcePriceRegistryProvider + + mockOnRampReader := ccipdatamocks.NewOnRampReader(t) + p.onRampReader = mockOnRampReader + + mockOnRampReader.On("SourcePriceRegistryAddress", mock.Anything).Return(sourcePriceRegistryAddress1, nil).Once() + require.Equal(t, nil, p.sourcePriceRegistry) + err := p.ensurePriceRegistrySynchronization(context.Background()) + require.NoError(t, err) + require.Equal(t, mockPriceRegistryReader1, p.sourcePriceRegistry) + + mockOnRampReader.On("SourcePriceRegistryAddress", mock.Anything).Return(sourcePriceRegistryAddress2, nil).Once() + err = p.ensurePriceRegistrySynchronization(context.Background()) + require.NoError(t, err) + require.Equal(t, mockPriceRegistryReader2, p.sourcePriceRegistry) +} diff --git a/core/services/ocr2/plugins/ccip/clo_ccip_integration_test.go b/core/services/ocr2/plugins/ccip/clo_ccip_integration_test.go new file mode 100644 index 00000000000..142ba006be6 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/clo_ccip_integration_test.go @@ -0,0 +1,137 @@ +package ccip_test + +import ( + "encoding/json" + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/assert" + 
"github.com/stretchr/testify/require" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/router" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/mock_v3_aggregator_contract" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/testhelpers" + integrationtesthelpers "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/testhelpers/integration" +) + +func Test_CLOSpecApprovalFlow_pipeline(t *testing.T) { + ccipTH := integrationtesthelpers.SetupCCIPIntegrationTH(t, testhelpers.SourceChainID, testhelpers.SourceChainSelector, testhelpers.DestChainID, testhelpers.DestChainSelector) + + tokenPricesUSDPipeline, linkUSD, ethUSD := ccipTH.CreatePricesPipeline(t) + defer linkUSD.Close() + defer ethUSD.Close() + + test_CLOSpecApprovalFlow(t, ccipTH, tokenPricesUSDPipeline, "") +} + +func Test_CLOSpecApprovalFlow_dynamicPriceGetter(t *testing.T) { + ccipTH := integrationtesthelpers.SetupCCIPIntegrationTH(t, testhelpers.SourceChainID, testhelpers.SourceChainSelector, testhelpers.DestChainID, testhelpers.DestChainSelector) + + //Set up the aggregators here to avoid modifying ccipTH. + srcLinkAddr := ccipTH.Source.LinkToken.Address() + dstLinkAddr := ccipTH.Dest.LinkToken.Address() + srcNativeAddr, err := ccipTH.Source.Router.GetWrappedNative(nil) + require.NoError(t, err) + aggDstNativeAddr := ccipTH.Dest.WrappedNative.Address() + + aggSrcNatAddr, _, aggSrcNat, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract(ccipTH.Source.User, ccipTH.Source.Chain, 18, big.NewInt(2e18)) + require.NoError(t, err) + _, err = aggSrcNat.UpdateRoundData(ccipTH.Source.User, big.NewInt(50), big.NewInt(17000000), big.NewInt(1000), big.NewInt(1000)) + require.NoError(t, err) + ccipTH.Source.Chain.Commit() + + aggSrcLnkAddr, _, aggSrcLnk, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract(ccipTH.Source.User, ccipTH.Source.Chain, 18, big.NewInt(3e18)) + require.NoError(t, err) + ccipTH.Dest.Chain.Commit() + _, err = aggSrcLnk.UpdateRoundData(ccipTH.Source.User, big.NewInt(50), big.NewInt(8000000), big.NewInt(1000), big.NewInt(1000)) + require.NoError(t, err) + ccipTH.Source.Chain.Commit() + + aggDstLnkAddr, _, aggDstLnk, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract(ccipTH.Dest.User, ccipTH.Dest.Chain, 18, big.NewInt(3e18)) + require.NoError(t, err) + ccipTH.Dest.Chain.Commit() + _, err = aggDstLnk.UpdateRoundData(ccipTH.Dest.User, big.NewInt(50), big.NewInt(8000000), big.NewInt(1000), big.NewInt(1000)) + require.NoError(t, err) + ccipTH.Dest.Chain.Commit() + + // Check content is ok on aggregator. 
+ tmp, err := aggDstLnk.LatestRoundData(&bind.CallOpts{}) + require.NoError(t, err) + require.Equal(t, big.NewInt(50), tmp.RoundId) + require.Equal(t, big.NewInt(8000000), tmp.Answer) + + // deploy dest wrapped native aggregator + aggDstNativeAggrAddr, _, aggDstNativeAggr, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract(ccipTH.Dest.User, ccipTH.Dest.Chain, 18, big.NewInt(3e18)) + require.NoError(t, err) + ccipTH.Dest.Chain.Commit() + _, err = aggDstNativeAggr.UpdateRoundData(ccipTH.Dest.User, big.NewInt(50), big.NewInt(500000), big.NewInt(1000), big.NewInt(1000)) + require.NoError(t, err) + ccipTH.Dest.Chain.Commit() + + priceGetterConfig := config.DynamicPriceGetterConfig{ + AggregatorPrices: map[common.Address]config.AggregatorPriceConfig{ + srcLinkAddr: { + ChainID: ccipTH.Source.ChainID, + AggregatorContractAddress: aggSrcLnkAddr, + }, + srcNativeAddr: { + ChainID: ccipTH.Source.ChainID, + AggregatorContractAddress: aggSrcNatAddr, + }, + dstLinkAddr: { + ChainID: ccipTH.Dest.ChainID, + AggregatorContractAddress: aggDstLnkAddr, + }, + aggDstNativeAddr: { + ChainID: ccipTH.Dest.ChainID, + AggregatorContractAddress: aggDstNativeAggrAddr, + }, + }, + StaticPrices: map[common.Address]config.StaticPriceConfig{}, + } + priceGetterConfigBytes, err := json.MarshalIndent(priceGetterConfig, "", " ") + require.NoError(t, err) + priceGetterConfigJson := string(priceGetterConfigBytes) + + test_CLOSpecApprovalFlow(t, ccipTH, "", priceGetterConfigJson) +} + +func test_CLOSpecApprovalFlow(t *testing.T, ccipTH integrationtesthelpers.CCIPIntegrationTestHarness, tokenPricesUSDPipeline string, priceGetterConfiguration string) { + jobParams := ccipTH.SetUpNodesAndJobs(t, tokenPricesUSDPipeline, priceGetterConfiguration, "http://blah.com") + ccipTH.SetupFeedsManager(t) + + // Propose and approve new specs + ccipTH.ApproveJobSpecs(t, jobParams) + + // Sanity check that CCIP works after CLO flow + currentSeqNum := 1 + + extraArgs, err := testhelpers.GetEVMExtraArgsV1(big.NewInt(200_003), false) + require.NoError(t, err) + + msg := router.ClientEVM2AnyMessage{ + Receiver: testhelpers.MustEncodeAddress(t, ccipTH.Dest.Receivers[0].Receiver.Address()), + Data: utils.RandomAddress().Bytes(), + TokenAmounts: []router.ClientEVMTokenAmount{}, + FeeToken: ccipTH.Source.LinkToken.Address(), + ExtraArgs: extraArgs, + } + fee, err := ccipTH.Source.Router.GetFee(nil, testhelpers.DestChainSelector, msg) + require.NoError(t, err) + + _, err = ccipTH.Source.LinkToken.Approve(ccipTH.Source.User, ccipTH.Source.Router.Address(), new(big.Int).Set(fee)) + require.NoError(t, err) + ccipTH.Source.Chain.Commit() + + ccipTH.SendRequest(t, msg) + ccipTH.AllNodesHaveReqSeqNum(t, currentSeqNum) + ccipTH.EventuallyReportCommitted(t, currentSeqNum) + + executionLogs := ccipTH.AllNodesHaveExecutedSeqNums(t, currentSeqNum, currentSeqNum) + assert.Len(t, executionLogs, 1) + ccipTH.AssertExecState(t, executionLogs[0], testhelpers.ExecutionStateSuccess) +} diff --git a/core/services/ocr2/plugins/ccip/config/chain_config.go b/core/services/ocr2/plugins/ccip/config/chain_config.go new file mode 100644 index 00000000000..ff82def6066 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/config/chain_config.go @@ -0,0 +1,48 @@ +package config + +import ( + "strconv" + + "github.com/pkg/errors" + chainselectors "github.com/smartcontractkit/chain-selectors" + + "github.com/smartcontractkit/chainlink/v2/core/chains/legacyevm" + "github.com/smartcontractkit/chainlink/v2/core/services/job" +) + +func GetChainFromSpec(spec 
*job.OCR2OracleSpec, chainSet legacyevm.LegacyChainContainer) (legacyevm.Chain, int64, error) { + chainIDInterface, ok := spec.RelayConfig["chainID"] + if !ok { + return nil, 0, errors.New("chainID must be provided in relay config") + } + destChainID := uint64(chainIDInterface.(float64)) + return GetChainByChainID(chainSet, destChainID) +} + +func GetChainByChainSelector(chainSet legacyevm.LegacyChainContainer, chainSelector uint64) (legacyevm.Chain, int64, error) { + chainID, err := chainselectors.ChainIdFromSelector(chainSelector) + if err != nil { + return nil, 0, err + } + return GetChainByChainID(chainSet, chainID) +} + +func GetChainByChainID(chainSet legacyevm.LegacyChainContainer, chainID uint64) (legacyevm.Chain, int64, error) { + chain, err := chainSet.Get(strconv.FormatUint(chainID, 10)) + if err != nil { + return nil, 0, errors.Wrap(err, "chain not found in chainset") + } + return chain, chain.ID().Int64(), nil +} + +func ResolveChainNames(sourceChainId int64, destChainId int64) (string, string, error) { + sourceChainName, err := chainselectors.NameFromChainId(uint64(sourceChainId)) + if err != nil { + return "", "", err + } + destChainName, err := chainselectors.NameFromChainId(uint64(destChainId)) + if err != nil { + return "", "", err + } + return sourceChainName, destChainName, nil +} diff --git a/core/services/ocr2/plugins/ccip/config/chain_config_test.go b/core/services/ocr2/plugins/ccip/config/chain_config_test.go new file mode 100644 index 00000000000..df2351a5ea4 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/config/chain_config_test.go @@ -0,0 +1,135 @@ +package config + +import ( + "math/big" + "strconv" + "testing" + + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/chainlink/v2/core/chains/legacyevm/mocks" + "github.com/smartcontractkit/chainlink/v2/core/services/job" +) + +func TestGetChainFromSpec(t *testing.T) { + testChainID := int64(1337) + + tests := []struct { + name string + spec *job.OCR2OracleSpec + expectedErr bool + expectedErrMsg string + }{ + { + name: "success", + spec: &job.OCR2OracleSpec{ + RelayConfig: job.JSONConfig{ + "chainID": float64(testChainID), + }, + }, + expectedErr: false, + }, + { + name: "missing_chain_ID", + spec: &job.OCR2OracleSpec{}, + expectedErr: true, + expectedErrMsg: "chainID must be provided in relay config", + }, + } + + mockChain := mocks.NewChain(t) + mockChain.On("ID").Return(big.NewInt(testChainID)).Maybe() + + mockChainSet := mocks.NewLegacyChainContainer(t) + mockChainSet.On("Get", strconv.FormatInt(testChainID, 10)).Return(mockChain, nil).Maybe() + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + chain, chainID, err := GetChainFromSpec(test.spec, mockChainSet) + if test.expectedErr { + require.Error(t, err) + require.Contains(t, err.Error(), test.expectedErrMsg) + } else { + require.NoError(t, err) + require.Equal(t, mockChain, chain) + require.Equal(t, testChainID, chainID) + } + }) + } +} + +func TestGetChainByChainSelector_success(t *testing.T) { + mockChain := mocks.NewChain(t) + mockChain.On("ID").Return(big.NewInt(11155111)) + + mockChainSet := mocks.NewLegacyChainContainer(t) + mockChainSet.On("Get", "11155111").Return(mockChain, nil) + + // Ethereum Sepolia chain selector. 
+ chain, chainID, err := GetChainByChainSelector(mockChainSet, uint64(16015286601757825753)) + require.NoError(t, err) + require.Equal(t, mockChain, chain) + require.Equal(t, int64(11155111), chainID) +} + +func TestGetChainByChainSelector_selectorNotFound(t *testing.T) { + mockChainSet := mocks.NewLegacyChainContainer(t) + + _, _, err := GetChainByChainSelector(mockChainSet, uint64(444000444)) + require.Error(t, err) +} + +func TestGetChainById_notFound(t *testing.T) { + mockChainSet := mocks.NewLegacyChainContainer(t) + mockChainSet.On("Get", "444").Return(nil, errors.New("test")).Maybe() + + _, _, err := GetChainByChainID(mockChainSet, uint64(444)) + require.Error(t, err) + require.Contains(t, err.Error(), "chain not found in chainset") +} + +func TestResolveChainNames(t *testing.T) { + tests := []struct { + name string + sourceChainId int64 + destChainId int64 + expectedSourceChainName string + expectedDestChainName string + expectedErr bool + }{ + { + name: "success", + sourceChainId: 1, + destChainId: 10, + expectedSourceChainName: "ethereum-mainnet", + expectedDestChainName: "ethereum-mainnet-optimism-1", + }, + { + name: "source chain not found", + sourceChainId: 901278309182, + destChainId: 10, + expectedErr: true, + }, + { + name: "dest chain not found", + sourceChainId: 1, + destChainId: 901278309182, + expectedErr: true, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + sourceChainName, destChainName, err := ResolveChainNames(test.sourceChainId, test.destChainId) + if test.expectedErr { + require.Error(t, err) + } else { + require.NoError(t, err) + assert.Equal(t, test.expectedSourceChainName, sourceChainName) + assert.Equal(t, test.expectedDestChainName, destChainName) + } + }) + } +} diff --git a/core/services/ocr2/plugins/ccip/config/config.go b/core/services/ocr2/plugins/ccip/config/config.go new file mode 100644 index 00000000000..a24a6edfd13 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/config/config.go @@ -0,0 +1,152 @@ +package config + +import ( + "encoding/json" + "fmt" + "math/big" + "strings" + + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + "github.com/smartcontractkit/chainlink-common/pkg/utils/bytes" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils" +) + +// CommitPluginJobSpecConfig contains the plugin specific variables for the ccip.CCIPCommit plugin. +type CommitPluginJobSpecConfig struct { + SourceStartBlock, DestStartBlock uint64 // Only for first time job add. + OffRamp cciptypes.Address `json:"offRamp"` + // TokenPricesUSDPipeline should contain a token price pipeline for the following tokens: + // The SOURCE chain wrapped native + // The DESTINATION supported tokens (including fee tokens) as defined in destination OffRamp and PriceRegistry. + TokenPricesUSDPipeline string `json:"tokenPricesUSDPipeline,omitempty"` + // PriceGetterConfig defines where to get the token prices from (i.e. static or aggregator source). + PriceGetterConfig *DynamicPriceGetterConfig `json:"priceGetterConfig,omitempty"` +} + +type CommitPluginConfig struct { + IsSourceProvider bool + SourceStartBlock, DestStartBlock uint64 +} + +func (c CommitPluginConfig) Encode() ([]byte, error) { + bytes, err := json.Marshal(c) + if err != nil { + return nil, err + } + return bytes, nil +} + +// DynamicPriceGetterConfig specifies which configuration to use for getting the price of tokens (map keys). 
+type DynamicPriceGetterConfig struct { + AggregatorPrices map[common.Address]AggregatorPriceConfig `json:"aggregatorPrices"` + StaticPrices map[common.Address]StaticPriceConfig `json:"staticPrices"` +} + +// AggregatorPriceConfig specifies a price retrieved from an aggregator contract. +type AggregatorPriceConfig struct { + ChainID uint64 `json:"chainID,string"` + AggregatorContractAddress common.Address `json:"contractAddress"` +} + +// StaticPriceConfig specifies a price defined statically. +type StaticPriceConfig struct { + ChainID uint64 `json:"chainID,string"` + Price *big.Int `json:"price"` +} + +// UnmarshalJSON provides a custom un-marshaller to handle JSON embedded in Toml content. +func (c *DynamicPriceGetterConfig) UnmarshalJSON(data []byte) error { + type Alias DynamicPriceGetterConfig + if bytes.HasQuotes(data) { + trimmed := string(bytes.TrimQuotes(data)) + trimmed = strings.ReplaceAll(trimmed, "\\n", "") + trimmed = strings.ReplaceAll(trimmed, "\\t", "") + trimmed = strings.ReplaceAll(trimmed, "\\", "") + return json.Unmarshal([]byte(trimmed), (*Alias)(c)) + } + return json.Unmarshal(data, (*Alias)(c)) +} + +func (c *DynamicPriceGetterConfig) Validate() error { + for addr, v := range c.AggregatorPrices { + if addr == utils.ZeroAddress { + return fmt.Errorf("token address is zero") + } + if v.AggregatorContractAddress == utils.ZeroAddress { + return fmt.Errorf("aggregator contract address is zero") + } + if v.ChainID == 0 { + return fmt.Errorf("chain id is zero") + } + } + + for addr, v := range c.StaticPrices { + if addr == utils.ZeroAddress { + return fmt.Errorf("token address is zero") + } + if v.ChainID == 0 { + return fmt.Errorf("chain id is zero") + } + } + + // Ensure no duplication in token price resolution rules. + if c.AggregatorPrices != nil && c.StaticPrices != nil { + for tk := range c.AggregatorPrices { + if _, exists := c.StaticPrices[tk]; exists { + return fmt.Errorf("token %s defined in both aggregator and static price rules", tk) + } + } + } + return nil +} + +// ExecPluginJobSpecConfig contains the plugin specific variables for the ccip.CCIPExecution plugin. +type ExecPluginJobSpecConfig struct { + SourceStartBlock, DestStartBlock uint64 // Only for first time job add. + USDCConfig USDCConfig +} + +type USDCConfig struct { + SourceTokenAddress common.Address + SourceMessageTransmitterAddress common.Address + AttestationAPI string + AttestationAPITimeoutSeconds uint + // AttestationAPIIntervalMilliseconds can be set to -1 to disable or 0 to use a default interval. + AttestationAPIIntervalMilliseconds int +} + +type ExecPluginConfig struct { + SourceStartBlock, DestStartBlock uint64 // Only for first time job add. 
+ IsSourceProvider bool + USDCConfig USDCConfig + JobID string +} + +func (e ExecPluginConfig) Encode() ([]byte, error) { + bytes, err := json.Marshal(e) + if err != nil { + return nil, err + } + return bytes, nil +} + +func (uc *USDCConfig) ValidateUSDCConfig() error { + if uc.AttestationAPI == "" { + return errors.New("AttestationAPI is required") + } + if uc.AttestationAPIIntervalMilliseconds < -1 { + return errors.New("AttestationAPIIntervalMilliseconds must be -1 to disable, 0 for default or greater to define the exact interval") + } + if uc.SourceTokenAddress == utils.ZeroAddress { + return errors.New("SourceTokenAddress is required") + } + if uc.SourceMessageTransmitterAddress == utils.ZeroAddress { + return errors.New("SourceMessageTransmitterAddress is required") + } + + return nil +} diff --git a/core/services/ocr2/plugins/ccip/config/config_test.go b/core/services/ocr2/plugins/ccip/config/config_test.go new file mode 100644 index 00000000000..e6207aa2231 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/config/config_test.go @@ -0,0 +1,234 @@ +package config + +import ( + "encoding/json" + "fmt" + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc" +) + +func TestCommitConfig(t *testing.T) { + tests := []struct { + name string + cfg CommitPluginJobSpecConfig + expectedValidationError error + }{ + { + name: "valid config", + cfg: CommitPluginJobSpecConfig{ + SourceStartBlock: 222, + DestStartBlock: 333, + OffRamp: ccipcalc.HexToAddress("0x123"), + TokenPricesUSDPipeline: `merge [type=merge left="{}" right="{\"0xC79b96044906550A5652BCf20a6EA02f139B9Ae5\":\"1000000000000000000\"}"];`, + PriceGetterConfig: &DynamicPriceGetterConfig{ + AggregatorPrices: map[common.Address]AggregatorPriceConfig{ + common.HexToAddress("0x0820c05e1fba1244763a494a52272170c321cad3"): { + ChainID: 1000, + AggregatorContractAddress: common.HexToAddress("0xb8dabd288955d302d05ca6b011bb46dfa3ea7acf"), + }, + common.HexToAddress("0x4a98bb4d65347016a7ab6f85bea24b129c9a1272"): { + ChainID: 1337, + AggregatorContractAddress: common.HexToAddress("0xb80244cc8b0bb18db071c150b36e9bcb8310b236"), + }, + }, + StaticPrices: map[common.Address]StaticPriceConfig{ + common.HexToAddress("0xec8c353470ccaa4f43067fcde40558e084a12927"): { + ChainID: 1057, + Price: big.NewInt(1000000000000000000), + }, + }, + }, + }, + expectedValidationError: nil, + }, + { + name: "missing dynamic aggregator contract address", + cfg: CommitPluginJobSpecConfig{ + SourceStartBlock: 222, + DestStartBlock: 333, + OffRamp: ccipcalc.HexToAddress("0x123"), + TokenPricesUSDPipeline: `merge [type=merge left="{}" right="{\"0xC79b96044906550A5652BCf20a6EA02f139B9Ae5\":\"1000000000000000000\"}"];`, + PriceGetterConfig: &DynamicPriceGetterConfig{ + AggregatorPrices: map[common.Address]AggregatorPriceConfig{ + common.HexToAddress("0x0820c05e1fba1244763a494a52272170c321cad3"): { + ChainID: 1000, + AggregatorContractAddress: common.HexToAddress("0xb8dabd288955d302d05ca6b011bb46dfa3ea7acf"), + }, + common.HexToAddress("0x4a98bb4d65347016a7ab6f85bea24b129c9a1272"): { + ChainID: 1337, + AggregatorContractAddress: common.HexToAddress(""), + }, + }, + StaticPrices: map[common.Address]StaticPriceConfig{ + common.HexToAddress("0xec8c353470ccaa4f43067fcde40558e084a12927"): { + ChainID: 1057, + Price: big.NewInt(1000000000000000000), + }, + }, + }, + }, 
+ expectedValidationError: fmt.Errorf("aggregator contract address is zero"), + }, + { + name: "missing chain ID", + cfg: CommitPluginJobSpecConfig{ + SourceStartBlock: 222, + DestStartBlock: 333, + OffRamp: ccipcalc.HexToAddress("0x123"), + TokenPricesUSDPipeline: `merge [type=merge left="{}" right="{\"0xC79b96044906550A5652BCf20a6EA02f139B9Ae5\":\"1000000000000000000\"}"];`, + PriceGetterConfig: &DynamicPriceGetterConfig{ + AggregatorPrices: map[common.Address]AggregatorPriceConfig{ + common.HexToAddress("0x0820c05e1fba1244763a494a52272170c321cad3"): { + ChainID: 1000, + AggregatorContractAddress: common.HexToAddress("0xb8dabd288955d302d05ca6b011bb46dfa3ea7acf"), + }, + common.HexToAddress("0x4a98bb4d65347016a7ab6f85bea24b129c9a1272"): { + ChainID: 1337, + AggregatorContractAddress: common.HexToAddress("0xb80244cc8b0bb18db071c150b36e9bcb8310b236"), + }, + }, + StaticPrices: map[common.Address]StaticPriceConfig{ + common.HexToAddress("0xec8c353470ccaa4f43067fcde40558e084a12927"): { + ChainID: 0, + Price: big.NewInt(1000000000000000000), + }, + }, + }, + }, + expectedValidationError: fmt.Errorf("chain id is zero"), + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + // Verify proper marshall/unmarshalling of the config. + bts, err := json.Marshal(test.cfg) + require.NoError(t, err) + parsedConfig := CommitPluginJobSpecConfig{} + require.NoError(t, json.Unmarshal(bts, &parsedConfig)) + require.Equal(t, test.cfg, parsedConfig) + + // Ensure correctness of price getter configuration. + pgc := test.cfg.PriceGetterConfig + err = pgc.Validate() + if test.expectedValidationError != nil { + require.ErrorContains(t, err, test.expectedValidationError.Error()) + } else { + require.NoError(t, err) + require.Equal(t, uint64(1000), pgc.AggregatorPrices[common.HexToAddress("0x0820c05e1fba1244763a494a52272170c321cad3")].ChainID) + require.Equal(t, uint64(1337), pgc.AggregatorPrices[common.HexToAddress("0x4a98bb4d65347016a7ab6f85bea24b129c9a1272")].ChainID) + require.Equal(t, uint64(1057), pgc.StaticPrices[common.HexToAddress("0xec8c353470ccaa4f43067fcde40558e084a12927")].ChainID) + } + }) + } +} + +func TestExecutionConfig(t *testing.T) { + exampleConfig := ExecPluginJobSpecConfig{ + SourceStartBlock: 222, + DestStartBlock: 333, + } + + bts, err := json.Marshal(exampleConfig) + require.NoError(t, err) + + parsedConfig := ExecPluginJobSpecConfig{} + require.NoError(t, json.Unmarshal(bts, &parsedConfig)) + + require.Equal(t, exampleConfig, parsedConfig) +} + +func TestUSDCValidate(t *testing.T) { + testcases := []struct { + config USDCConfig + err string + }{ + { + config: USDCConfig{}, + err: "AttestationAPI is required", + }, + { + config: USDCConfig{ + AttestationAPI: "api", + }, + err: "SourceTokenAddress is required", + }, + { + config: USDCConfig{ + AttestationAPI: "api", + SourceTokenAddress: utils.ZeroAddress, + }, + err: "SourceTokenAddress is required", + }, + { + config: USDCConfig{ + AttestationAPI: "api", + SourceTokenAddress: utils.RandomAddress(), + }, + err: "SourceMessageTransmitterAddress is required", + }, + { + config: USDCConfig{ + AttestationAPI: "api", + SourceTokenAddress: utils.RandomAddress(), + SourceMessageTransmitterAddress: utils.ZeroAddress, + }, + err: "SourceMessageTransmitterAddress is required", + }, + { + config: USDCConfig{ + AttestationAPI: "api", + SourceTokenAddress: utils.RandomAddress(), + SourceMessageTransmitterAddress: utils.RandomAddress(), + }, + err: "", + }, + } + + for _, tc := range testcases { + tc := tc + 
t.Run(fmt.Sprintf("error = %s", tc.err), func(t *testing.T) { + t.Parallel() + err := tc.config.ValidateUSDCConfig() + if tc.err != "" { + require.ErrorContains(t, err, tc.err) + } else { + require.NoError(t, err) + } + }) + } +} + +func TestUnmarshallDynamicPriceConfig(t *testing.T) { + jsonCfg := ` +{ + "aggregatorPrices": { + "0x0820c05e1fba1244763a494a52272170c321cad3": { + "chainID": "1000", + "contractAddress": "0xb8dabd288955d302d05ca6b011bb46dfa3ea7acf" + }, + "0x4a98bb4d65347016a7ab6f85bea24b129c9a1272": { + "chainID": "1337", + "contractAddress": "0xb80244cc8b0bb18db071c150b36e9bcb8310b236" + } + }, + "staticPrices": { + "0xec8c353470ccaa4f43067fcde40558e084a12927": { + "chainID": "1057", + "price": 1000000000000000000 + } + } +} +` + var cfg DynamicPriceGetterConfig + err := json.Unmarshal([]byte(jsonCfg), &cfg) + require.NoError(t, err) + err = cfg.Validate() + require.NoError(t, err) +} diff --git a/core/services/ocr2/plugins/ccip/config/offchain_config.go b/core/services/ocr2/plugins/ccip/config/offchain_config.go new file mode 100644 index 00000000000..f8fba3f1bcb --- /dev/null +++ b/core/services/ocr2/plugins/ccip/config/offchain_config.go @@ -0,0 +1,26 @@ +package config + +import ( + "encoding/json" +) + +type OffchainConfig interface { + Validate() error +} + +func DecodeOffchainConfig[T OffchainConfig](encodedConfig []byte) (T, error) { + var result T + err := json.Unmarshal(encodedConfig, &result) + if err != nil { + return result, err + } + err = result.Validate() + if err != nil { + return result, err + } + return result, nil +} + +func EncodeOffchainConfig[T OffchainConfig](occ T) ([]byte, error) { + return json.Marshal(occ) +} diff --git a/core/services/ocr2/plugins/ccip/config/type_and_version.go b/core/services/ocr2/plugins/ccip/config/type_and_version.go new file mode 100644 index 00000000000..fdfd892b087 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/config/type_and_version.go @@ -0,0 +1,73 @@ +package config + +import ( + "fmt" + "strings" + + "github.com/Masterminds/semver/v3" + mapset "github.com/deckarep/golang-set/v2" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + + type_and_version "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/type_and_version_interface_wrapper" +) + +type ContractType string + +var ( + EVM2EVMOnRamp ContractType = "EVM2EVMOnRamp" + EVM2EVMOffRamp ContractType = "EVM2EVMOffRamp" + CommitStore ContractType = "CommitStore" + PriceRegistry ContractType = "PriceRegistry" + ContractTypes = mapset.NewSet[ContractType]( + EVM2EVMOffRamp, + EVM2EVMOnRamp, + CommitStore, + PriceRegistry, + ) +) + +func VerifyTypeAndVersion(addr common.Address, client bind.ContractBackend, expectedType ContractType) (semver.Version, error) { + contractType, version, err := TypeAndVersion(addr, client) + if err != nil { + return semver.Version{}, fmt.Errorf("failed getting type and version %w", err) + } + if contractType != expectedType { + return semver.Version{}, fmt.Errorf("wrong contract type %s", contractType) + } + return version, nil +} + +func TypeAndVersion(addr common.Address, client bind.ContractBackend) (ContractType, semver.Version, error) { + tv, err := type_and_version.NewTypeAndVersionInterface(addr, client) + if err != nil { + return "", semver.Version{}, err + } + tvStr, err := tv.TypeAndVersion(nil) + if err != nil { + return "", semver.Version{}, fmt.Errorf("error calling typeAndVersion on addr: %s %w", addr.String(), err) + } + + contractType, versionStr, err := 
ParseTypeAndVersion(tvStr) + if err != nil { + return "", semver.Version{}, err + } + v, err := semver.NewVersion(versionStr) + if err != nil { + return "", semver.Version{}, fmt.Errorf("failed parsing version %s: %w", versionStr, err) + } + + if !ContractTypes.Contains(ContractType(contractType)) { + return "", semver.Version{}, fmt.Errorf("unrecognized contract type %v", contractType) + } + return ContractType(contractType), *v, nil +} + +func ParseTypeAndVersion(tvStr string) (string, string, error) { + typeAndVersionValues := strings.Split(tvStr, " ") + + if len(typeAndVersionValues) < 2 { + return "", "", fmt.Errorf("invalid type and version %s", tvStr) + } + return typeAndVersionValues[0], typeAndVersionValues[1], nil +} diff --git a/core/services/ocr2/plugins/ccip/exportinternal.go b/core/services/ocr2/plugins/ccip/exportinternal.go new file mode 100644 index 00000000000..2a5767ac85d --- /dev/null +++ b/core/services/ocr2/plugins/ccip/exportinternal.go @@ -0,0 +1,135 @@ +package ccip + +import ( + "context" + "math/big" + "time" + + "github.com/ethereum/go-ethereum/common" + + "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/batchreader" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/ccipdataprovider" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/factory" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/pricegetter" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/rpclib" +) + +func GenericAddrToEvm(addr ccip.Address) (common.Address, error) { + return ccipcalc.GenericAddrToEvm(addr) +} + +func EvmAddrToGeneric(addr common.Address) ccip.Address { + return ccipcalc.EvmAddrToGeneric(addr) +} + +func NewEvmPriceRegistry(lp logpoller.LogPoller, ec client.Client, lggr logger.Logger, pluginLabel string) *ccipdataprovider.EvmPriceRegistry { + return ccipdataprovider.NewEvmPriceRegistry(lp, ec, lggr, pluginLabel) +} + +type VersionFinder = factory.VersionFinder + +func NewCommitStoreReader(lggr logger.Logger, versionFinder VersionFinder, address ccip.Address, ec client.Client, lp logpoller.LogPoller) (ccipdata.CommitStoreReader, error) { + return factory.NewCommitStoreReader(lggr, versionFinder, address, ec, lp) +} + +func CloseCommitStoreReader(lggr logger.Logger, versionFinder VersionFinder, address ccip.Address, ec client.Client, lp logpoller.LogPoller) error { + return factory.CloseCommitStoreReader(lggr, versionFinder, address, ec, lp) +} + +func NewOffRampReader(lggr logger.Logger, versionFinder VersionFinder, addr ccip.Address, destClient client.Client, lp logpoller.LogPoller, estimator gas.EvmFeeEstimator, destMaxGasPrice *big.Int, registerFilters bool) (ccipdata.OffRampReader, error) { + 
return factory.NewOffRampReader(lggr, versionFinder, addr, destClient, lp, estimator, destMaxGasPrice, registerFilters) +} + +func CloseOffRampReader(lggr logger.Logger, versionFinder VersionFinder, addr ccip.Address, destClient client.Client, lp logpoller.LogPoller, estimator gas.EvmFeeEstimator, destMaxGasPrice *big.Int) error { + return factory.CloseOffRampReader(lggr, versionFinder, addr, destClient, lp, estimator, destMaxGasPrice) +} + +func NewEvmVersionFinder() factory.EvmVersionFinder { + return factory.NewEvmVersionFinder() +} + +func NewOnRampReader(lggr logger.Logger, versionFinder VersionFinder, sourceSelector, destSelector uint64, onRampAddress ccip.Address, sourceLP logpoller.LogPoller, source client.Client) (ccipdata.OnRampReader, error) { + return factory.NewOnRampReader(lggr, versionFinder, sourceSelector, destSelector, onRampAddress, sourceLP, source) +} + +func CloseOnRampReader(lggr logger.Logger, versionFinder VersionFinder, sourceSelector, destSelector uint64, onRampAddress ccip.Address, sourceLP logpoller.LogPoller, source client.Client) error { + return factory.CloseOnRampReader(lggr, versionFinder, sourceSelector, destSelector, onRampAddress, sourceLP, source) +} + +type OffRampReader = ccipdata.OffRampReader + +type DynamicPriceGetterClient = pricegetter.DynamicPriceGetterClient + +type DynamicPriceGetter = pricegetter.DynamicPriceGetter + +func NewDynamicPriceGetterClient(batchCaller rpclib.EvmBatchCaller) DynamicPriceGetterClient { + return pricegetter.NewDynamicPriceGetterClient(batchCaller) +} + +func NewDynamicPriceGetter(cfg config.DynamicPriceGetterConfig, evmClients map[uint64]DynamicPriceGetterClient) (*DynamicPriceGetter, error) { + return pricegetter.NewDynamicPriceGetter(cfg, evmClients) +} + +func NewDynamicLimitedBatchCaller( + lggr logger.Logger, batchSender rpclib.BatchSender, batchSizeLimit, backOffMultiplier, parallelRpcCallsLimit uint, +) *rpclib.DynamicLimitedBatchCaller { + return rpclib.NewDynamicLimitedBatchCaller(lggr, batchSender, batchSizeLimit, backOffMultiplier, parallelRpcCallsLimit) +} + +func NewUSDCReader(lggr logger.Logger, jobID string, transmitter common.Address, lp logpoller.LogPoller, registerFilters bool) (*ccipdata.USDCReaderImpl, error) { + return ccipdata.NewUSDCReader(lggr, jobID, transmitter, lp, registerFilters) +} + +func CloseUSDCReader(lggr logger.Logger, jobID string, transmitter common.Address, lp logpoller.LogPoller) error { + return ccipdata.CloseUSDCReader(lggr, jobID, transmitter, lp) +} + +type USDCReaderImpl = ccipdata.USDCReaderImpl + +var DefaultRpcBatchSizeLimit = rpclib.DefaultRpcBatchSizeLimit +var DefaultRpcBatchBackOffMultiplier = rpclib.DefaultRpcBatchBackOffMultiplier +var DefaultMaxParallelRpcCalls = rpclib.DefaultMaxParallelRpcCalls + +func NewEVMTokenPoolBatchedReader(lggr logger.Logger, remoteChainSelector uint64, offRampAddress ccip.Address, evmBatchCaller rpclib.EvmBatchCaller) (*batchreader.EVMTokenPoolBatchedReader, error) { + return batchreader.NewEVMTokenPoolBatchedReader(lggr, remoteChainSelector, offRampAddress, evmBatchCaller) +} + +type ChainAgnosticPriceRegistry struct { + p ChainAgnosticPriceRegistryFactory +} + +// [ChainAgnosticPriceRegistryFactory] is satisfied by [commontypes.CCIPCommitProvider] and [commontypes.CCIPExecProvider] +type ChainAgnosticPriceRegistryFactory interface { + NewPriceRegistryReader(ctx context.Context, addr ccip.Address) (ccip.PriceRegistryReader, error) +} + +func (c *ChainAgnosticPriceRegistry) NewPriceRegistryReader(ctx context.Context, addr ccip.Address) 
(ccip.PriceRegistryReader, error) { + return c.p.NewPriceRegistryReader(ctx, addr) +} + +func NewChainAgnosticPriceRegistry(provider ChainAgnosticPriceRegistryFactory) *ChainAgnosticPriceRegistry { + return &ChainAgnosticPriceRegistry{provider} +} + +type JSONCommitOffchainConfigV1_2_0 = v1_2_0.JSONCommitOffchainConfig +type CommitOnchainConfig = ccipdata.CommitOnchainConfig + +func NewCommitOffchainConfig( + gasPriceDeviationPPB uint32, + gasPriceHeartBeat time.Duration, + tokenPriceDeviationPPB uint32, + tokenPriceHeartBeat time.Duration, + inflightCacheExpiry time.Duration, + priceReportingDisabled bool, +) ccip.CommitOffchainConfig { + return ccipdata.NewCommitOffchainConfig(gasPriceDeviationPPB, gasPriceHeartBeat, tokenPriceDeviationPPB, tokenPriceHeartBeat, inflightCacheExpiry, priceReportingDisabled) +} diff --git a/core/services/ocr2/plugins/ccip/integration_legacy_test.go b/core/services/ocr2/plugins/ccip/integration_legacy_test.go new file mode 100644 index 00000000000..9bc94b5fe45 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/integration_legacy_test.go @@ -0,0 +1,599 @@ +package ccip_test + +import ( + "context" + "encoding/json" + "fmt" + "math/big" + "sync" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + gethtypes "github.com/ethereum/go-ethereum/core/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + evm_2_evm_onramp "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_onramp_1_2_0" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/router" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/mock_v3_aggregator_contract" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0" + testhelpers_new "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/testhelpers" + testhelpers "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/testhelpers/testhelpers_1_4_0" +) + +func TestIntegration_legacy_CCIP(t *testing.T) { + // Run the batches of tests for both pipeline and dynamic price getter setups. + // We will remove the pipeline batch once the feature is deleted from the code. + tests := []struct { + name string + withPipeline bool + }{ + { + name: "with pipeline", + withPipeline: true, + }, + { + name: "with dynamic price getter", + withPipeline: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ccipTH := testhelpers.SetupCCIPIntegrationTH(t, testhelpers.SourceChainID, testhelpers.SourceChainSelector, testhelpers.DestChainID, testhelpers.DestChainSelector) + + tokenPricesUSDPipeline := "" + priceGetterConfigJson := "" + + if test.withPipeline { + // Set up a test pipeline. + testPricePipeline, linkUSD, ethUSD := ccipTH.CreatePricesPipeline(t) + defer linkUSD.Close() + defer ethUSD.Close() + tokenPricesUSDPipeline = testPricePipeline + } else { + // Set up a test price getter. + // Set up the aggregators here to avoid modifying ccipTH. 
+ aggSrcNatAddr, _, aggSrcNat, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract(ccipTH.Source.User, ccipTH.Source.Chain, 18, big.NewInt(2e18)) + require.NoError(t, err) + _, err = aggSrcNat.UpdateRoundData(ccipTH.Source.User, big.NewInt(50), big.NewInt(17000000), big.NewInt(1000), big.NewInt(1000)) + require.NoError(t, err) + ccipTH.Source.Chain.Commit() + + aggSrcLnkAddr, _, aggSrcLnk, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract(ccipTH.Source.User, ccipTH.Source.Chain, 18, big.NewInt(3e18)) + require.NoError(t, err) + ccipTH.Dest.Chain.Commit() + _, err = aggSrcLnk.UpdateRoundData(ccipTH.Source.User, big.NewInt(50), big.NewInt(8000000), big.NewInt(1000), big.NewInt(1000)) + require.NoError(t, err) + ccipTH.Source.Chain.Commit() + + aggDstLnkAddr, _, aggDstLnk, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract(ccipTH.Dest.User, ccipTH.Dest.Chain, 18, big.NewInt(3e18)) + require.NoError(t, err) + ccipTH.Dest.Chain.Commit() + _, err = aggDstLnk.UpdateRoundData(ccipTH.Dest.User, big.NewInt(50), big.NewInt(8000000), big.NewInt(1000), big.NewInt(1000)) + require.NoError(t, err) + ccipTH.Dest.Chain.Commit() + + priceGetterConfig := config.DynamicPriceGetterConfig{ + AggregatorPrices: map[common.Address]config.AggregatorPriceConfig{ + ccipTH.Source.LinkToken.Address(): { + ChainID: ccipTH.Source.ChainID, + AggregatorContractAddress: aggSrcLnkAddr, + }, + ccipTH.Source.WrappedNative.Address(): { + ChainID: ccipTH.Source.ChainID, + AggregatorContractAddress: aggSrcNatAddr, + }, + ccipTH.Dest.LinkToken.Address(): { + ChainID: ccipTH.Dest.ChainID, + AggregatorContractAddress: aggDstLnkAddr, + }, + ccipTH.Dest.WrappedNative.Address(): { + ChainID: ccipTH.Dest.ChainID, + AggregatorContractAddress: aggDstLnkAddr, + }, + }, + StaticPrices: map[common.Address]config.StaticPriceConfig{}, + } + priceGetterConfigBytes, err := json.MarshalIndent(priceGetterConfig, "", " ") + require.NoError(t, err) + priceGetterConfigJson = string(priceGetterConfigBytes) + } + + jobParams := ccipTH.SetUpNodesAndJobs(t, tokenPricesUSDPipeline, priceGetterConfigJson, "") + + currentSeqNum := 1 + + t.Run("single", func(t *testing.T) { + tokenAmount := big.NewInt(500000003) // prime number + gasLimit := big.NewInt(200_003) // prime number + + extraArgs, err2 := testhelpers.GetEVMExtraArgsV1(gasLimit, false) + require.NoError(t, err2) + + sourceBalances, err2 := testhelpers.GetBalances(t, []testhelpers.BalanceReq{ + {Name: testhelpers.SourcePool, Addr: ccipTH.Source.LinkTokenPool.Address(), Getter: ccipTH.GetSourceLinkBalance}, + {Name: testhelpers.OnRamp, Addr: ccipTH.Source.OnRamp.Address(), Getter: ccipTH.GetSourceLinkBalance}, + {Name: testhelpers.SourceRouter, Addr: ccipTH.Source.Router.Address(), Getter: ccipTH.GetSourceLinkBalance}, + {Name: testhelpers.SourcePriceRegistry, Addr: ccipTH.Source.PriceRegistry.Address(), Getter: ccipTH.GetSourceLinkBalance}, + }) + require.NoError(t, err2) + destBalances, err2 := testhelpers.GetBalances(t, []testhelpers.BalanceReq{ + {Name: testhelpers.Receiver, Addr: ccipTH.Dest.Receivers[0].Receiver.Address(), Getter: ccipTH.GetDestLinkBalance}, + {Name: testhelpers.DestPool, Addr: ccipTH.Dest.LinkTokenPool.Address(), Getter: ccipTH.GetDestLinkBalance}, + {Name: testhelpers.OffRamp, Addr: ccipTH.Dest.OffRamp.Address(), Getter: ccipTH.GetDestLinkBalance}, + }) + require.NoError(t, err2) + + ccipTH.Source.User.Value = tokenAmount + _, err2 = ccipTH.Source.WrappedNative.Deposit(ccipTH.Source.User) + require.NoError(t, err2) + 
ccipTH.Source.Chain.Commit() + ccipTH.Source.User.Value = nil + + msg := router.ClientEVM2AnyMessage{ + Receiver: testhelpers.MustEncodeAddress(t, ccipTH.Dest.Receivers[0].Receiver.Address()), + Data: []byte("hello"), + TokenAmounts: []router.ClientEVMTokenAmount{ + { + Token: ccipTH.Source.LinkToken.Address(), + Amount: tokenAmount, + }, + { + Token: ccipTH.Source.WrappedNative.Address(), + Amount: tokenAmount, + }, + }, + FeeToken: ccipTH.Source.LinkToken.Address(), + ExtraArgs: extraArgs, + } + fee, err2 := ccipTH.Source.Router.GetFee(nil, testhelpers.DestChainSelector, msg) + require.NoError(t, err2) + // Currently no overhead and 10gwei dest gas price. So fee is simply (gasLimit * gasPrice)* link/native + // require.Equal(t, new(big.Int).Mul(gasLimit, gasPrice).String(), fee.String()) + // Approve the fee amount + the token amount + _, err2 = ccipTH.Source.LinkToken.Approve(ccipTH.Source.User, ccipTH.Source.Router.Address(), new(big.Int).Add(fee, tokenAmount)) + require.NoError(t, err2) + ccipTH.Source.Chain.Commit() + _, err2 = ccipTH.Source.WrappedNative.Approve(ccipTH.Source.User, ccipTH.Source.Router.Address(), tokenAmount) + require.NoError(t, err2) + ccipTH.Source.Chain.Commit() + + ccipTH.SendRequest(t, msg) + // Should eventually see this executed. + ccipTH.AllNodesHaveReqSeqNum(t, currentSeqNum) + ccipTH.EventuallyReportCommitted(t, currentSeqNum) + + executionLogs := ccipTH.AllNodesHaveExecutedSeqNums(t, currentSeqNum, currentSeqNum) + assert.Len(t, executionLogs, 1) + ccipTH.AssertExecState(t, executionLogs[0], testhelpers.ExecutionStateSuccess) + + // Asserts + // 1) The total pool input == total pool output + // 2) Pool flow equals tokens sent + // 3) Sent tokens arrive at the receiver + ccipTH.AssertBalances(t, []testhelpers.BalanceAssertion{ + { + Name: testhelpers.SourcePool, + Address: ccipTH.Source.LinkTokenPool.Address(), + Expected: testhelpers.MustAddBigInt(sourceBalances[testhelpers.SourcePool], tokenAmount.String()).String(), + Getter: ccipTH.GetSourceLinkBalance, + }, + { + Name: testhelpers.SourcePriceRegistry, + Address: ccipTH.Source.PriceRegistry.Address(), + Expected: sourceBalances[testhelpers.SourcePriceRegistry].String(), + Getter: ccipTH.GetSourceLinkBalance, + }, + { + // Fees end up in the onramp. 
+ Name: testhelpers.OnRamp, + Address: ccipTH.Source.OnRamp.Address(), + Expected: testhelpers.MustAddBigInt(sourceBalances[testhelpers.SourcePriceRegistry], fee.String()).String(), + Getter: ccipTH.GetSourceLinkBalance, + }, + { + Name: testhelpers.SourceRouter, + Address: ccipTH.Source.Router.Address(), + Expected: sourceBalances[testhelpers.SourceRouter].String(), + Getter: ccipTH.GetSourceLinkBalance, + }, + { + Name: testhelpers.Receiver, + Address: ccipTH.Dest.Receivers[0].Receiver.Address(), + Expected: testhelpers.MustAddBigInt(destBalances[testhelpers.Receiver], tokenAmount.String()).String(), + Getter: ccipTH.GetDestLinkBalance, + }, + { + Name: testhelpers.DestPool, + Address: ccipTH.Dest.LinkTokenPool.Address(), + Expected: testhelpers.MustSubBigInt(destBalances[testhelpers.DestPool], tokenAmount.String()).String(), + Getter: ccipTH.GetDestLinkBalance, + }, + { + Name: testhelpers.OffRamp, + Address: ccipTH.Dest.OffRamp.Address(), + Expected: destBalances[testhelpers.OffRamp].String(), + Getter: ccipTH.GetDestLinkBalance, + }, + }) + currentSeqNum++ + }) + + t.Run("multiple batches", func(t *testing.T) { + tokenAmount := big.NewInt(500000003) + gasLimit := big.NewInt(250_000) + + var txs []*gethtypes.Transaction + // Enough to require batched executions as gasLimit per tx is 250k -> 500k -> 750k .... + // The actual gas usage of executing 15 messages is higher than the gas limit for + // a single tx. This means that when batching is turned off, and we simply include + // all txs without checking gas, this also fails. + n := 15 + for i := 0; i < n; i++ { + txGasLimit := new(big.Int).Mul(gasLimit, big.NewInt(int64(i+1))) + extraArgs, err2 := testhelpers.GetEVMExtraArgsV1(txGasLimit, false) + require.NoError(t, err2) + msg := router.ClientEVM2AnyMessage{ + Receiver: testhelpers.MustEncodeAddress(t, ccipTH.Dest.Receivers[0].Receiver.Address()), + Data: []byte("hello"), + TokenAmounts: []router.ClientEVMTokenAmount{ + { + Token: ccipTH.Source.LinkToken.Address(), + Amount: tokenAmount, + }, + }, + FeeToken: ccipTH.Source.LinkToken.Address(), + ExtraArgs: extraArgs, + } + fee, err2 := ccipTH.Source.Router.GetFee(nil, testhelpers.DestChainSelector, msg) + require.NoError(t, err2) + // Currently no overhead and 1gwei dest gas price. So fee is simply gasLimit * gasPrice. 
+ // require.Equal(t, new(big.Int).Mul(txGasLimit, gasPrice).String(), fee.String()) + // Approve the fee amount + the token amount + _, err2 = ccipTH.Source.LinkToken.Approve(ccipTH.Source.User, ccipTH.Source.Router.Address(), new(big.Int).Add(fee, tokenAmount)) + require.NoError(t, err2) + tx, err2 := ccipTH.Source.Router.CcipSend(ccipTH.Source.User, ccipTH.Dest.ChainSelector, msg) + require.NoError(t, err2) + txs = append(txs, tx) + } + + // Send a batch of requests in a single block + testhelpers_new.ConfirmTxs(t, txs, ccipTH.Source.Chain) + for i := 0; i < n; i++ { + ccipTH.AllNodesHaveReqSeqNum(t, currentSeqNum+i) + } + // Should see a report with the full range + ccipTH.EventuallyReportCommitted(t, currentSeqNum+n-1) + // Should all be executed + executionLogs := ccipTH.AllNodesHaveExecutedSeqNums(t, currentSeqNum, currentSeqNum+n-1) + for _, execLog := range executionLogs { + ccipTH.AssertExecState(t, execLog, testhelpers.ExecutionStateSuccess) + } + + currentSeqNum += n + }) + + // Deploy new on ramp,Commit store,off ramp + // Delete v1 jobs + // Send a number of requests + // Upgrade the router with new contracts + // create new jobs + // Verify all pending requests are sent after the contracts are upgraded + t.Run("upgrade contracts and verify requests can be sent with upgraded contract", func(t *testing.T) { + gasLimit := big.NewInt(200_003) // prime number + tokenAmount := big.NewInt(100) + commitStoreV1 := ccipTH.Dest.CommitStore + offRampV1 := ccipTH.Dest.OffRamp + onRampV1 := ccipTH.Source.OnRamp + // deploy v2 contracts + ccipTH.DeployNewOnRamp(t) + ccipTH.DeployNewCommitStore(t) + ccipTH.DeployNewOffRamp(t) + + // send a request as the v2 contracts are not enabled in router it should route through the v1 contracts + t.Logf("sending request for seqnum %d", currentSeqNum) + ccipTH.SendMessage(t, gasLimit, tokenAmount, ccipTH.Dest.Receivers[0].Receiver.Address()) + ccipTH.Source.Chain.Commit() + ccipTH.Dest.Chain.Commit() + t.Logf("verifying seqnum %d on previous onRamp %s", currentSeqNum, onRampV1.Address().Hex()) + ccipTH.AllNodesHaveReqSeqNum(t, currentSeqNum, onRampV1.Address()) + ccipTH.EventuallyReportCommitted(t, currentSeqNum, commitStoreV1.Address()) + executionLog := ccipTH.AllNodesHaveExecutedSeqNums(t, currentSeqNum, currentSeqNum, offRampV1.Address()) + ccipTH.AssertExecState(t, executionLog[0], testhelpers.ExecutionStateSuccess, offRampV1.Address()) + + nonceAtOnRampV1, err := onRampV1.GetSenderNonce(nil, ccipTH.Source.User.From) + require.NoError(t, err, "getting nonce from onRamp") + require.Equal(t, currentSeqNum, int(nonceAtOnRampV1)) + nonceAtOffRampV1, err := offRampV1.GetSenderNonce(nil, ccipTH.Source.User.From) + require.NoError(t, err, "getting nonce from offRamp") + require.Equal(t, currentSeqNum, int(nonceAtOffRampV1)) + + // enable the newly deployed contracts + newConfigBlock := ccipTH.Dest.Chain.Blockchain().CurrentBlock().Number.Int64() + ccipTH.EnableOnRamp(t) + ccipTH.EnableCommitStore(t) + ccipTH.EnableOffRamp(t) + srcStartBlock := ccipTH.Source.Chain.Blockchain().CurrentBlock().Number.Uint64() + + // send a number of requests, the requests should not be delivered yet as the previous contracts are not configured + // with the router anymore + startSeq := 1 + noOfRequests := 5 + endSeqNum := startSeq + noOfRequests + for i := startSeq; i <= endSeqNum; i++ { + t.Logf("sending request for seqnum %d", i) + ccipTH.SendMessage(t, gasLimit, tokenAmount, ccipTH.Dest.Receivers[0].Receiver.Address()) + ccipTH.Source.Chain.Commit() + 
ccipTH.Dest.Chain.Commit() + ccipTH.EventuallySendRequested(t, uint64(i)) + } + + // delete v1 jobs + for _, node := range ccipTH.Nodes { + id := node.FindJobIDForContract(t, commitStoreV1.Address()) + require.Greater(t, id, int32(0)) + t.Logf("deleting job %d", id) + err = node.App.DeleteJob(context.Background(), id) + require.NoError(t, err) + id = node.FindJobIDForContract(t, offRampV1.Address()) + require.Greater(t, id, int32(0)) + t.Logf("deleting job %d", id) + err = node.App.DeleteJob(context.Background(), id) + require.NoError(t, err) + } + + // Commit on both chains to reach Finality + ccipTH.Source.Chain.Commit() + ccipTH.Dest.Chain.Commit() + + // create new jobs + jobParams = ccipTH.NewCCIPJobSpecParams(tokenPricesUSDPipeline, priceGetterConfigJson, newConfigBlock, "") + jobParams.Version = "v2" + jobParams.SourceStartBlock = srcStartBlock + ccipTH.AddAllJobs(t, jobParams) + committedSeqNum := uint64(0) + // Now the requests should be delivered + for i := startSeq; i <= endSeqNum; i++ { + t.Logf("verifying seqnum %d", i) + ccipTH.AllNodesHaveReqSeqNum(t, i) + if committedSeqNum < uint64(i+1) { + committedSeqNum = ccipTH.EventuallyReportCommitted(t, i) + } + ccipTH.EventuallyExecutionStateChangedToSuccess(t, []uint64{uint64(i)}, uint64(newConfigBlock)) + } + + // nonces should be correctly synced from v1 contracts for the sender + nonceAtOnRampV2, err := ccipTH.Source.OnRamp.GetSenderNonce(nil, ccipTH.Source.User.From) + require.NoError(t, err, "getting nonce from onRamp") + nonceAtOffRampV2, err := ccipTH.Dest.OffRamp.GetSenderNonce(nil, ccipTH.Source.User.From) + require.NoError(t, err, "getting nonce from offRamp") + require.Equal(t, nonceAtOnRampV1+uint64(noOfRequests)+1, nonceAtOnRampV2, "nonce should be synced from v1 onRamps") + require.Equal(t, nonceAtOffRampV1+uint64(noOfRequests)+1, nonceAtOffRampV2, "nonce should be synced from v1 offRamps") + currentSeqNum = endSeqNum + 1 + }) + + t.Run("pay nops", func(t *testing.T) { + linkToTransferToOnRamp := big.NewInt(1e18) + + // transfer some link to onramp to pay the nops + _, err := ccipTH.Source.LinkToken.Transfer(ccipTH.Source.User, ccipTH.Source.OnRamp.Address(), linkToTransferToOnRamp) + require.NoError(t, err) + ccipTH.Source.Chain.Commit() + + srcBalReq := []testhelpers.BalanceReq{ + { + Name: testhelpers.Sender, + Addr: ccipTH.Source.User.From, + Getter: ccipTH.GetSourceWrappedTokenBalance, + }, + { + Name: testhelpers.OnRampNative, + Addr: ccipTH.Source.OnRamp.Address(), + Getter: ccipTH.GetSourceWrappedTokenBalance, + }, + { + Name: testhelpers.OnRamp, + Addr: ccipTH.Source.OnRamp.Address(), + Getter: ccipTH.GetSourceLinkBalance, + }, + { + Name: testhelpers.SourceRouter, + Addr: ccipTH.Source.Router.Address(), + Getter: ccipTH.GetSourceWrappedTokenBalance, + }, + } + + var nopsAndWeights []evm_2_evm_onramp.EVM2EVMOnRampNopAndWeight + var totalWeight uint16 + nodes := ccipTH.Nodes + for i := range nodes { + // For now set the transmitter addresses to be the same as the payee addresses + nodes[i].PaymentReceiver = nodes[i].Transmitter + nopsAndWeights = append(nopsAndWeights, evm_2_evm_onramp.EVM2EVMOnRampNopAndWeight{ + Nop: nodes[i].PaymentReceiver, + Weight: 5, + }) + totalWeight += 5 + srcBalReq = append(srcBalReq, testhelpers.BalanceReq{ + Name: fmt.Sprintf("node %d", i), + Addr: nodes[i].PaymentReceiver, + Getter: ccipTH.GetSourceLinkBalance, + }) + } + srcBalances, err := testhelpers.GetBalances(t, srcBalReq) + require.NoError(t, err) + + // set nops on the onramp + ccipTH.SetNopsOnRamp(t, nopsAndWeights) + + 
// send a message + extraArgs, err := testhelpers.GetEVMExtraArgsV1(big.NewInt(200_000), true) + require.NoError(t, err) + + // FeeToken is empty, indicating it should use native token + msg := router.ClientEVM2AnyMessage{ + Receiver: testhelpers.MustEncodeAddress(t, ccipTH.Dest.Receivers[1].Receiver.Address()), + Data: []byte("hello"), + TokenAmounts: []router.ClientEVMTokenAmount{}, + ExtraArgs: extraArgs, + FeeToken: common.Address{}, + } + fee, err := ccipTH.Source.Router.GetFee(nil, testhelpers.DestChainSelector, msg) + require.NoError(t, err) + + // verify message is sent + ccipTH.Source.User.Value = fee + ccipTH.SendRequest(t, msg) + ccipTH.Source.User.Value = nil + ccipTH.AllNodesHaveReqSeqNum(t, currentSeqNum) + ccipTH.EventuallyReportCommitted(t, currentSeqNum) + + executionLogs := ccipTH.AllNodesHaveExecutedSeqNums(t, currentSeqNum, currentSeqNum) + assert.Len(t, executionLogs, 1) + ccipTH.AssertExecState(t, executionLogs[0], testhelpers.ExecutionStateSuccess) + currentSeqNum++ + + // get the nop fee + nopFee, err := ccipTH.Source.OnRamp.GetNopFeesJuels(nil) + require.NoError(t, err) + t.Log("nopFee", nopFee) + + // withdraw fees and verify there is still fund left for nop payment + _, err = ccipTH.Source.OnRamp.WithdrawNonLinkFees( + ccipTH.Source.User, + ccipTH.Source.WrappedNative.Address(), + ccipTH.Source.User.From, + ) + require.NoError(t, err) + ccipTH.Source.Chain.Commit() + + // pay nops + _, err = ccipTH.Source.OnRamp.PayNops(ccipTH.Source.User) + require.NoError(t, err) + ccipTH.Source.Chain.Commit() + + srcBalanceAssertions := []testhelpers.BalanceAssertion{ + { + // Onramp should not have any balance left in wrapped native + Name: testhelpers.OnRampNative, + Address: ccipTH.Source.OnRamp.Address(), + Expected: big.NewInt(0).String(), + Getter: ccipTH.GetSourceWrappedTokenBalance, + }, + { + // Onramp should have the remaining link after paying nops + Name: testhelpers.OnRamp, + Address: ccipTH.Source.OnRamp.Address(), + Expected: new(big.Int).Sub(srcBalances[testhelpers.OnRamp], nopFee).String(), + Getter: ccipTH.GetSourceLinkBalance, + }, + { + Name: testhelpers.SourceRouter, + Address: ccipTH.Source.Router.Address(), + Expected: srcBalances[testhelpers.SourceRouter].String(), + Getter: ccipTH.GetSourceWrappedTokenBalance, + }, + // onRamp's balance (of previously sent fee during message sending) should have been transferred to + // the owner as a result of WithdrawNonLinkFees + { + Name: testhelpers.Sender, + Address: ccipTH.Source.User.From, + Expected: fee.String(), + Getter: ccipTH.GetSourceWrappedTokenBalance, + }, + } + + // the nodes should be paid according to the weights assigned + for i, node := range nodes { + paymentWeight := float64(nopsAndWeights[i].Weight) / float64(totalWeight) + paidInFloat := paymentWeight * float64(nopFee.Int64()) + paid, _ := new(big.Float).SetFloat64(paidInFloat).Int64() + bal := new(big.Int).Add( + new(big.Int).SetInt64(paid), + srcBalances[fmt.Sprintf("node %d", i)]).String() + srcBalanceAssertions = append(srcBalanceAssertions, testhelpers.BalanceAssertion{ + Name: fmt.Sprintf("node %d", i), + Address: node.PaymentReceiver, + Expected: bal, + Getter: ccipTH.GetSourceLinkBalance, + }) + } + ccipTH.AssertBalances(t, srcBalanceAssertions) + }) + + // Keep on sending a bunch of messages + // In the meantime update onchainConfig with new price registry address + // Verify if the jobs can pick up updated config + // Verify if all the messages are sent + t.Run("config change or price registry update while requests are inflight", 
func(t *testing.T) { + gasLimit := big.NewInt(200_003) // prime number + tokenAmount := big.NewInt(100) + msgWg := &sync.WaitGroup{} + msgWg.Add(1) + ticker := time.NewTicker(100 * time.Millisecond) + defer ticker.Stop() + startSeq := currentSeqNum + endSeq := currentSeqNum + 20 + + // send message with the old configs + ccipTH.SendMessage(t, gasLimit, tokenAmount, ccipTH.Dest.Receivers[0].Receiver.Address()) + ccipTH.Source.Chain.Commit() + + go func(ccipContracts testhelpers.CCIPContracts, currentSeqNum int) { + seqNumber := currentSeqNum + 1 + defer msgWg.Done() + for { + <-ticker.C // wait for ticker + t.Logf("sending request for seqnum %d", seqNumber) + ccipContracts.SendMessage(t, gasLimit, tokenAmount, ccipTH.Dest.Receivers[0].Receiver.Address()) + ccipContracts.Source.Chain.Commit() + seqNumber++ + if seqNumber == endSeq { + return + } + } + }(ccipTH.CCIPContracts, currentSeqNum) + + ccipTH.DeployNewPriceRegistry(t) + commitOnchainConfig := ccipTH.CreateDefaultCommitOnchainConfig(t) + commitOffchainConfig := ccipTH.CreateDefaultCommitOffchainConfig(t) + execOnchainConfig := ccipTH.CreateDefaultExecOnchainConfig(t) + execOffchainConfig := ccipTH.CreateDefaultExecOffchainConfig(t) + + ccipTH.SetupOnchainConfig(t, commitOnchainConfig, commitOffchainConfig, execOnchainConfig, execOffchainConfig) + + // wait for all requests to be complete + msgWg.Wait() + for i := startSeq; i < endSeq; i++ { + ccipTH.AllNodesHaveReqSeqNum(t, i) + ccipTH.EventuallyReportCommitted(t, i) + + executionLogs := ccipTH.AllNodesHaveExecutedSeqNums(t, i, i) + assert.Len(t, executionLogs, 1) + ccipTH.AssertExecState(t, executionLogs[0], testhelpers.ExecutionStateSuccess) + } + + for i, node := range ccipTH.Nodes { + t.Logf("verifying node %d", i) + node.EventuallyNodeUsesNewCommitConfig(t, ccipTH, ccipdata.CommitOnchainConfig{ + PriceRegistry: ccipTH.Dest.PriceRegistry.Address(), + }) + node.EventuallyNodeUsesNewExecConfig(t, ccipTH, v1_2_0.ExecOnchainConfig{ + PermissionLessExecutionThresholdSeconds: testhelpers.PermissionLessExecutionThresholdSeconds, + Router: ccipTH.Dest.Router.Address(), + PriceRegistry: ccipTH.Dest.PriceRegistry.Address(), + MaxDataBytes: 1e5, + MaxNumberOfTokensPerMsg: 5, + MaxPoolReleaseOrMintGas: 200_000, + }) + node.EventuallyNodeUsesUpdatedPriceRegistry(t, ccipTH) + } + currentSeqNum = endSeq + }) + }) + } +} diff --git a/core/services/ocr2/plugins/ccip/integration_test.go b/core/services/ocr2/plugins/ccip/integration_test.go new file mode 100644 index 00000000000..202d2ef2304 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/integration_test.go @@ -0,0 +1,644 @@ +package ccip_test + +import ( + "context" + "encoding/json" + "fmt" + "math/big" + "sync" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + gethtypes "github.com/ethereum/go-ethereum/core/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_onramp" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/router" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/mock_v3_aggregator_contract" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_5_0" + 
"github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/testhelpers" + integrationtesthelpers "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/testhelpers/integration" +) + +func TestIntegration_CCIP(t *testing.T) { + // Run the batches of tests for both pipeline and dynamic price getter setups. + // We will remove the pipeline batch once the feature is deleted from the code. + tests := []struct { + name string + withPipeline bool + allowOutOfOrderExecution bool + }{ + { + name: "with pipeline allowOutOfOrderExecution true", + withPipeline: true, + allowOutOfOrderExecution: true, + }, + { + name: "with dynamic price getter allowOutOfOrderExecution false", + withPipeline: false, + allowOutOfOrderExecution: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ccipTH := integrationtesthelpers.SetupCCIPIntegrationTH(t, testhelpers.SourceChainID, testhelpers.SourceChainSelector, testhelpers.DestChainID, testhelpers.DestChainSelector) + + tokenPricesUSDPipeline := "" + priceGetterConfigJson := "" + + if test.withPipeline { + // Set up a test pipeline. + testPricePipeline, linkUSD, ethUSD := ccipTH.CreatePricesPipeline(t) + defer linkUSD.Close() + defer ethUSD.Close() + tokenPricesUSDPipeline = testPricePipeline + } else { + // Set up a test price getter. + // Set up the aggregators here to avoid modifying ccipTH. + aggSrcNatAddr, _, aggSrcNat, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract(ccipTH.Source.User, ccipTH.Source.Chain, 18, big.NewInt(2e18)) + require.NoError(t, err) + _, err = aggSrcNat.UpdateRoundData(ccipTH.Source.User, big.NewInt(50), big.NewInt(17000000), big.NewInt(1000), big.NewInt(1000)) + require.NoError(t, err) + ccipTH.Source.Chain.Commit() + + aggSrcLnkAddr, _, aggSrcLnk, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract(ccipTH.Source.User, ccipTH.Source.Chain, 18, big.NewInt(3e18)) + require.NoError(t, err) + ccipTH.Dest.Chain.Commit() + _, err = aggSrcLnk.UpdateRoundData(ccipTH.Source.User, big.NewInt(50), big.NewInt(8000000), big.NewInt(1000), big.NewInt(1000)) + require.NoError(t, err) + ccipTH.Source.Chain.Commit() + + aggDstLnkAddr, _, aggDstLnk, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract(ccipTH.Dest.User, ccipTH.Dest.Chain, 18, big.NewInt(3e18)) + require.NoError(t, err) + ccipTH.Dest.Chain.Commit() + _, err = aggDstLnk.UpdateRoundData(ccipTH.Dest.User, big.NewInt(50), big.NewInt(8000000), big.NewInt(1000), big.NewInt(1000)) + require.NoError(t, err) + ccipTH.Dest.Chain.Commit() + + priceGetterConfig := config.DynamicPriceGetterConfig{ + AggregatorPrices: map[common.Address]config.AggregatorPriceConfig{ + ccipTH.Source.LinkToken.Address(): { + ChainID: ccipTH.Source.ChainID, + AggregatorContractAddress: aggSrcLnkAddr, + }, + ccipTH.Source.WrappedNative.Address(): { + ChainID: ccipTH.Source.ChainID, + AggregatorContractAddress: aggSrcNatAddr, + }, + ccipTH.Dest.LinkToken.Address(): { + ChainID: ccipTH.Dest.ChainID, + AggregatorContractAddress: aggDstLnkAddr, + }, + ccipTH.Dest.WrappedNative.Address(): { + ChainID: ccipTH.Dest.ChainID, + AggregatorContractAddress: aggDstLnkAddr, + }, + }, + StaticPrices: map[common.Address]config.StaticPriceConfig{}, + } + priceGetterConfigBytes, err := json.MarshalIndent(priceGetterConfig, "", " ") + require.NoError(t, err) + priceGetterConfigJson = string(priceGetterConfigBytes) + } + + jobParams := ccipTH.SetUpNodesAndJobs(t, tokenPricesUSDPipeline, priceGetterConfigJson, "") + + // track 
sequence number and nonce separately since nonce doesn't bump for messages with allowOutOfOrderExecution == true, + // but sequence number always bumps. + // for this test, when test.outOfOrder == false, sequence number and nonce are equal. + // when test.outOfOrder == true, nonce is not bumped at all, so sequence number and nonce are NOT equal. + currentSeqNum := 1 + currentNonce := uint64(1) + + t.Run("single", func(t *testing.T) { + tokenAmount := big.NewInt(500000003) // prime number + gasLimit := big.NewInt(200_003) // prime number + + extraArgs, err2 := testhelpers.GetEVMExtraArgsV2(gasLimit, test.allowOutOfOrderExecution) + require.NoError(t, err2) + + sourceBalances, err2 := testhelpers.GetBalances(t, []testhelpers.BalanceReq{ + {Name: testhelpers.SourcePool, Addr: ccipTH.Source.LinkTokenPool.Address(), Getter: ccipTH.GetSourceLinkBalance}, + {Name: testhelpers.OnRamp, Addr: ccipTH.Source.OnRamp.Address(), Getter: ccipTH.GetSourceLinkBalance}, + {Name: testhelpers.SourceRouter, Addr: ccipTH.Source.Router.Address(), Getter: ccipTH.GetSourceLinkBalance}, + {Name: testhelpers.SourcePriceRegistry, Addr: ccipTH.Source.PriceRegistry.Address(), Getter: ccipTH.GetSourceLinkBalance}, + }) + require.NoError(t, err2) + destBalances, err2 := testhelpers.GetBalances(t, []testhelpers.BalanceReq{ + {Name: testhelpers.Receiver, Addr: ccipTH.Dest.Receivers[0].Receiver.Address(), Getter: ccipTH.GetDestLinkBalance}, + {Name: testhelpers.DestPool, Addr: ccipTH.Dest.LinkTokenPool.Address(), Getter: ccipTH.GetDestLinkBalance}, + {Name: testhelpers.OffRamp, Addr: ccipTH.Dest.OffRamp.Address(), Getter: ccipTH.GetDestLinkBalance}, + }) + require.NoError(t, err2) + + ccipTH.Source.User.Value = tokenAmount + _, err2 = ccipTH.Source.WrappedNative.Deposit(ccipTH.Source.User) + require.NoError(t, err2) + ccipTH.Source.Chain.Commit() + ccipTH.Source.User.Value = nil + + msg := router.ClientEVM2AnyMessage{ + Receiver: testhelpers.MustEncodeAddress(t, ccipTH.Dest.Receivers[0].Receiver.Address()), + Data: []byte("hello"), + TokenAmounts: []router.ClientEVMTokenAmount{ + { + Token: ccipTH.Source.LinkToken.Address(), + Amount: tokenAmount, + }, + { + Token: ccipTH.Source.WrappedNative.Address(), + Amount: tokenAmount, + }, + }, + FeeToken: ccipTH.Source.LinkToken.Address(), + ExtraArgs: extraArgs, + } + fee, err2 := ccipTH.Source.Router.GetFee(nil, testhelpers.DestChainSelector, msg) + require.NoError(t, err2) + // Currently no overhead and 10gwei dest gas price. So fee is simply (gasLimit * gasPrice)* link/native + // require.Equal(t, new(big.Int).Mul(gasLimit, gasPrice).String(), fee.String()) + // Approve the fee amount + the token amount + _, err2 = ccipTH.Source.LinkToken.Approve(ccipTH.Source.User, ccipTH.Source.Router.Address(), new(big.Int).Add(fee, tokenAmount)) + require.NoError(t, err2) + ccipTH.Source.Chain.Commit() + _, err2 = ccipTH.Source.WrappedNative.Approve(ccipTH.Source.User, ccipTH.Source.Router.Address(), tokenAmount) + require.NoError(t, err2) + ccipTH.Source.Chain.Commit() + + beforeNonce, err := ccipTH.Source.OnRamp.GetSenderNonce(nil, ccipTH.Source.User.From) + require.NoError(t, err) + ccipTH.SendRequest(t, msg) + // TODO: can this be moved into SendRequest? + if test.allowOutOfOrderExecution { + // the nonce for that sender must not be bumped for allowOutOfOrderExecution == true messages. 
+					nonce, err2 := ccipTH.Source.OnRamp.GetSenderNonce(nil, ccipTH.Source.User.From)
+					require.NoError(t, err2)
+					require.Equal(t, beforeNonce, nonce, "nonce must not be bumped for allowOutOfOrderExecution == true requests")
+				} else {
+					// the nonce for that sender must be bumped for allowOutOfOrderExecution == false messages.
+					nonce, err2 := ccipTH.Source.OnRamp.GetSenderNonce(nil, ccipTH.Source.User.From)
+					require.NoError(t, err2)
+					require.Equal(t, beforeNonce+1, nonce, "nonce must be bumped for allowOutOfOrderExecution == false requests")
+				}
+
+				// Should eventually see this executed.
+				ccipTH.AllNodesHaveReqSeqNum(t, currentSeqNum)
+				ccipTH.EventuallyReportCommitted(t, currentSeqNum)
+
+				executionLogs := ccipTH.AllNodesHaveExecutedSeqNums(t, currentSeqNum, currentSeqNum)
+				assert.Len(t, executionLogs, 1)
+				ccipTH.AssertExecState(t, executionLogs[0], testhelpers.ExecutionStateSuccess)
+
+				// Asserts
+				// 1) The total pool input == total pool output
+				// 2) Pool flow equals tokens sent
+				// 3) Sent tokens arrive at the receiver
+				ccipTH.AssertBalances(t, []testhelpers.BalanceAssertion{
+					{
+						Name:     testhelpers.SourcePool,
+						Address:  ccipTH.Source.LinkTokenPool.Address(),
+						Expected: testhelpers.MustAddBigInt(sourceBalances[testhelpers.SourcePool], tokenAmount.String()).String(),
+						Getter:   ccipTH.GetSourceLinkBalance,
+					},
+					{
+						Name:     testhelpers.SourcePriceRegistry,
+						Address:  ccipTH.Source.PriceRegistry.Address(),
+						Expected: sourceBalances[testhelpers.SourcePriceRegistry].String(),
+						Getter:   ccipTH.GetSourceLinkBalance,
+					},
+					{
+						// Fees end up in the onramp.
+						Name:     testhelpers.OnRamp,
+						Address:  ccipTH.Source.OnRamp.Address(),
+						Expected: testhelpers.MustAddBigInt(sourceBalances[testhelpers.SourcePriceRegistry], fee.String()).String(),
+						Getter:   ccipTH.GetSourceLinkBalance,
+					},
+					{
+						Name:     testhelpers.SourceRouter,
+						Address:  ccipTH.Source.Router.Address(),
+						Expected: sourceBalances[testhelpers.SourceRouter].String(),
+						Getter:   ccipTH.GetSourceLinkBalance,
+					},
+					{
+						Name:     testhelpers.Receiver,
+						Address:  ccipTH.Dest.Receivers[0].Receiver.Address(),
+						Expected: testhelpers.MustAddBigInt(destBalances[testhelpers.Receiver], tokenAmount.String()).String(),
+						Getter:   ccipTH.GetDestLinkBalance,
+					},
+					{
+						Name:     testhelpers.DestPool,
+						Address:  ccipTH.Dest.LinkTokenPool.Address(),
+						Expected: testhelpers.MustSubBigInt(destBalances[testhelpers.DestPool], tokenAmount.String()).String(),
+						Getter:   ccipTH.GetDestLinkBalance,
+					},
+					{
+						Name:     testhelpers.OffRamp,
+						Address:  ccipTH.Dest.OffRamp.Address(),
+						Expected: destBalances[testhelpers.OffRamp].String(),
+						Getter:   ccipTH.GetDestLinkBalance,
+					},
+				})
+				currentSeqNum++
+				if !test.allowOutOfOrderExecution {
+					currentNonce = uint64(currentSeqNum)
+				}
+			})
+
+			t.Run("multiple batches", func(t *testing.T) {
+				tokenAmount := big.NewInt(500000003)
+				gasLimit := big.NewInt(250_000)
+
+				var txs []*gethtypes.Transaction
+				// Enough to require batched executions as gasLimit per tx is 250k -> 500k -> 750k ....
+				// The actual gas usage of executing 15 messages is higher than the gas limit for
+				// a single tx. This means that when batching is turned off, and we simply include
+				// all txs without checking gas, this also fails.
+				n := 15
+				for i := 0; i < n; i++ {
+					txGasLimit := new(big.Int).Mul(gasLimit, big.NewInt(int64(i+1)))
+
+					// interleave ordered and non-ordered messages.
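+					// Even-indexed messages are sent with allowOutOfOrderExecution = true; only the ordered (odd-indexed) messages increment the tracked sender nonce below.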
+ allowOutOfOrderExecution := false + if i%2 == 0 { + allowOutOfOrderExecution = true + } + extraArgs, err2 := testhelpers.GetEVMExtraArgsV2(txGasLimit, allowOutOfOrderExecution) + require.NoError(t, err2) + msg := router.ClientEVM2AnyMessage{ + Receiver: testhelpers.MustEncodeAddress(t, ccipTH.Dest.Receivers[0].Receiver.Address()), + Data: []byte("hello"), + TokenAmounts: []router.ClientEVMTokenAmount{ + { + Token: ccipTH.Source.LinkToken.Address(), + Amount: tokenAmount, + }, + }, + FeeToken: ccipTH.Source.LinkToken.Address(), + ExtraArgs: extraArgs, + } + fee, err2 := ccipTH.Source.Router.GetFee(nil, testhelpers.DestChainSelector, msg) + require.NoError(t, err2) + // Currently no overhead and 1gwei dest gas price. So fee is simply gasLimit * gasPrice. + // require.Equal(t, new(big.Int).Mul(txGasLimit, gasPrice).String(), fee.String()) + // Approve the fee amount + the token amount + _, err2 = ccipTH.Source.LinkToken.Approve(ccipTH.Source.User, ccipTH.Source.Router.Address(), new(big.Int).Add(fee, tokenAmount)) + require.NoError(t, err2) + tx, err2 := ccipTH.Source.Router.CcipSend(ccipTH.Source.User, ccipTH.Dest.ChainSelector, msg) + require.NoError(t, err2) + txs = append(txs, tx) + if !allowOutOfOrderExecution { + currentNonce++ + } + } + + // Send a batch of requests in a single block + testhelpers.ConfirmTxs(t, txs, ccipTH.Source.Chain) + for i := 0; i < n; i++ { + ccipTH.AllNodesHaveReqSeqNum(t, currentSeqNum+i) + } + // Should see a report with the full range + ccipTH.EventuallyReportCommitted(t, currentSeqNum+n-1) + // Should all be executed + executionLogs := ccipTH.AllNodesHaveExecutedSeqNums(t, currentSeqNum, currentSeqNum+n-1) + for _, execLog := range executionLogs { + ccipTH.AssertExecState(t, execLog, testhelpers.ExecutionStateSuccess) + } + + currentSeqNum += n + }) + + // Deploy new on ramp,Commit store,off ramp + // Delete v1 jobs + // Send a number of requests + // Upgrade the router with new contracts + // create new jobs + // Verify all pending requests are sent after the contracts are upgraded + t.Run("upgrade contracts and verify requests can be sent with upgraded contract", func(t *testing.T) { + gasLimit := big.NewInt(200_003) // prime number + tokenAmount := big.NewInt(100) + commitStoreV1 := ccipTH.Dest.CommitStore + offRampV1 := ccipTH.Dest.OffRamp + onRampV1 := ccipTH.Source.OnRamp + // deploy v2 contracts + ccipTH.DeployNewOnRamp(t) + ccipTH.DeployNewCommitStore(t) + ccipTH.DeployNewOffRamp(t) + + // send a request as the v2 contracts are not enabled in router it should route through the v1 contracts + t.Logf("sending request for seqnum %d", currentSeqNum) + ccipTH.SendMessage(t, gasLimit, tokenAmount, ccipTH.Dest.Receivers[0].Receiver.Address()) + ccipTH.Source.Chain.Commit() + ccipTH.Dest.Chain.Commit() + t.Logf("verifying seqnum %d on previous onRamp %s", currentSeqNum, onRampV1.Address().Hex()) + ccipTH.AllNodesHaveReqSeqNum(t, currentSeqNum, onRampV1.Address()) + ccipTH.EventuallyReportCommitted(t, currentSeqNum, commitStoreV1.Address()) + executionLog := ccipTH.AllNodesHaveExecutedSeqNums(t, currentSeqNum, currentSeqNum, offRampV1.Address()) + ccipTH.AssertExecState(t, executionLog[0], testhelpers.ExecutionStateSuccess, offRampV1.Address()) + + nonceAtOnRampV1, err := onRampV1.GetSenderNonce(nil, ccipTH.Source.User.From) + require.NoError(t, err, "getting nonce from onRamp") + require.Equal(t, currentNonce, nonceAtOnRampV1, "nonce should be synced from v1 onRamp") + nonceAtOffRampV1, err := offRampV1.GetSenderNonce(nil, ccipTH.Source.User.From) + 
require.NoError(t, err, "getting nonce from offRamp") + require.Equal(t, currentNonce, nonceAtOffRampV1, "nonce should be synced from v1 offRamp") + + // enable the newly deployed contracts + newConfigBlock := ccipTH.Dest.Chain.Blockchain().CurrentBlock().Number.Int64() + ccipTH.EnableOnRamp(t) + ccipTH.EnableCommitStore(t) + ccipTH.EnableOffRamp(t) + srcStartBlock := ccipTH.Source.Chain.Blockchain().CurrentBlock().Number.Uint64() + + // send a number of requests, the requests should not be delivered yet as the previous contracts are not configured + // with the router anymore + startSeq := 1 + noOfRequests := 5 + endSeqNum := startSeq + noOfRequests + for i := startSeq; i <= endSeqNum; i++ { + t.Logf("sending request for seqnum %d", i) + ccipTH.SendMessage(t, gasLimit, tokenAmount, ccipTH.Dest.Receivers[0].Receiver.Address()) + ccipTH.Source.Chain.Commit() + ccipTH.Dest.Chain.Commit() + ccipTH.EventuallySendRequested(t, uint64(i)) + } + + // delete v1 jobs + for _, node := range ccipTH.Nodes { + id := node.FindJobIDForContract(t, commitStoreV1.Address()) + require.Greater(t, id, int32(0)) + t.Logf("deleting job %d", id) + err = node.App.DeleteJob(context.Background(), id) + require.NoError(t, err) + id = node.FindJobIDForContract(t, offRampV1.Address()) + require.Greater(t, id, int32(0)) + t.Logf("deleting job %d", id) + err = node.App.DeleteJob(context.Background(), id) + require.NoError(t, err) + } + + // Commit on both chains to reach Finality + ccipTH.Source.Chain.Commit() + ccipTH.Dest.Chain.Commit() + + // create new jobs + jobParams = ccipTH.NewCCIPJobSpecParams(tokenPricesUSDPipeline, priceGetterConfigJson, newConfigBlock, "") + jobParams.Version = "v2" + jobParams.SourceStartBlock = srcStartBlock + ccipTH.AddAllJobs(t, jobParams) + committedSeqNum := uint64(0) + // Now the requests should be delivered + for i := startSeq; i <= endSeqNum; i++ { + t.Logf("verifying seqnum %d", i) + ccipTH.AllNodesHaveReqSeqNum(t, i) + if committedSeqNum < uint64(i+1) { + committedSeqNum = ccipTH.EventuallyReportCommitted(t, i) + } + ccipTH.EventuallyExecutionStateChangedToSuccess(t, []uint64{uint64(i)}, uint64(newConfigBlock)) + } + + // nonces should be correctly synced from v1 contracts for the sender + nonceAtOnRampV2, err := ccipTH.Source.OnRamp.GetSenderNonce(nil, ccipTH.Source.User.From) + require.NoError(t, err, "getting nonce from onRamp") + nonceAtOffRampV2, err := ccipTH.Dest.OffRamp.GetSenderNonce(nil, ccipTH.Source.User.From) + require.NoError(t, err, "getting nonce from offRamp") + require.Equal(t, nonceAtOnRampV1+uint64(noOfRequests)+1, nonceAtOnRampV2, "nonce should be synced from v1 onRamps") + require.Equal(t, nonceAtOffRampV1+uint64(noOfRequests)+1, nonceAtOffRampV2, "nonce should be synced from v1 offRamps") + currentSeqNum = endSeqNum + 1 + if !test.allowOutOfOrderExecution { + currentNonce = uint64(currentSeqNum) + } + }) + + t.Run("pay nops", func(t *testing.T) { + linkToTransferToOnRamp := big.NewInt(1e18) + + // transfer some link to onramp to pay the nops + _, err := ccipTH.Source.LinkToken.Transfer(ccipTH.Source.User, ccipTH.Source.OnRamp.Address(), linkToTransferToOnRamp) + require.NoError(t, err) + ccipTH.Source.Chain.Commit() + + srcBalReq := []testhelpers.BalanceReq{ + { + Name: testhelpers.Sender, + Addr: ccipTH.Source.User.From, + Getter: ccipTH.GetSourceWrappedTokenBalance, + }, + { + Name: testhelpers.OnRampNative, + Addr: ccipTH.Source.OnRamp.Address(), + Getter: ccipTH.GetSourceWrappedTokenBalance, + }, + { + Name: testhelpers.OnRamp, + Addr: 
ccipTH.Source.OnRamp.Address(), + Getter: ccipTH.GetSourceLinkBalance, + }, + { + Name: testhelpers.SourceRouter, + Addr: ccipTH.Source.Router.Address(), + Getter: ccipTH.GetSourceWrappedTokenBalance, + }, + } + + var nopsAndWeights []evm_2_evm_onramp.EVM2EVMOnRampNopAndWeight + var totalWeight uint16 + nodes := ccipTH.Nodes + for i := range nodes { + // For now set the transmitter addresses to be the same as the payee addresses + nodes[i].PaymentReceiver = nodes[i].Transmitter + nopsAndWeights = append(nopsAndWeights, evm_2_evm_onramp.EVM2EVMOnRampNopAndWeight{ + Nop: nodes[i].PaymentReceiver, + Weight: 5, + }) + totalWeight += 5 + srcBalReq = append(srcBalReq, testhelpers.BalanceReq{ + Name: fmt.Sprintf("node %d", i), + Addr: nodes[i].PaymentReceiver, + Getter: ccipTH.GetSourceLinkBalance, + }) + } + srcBalances, err := testhelpers.GetBalances(t, srcBalReq) + require.NoError(t, err) + + // set nops on the onramp + ccipTH.SetNopsOnRamp(t, nopsAndWeights) + + // send a message + extraArgs, err := testhelpers.GetEVMExtraArgsV2(big.NewInt(200_000), test.allowOutOfOrderExecution) + require.NoError(t, err) + + // FeeToken is empty, indicating it should use native token + msg := router.ClientEVM2AnyMessage{ + Receiver: testhelpers.MustEncodeAddress(t, ccipTH.Dest.Receivers[1].Receiver.Address()), + Data: []byte("hello"), + TokenAmounts: []router.ClientEVMTokenAmount{}, + ExtraArgs: extraArgs, + FeeToken: common.Address{}, + } + fee, err := ccipTH.Source.Router.GetFee(nil, testhelpers.DestChainSelector, msg) + require.NoError(t, err) + + // verify message is sent + ccipTH.Source.User.Value = fee + ccipTH.SendRequest(t, msg) + ccipTH.Source.User.Value = nil + ccipTH.AllNodesHaveReqSeqNum(t, currentSeqNum) + ccipTH.EventuallyReportCommitted(t, currentSeqNum) + + executionLogs := ccipTH.AllNodesHaveExecutedSeqNums(t, currentSeqNum, currentSeqNum) + assert.Len(t, executionLogs, 1) + ccipTH.AssertExecState(t, executionLogs[0], testhelpers.ExecutionStateSuccess) + currentSeqNum++ + if test.allowOutOfOrderExecution { + currentNonce = uint64(currentSeqNum) + } + + // get the nop fee + nopFee, err := ccipTH.Source.OnRamp.GetNopFeesJuels(nil) + require.NoError(t, err) + t.Log("nopFee", nopFee) + + // withdraw fees and verify there is still fund left for nop payment + _, err = ccipTH.Source.OnRamp.WithdrawNonLinkFees( + ccipTH.Source.User, + ccipTH.Source.WrappedNative.Address(), + ccipTH.Source.User.From, + ) + require.NoError(t, err) + ccipTH.Source.Chain.Commit() + + // pay nops + _, err = ccipTH.Source.OnRamp.PayNops(ccipTH.Source.User) + require.NoError(t, err) + ccipTH.Source.Chain.Commit() + + srcBalanceAssertions := []testhelpers.BalanceAssertion{ + { + // Onramp should not have any balance left in wrapped native + Name: testhelpers.OnRampNative, + Address: ccipTH.Source.OnRamp.Address(), + Expected: big.NewInt(0).String(), + Getter: ccipTH.GetSourceWrappedTokenBalance, + }, + { + // Onramp should have the remaining link after paying nops + Name: testhelpers.OnRamp, + Address: ccipTH.Source.OnRamp.Address(), + Expected: new(big.Int).Sub(srcBalances[testhelpers.OnRamp], nopFee).String(), + Getter: ccipTH.GetSourceLinkBalance, + }, + { + Name: testhelpers.SourceRouter, + Address: ccipTH.Source.Router.Address(), + Expected: srcBalances[testhelpers.SourceRouter].String(), + Getter: ccipTH.GetSourceWrappedTokenBalance, + }, + // onRamp's balance (of previously sent fee during message sending) should have been transferred to + // the owner as a result of WithdrawNonLinkFees + { + Name: 
testhelpers.Sender, + Address: ccipTH.Source.User.From, + Expected: fee.String(), + Getter: ccipTH.GetSourceWrappedTokenBalance, + }, + } + + // the nodes should be paid according to the weights assigned + for i, node := range nodes { + paymentWeight := float64(nopsAndWeights[i].Weight) / float64(totalWeight) + paidInFloat := paymentWeight * float64(nopFee.Int64()) + paid, _ := new(big.Float).SetFloat64(paidInFloat).Int64() + bal := new(big.Int).Add( + new(big.Int).SetInt64(paid), + srcBalances[fmt.Sprintf("node %d", i)]).String() + srcBalanceAssertions = append(srcBalanceAssertions, testhelpers.BalanceAssertion{ + Name: fmt.Sprintf("node %d", i), + Address: node.PaymentReceiver, + Expected: bal, + Getter: ccipTH.GetSourceLinkBalance, + }) + } + ccipTH.AssertBalances(t, srcBalanceAssertions) + }) + + // Keep on sending a bunch of messages + // In the meantime update onchainConfig with new price registry address + // Verify if the jobs can pick up updated config + // Verify if all the messages are sent + t.Run("config change or price registry update while requests are inflight", func(t *testing.T) { + gasLimit := big.NewInt(200_003) // prime number + tokenAmount := big.NewInt(100) + msgWg := &sync.WaitGroup{} + msgWg.Add(1) + ticker := time.NewTicker(100 * time.Millisecond) + defer ticker.Stop() + startSeq := currentSeqNum + endSeq := currentSeqNum + 20 + + // send message with the old configs + ccipTH.SendMessage(t, gasLimit, tokenAmount, ccipTH.Dest.Receivers[0].Receiver.Address()) + ccipTH.Source.Chain.Commit() + + go func(ccipContracts testhelpers.CCIPContracts, currentSeqNum int) { + seqNumber := currentSeqNum + 1 + defer msgWg.Done() + for { + <-ticker.C // wait for ticker + t.Logf("sending request for seqnum %d", seqNumber) + ccipContracts.SendMessage(t, gasLimit, tokenAmount, ccipTH.Dest.Receivers[0].Receiver.Address()) + ccipContracts.Source.Chain.Commit() + seqNumber++ + if seqNumber == endSeq { + return + } + } + }(ccipTH.CCIPContracts, currentSeqNum) + + ccipTH.DeployNewPriceRegistry(t) + commitOnchainConfig := ccipTH.CreateDefaultCommitOnchainConfig(t) + commitOffchainConfig := ccipTH.CreateDefaultCommitOffchainConfig(t) + execOnchainConfig := ccipTH.CreateDefaultExecOnchainConfig(t) + execOffchainConfig := ccipTH.CreateDefaultExecOffchainConfig(t) + + ccipTH.SetupOnchainConfig(t, commitOnchainConfig, commitOffchainConfig, execOnchainConfig, execOffchainConfig) + + // wait for all requests to be complete + msgWg.Wait() + for i := startSeq; i < endSeq; i++ { + ccipTH.AllNodesHaveReqSeqNum(t, i) + ccipTH.EventuallyReportCommitted(t, i) + + executionLogs := ccipTH.AllNodesHaveExecutedSeqNums(t, i, i) + assert.Len(t, executionLogs, 1) + ccipTH.AssertExecState(t, executionLogs[0], testhelpers.ExecutionStateSuccess) + } + + for i, node := range ccipTH.Nodes { + t.Logf("verifying node %d", i) + node.EventuallyNodeUsesNewCommitConfig(t, ccipTH, ccipdata.CommitOnchainConfig{ + PriceRegistry: ccipTH.Dest.PriceRegistry.Address(), + }) + node.EventuallyNodeUsesNewExecConfig(t, ccipTH, v1_5_0.ExecOnchainConfig{ + PermissionLessExecutionThresholdSeconds: testhelpers.PermissionLessExecutionThresholdSeconds, + Router: ccipTH.Dest.Router.Address(), + PriceRegistry: ccipTH.Dest.PriceRegistry.Address(), + MaxDataBytes: 1e5, + MaxNumberOfTokensPerMsg: 5, + MaxPoolReleaseOrMintGas: 200_000, + MaxTokenTransferGas: 100_000, + }) + node.EventuallyNodeUsesUpdatedPriceRegistry(t, ccipTH) + } + currentSeqNum = endSeq + if test.allowOutOfOrderExecution { + currentNonce = uint64(currentSeqNum) + } + }) + 
}) + } +} diff --git a/core/services/ocr2/plugins/ccip/internal/cache/autosync.go b/core/services/ocr2/plugins/ccip/internal/cache/autosync.go new file mode 100644 index 00000000000..690b4dd05b9 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/cache/autosync.go @@ -0,0 +1,141 @@ +package cache + +import ( + "context" + "database/sql" + "fmt" + "sync" + + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" + evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types" +) + +type AutoSync[T any] interface { + Get(ctx context.Context, syncFunc func(ctx context.Context) (T, error)) (T, error) +} + +// LogpollerEventsBased IMPORTANT: Cache refresh relies on the events that are finalized. +// This introduces some delay between the event onchain occurrence and cache refreshing. +// This is intentional, because we want to prevent handling reorgs within the cache. +type LogpollerEventsBased[T any] struct { + logPoller logpoller.LogPoller + observedEvents []common.Hash + address common.Address + + lock *sync.RWMutex + value T + lastChangeBlock int64 +} + +func NewLogpollerEventsBased[T any]( + lp logpoller.LogPoller, + observedEvents []common.Hash, + contractAddress common.Address, +) *LogpollerEventsBased[T] { + var emptyValue T + return &LogpollerEventsBased[T]{ + logPoller: lp, + observedEvents: observedEvents, + address: contractAddress, + + lock: &sync.RWMutex{}, + value: emptyValue, + lastChangeBlock: 0, + } +} + +func (c *LogpollerEventsBased[T]) Get(ctx context.Context, syncFunc func(ctx context.Context) (T, error)) (T, error) { + var empty T + + hasExpired, newEventBlockNum, err := c.hasExpired(ctx) + if err != nil { + return empty, fmt.Errorf("check cache expiration: %w", err) + } + + if hasExpired { + var latestValue T + latestValue, err = syncFunc(ctx) + if err != nil { + return empty, fmt.Errorf("sync func: %w", err) + } + + c.set(latestValue, newEventBlockNum) + return latestValue, nil + } + + cachedValue := c.get() + if err != nil { + return empty, fmt.Errorf("get cached value: %w", err) + } + + c.lock.Lock() + if newEventBlockNum > c.lastChangeBlock { + // update the most recent block number + // that way the scanning window is shorter in the next run + c.lastChangeBlock = newEventBlockNum + } + c.lock.Unlock() + + return cachedValue, nil +} + +func (c *LogpollerEventsBased[T]) hasExpired(ctx context.Context) (expired bool, blockOfLatestEvent int64, err error) { + c.lock.RLock() + blockOfCurrentValue := c.lastChangeBlock + c.lock.RUnlock() + + // NOTE: latest block should be fetched before LatestBlockByEventSigsAddrsWithConfs + // Otherwise there might be new events between LatestBlockByEventSigsAddrsWithConfs and + // latestBlock which will be missed. + latestBlock, err := c.logPoller.LatestBlock(ctx) + latestFinalizedBlock := int64(0) + if err != nil && !errors.Is(err, sql.ErrNoRows) { + return false, 0, fmt.Errorf("get latest log poller block: %w", err) + } else if err == nil { + // Since we know that we have all the events till latestBlock.FinalizedBlockNumber + // we want to return the block number instead of the block of the latest event + // for reducing the scan window on the next call. 
+		latestFinalizedBlock = latestBlock.FinalizedBlockNumber
+	}
+
+	if blockOfCurrentValue == 0 {
+		return true, latestFinalizedBlock, nil
+	}
+
+	blockOfLatestEvent, err = c.logPoller.LatestBlockByEventSigsAddrsWithConfs(
+		ctx,
+		blockOfCurrentValue,
+		c.observedEvents,
+		[]common.Address{c.address},
+		evmtypes.Finalized,
+	)
+	if err != nil {
+		return false, 0, fmt.Errorf("get latest events from lp: %w", err)
+	}
+
+	if blockOfLatestEvent > latestFinalizedBlock {
+		latestFinalizedBlock = blockOfLatestEvent
+	}
+	return blockOfLatestEvent > blockOfCurrentValue, latestFinalizedBlock, nil
+}
+
+func (c *LogpollerEventsBased[T]) set(value T, blockNum int64) {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	if c.lastChangeBlock > blockNum {
+		return
+	}
+
+	c.value = value
+	c.lastChangeBlock = blockNum
+}
+
+func (c *LogpollerEventsBased[T]) get() T {
+	c.lock.RLock()
+	defer c.lock.RUnlock()
+	return c.value
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/cache/autosync_test.go b/core/services/ocr2/plugins/ccip/internal/cache/autosync_test.go
new file mode 100644
index 00000000000..0babfeb421d
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/cache/autosync_test.go
@@ -0,0 +1,128 @@
+package cache_test
+
+import (
+	"context"
+	"testing"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/mock"
+
+	"github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller"
+	lpmocks "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller/mocks"
+	evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types"
+	"github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils"
+	"github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
+	"github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/cache"
+)
+
+func TestLogpollerEventsBased(t *testing.T) {
+	ctx := testutils.Context(t)
+	lp := lpmocks.NewLogPoller(t)
+	observedEvents := []common.Hash{
+		utils.Bytes32FromString("event a"),
+		utils.Bytes32FromString("event b"),
+	}
+	contractAddress := utils.RandomAddress()
+	c := cache.NewLogpollerEventsBased[[]int](lp, observedEvents, contractAddress)
+
+	testRounds := []struct {
+		logPollerLatestBlock int64 // latest block that logpoller parsed
+		latestEventBlock     int64 // latest block that an event was seen
+		stateLatestBlock     int64 // block of the current cached value (before run)
+		shouldSync           bool  // whether we expect sync to happen in this round
+		syncData             []int // data returned after sync
+		expData              []int // expected data that cache will return
+	}{
+		{
+			// this is the first 'Get' call to our cache, an event was seen at block 800
+			// and now log poller has reached block 1000.
+ logPollerLatestBlock: 1000, + latestEventBlock: 800, + stateLatestBlock: 0, + shouldSync: true, + syncData: []int{1, 2, 3}, + expData: []int{1, 2, 3}, + }, + { + // log poller moved a few blocks and there weren't any new events + logPollerLatestBlock: 1010, + latestEventBlock: 800, + stateLatestBlock: 1000, + shouldSync: false, + expData: []int{1, 2, 3}, + }, + { + // log poller moved a few blocks and there was a new event + logPollerLatestBlock: 1020, + latestEventBlock: 1020, + stateLatestBlock: 1010, + shouldSync: true, + syncData: []int{111}, + expData: []int{111}, + }, + { + // log poller moved a few more blocks and there was another new event + logPollerLatestBlock: 1050, + latestEventBlock: 1040, + stateLatestBlock: 1020, + shouldSync: true, + syncData: []int{222}, + expData: []int{222}, + }, + { + // log poller moved a few more blocks and there wasn't any new event + logPollerLatestBlock: 1100, + latestEventBlock: 1040, + stateLatestBlock: 1050, + shouldSync: false, + expData: []int{222}, + }, + { + // log poller moved a few more blocks and there wasn't any new event + logPollerLatestBlock: 1300, + latestEventBlock: 1040, + stateLatestBlock: 1100, + shouldSync: false, + expData: []int{222}, + }, + { + // log poller moved a few more blocks and there was a new event + // more recent than latest block (for whatever internal reason) + logPollerLatestBlock: 1300, + latestEventBlock: 1305, + stateLatestBlock: 1300, + shouldSync: true, + syncData: []int{666}, + expData: []int{666}, + }, + { + // log poller moved a few more blocks and there wasn't any new event + logPollerLatestBlock: 1300, + latestEventBlock: 1305, + stateLatestBlock: 1305, // <-- that's what we are testing in this round + shouldSync: false, + expData: []int{666}, + }, + } + + for _, round := range testRounds { + lp.On("LatestBlock", mock.Anything). + Return(logpoller.LogPollerBlock{FinalizedBlockNumber: round.logPollerLatestBlock}, nil).Once() + + if round.stateLatestBlock > 0 { + lp.On( + "LatestBlockByEventSigsAddrsWithConfs", + mock.Anything, + round.stateLatestBlock, + observedEvents, + []common.Address{contractAddress}, + evmtypes.Finalized, + ).Return(round.latestEventBlock, nil).Once() + } + + data, err := c.Get(ctx, func(ctx context.Context) ([]int, error) { return round.syncData, nil }) + assert.NoError(t, err) + assert.Equal(t, round.expData, data) + } +} diff --git a/core/services/ocr2/plugins/ccip/internal/cache/chain_health.go b/core/services/ocr2/plugins/ccip/internal/cache/chain_health.go new file mode 100644 index 00000000000..00f90615eb2 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/cache/chain_health.go @@ -0,0 +1,273 @@ +package cache + +import ( + "context" + "sync" + "time" + + "github.com/patrickmn/go-cache" + "github.com/pkg/errors" + "golang.org/x/sync/errgroup" + + "github.com/smartcontractkit/chainlink-common/pkg/services" + + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/job" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata" +) + +// ChainHealthcheck checks the health of the both source and destination chain. +// Based on the values returned, CCIP can make a decision to stop or continue processing messages. +// There are four things verified here: +// 1. Source chain is healthy (this is verified by checking if source LogPoller saw finality violation) +// 2. Dest chain is healthy (this is verified by checking if destination LogPoller saw finality violation) +// 3. 
CommitStore is down (this is verified by checking if CommitStore is down and destination RMN is not cursed) +// 4. Source chain is cursed (this is verified by checking if source RMN is not cursed) +// +// Whenever any of the above checks fail, the chain is considered unhealthy and the CCIP should stop +// processing messages. Additionally, when the chain is unhealthy, this information is considered "sticky" +// and is cached for a certain period of time based on defaultGlobalStatusExpirationDuration. +// This may lead to some false-positives, but in this case we want to be extra cautious and avoid executing any reorged messages. +// +// Additionally, to reduce the number of calls to the RPC, we refresh RMN state in the background based on defaultRMNStateRefreshInterval +type ChainHealthcheck interface { + job.ServiceCtx + IsHealthy(ctx context.Context) (bool, error) +} + +const ( + // RMN curse state is refreshed every 10 seconds + defaultRMNStateRefreshInterval = 10 * time.Second + // Whenever we mark the chain as unhealthy, we cache this information for 30 minutes + defaultGlobalStatusExpirationDuration = 30 * time.Minute + + globalStatusKey = "globalStatus" + rmnStatusKey = "rmnCurseCheck" +) + +type chainHealthcheck struct { + cache *cache.Cache + globalStatusKey string + rmnStatusKey string + globalStatusExpiration time.Duration + rmnStatusRefreshInterval time.Duration + + lggr logger.Logger + onRamp ccipdata.OnRampReader + commitStore ccipdata.CommitStoreReader + + services.StateMachine + wg *sync.WaitGroup + backgroundCtx context.Context //nolint:containedctx + backgroundCancel context.CancelFunc +} + +func NewChainHealthcheck(lggr logger.Logger, onRamp ccipdata.OnRampReader, commitStore ccipdata.CommitStoreReader) *chainHealthcheck { + ctx, cancel := context.WithCancel(context.Background()) + + ch := &chainHealthcheck{ + // Different keys use different expiration times, so we don't need to worry about the default value + cache: cache.New(cache.NoExpiration, 0), + rmnStatusKey: rmnStatusKey, + globalStatusKey: globalStatusKey, + globalStatusExpiration: defaultGlobalStatusExpirationDuration, + rmnStatusRefreshInterval: defaultRMNStateRefreshInterval, + + lggr: lggr, + onRamp: onRamp, + commitStore: commitStore, + + wg: new(sync.WaitGroup), + backgroundCtx: ctx, + backgroundCancel: cancel, + } + return ch +} + +// newChainHealthcheckWithCustomEviction is used for testing purposes only. 
It doesn't start background worker +func newChainHealthcheckWithCustomEviction(lggr logger.Logger, onRamp ccipdata.OnRampReader, commitStore ccipdata.CommitStoreReader, globalStatusDuration time.Duration, rmnStatusRefreshInterval time.Duration) *chainHealthcheck { + ctx, cancel := context.WithCancel(context.Background()) + + return &chainHealthcheck{ + cache: cache.New(rmnStatusRefreshInterval, 0), + rmnStatusKey: rmnStatusKey, + globalStatusKey: globalStatusKey, + globalStatusExpiration: globalStatusDuration, + rmnStatusRefreshInterval: rmnStatusRefreshInterval, + + lggr: lggr, + onRamp: onRamp, + commitStore: commitStore, + + wg: new(sync.WaitGroup), + backgroundCtx: ctx, + backgroundCancel: cancel, + } +} + +type rmnResponse struct { + healthy bool + err error +} + +func (c *chainHealthcheck) IsHealthy(ctx context.Context) (bool, error) { + // Verify if flag is raised to indicate that the chain is not healthy + // If set to false then immediately return false without checking the chain + if cachedValue, found := c.cache.Get(c.globalStatusKey); found { + healthy, ok := cachedValue.(bool) + // If cached value is properly casted to bool and not healthy it means the sticky flag is raised + // and should be returned immediately + if !ok { + c.lggr.Criticalw("Failed to cast cached value to sticky healthcheck", "value", cachedValue) + } else if ok && !healthy { + return false, nil + } + } + + // These checks are cheap and don't require any communication with the database or RPC + if healthy, err := c.checkIfReadersAreHealthy(ctx); err != nil { + return false, err + } else if !healthy { + c.markStickyStatusUnhealthy() + return healthy, nil + } + + // First call might initialize cache if it's not initialized yet. Otherwise, it will use the cached value + if healthy, err := c.checkIfRMNsAreHealthy(ctx); err != nil { + return false, err + } else if !healthy { + c.markStickyStatusUnhealthy() + return healthy, nil + } + return true, nil +} + +func (c *chainHealthcheck) Start(context.Context) error { + return c.StateMachine.StartOnce("ChainHealthcheck", func() error { + c.lggr.Info("Starting ChainHealthcheck") + c.wg.Add(1) + c.run() + return nil + }) +} + +func (c *chainHealthcheck) Close() error { + return c.StateMachine.StopOnce("ChainHealthcheck", func() error { + c.lggr.Info("Closing ChainHealthcheck") + c.backgroundCancel() + c.wg.Wait() + return nil + }) +} + +func (c *chainHealthcheck) run() { + ticker := time.NewTicker(c.rmnStatusRefreshInterval) + go func() { + defer c.wg.Done() + // Refresh the RMN state immediately after starting the background refresher + _, _ = c.refresh(c.backgroundCtx) + + for { + select { + case <-c.backgroundCtx.Done(): + return + case <-ticker.C: + _, err := c.refresh(c.backgroundCtx) + if err != nil { + c.lggr.Errorw("Failed to refresh RMN state in the background", "err", err) + } + } + } + }() +} + +func (c *chainHealthcheck) refresh(ctx context.Context) (bool, error) { + healthy, err := c.fetchRMNCurseState(ctx) + c.cache.Set( + c.rmnStatusKey, + rmnResponse{healthy, err}, + // Cache the value for 3 refresh intervals, this is just a defensive approach + // that will enforce the RMN state to be refreshed in case of bg worker hiccup (it should never happen) + 3*c.rmnStatusRefreshInterval, + ) + return healthy, err +} + +// checkIfReadersAreHealthy checks if the source and destination chains are healthy by calling underlying LogPoller +// These calls are cheap because they don't require any communication with the database or RPC, so we don't have +// to cache the 
result of these calls. +func (c *chainHealthcheck) checkIfReadersAreHealthy(ctx context.Context) (bool, error) { + sourceChainHealthy, err := c.onRamp.IsSourceChainHealthy(ctx) + if err != nil { + return false, errors.Wrap(err, "onRamp IsSourceChainHealthy errored") + } + + destChainHealthy, err := c.commitStore.IsDestChainHealthy(ctx) + if err != nil { + return false, errors.Wrap(err, "commitStore IsDestChainHealthy errored") + } + + if !sourceChainHealthy || !destChainHealthy { + c.lggr.Criticalw( + "Lane processing is stopped because source or destination chain is reported unhealthy", + "sourceChainHealthy", sourceChainHealthy, + "destChainHealthy", destChainHealthy, + ) + } + return sourceChainHealthy && destChainHealthy, nil +} + +func (c *chainHealthcheck) checkIfRMNsAreHealthy(ctx context.Context) (bool, error) { + if cachedValue, found := c.cache.Get(c.rmnStatusKey); found { + rmn := cachedValue.(rmnResponse) + return rmn.healthy, rmn.err + } + + // If the value is not found in the cache, fetch the RMN curse state in a sync manner for the first time + c.lggr.Info("Refreshing RMN state from the plugin routine, this should happen only once per lane during boot") + return c.refresh(ctx) +} + +func (c *chainHealthcheck) markStickyStatusUnhealthy() { + c.cache.Set(c.globalStatusKey, false, c.globalStatusExpiration) +} + +func (c *chainHealthcheck) fetchRMNCurseState(ctx context.Context) (bool, error) { + var ( + eg = new(errgroup.Group) + isCommitStoreDown bool + isSourceCursed bool + ) + + eg.Go(func() error { + var err error + isCommitStoreDown, err = c.commitStore.IsDown(ctx) + if err != nil { + return errors.Wrap(err, "commitStore isDown check errored") + } + return nil + }) + + eg.Go(func() error { + var err error + isSourceCursed, err = c.onRamp.IsSourceCursed(ctx) + if err != nil { + return errors.Wrap(err, "onRamp isSourceCursed errored") + } + return nil + }) + + if err := eg.Wait(); err != nil { + return false, err + } + + if isCommitStoreDown || isSourceCursed { + c.lggr.Criticalw( + "Lane processing is stopped because source chain is cursed or CommitStore is down", + "isCommitStoreDown", isCommitStoreDown, + "isSourceCursed", isSourceCursed, + ) + return false, nil + } + return true, nil +} diff --git a/core/services/ocr2/plugins/ccip/internal/cache/chain_health_test.go b/core/services/ocr2/plugins/ccip/internal/cache/chain_health_test.go new file mode 100644 index 00000000000..ccdc7c4b22f --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/cache/chain_health_test.go @@ -0,0 +1,303 @@ +package cache + +import ( + "context" + "errors" + "fmt" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/chainlink-common/pkg/utils/tests" + + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/mocks" +) + +func Test_RMNStateCaching(t *testing.T) { + ctx := tests.Context(t) + lggr := logger.TestLogger(t) + mockCommitStore := mocks.NewCommitStoreReader(t) + mockOnRamp := mocks.NewOnRampReader(t) + + chainState := newChainHealthcheckWithCustomEviction(lggr, mockOnRamp, mockCommitStore, 10*time.Hour, 10*time.Hour) + + // Chain is not cursed and healthy + mockCommitStore.On("IsDown", ctx).Return(false, nil).Once() + mockCommitStore.On("IsDestChainHealthy", ctx).Return(true, nil).Maybe() + 
mockOnRamp.On("IsSourceCursed", ctx).Return(false, nil).Once() + mockOnRamp.On("IsSourceChainHealthy", ctx).Return(true, nil).Maybe() + healthy, err := chainState.IsHealthy(ctx) + assert.NoError(t, err) + assert.True(t, healthy) + + // Chain is cursed, but cache is stale + mockCommitStore.On("IsDown", ctx).Return(true, nil).Once() + mockOnRamp.On("IsSourceCursed", ctx).Return(true, nil).Once() + healthy, err = chainState.IsHealthy(ctx) + assert.NoError(t, err) + assert.True(t, healthy) + + // Enforce cache refresh + _, err = chainState.refresh(ctx) + assert.NoError(t, err) + + healthy, err = chainState.IsHealthy(ctx) + assert.Nil(t, err) + assert.False(t, healthy) + + // Chain is not cursed, but previous curse should be "sticky" even when force refreshing + mockCommitStore.On("IsDown", ctx).Return(false, nil).Maybe() + mockOnRamp.On("IsSourceCursed", ctx).Return(false, nil).Maybe() + // Enforce cache refresh + _, err = chainState.refresh(ctx) + assert.NoError(t, err) + + healthy, err = chainState.IsHealthy(ctx) + assert.Nil(t, err) + assert.False(t, healthy) +} + +func Test_ChainStateIsCached(t *testing.T) { + ctx := tests.Context(t) + lggr := logger.TestLogger(t) + mockCommitStore := mocks.NewCommitStoreReader(t) + mockOnRamp := mocks.NewOnRampReader(t) + + chainState := newChainHealthcheckWithCustomEviction(lggr, mockOnRamp, mockCommitStore, 10*time.Hour, 10*time.Hour) + + // Chain is not cursed and healthy + mockCommitStore.On("IsDown", ctx).Return(false, nil).Maybe() + mockCommitStore.On("IsDestChainHealthy", ctx).Return(true, nil).Once() + mockOnRamp.On("IsSourceCursed", ctx).Return(false, nil).Maybe() + mockOnRamp.On("IsSourceChainHealthy", ctx).Return(true, nil).Once() + + _, err := chainState.refresh(ctx) + assert.NoError(t, err) + + healthy, err := chainState.IsHealthy(ctx) + assert.NoError(t, err) + assert.True(t, healthy) + + // Chain is not healthy + mockCommitStore.On("IsDestChainHealthy", ctx).Return(false, nil).Once() + mockOnRamp.On("IsSourceChainHealthy", ctx).Return(false, nil).Once() + _, err = chainState.refresh(ctx) + assert.NoError(t, err) + + healthy, err = chainState.IsHealthy(ctx) + assert.NoError(t, err) + assert.False(t, healthy) + + // Previous value is returned + mockCommitStore.On("IsDestChainHealthy", ctx).Return(true, nil).Maybe() + mockOnRamp.On("IsSourceChainHealthy", ctx).Return(true, nil).Maybe() + + _, err = chainState.refresh(ctx) + assert.NoError(t, err) + + healthy, err = chainState.IsHealthy(ctx) + assert.NoError(t, err) + assert.False(t, healthy) +} + +func Test_ChainStateIsHealthy(t *testing.T) { + testCases := []struct { + name string + commitStoreDown bool + commitStoreErr error + onRampCursed bool + onRampErr error + sourceChainUnhealthy bool + sourceChainErr error + destChainUnhealthy bool + destChainErr error + + expectedState bool + expectedErr bool + }{ + { + name: "all components healthy", + expectedState: true, + }, + { + name: "CommitStore is down", + commitStoreDown: true, + expectedState: false, + }, + { + name: "CommitStore error", + commitStoreErr: errors.New("commit store error"), + expectedErr: true, + }, + { + name: "OnRamp is cursed", + onRampCursed: true, + expectedState: false, + }, + { + name: "OnRamp error", + onRampErr: errors.New("onramp error"), + expectedErr: true, + }, + { + name: "Source chain is unhealthy", + sourceChainUnhealthy: true, + expectedState: false, + }, + { + name: "Source chain error", + sourceChainErr: errors.New("source chain error"), + expectedErr: true, + }, + { + name: "Destination chain is 
unhealthy", + destChainUnhealthy: true, + expectedState: false, + }, + { + name: "Destination chain error", + destChainErr: errors.New("destination chain error"), + expectedErr: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ctx := tests.Context(t) + mockCommitStore := mocks.NewCommitStoreReader(t) + mockOnRamp := mocks.NewOnRampReader(t) + + mockCommitStore.On("IsDown", ctx).Return(tc.commitStoreDown, tc.commitStoreErr).Maybe() + mockCommitStore.On("IsDestChainHealthy", ctx).Return(!tc.destChainUnhealthy, tc.destChainErr).Maybe() + mockOnRamp.On("IsSourceCursed", ctx).Return(tc.onRampCursed, tc.onRampErr).Maybe() + mockOnRamp.On("IsSourceChainHealthy", ctx).Return(!tc.sourceChainUnhealthy, tc.sourceChainErr).Maybe() + + chainState := newChainHealthcheckWithCustomEviction(logger.TestLogger(t), mockOnRamp, mockCommitStore, 10*time.Hour, 10*time.Hour) + + healthy, err := chainState.IsHealthy(ctx) + + if tc.expectedErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, tc.expectedState, healthy) + } + }) + } +} + +func Test_RefreshingInBackground(t *testing.T) { + mockCommitStore := newCommitStoreWrapper(t, true, nil) + mockCommitStore.CommitStoreReader.On("IsDestChainHealthy", mock.Anything).Return(true, nil).Maybe() + + mockOnRamp := newOnRampWrapper(t, true, nil) + mockOnRamp.OnRampReader.On("IsSourceChainHealthy", mock.Anything).Return(true, nil).Maybe() + + chainState := newChainHealthcheckWithCustomEviction( + logger.TestLogger(t), + mockOnRamp, + mockCommitStore, + 10*time.Microsecond, + 10*time.Microsecond, + ) + require.NoError(t, chainState.Start(tests.Context(t))) + + // All healthy + assertHealthy(t, chainState, true) + + // Commit store not healthy + mockCommitStore.set(false, nil) + assertHealthy(t, chainState, false) + + // Commit store error + mockCommitStore.set(false, fmt.Errorf("commit store error")) + assertError(t, chainState) + + // Commit store is back + mockCommitStore.set(true, nil) + assertHealthy(t, chainState, true) + + // OnRamp not healthy + mockOnRamp.set(false, nil) + assertHealthy(t, chainState, false) + + // OnRamp error + mockOnRamp.set(false, fmt.Errorf("onramp error")) + assertError(t, chainState) + + // All back in healthy state + mockOnRamp.set(true, nil) + assertHealthy(t, chainState, true) + + require.NoError(t, chainState.Close()) +} + +func assertHealthy(t *testing.T, ch *chainHealthcheck, expected bool) { + assert.Eventually(t, func() bool { + healthy, err := ch.IsHealthy(testutils.Context(t)) + return err == nil && healthy == expected + }, testutils.WaitTimeout(t), testutils.TestInterval) +} + +func assertError(t *testing.T, ch *chainHealthcheck) { + assert.Eventually(t, func() bool { + _, err := ch.IsHealthy(testutils.Context(t)) + return err != nil + }, testutils.WaitTimeout(t), testutils.TestInterval) +} + +type fakeStatusWrapper struct { + *mocks.CommitStoreReader + *mocks.OnRampReader + + healthy bool + err error + mu *sync.Mutex +} + +func newCommitStoreWrapper(t *testing.T, healthy bool, err error) *fakeStatusWrapper { + return &fakeStatusWrapper{ + CommitStoreReader: mocks.NewCommitStoreReader(t), + healthy: healthy, + err: err, + mu: new(sync.Mutex), + } +} + +func newOnRampWrapper(t *testing.T, healthy bool, err error) *fakeStatusWrapper { + return &fakeStatusWrapper{ + OnRampReader: mocks.NewOnRampReader(t), + healthy: healthy, + err: err, + mu: new(sync.Mutex), + } +} + +func (f *fakeStatusWrapper) IsDown(context.Context) (bool, error) { + f.mu.Lock() + defer 
f.mu.Unlock() + return !f.healthy, f.err +} + +func (f *fakeStatusWrapper) IsSourceCursed(context.Context) (bool, error) { + f.mu.Lock() + defer f.mu.Unlock() + return !f.healthy, f.err +} + +func (f *fakeStatusWrapper) Close() error { + return nil +} + +func (f *fakeStatusWrapper) set(healthy bool, err error) { + f.mu.Lock() + defer f.mu.Unlock() + f.healthy = healthy + f.err = err +} diff --git a/core/services/ocr2/plugins/ccip/internal/cache/commit_roots.go b/core/services/ocr2/plugins/ccip/internal/cache/commit_roots.go new file mode 100644 index 00000000000..5f8bd5edc56 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/cache/commit_roots.go @@ -0,0 +1,243 @@ +package cache + +import ( + "context" + "slices" + "sync" + "time" + + "github.com/patrickmn/go-cache" + orderedmap "github.com/wk8/go-ordered-map/v2" + + "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + + "github.com/smartcontractkit/chainlink/v2/core/logger" +) + +const ( + // EvictionGracePeriod defines how long after the messageVisibilityInterval a root is still kept in the cache + EvictionGracePeriod = 1 * time.Hour + // CleanupInterval defines how often roots cache is scanned to evict stale roots + CleanupInterval = 30 * time.Minute +) + +type CommitsRootsCache interface { + RootsEligibleForExecution(ctx context.Context) ([]ccip.CommitStoreReport, error) + MarkAsExecuted(merkleRoot [32]byte) + Snooze(merkleRoot [32]byte) +} + +func NewCommitRootsCache( + lggr logger.Logger, + reader ccip.CommitStoreReader, + messageVisibilityInterval time.Duration, + rootSnoozeTime time.Duration, +) CommitsRootsCache { + return newCommitRootsCache( + lggr, + reader, + messageVisibilityInterval, + rootSnoozeTime, + CleanupInterval, + EvictionGracePeriod, + ) +} + +func newCommitRootsCache( + lggr logger.Logger, + reader ccip.CommitStoreReader, + messageVisibilityInterval time.Duration, + rootSnoozeTime time.Duration, + cleanupInterval time.Duration, + evictionGracePeriod time.Duration, +) *commitRootsCache { + snoozedRoots := cache.New(rootSnoozeTime, cleanupInterval) + executedRoots := cache.New(messageVisibilityInterval+evictionGracePeriod, cleanupInterval) + + return &commitRootsCache{ + lggr: lggr, + reader: reader, + rootSnoozeTime: rootSnoozeTime, + finalizedRoots: orderedmap.New[string, ccip.CommitStoreReportWithTxMeta](), + executedRoots: executedRoots, + snoozedRoots: snoozedRoots, + messageVisibilityInterval: messageVisibilityInterval, + latestFinalizedCommitRootTs: time.Now().Add(-messageVisibilityInterval), + cacheMu: sync.RWMutex{}, + } +} + +type commitRootsCache struct { + lggr logger.Logger + reader ccip.CommitStoreReader + messageVisibilityInterval time.Duration + rootSnoozeTime time.Duration + + // Mutable state. finalizedRoots is thread-safe by default, but updating latestFinalizedCommitRootTs and finalizedRoots requires locking. + cacheMu sync.RWMutex + // finalizedRoots is a map of merkleRoot -> CommitStoreReportWithTxMeta. It stores all the CommitReports that are + // marked as finalized by LogPoller, but not executed yet. Keeping only finalized reports doesn't require any state sync between LP and the cache. + // In order to keep this map size under control, we evict stale items every time we fetch new logs from the database. + // Also, ccip.CommitStoreReportWithTxMeta is a very tiny entity with almost fixed size, so it's not a big deal to keep it in memory. 
+	// In case of high memory footprint caused by storing roots, we can make these even more lightweight by removing token/gas price updates.
+	// Whenever the root is executed (all messages executed and ExecutionStateChange events are finalized), we remove the root from the map.
+	finalizedRoots *orderedmap.OrderedMap[string, ccip.CommitStoreReportWithTxMeta]
+	// snoozedRoots is used only for temporarily snoozing roots. It's a cache with TTL (usually around 5 minutes, but this configuration is set up on chain using rootSnoozeTime)
+	snoozedRoots *cache.Cache
+	// executedRoots is a cache with TTL (usually around 8 hours, but this configuration is set up on chain using messageVisibilityInterval).
+	// We keep executed roots there to make sure we don't accidentally try to reprocess an already executed CommitReport
+	executedRoots *cache.Cache
+	// latestFinalizedCommitRootTs is the timestamp of the latest finalized commit root (youngest in terms of timestamp).
+	// It's used to get only the logs that were considered unfinalized in a previous run.
+	// This way we limit database scans to the minimum and keep polling the "unfinalized" part of the ReportAccepted events queue.
+	latestFinalizedCommitRootTs time.Time
+}
+
+func (r *commitRootsCache) RootsEligibleForExecution(ctx context.Context) ([]ccip.CommitStoreReport, error) {
+	// 1. Fetch all the logs from the database after the latest finalized commit root timestamp.
+	// If this is the first run, it will fetch all the logs based on the messageVisibilityInterval.
+	// Worst case, it will fetch around 480 reports (OCR Commit every 60 seconds (fast chains default) * messageVisibilityInterval set to 8 hours (mainnet default)).
+	// Even with a larger messageVisibilityInterval window (e.g. 24 hours) it should be acceptable (around 1500 logs).
+	// Keep in mind that this potentially heavy operation happens only once during the plugin boot and is no different from the previous implementation.
+	logs, err := r.fetchLogsFromCommitStore(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	// 2. Iterate over the logs and check whether each root is finalized or not. Return finalized and unfinalized reports.
+	// It promotes finalized roots to the finalizedRoots map and evicts stale roots.
+	finalizedReports, unfinalizedReports := r.updateFinalizedRoots(logs)
+
+	// 3. Join finalized commit reports with unfinalized reports and filter out snoozed roots.
+	// Return only the reports that are not snoozed.
+	return r.pickReadyToExecute(finalizedReports, unfinalizedReports), nil
+}
+
+// MarkAsExecuted marks the root as executed. It means that all the messages from the root were executed and the ExecutionStateChange event was finalized.
+// Executed roots are removed from the cache.
+func (r *commitRootsCache) MarkAsExecuted(merkleRoot [32]byte) {
+	prettyMerkleRoot := merkleRootToString(merkleRoot)
+	r.lggr.Infow("Marking root as executed and removing entirely from cache", "merkleRoot", prettyMerkleRoot)
+
+	r.cacheMu.Lock()
+	defer r.cacheMu.Unlock()
+	r.finalizedRoots.Delete(prettyMerkleRoot)
+	r.executedRoots.SetDefault(prettyMerkleRoot, struct{}{})
+}
+
+// Snooze temporarily snoozes the root. It means that the root is not eligible for execution for a certain period of time.
+// Snoozed roots are skipped when calling RootsEligibleForExecution +func (r *commitRootsCache) Snooze(merkleRoot [32]byte) { + prettyMerkleRoot := merkleRootToString(merkleRoot) + r.lggr.Infow("Snoozing root temporarily", "merkleRoot", prettyMerkleRoot, "rootSnoozeTime", r.rootSnoozeTime) + r.snoozedRoots.SetDefault(prettyMerkleRoot, struct{}{}) +} + +func (r *commitRootsCache) isSnoozed(merkleRoot [32]byte) bool { + _, snoozed := r.snoozedRoots.Get(merkleRootToString(merkleRoot)) + return snoozed +} + +func (r *commitRootsCache) isExecuted(merkleRoot [32]byte) bool { + _, executed := r.executedRoots.Get(merkleRootToString(merkleRoot)) + return executed +} + +func (r *commitRootsCache) fetchLogsFromCommitStore(ctx context.Context) ([]ccip.CommitStoreReportWithTxMeta, error) { + r.cacheMu.Lock() + messageVisibilityWindow := time.Now().Add(-r.messageVisibilityInterval) + if r.latestFinalizedCommitRootTs.Before(messageVisibilityWindow) { + r.latestFinalizedCommitRootTs = messageVisibilityWindow + } + commitRootsFilterTimestamp := r.latestFinalizedCommitRootTs + r.cacheMu.Unlock() + + // IO operation, release lock before! + r.lggr.Infow("Fetching Commit Reports with timestamp greater than or equal to", "blockTimestamp", commitRootsFilterTimestamp) + return r.reader.GetAcceptedCommitReportsGteTimestamp(ctx, commitRootsFilterTimestamp, 0) +} + +func (r *commitRootsCache) updateFinalizedRoots(logs []ccip.CommitStoreReportWithTxMeta) ([]ccip.CommitStoreReportWithTxMeta, []ccip.CommitStoreReportWithTxMeta) { + r.cacheMu.Lock() + defer r.cacheMu.Unlock() + + // Assuming logs are properly ordered by block_timestamp, log_index + var unfinalizedReports []ccip.CommitStoreReportWithTxMeta + for _, log := range logs { + prettyMerkleRoot := merkleRootToString(log.MerkleRoot) + // Defensive check, if something is marked as executed, never allow it to come back to the cache + if r.isExecuted(log.MerkleRoot) { + r.lggr.Debugw("Ignoring root marked as executed", "merkleRoot", prettyMerkleRoot, "blockTimestamp", log.BlockTimestampUnixMilli) + continue + } + + if log.IsFinalized() { + r.lggr.Debugw("Adding finalized root to cache", "merkleRoot", prettyMerkleRoot, "blockTimestamp", log.BlockTimestampUnixMilli) + r.finalizedRoots.Store(prettyMerkleRoot, log) + } else { + r.lggr.Debugw("Bypassing unfinalized root", "merkleRoot", prettyMerkleRoot, "blockTimestamp", log.BlockTimestampUnixMilli) + unfinalizedReports = append(unfinalizedReports, log) + } + } + + if newest := r.finalizedRoots.Newest(); newest != nil { + r.latestFinalizedCommitRootTs = time.UnixMilli(newest.Value.BlockTimestampUnixMilli) + } + + var finalizedRoots []ccip.CommitStoreReportWithTxMeta + var rootsToDelete []string + + messageVisibilityWindow := time.Now().Add(-r.messageVisibilityInterval) + for pair := r.finalizedRoots.Oldest(); pair != nil; pair = pair.Next() { + // Mark items as stale if they are older than the messageVisibilityInterval + // SortedMap doesn't allow to iterate and delete, so we mark roots for deletion and remove them in a separate loop + if time.UnixMilli(pair.Value.BlockTimestampUnixMilli).Before(messageVisibilityWindow) { + rootsToDelete = append(rootsToDelete, pair.Key) + continue + } + finalizedRoots = append(finalizedRoots, pair.Value) + } + + // Remove stale items + for _, root := range rootsToDelete { + r.finalizedRoots.Delete(root) + } + + return finalizedRoots, unfinalizedReports +} + +func (r *commitRootsCache) pickReadyToExecute(r1 []ccip.CommitStoreReportWithTxMeta, r2 []ccip.CommitStoreReportWithTxMeta) 
[]ccip.CommitStoreReport { + allReports := append(r1, r2...) + eligibleReports := make([]ccip.CommitStoreReport, 0, len(allReports)) + for _, report := range allReports { + if r.isSnoozed(report.MerkleRoot) { + r.lggr.Debugw("Skipping snoozed root", + "minSeqNr", report.Interval.Min, + "maxSeqNr", report.Interval.Max, + "merkleRoot", merkleRootToString(report.MerkleRoot)) + continue + } + eligibleReports = append(eligibleReports, report.CommitStoreReport) + } + // safety check, probably not needed + slices.SortFunc(eligibleReports, func(i, j ccip.CommitStoreReport) int { + return int(i.Interval.Min - j.Interval.Min) + }) + return eligibleReports +} + +// internal use only for testing +func (r *commitRootsCache) finalizedCachedLogs() []ccip.CommitStoreReport { + r.cacheMu.RLock() + defer r.cacheMu.RUnlock() + + var finalizedRoots []ccip.CommitStoreReport + for pair := r.finalizedRoots.Oldest(); pair != nil; pair = pair.Next() { + finalizedRoots = append(finalizedRoots, pair.Value.CommitStoreReport) + } + return finalizedRoots +} + +func merkleRootToString(merkleRoot ccip.Hash) string { + return merkleRoot.String() +} diff --git a/core/services/ocr2/plugins/ccip/internal/cache/commit_roots_test.go b/core/services/ocr2/plugins/ccip/internal/cache/commit_roots_test.go new file mode 100644 index 00000000000..dc0a8443497 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/cache/commit_roots_test.go @@ -0,0 +1,297 @@ +package cache_test + +import ( + "math/big" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils" + ubig "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils/big" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/commit_store_1_2_0" + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest" + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/cache" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0" +) + +func Test_RootsEligibleForExecution(t *testing.T) { + ctx := testutils.Context(t) + chainID := testutils.NewRandomEVMChainID() + orm := logpoller.NewORM(chainID, pgtest.NewSqlxDB(t), logger.TestLogger(t)) + lpOpts := logpoller.Opts{ + PollPeriod: time.Hour, + FinalityDepth: 2, + BackfillBatchSize: 20, + RpcBatchSize: 10, + KeepFinalizedBlocksDepth: 1000, + } + lp := logpoller.NewLogPoller(orm, nil, logger.TestLogger(t), nil, lpOpts) + + commitStoreAddr := utils.RandomAddress() + + block2 := time.Now().Add(-8 * time.Hour) + block3 := time.Now().Add(-5 * time.Hour) + block4 := time.Now().Add(-1 * time.Hour) + newBlock4 := time.Now().Add(-2 * time.Hour) + block5 := time.Now() + + root1 := utils.RandomBytes32() + root2 := utils.RandomBytes32() + root3 := utils.RandomBytes32() + root4 := utils.RandomBytes32() + root5 := utils.RandomBytes32() + + inputLogs := []logpoller.Log{ + createReportAcceptedLog(t, chainID, commitStoreAddr, 2, 1, root1, block2), + createReportAcceptedLog(t, chainID, commitStoreAddr, 2, 2, root2, block2), + } + require.NoError(t, orm.InsertLogsWithBlock(ctx, inputLogs, logpoller.NewLogPollerBlock(utils.RandomBytes32(), 2, time.Now(), 
1))) + + commitStore, err := v1_2_0.NewCommitStore(logger.TestLogger(t), commitStoreAddr, nil, lp) + require.NoError(t, err) + + rootsCache := cache.NewCommitRootsCache(logger.TestLogger(t), commitStore, 10*time.Hour, time.Second) + + roots, err := rootsCache.RootsEligibleForExecution(ctx) + require.NoError(t, err) + assertRoots(t, roots, root1, root2) + + rootsCache.Snooze(root1) + rootsCache.Snooze(root2) + + // Roots are snoozed + roots, err = rootsCache.RootsEligibleForExecution(ctx) + require.NoError(t, err) + assertRoots(t, roots) + + // Roots are unsnoozed + require.Eventually(t, func() bool { + roots, err = rootsCache.RootsEligibleForExecution(ctx) + require.NoError(t, err) + return len(roots) == 2 + }, 5*time.Second, 1*time.Second) + + // Marking root as executed doesn't ignore other roots from the same block + rootsCache.MarkAsExecuted(root1) + roots, err = rootsCache.RootsEligibleForExecution(ctx) + require.NoError(t, err) + assertRoots(t, roots, root2) + + // Finality progress, mark all roots as finalized + require.NoError(t, orm.InsertBlock(ctx, utils.RandomBytes32(), 3, time.Now(), 3)) + roots, err = rootsCache.RootsEligibleForExecution(ctx) + require.NoError(t, err) + assertRoots(t, roots, root2) + + inputLogs = []logpoller.Log{ + createReportAcceptedLog(t, chainID, commitStoreAddr, 3, 1, root3, block3), + createReportAcceptedLog(t, chainID, commitStoreAddr, 4, 1, root4, block4), + createReportAcceptedLog(t, chainID, commitStoreAddr, 5, 1, root5, block5), + } + require.NoError(t, orm.InsertLogsWithBlock(ctx, inputLogs, logpoller.NewLogPollerBlock(utils.RandomBytes32(), 5, time.Now(), 3))) + roots, err = rootsCache.RootsEligibleForExecution(ctx) + require.NoError(t, err) + assertRoots(t, roots, root2, root3, root4, root5) + + // Mark root in the middle as executed but keep the oldest one still waiting + rootsCache.MarkAsExecuted(root3) + roots, err = rootsCache.RootsEligibleForExecution(ctx) + require.NoError(t, err) + assertRoots(t, roots, root2, root4, root5) + + // Simulate reorg by removing all unfinalized blocks + require.NoError(t, orm.DeleteLogsAndBlocksAfter(ctx, 4)) + roots, err = rootsCache.RootsEligibleForExecution(ctx) + require.NoError(t, err) + assertRoots(t, roots, root2) + + // Root4 comes back but with the different block_timestamp (before the reorged block) + inputLogs = []logpoller.Log{ + createReportAcceptedLog(t, chainID, commitStoreAddr, 4, 1, root4, newBlock4), + } + require.NoError(t, orm.InsertLogsWithBlock(ctx, inputLogs, logpoller.NewLogPollerBlock(utils.RandomBytes32(), 5, time.Now(), 3))) + roots, err = rootsCache.RootsEligibleForExecution(ctx) + require.NoError(t, err) + assertRoots(t, roots, root2, root4) + + // Mark everything as executed + rootsCache.MarkAsExecuted(root2) + rootsCache.MarkAsExecuted(root4) + roots, err = rootsCache.RootsEligibleForExecution(ctx) + require.NoError(t, err) + assertRoots(t, roots) +} + +func Test_RootsEligibleForExecutionWithReorgs(t *testing.T) { + ctx := testutils.Context(t) + chainID := testutils.NewRandomEVMChainID() + orm := logpoller.NewORM(chainID, pgtest.NewSqlxDB(t), logger.TestLogger(t)) + lpOpts := logpoller.Opts{ + PollPeriod: time.Hour, + FinalityDepth: 2, + BackfillBatchSize: 20, + RpcBatchSize: 10, + KeepFinalizedBlocksDepth: 1000, + } + lp := logpoller.NewLogPoller(orm, nil, logger.TestLogger(t), nil, lpOpts) + + commitStoreAddr := utils.RandomAddress() + + block1 := time.Now().Add(-8 * time.Hour) + block2 := time.Now().Add(-5 * time.Hour) + block3 := time.Now().Add(-2 * time.Hour) + block4 := 
time.Now().Add(-1 * time.Hour) + + root1 := utils.RandomBytes32() + root2 := utils.RandomBytes32() + root3 := utils.RandomBytes32() + + // Genesis block + require.NoError(t, orm.InsertBlock(ctx, utils.RandomBytes32(), 1, block1, 1)) + inputLogs := []logpoller.Log{ + createReportAcceptedLog(t, chainID, commitStoreAddr, 2, 1, root1, block2), + createReportAcceptedLog(t, chainID, commitStoreAddr, 2, 2, root2, block2), + createReportAcceptedLog(t, chainID, commitStoreAddr, 3, 1, root3, block3), + } + require.NoError(t, orm.InsertLogsWithBlock(ctx, inputLogs, logpoller.NewLogPollerBlock(utils.RandomBytes32(), 3, time.Now(), 1))) + + commitStore, err := v1_2_0.NewCommitStore(logger.TestLogger(t), commitStoreAddr, nil, lp) + require.NoError(t, err) + + rootsCache := cache.NewCommitRootsCache(logger.TestLogger(t), commitStore, 10*time.Hour, time.Second) + + // Get all including finalized and unfinalized + roots, err := rootsCache.RootsEligibleForExecution(ctx) + require.NoError(t, err) + assertRoots(t, roots, root1, root2, root3) + + // Reorg everything away + require.NoError(t, orm.DeleteLogsAndBlocksAfter(ctx, 2)) + roots, err = rootsCache.RootsEligibleForExecution(ctx) + require.NoError(t, err) + assertRoots(t, roots) + + // Reinsert the logs, mark first one as finalized + inputLogs = []logpoller.Log{ + createReportAcceptedLog(t, chainID, commitStoreAddr, 3, 1, root1, block3), + createReportAcceptedLog(t, chainID, commitStoreAddr, 4, 1, root2, block4), + createReportAcceptedLog(t, chainID, commitStoreAddr, 4, 2, root3, block4), + } + require.NoError(t, orm.InsertLogsWithBlock(ctx, inputLogs, logpoller.NewLogPollerBlock(utils.RandomBytes32(), 5, time.Now(), 3))) + roots, err = rootsCache.RootsEligibleForExecution(ctx) + require.NoError(t, err) + assertRoots(t, roots, root1, root2, root3) + + // Reorg away everything except the finalized one + require.NoError(t, orm.DeleteLogsAndBlocksAfter(ctx, 4)) + roots, err = rootsCache.RootsEligibleForExecution(ctx) + require.NoError(t, err) + assertRoots(t, roots, root1) +} + +// Not very likely, but let's be more defensive here and verify if cache works properly and can deal with duplicates +func Test_BlocksWithTheSameTimestamps(t *testing.T) { + ctx := testutils.Context(t) + chainID := testutils.NewRandomEVMChainID() + orm := logpoller.NewORM(chainID, pgtest.NewSqlxDB(t), logger.TestLogger(t)) + lpOpts := logpoller.Opts{ + PollPeriod: time.Hour, + FinalityDepth: 2, + BackfillBatchSize: 20, + RpcBatchSize: 10, + KeepFinalizedBlocksDepth: 1000, + } + lp := logpoller.NewLogPoller(orm, nil, logger.TestLogger(t), nil, lpOpts) + + commitStoreAddr := utils.RandomAddress() + + block := time.Now().Add(-1 * time.Hour).Truncate(time.Second) + root1 := utils.RandomBytes32() + root2 := utils.RandomBytes32() + + inputLogs := []logpoller.Log{ + createReportAcceptedLog(t, chainID, commitStoreAddr, 2, 1, root1, block), + } + require.NoError(t, orm.InsertLogsWithBlock(ctx, inputLogs, logpoller.NewLogPollerBlock(utils.RandomBytes32(), 2, time.Now(), 2))) + + commitStore, err := v1_2_0.NewCommitStore(logger.TestLogger(t), commitStoreAddr, nil, lp) + require.NoError(t, err) + + rootsCache := cache.NewCommitRootsCache(logger.TestLogger(t), commitStore, 10*time.Hour, time.Second) + roots, err := rootsCache.RootsEligibleForExecution(ctx) + require.NoError(t, err) + assertRoots(t, roots, root1) + + inputLogs = []logpoller.Log{ + createReportAcceptedLog(t, chainID, commitStoreAddr, 3, 1, root2, block), + } + require.NoError(t, orm.InsertLogsWithBlock(ctx, inputLogs, 
logpoller.NewLogPollerBlock(utils.RandomBytes32(), 3, time.Now(), 3))) + + roots, err = rootsCache.RootsEligibleForExecution(ctx) + require.NoError(t, err) + assertRoots(t, roots, root1, root2) +} + +func assertRoots(t *testing.T, roots []cciptypes.CommitStoreReport, root ...[32]byte) { + require.Len(t, roots, len(root)) + for i, r := range root { + require.Equal(t, r, roots[i].MerkleRoot) + } +} + +func createReportAcceptedLog(t testing.TB, chainID *big.Int, address common.Address, blockNumber int64, logIndex int64, merkleRoot common.Hash, blockTimestamp time.Time) logpoller.Log { + tAbi, err := commit_store_1_2_0.CommitStoreMetaData.GetAbi() + require.NoError(t, err) + eseEvent, ok := tAbi.Events["ReportAccepted"] + require.True(t, ok) + + gasPriceUpdates := make([]commit_store_1_2_0.InternalGasPriceUpdate, 100) + tokenPriceUpdates := make([]commit_store_1_2_0.InternalTokenPriceUpdate, 100) + + for i := 0; i < 100; i++ { + gasPriceUpdates[i] = commit_store_1_2_0.InternalGasPriceUpdate{ + DestChainSelector: uint64(i), + UsdPerUnitGas: big.NewInt(int64(i)), + } + tokenPriceUpdates[i] = commit_store_1_2_0.InternalTokenPriceUpdate{ + SourceToken: utils.RandomAddress(), + UsdPerToken: big.NewInt(int64(i)), + } + } + + message := commit_store_1_2_0.CommitStoreCommitReport{ + PriceUpdates: commit_store_1_2_0.InternalPriceUpdates{ + TokenPriceUpdates: tokenPriceUpdates, + GasPriceUpdates: gasPriceUpdates, + }, + Interval: commit_store_1_2_0.CommitStoreInterval{Min: 1, Max: 10}, + MerkleRoot: merkleRoot, + } + + logData, err := eseEvent.Inputs.Pack(message) + require.NoError(t, err) + + topic0 := commit_store_1_2_0.CommitStoreReportAccepted{}.Topic() + + return logpoller.Log{ + Topics: [][]byte{ + topic0[:], + }, + Data: logData, + LogIndex: logIndex, + BlockHash: utils.RandomBytes32(), + BlockNumber: blockNumber, + BlockTimestamp: blockTimestamp.Truncate(time.Millisecond), + EventSig: topic0, + Address: address, + TxHash: utils.RandomBytes32(), + EvmChainId: ubig.New(chainID), + } +} diff --git a/core/services/ocr2/plugins/ccip/internal/cache/commit_roots_unit_test.go b/core/services/ocr2/plugins/ccip/internal/cache/commit_roots_unit_test.go new file mode 100644 index 00000000000..34a470ef907 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/cache/commit_roots_unit_test.go @@ -0,0 +1,212 @@ +package cache + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + "github.com/smartcontractkit/chainlink-common/pkg/utils/tests" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils" + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/mocks" +) + +func Test_CacheIsInitializedWithFirstCall(t *testing.T) { + commitStoreReader := mocks.NewCommitStoreReader(t) + cache := newCommitRootsCache(logger.TestLogger(t), commitStoreReader, time.Hour, time.Hour, time.Hour, time.Hour) + commitStoreReader.On("GetAcceptedCommitReportsGteTimestamp", mock.Anything, mock.Anything, mock.Anything).Return([]ccip.CommitStoreReportWithTxMeta{}, nil) + + roots, err := cache.RootsEligibleForExecution(tests.Context(t)) + require.NoError(t, err) + assertRoots(t, roots) +} + +func Test_CacheExpiration(t *testing.T) { + ts1 := time.Now().Add(-5 * time.Millisecond).Truncate(time.Millisecond) + ts2 := time.Now().Add(-3 * 
time.Millisecond).Truncate(time.Millisecond)
+	ts3 := time.Now().Add(-1 * time.Millisecond).Truncate(time.Millisecond)
+
+	root1 := utils.RandomBytes32()
+	root2 := utils.RandomBytes32()
+	root3 := utils.RandomBytes32()
+
+	commitStoreReader := mocks.NewCommitStoreReader(t)
+	cache := newCommitRootsCache(logger.TestLogger(t), commitStoreReader, time.Second, time.Hour, time.Hour, time.Hour)
+	mockCommitStoreReader(commitStoreReader, time.Time{}, []ccip.CommitStoreReportWithTxMeta{
+		createCommitStoreEntry(root1, ts1, true),
+		createCommitStoreEntry(root2, ts2, true),
+		createCommitStoreEntry(root3, ts3, false),
+	})
+	roots, err := cache.RootsEligibleForExecution(tests.Context(t))
+	require.NoError(t, err)
+	assertRoots(t, roots, root1, root2, root3)
+
+	require.Eventually(t, func() bool {
+		mockCommitStoreReader(commitStoreReader, time.Time{}, []ccip.CommitStoreReportWithTxMeta{
+			createCommitStoreEntry(root3, ts3, false),
+		})
+		roots, err = cache.RootsEligibleForExecution(tests.Context(t))
+		require.NoError(t, err)
+		return len(roots) == 1 && roots[0].MerkleRoot == root3
+	}, 5*time.Second, 1*time.Second)
+}
+
+func Test_CacheFullEviction(t *testing.T) {
+	commitStoreReader := mocks.NewCommitStoreReader(t)
+	cache := newCommitRootsCache(logger.TestLogger(t), commitStoreReader, 2*time.Second, 1*time.Second, time.Second, time.Second)
+
+	maxElements := 10000
+	commitRoots := make([]ccip.CommitStoreReportWithTxMeta, maxElements)
+	for i := 0; i < maxElements; i++ {
+		finalized := i >= maxElements/2
+		commitRoots[i] = createCommitStoreEntry(utils.RandomBytes32(), time.Now(), finalized)
+	}
+	mockCommitStoreReader(commitStoreReader, time.Time{}, commitRoots)
+
+	roots, err := cache.RootsEligibleForExecution(tests.Context(t))
+	require.NoError(t, err)
+	require.Len(t, roots, maxElements)
+
+	// Mark some of them as executed and some of them as snoozed
+	for i := 0; i < maxElements; i++ {
+		if i%3 == 0 {
+			cache.MarkAsExecuted(commitRoots[i].MerkleRoot)
+		}
+		if i%3 == 1 {
+			cache.Snooze(commitRoots[i].MerkleRoot)
+		}
+	}
+	// Eventually everything should be entirely removed from cache.
We need that check to verify if cache doesn't grow indefinitely + require.Eventually(t, func() bool { + mockCommitStoreReader(commitStoreReader, time.Time{}, []ccip.CommitStoreReportWithTxMeta{}) + roots1, err1 := cache.RootsEligibleForExecution(tests.Context(t)) + require.NoError(t, err1) + + return len(roots1) == 0 && + cache.finalizedRoots.Len() == 0 && + len(cache.snoozedRoots.Items()) == 0 && + len(cache.executedRoots.Items()) == 0 + }, 10*time.Second, time.Second) +} + +func Test_CacheProgression_Internal(t *testing.T) { + ts1 := time.Now().Add(-5 * time.Hour).Truncate(time.Millisecond) + ts2 := time.Now().Add(-3 * time.Hour).Truncate(time.Millisecond) + ts3 := time.Now().Add(-1 * time.Hour).Truncate(time.Millisecond) + + root1 := utils.RandomBytes32() + root2 := utils.RandomBytes32() + root3 := utils.RandomBytes32() + + commitStoreReader := mocks.NewCommitStoreReader(t) + + cache := newCommitRootsCache(logger.TestLogger(t), commitStoreReader, 10*time.Hour, time.Hour, time.Hour, time.Hour) + + // Empty cache, no results from the reader + mockCommitStoreReader(commitStoreReader, time.Time{}, []ccip.CommitStoreReportWithTxMeta{}) + roots, err := cache.RootsEligibleForExecution(tests.Context(t)) + require.NoError(t, err) + assertRoots(t, roots) + assertRoots(t, cache.finalizedCachedLogs()) + + // Single unfinalized root returned + mockCommitStoreReader(commitStoreReader, time.Time{}, []ccip.CommitStoreReportWithTxMeta{createCommitStoreEntry(root1, ts1, false)}) + roots, err = cache.RootsEligibleForExecution(tests.Context(t)) + require.NoError(t, err) + assertRoots(t, roots, root1) + assertRoots(t, cache.finalizedCachedLogs()) + + // Finalized and unfinalized roots returned + mockCommitStoreReader(commitStoreReader, time.Time{}, []ccip.CommitStoreReportWithTxMeta{ + createCommitStoreEntry(root1, ts1, true), + createCommitStoreEntry(root2, ts2, false), + }) + roots, err = cache.RootsEligibleForExecution(tests.Context(t)) + require.NoError(t, err) + assertRoots(t, roots, root1, root2) + assertRoots(t, cache.finalizedCachedLogs(), root1) + + // Returning the same data should not impact cache state (no duplicates) + mockCommitStoreReader(commitStoreReader, ts1, []ccip.CommitStoreReportWithTxMeta{ + createCommitStoreEntry(root1, ts1, true), + createCommitStoreEntry(root2, ts2, false), + }) + roots, err = cache.RootsEligibleForExecution(tests.Context(t)) + require.NoError(t, err) + assertRoots(t, roots, root1, root2) + assertRoots(t, cache.finalizedCachedLogs(), root1) + + // Snoozing oldest root + cache.Snooze(root1) + mockCommitStoreReader(commitStoreReader, ts1, []ccip.CommitStoreReportWithTxMeta{ + createCommitStoreEntry(root2, ts2, false), + createCommitStoreEntry(root3, ts3, false), + }) + roots, err = cache.RootsEligibleForExecution(tests.Context(t)) + require.NoError(t, err) + assertRoots(t, roots, root2, root3) + assertRoots(t, cache.finalizedCachedLogs(), root1) + + // Snoozing everything + cache.Snooze(root2) + cache.Snooze(root3) + mockCommitStoreReader(commitStoreReader, ts1, []ccip.CommitStoreReportWithTxMeta{ + createCommitStoreEntry(root2, ts2, true), + createCommitStoreEntry(root3, ts3, true), + }) + roots, err = cache.RootsEligibleForExecution(tests.Context(t)) + require.NoError(t, err) + assertRoots(t, roots) + assertRoots(t, cache.finalizedCachedLogs(), root1, root2, root3) + + // Marking everything as executed removes it entirely, even if root is returned from the CommitStore + cache.MarkAsExecuted(root1) + cache.MarkAsExecuted(root2) + cache.MarkAsExecuted(root3) + 
mockCommitStoreReader(commitStoreReader, ts3, []ccip.CommitStoreReportWithTxMeta{ + createCommitStoreEntry(root2, ts2, true), + createCommitStoreEntry(root3, ts3, true), + }) + roots, err = cache.RootsEligibleForExecution(tests.Context(t)) + require.NoError(t, err) + assertRoots(t, roots) + assertRoots(t, cache.finalizedCachedLogs()) +} + +func assertRoots(t *testing.T, reports []ccip.CommitStoreReport, expectedRoots ...[32]byte) { + require.Len(t, reports, len(expectedRoots)) + for i, report := range reports { + assert.Equal(t, expectedRoots[i], report.MerkleRoot) + } +} + +func mockCommitStoreReader(reader *mocks.CommitStoreReader, blockTimestamp time.Time, roots []ccip.CommitStoreReportWithTxMeta) { + if blockTimestamp.IsZero() { + reader.On("GetAcceptedCommitReportsGteTimestamp", mock.Anything, mock.Anything, mock.Anything). + Return(roots, nil).Once() + } else { + reader.On("GetAcceptedCommitReportsGteTimestamp", mock.Anything, blockTimestamp, mock.Anything). + Return(roots, nil).Once() + } +} + +func createCommitStoreEntry(root [32]byte, ts time.Time, finalized bool) ccip.CommitStoreReportWithTxMeta { + status := ccip.FinalizedStatusNotFinalized + if finalized { + status = ccip.FinalizedStatusFinalized + } + return ccip.CommitStoreReportWithTxMeta{ + CommitStoreReport: ccip.CommitStoreReport{ + MerkleRoot: root, + }, + TxMeta: ccip.TxMeta{ + BlockTimestampUnixMilli: ts.UnixMilli(), + Finalized: status, + }, + } +} diff --git a/core/services/ocr2/plugins/ccip/internal/cache/lazy.go b/core/services/ocr2/plugins/ccip/internal/cache/lazy.go new file mode 100644 index 00000000000..7b15abe271b --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/cache/lazy.go @@ -0,0 +1,20 @@ +package cache + +import "sync" + +type LazyFunction[T any] func() (T, error) + +// LazyFetch caches the results during the first call and then returns the cached value +// on each consecutive call. 
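+//
+// Illustrative usage sketch (the loadOnchainConfig helper below is hypothetical and not part of this package);
+// it shows that both the value and the error of the first call are memoized:
+//
+//	loadOnchainConfig := func() (string, error) { return "config", nil } // e.g. an expensive RPC call
+//	cachedLoad := LazyFetch(loadOnchainConfig)
+//	cfg, err := cachedLoad() // executes loadOnchainConfig
+//	cfg, err = cachedLoad()  // returns the memoized value and error without calling it again
+//
+// Because errors are memoized too, see CallOnceOnNoError in once.go for a variant that retries until a call succeeds.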
+func LazyFetch[T any](fun LazyFunction[T]) LazyFunction[T] { + var result T + var err error + var once sync.Once + + return func() (T, error) { + once.Do(func() { + result, err = fun() + }) + return result, err + } +} diff --git a/core/services/ocr2/plugins/ccip/internal/cache/lazy_test.go b/core/services/ocr2/plugins/ccip/internal/cache/lazy_test.go new file mode 100644 index 00000000000..2777a6c2e0b --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/cache/lazy_test.go @@ -0,0 +1,71 @@ +package cache + +import ( + "fmt" + "sync" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestLazyFetchPass(t *testing.T) { + counterFunction := createPassingCounter() + + counter, _ := counterFunction() + require.Equal(t, 1, counter) + + lazyCounter := LazyFetch(counterFunction) + counter, _ = lazyCounter() + require.Equal(t, 2, counter) + + counter, _ = lazyCounter() + require.Equal(t, 2, counter) +} + +func TestLazyFetchFail(t *testing.T) { + counterFunction := createFailingCounter() + + _, err := counterFunction() + require.Equal(t, "counter 1 failed", err.Error()) + + lazyCounter := LazyFetch(counterFunction) + _, err = lazyCounter() + require.Equal(t, "counter 2 failed", err.Error()) + + _, err = lazyCounter() + require.Equal(t, "counter 2 failed", err.Error()) +} + +func TestLazyFetchMultipleRoutines(t *testing.T) { + routines := 100 + counterFunction := LazyFetch(createPassingCounter()) + + var wg sync.WaitGroup + wg.Add(routines) + + for i := 0; i < routines; i++ { + go func() { + counter, _ := counterFunction() + require.Equal(t, 1, counter) + wg.Done() + }() + } + + wg.Wait() +} + +func createFailingCounter() func() (int, error) { + counter := 0 + return func() (int, error) { + counter++ + return 0, fmt.Errorf("counter %d failed", counter) + } +} + +func createPassingCounter() func() (int, error) { + counter := 0 + return func() (int, error) { + counter++ + return counter, nil + } +} diff --git a/core/services/ocr2/plugins/ccip/internal/cache/mocks/chain_health_mock.go b/core/services/ocr2/plugins/ccip/internal/cache/mocks/chain_health_mock.go new file mode 100644 index 00000000000..595b15774af --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/cache/mocks/chain_health_mock.go @@ -0,0 +1,183 @@ +// Code generated by mockery v2.43.2. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" +) + +// ChainHealthcheck is an autogenerated mock type for the ChainHealthcheck type +type ChainHealthcheck struct { + mock.Mock +} + +type ChainHealthcheck_Expecter struct { + mock *mock.Mock +} + +func (_m *ChainHealthcheck) EXPECT() *ChainHealthcheck_Expecter { + return &ChainHealthcheck_Expecter{mock: &_m.Mock} +} + +// Close provides a mock function with given fields: +func (_m *ChainHealthcheck) Close() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Close") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ChainHealthcheck_Close_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Close' +type ChainHealthcheck_Close_Call struct { + *mock.Call +} + +// Close is a helper method to define mock.On call +func (_e *ChainHealthcheck_Expecter) Close() *ChainHealthcheck_Close_Call { + return &ChainHealthcheck_Close_Call{Call: _e.mock.On("Close")} +} + +func (_c *ChainHealthcheck_Close_Call) Run(run func()) *ChainHealthcheck_Close_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *ChainHealthcheck_Close_Call) Return(_a0 error) *ChainHealthcheck_Close_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *ChainHealthcheck_Close_Call) RunAndReturn(run func() error) *ChainHealthcheck_Close_Call { + _c.Call.Return(run) + return _c +} + +// IsHealthy provides a mock function with given fields: ctx +func (_m *ChainHealthcheck) IsHealthy(ctx context.Context) (bool, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for IsHealthy") + } + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (bool, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) bool); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ChainHealthcheck_IsHealthy_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'IsHealthy' +type ChainHealthcheck_IsHealthy_Call struct { + *mock.Call +} + +// IsHealthy is a helper method to define mock.On call +// - ctx context.Context +func (_e *ChainHealthcheck_Expecter) IsHealthy(ctx interface{}) *ChainHealthcheck_IsHealthy_Call { + return &ChainHealthcheck_IsHealthy_Call{Call: _e.mock.On("IsHealthy", ctx)} +} + +func (_c *ChainHealthcheck_IsHealthy_Call) Run(run func(ctx context.Context)) *ChainHealthcheck_IsHealthy_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *ChainHealthcheck_IsHealthy_Call) Return(_a0 bool, _a1 error) *ChainHealthcheck_IsHealthy_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ChainHealthcheck_IsHealthy_Call) RunAndReturn(run func(context.Context) (bool, error)) *ChainHealthcheck_IsHealthy_Call { + _c.Call.Return(run) + return _c +} + +// Start provides a mock function with given fields: _a0 +func (_m *ChainHealthcheck) Start(_a0 context.Context) error { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for Start") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// 
ChainHealthcheck_Start_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Start' +type ChainHealthcheck_Start_Call struct { + *mock.Call +} + +// Start is a helper method to define mock.On call +// - _a0 context.Context +func (_e *ChainHealthcheck_Expecter) Start(_a0 interface{}) *ChainHealthcheck_Start_Call { + return &ChainHealthcheck_Start_Call{Call: _e.mock.On("Start", _a0)} +} + +func (_c *ChainHealthcheck_Start_Call) Run(run func(_a0 context.Context)) *ChainHealthcheck_Start_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *ChainHealthcheck_Start_Call) Return(_a0 error) *ChainHealthcheck_Start_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *ChainHealthcheck_Start_Call) RunAndReturn(run func(context.Context) error) *ChainHealthcheck_Start_Call { + _c.Call.Return(run) + return _c +} + +// NewChainHealthcheck creates a new instance of ChainHealthcheck. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewChainHealthcheck(t interface { + mock.TestingT + Cleanup(func()) +}) *ChainHealthcheck { + mock := &ChainHealthcheck{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/ocr2/plugins/ccip/internal/cache/observed_chain_health.go b/core/services/ocr2/plugins/ccip/internal/cache/observed_chain_health.go new file mode 100644 index 00000000000..941162448af --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/cache/observed_chain_health.go @@ -0,0 +1,70 @@ +package cache + +import ( + "context" + "strconv" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" +) + +var ( + laneHealthStatus = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ccip_lane_healthcheck_status", + Help: "Keep track of the chain healthcheck calls for each lane and plugin", + }, []string{"plugin", "source", "dest", "onramp"}) +) + +type ObservedChainHealthcheck struct { + ChainHealthcheck + + sourceChain string + destChain string + plugin string + // onrampAddress is used to distinguish between 1.0/2.0 lanes or blue/green lanes during deployment + // This changes very rarely, so it's not a performance concern for Prometheus + onrampAddress string + laneHealthStatus *prometheus.GaugeVec +} + +func NewObservedChainHealthCheck( + chainHealthcheck ChainHealthcheck, + plugin string, + sourceChain int64, + destChain int64, + onrampAddress cciptypes.Address, +) *ObservedChainHealthcheck { + return &ObservedChainHealthcheck{ + ChainHealthcheck: chainHealthcheck, + sourceChain: strconv.FormatInt(sourceChain, 10), + destChain: strconv.FormatInt(destChain, 10), + plugin: plugin, + laneHealthStatus: laneHealthStatus, + onrampAddress: string(onrampAddress), + } +} + +func (o *ObservedChainHealthcheck) IsHealthy(ctx context.Context) (bool, error) { + healthy, err := o.ChainHealthcheck.IsHealthy(ctx) + o.trackState(healthy, err) + return healthy, err +} + +func (o *ObservedChainHealthcheck) trackState(healthy bool, err error) { + if err != nil { + // Don't report errors as unhealthy, as they are not necessarily indicative of the chain's health + // Could be RPC issues, etc. + return + } + + status := 0 + if healthy { + status = 1 + } + + o.laneHealthStatus. 
+ WithLabelValues(o.plugin, o.sourceChain, o.destChain, o.onrampAddress). + Set(float64(status)) +} diff --git a/core/services/ocr2/plugins/ccip/internal/cache/observed_chain_health_test.go b/core/services/ocr2/plugins/ccip/internal/cache/observed_chain_health_test.go new file mode 100644 index 00000000000..19583a37c70 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/cache/observed_chain_health_test.go @@ -0,0 +1,62 @@ +package cache + +import ( + "fmt" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/prometheus/client_golang/prometheus/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/chainlink-common/pkg/utils/tests" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/cache/mocks" +) + +var address = cciptypes.Address(common.HexToAddress("0x1234567890123456789012345678901234567890").String()) + +func Test_ObservedChainStateSkipErrors(t *testing.T) { + mockedHealthcheck := mocks.NewChainHealthcheck(t) + mockedHealthcheck.On("IsHealthy", mock.Anything).Return(false, fmt.Errorf("error")) + + observedChainState := NewObservedChainHealthCheck( + mockedHealthcheck, + "plugin", + 10, + 20, + address, + ) + + _, err := observedChainState.IsHealthy(tests.Context(t)) + assert.Error(t, err) + assert.Equal(t, float64(0), testutil.ToFloat64(laneHealthStatus.WithLabelValues("plugin", "10", "20", "0x1234567890123456789012345678901234567890"))) +} + +func Test_ObservedChainStateReportsStatus(t *testing.T) { + mockedHealthcheck := mocks.NewChainHealthcheck(t) + mockedHealthcheck.On("IsHealthy", mock.Anything).Return(true, nil).Once() + + observedChainState := NewObservedChainHealthCheck( + mockedHealthcheck, + "plugin", + 10, + 20, + address, + ) + + health, err := observedChainState.IsHealthy(tests.Context(t)) + require.NoError(t, err) + assert.True(t, health) + assert.Equal(t, float64(1), testutil.ToFloat64(laneHealthStatus.WithLabelValues("plugin", "10", "20", "0x1234567890123456789012345678901234567890"))) + + // Mark as unhealthy + mockedHealthcheck.On("IsHealthy", mock.Anything).Return(false, nil).Once() + + health, err = observedChainState.IsHealthy(tests.Context(t)) + require.NoError(t, err) + assert.False(t, health) + assert.Equal(t, float64(0), testutil.ToFloat64(laneHealthStatus.WithLabelValues("plugin", "10", "20", "0x1234567890123456789012345678901234567890"))) +} diff --git a/core/services/ocr2/plugins/ccip/internal/cache/once.go b/core/services/ocr2/plugins/ccip/internal/cache/once.go new file mode 100644 index 00000000000..713501a03e1 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/cache/once.go @@ -0,0 +1,38 @@ +package cache + +import ( + "context" + "sync" +) + +type OnceCtxFunction[T any] func(ctx context.Context) (T, error) + +// CallOnceOnNoError returns a new function that wraps the given function f with caching capabilities. +// If f returns an error, the result is not cached, allowing f to be retried on subsequent calls. +// Use case for that is to avoid caching an error forever in case of transient errors (e.g. 
flaky RPC) +func CallOnceOnNoError[T any](f OnceCtxFunction[T]) OnceCtxFunction[T] { + var ( + mu sync.Mutex + value T + err error + called bool + ) + + return func(ctx context.Context) (T, error) { + mu.Lock() + defer mu.Unlock() + + // If the function has been called successfully before, return the cached result. + if called && err == nil { + return value, nil + } + + // Call the function and cache the result only if there is no error. + value, err = f(ctx) + if err == nil { + called = true + } + + return value, err + } +} diff --git a/core/services/ocr2/plugins/ccip/internal/cache/once_test.go b/core/services/ocr2/plugins/ccip/internal/cache/once_test.go new file mode 100644 index 00000000000..6ba2fbddd53 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/cache/once_test.go @@ -0,0 +1,83 @@ +package cache + +import ( + "context" + "errors" + "sync" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/chainlink-common/pkg/utils/tests" +) + +// TestCallOnceOnNoErrorCachingSuccess tests caching behavior when the function succeeds. +func TestCallOnceOnNoErrorCachingSuccess(t *testing.T) { + callCount := 0 + testFunc := func(ctx context.Context) (string, error) { + callCount++ + return "test result", nil + } + + cachedFunc := CallOnceOnNoError(testFunc) + + // Call the function twice. + _, err := cachedFunc(tests.Context(t)) + assert.NoError(t, err, "Expected no error on the first call") + + _, err = cachedFunc(tests.Context(t)) + assert.NoError(t, err, "Expected no error on the second call") + + assert.Equal(t, 1, callCount, "Function should be called exactly once") +} + +// TestCallOnceOnNoErrorCachingError tests that the function is retried after an error. +func TestCallOnceOnNoErrorCachingError(t *testing.T) { + callCount := 0 + testFunc := func(ctx context.Context) (string, error) { + callCount++ + if callCount == 1 { + return "", errors.New("test error") + } + return "test result", nil + } + + cachedFunc := CallOnceOnNoError(testFunc) + + // First call should fail. + _, err := cachedFunc(tests.Context(t)) + require.Error(t, err, "Expected an error on the first call") + + // Second call should succeed. + r, err := cachedFunc(tests.Context(t)) + assert.NoError(t, err, "Expected no error on the second call") + assert.Equal(t, "test result", r) + assert.Equal(t, 2, callCount, "Function should be called exactly twice") +} + +// TestCallOnceOnNoErrorCachingConcurrency tests that the function works correctly under concurrent access. +func TestCallOnceOnNoErrorCachingConcurrency(t *testing.T) { + var wg sync.WaitGroup + callCount := 0 + testFunc := func(ctx context.Context) (string, error) { + callCount++ + return "test result", nil + } + + cachedFunc := CallOnceOnNoError(testFunc) + + // Simulate concurrent calls. 
+ for i := 0; i < 10; i++ { + wg.Add(1) + go func() { + defer wg.Done() + _, err := cachedFunc(tests.Context(t)) + assert.NoError(t, err, "Expected no error in concurrent execution") + }() + } + + wg.Wait() + + assert.Equal(t, 1, callCount, "Function should be called exactly once despite concurrent calls") +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipcalc/addr.go b/core/services/ocr2/plugins/ccip/internal/ccipcalc/addr.go new file mode 100644 index 00000000000..40cdab6df9d --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipcalc/addr.go @@ -0,0 +1,44 @@ +package ccipcalc + +import ( + "fmt" + + "github.com/ethereum/go-ethereum/common" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" +) + +func EvmAddrsToGeneric(evmAddrs ...common.Address) []cciptypes.Address { + res := make([]cciptypes.Address, 0, len(evmAddrs)) + for _, addr := range evmAddrs { + res = append(res, cciptypes.Address(addr.String())) + } + return res +} + +func EvmAddrToGeneric(evmAddr common.Address) cciptypes.Address { + return cciptypes.Address(evmAddr.String()) +} + +func GenericAddrsToEvm(genericAddrs ...cciptypes.Address) ([]common.Address, error) { + evmAddrs := make([]common.Address, 0, len(genericAddrs)) + for _, addr := range genericAddrs { + if !common.IsHexAddress(string(addr)) { + return nil, fmt.Errorf("%s not an evm address", addr) + } + evmAddrs = append(evmAddrs, common.HexToAddress(string(addr))) + } + return evmAddrs, nil +} + +func GenericAddrToEvm(genAddr cciptypes.Address) (common.Address, error) { + evmAddrs, err := GenericAddrsToEvm(genAddr) + if err != nil { + return common.Address{}, err + } + return evmAddrs[0], nil +} + +func HexToAddress(h string) cciptypes.Address { + return cciptypes.Address(common.HexToAddress(h).String()) +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipcalc/calc.go b/core/services/ocr2/plugins/ccip/internal/ccipcalc/calc.go new file mode 100644 index 00000000000..8ba57e77ed2 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipcalc/calc.go @@ -0,0 +1,69 @@ +package ccipcalc + +import ( + "math/big" + "sort" + + "github.com/smartcontractkit/chainlink/v2/core/logger" +) + +// ContiguousReqs checks if seqNrs contains all numbers from min to max. +func ContiguousReqs(lggr logger.Logger, min, max uint64, seqNrs []uint64) bool { + if int(max-min+1) != len(seqNrs) { + return false + } + + for i, j := min, 0; i <= max && j < len(seqNrs); i, j = i+1, j+1 { + if seqNrs[j] != i { + lggr.Errorw("unexpected gap in seq nums", "seqNr", i, "minSeqNr", min, "maxSeqNr", max) + return false + } + } + return true +} + +// CalculateUsdPerUnitGas returns: (sourceGasPrice * usdPerFeeCoin) / 1e18 +func CalculateUsdPerUnitGas(sourceGasPrice *big.Int, usdPerFeeCoin *big.Int) *big.Int { + // (wei / gas) * (usd / eth) * (1 eth / 1e18 wei) = usd/gas + tmp := new(big.Int).Mul(sourceGasPrice, usdPerFeeCoin) + return tmp.Div(tmp, big.NewInt(1e18)) +} + +// BigIntSortedMiddle returns the middle number after sorting the provided numbers. nil is returned if the provided slice is empty. +// If length of the provided slice is even, the right-hand-side value of the middle 2 numbers is returned. +// The objective of this function is to always pick within the range of values reported by honest nodes when we have 2f+1 values. 
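+// For example (mirroring the base cases in calc_test.go below), for the values
+// [1, 2, 4, 5] the returned middle is 4 (the right-hand value of the middle pair),
+// and for the unsorted input [100, 50, 30, 110] it is 100.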
+func BigIntSortedMiddle(vals []*big.Int) *big.Int { + if len(vals) == 0 { + return nil + } + + valsCopy := make([]*big.Int, len(vals)) + copy(valsCopy[:], vals[:]) + sort.Slice(valsCopy, func(i, j int) bool { + return valsCopy[i].Cmp(valsCopy[j]) == -1 + }) + return valsCopy[len(valsCopy)/2] +} + +// Deviates checks if x1 and x2 deviates based on the provided ppb (parts per billion) +// ppb is calculated based on the smaller value of the two +// e.g, if x1 > x2, deviation_parts_per_billion = ((x1 - x2) / x2) * 1e9 +func Deviates(x1, x2 *big.Int, ppb int64) bool { + // if x1 == 0 or x2 == 0, deviates if x2 != x1, to avoid the relative division by 0 error + if x1.BitLen() == 0 || x2.BitLen() == 0 { + return x1.Cmp(x2) != 0 + } + diff := big.NewInt(0).Sub(x1, x2) // diff = x1-x2 + diff.Mul(diff, big.NewInt(1e9)) // diff = diff * 1e9 + // dividing by the smaller value gives consistent ppb regardless of input order, and supports >100% deviation. + if x1.Cmp(x2) > 0 { + diff.Div(diff, x2) + } else { + diff.Div(diff, x1) + } + return diff.CmpAbs(big.NewInt(ppb)) > 0 // abs(diff) > ppb +} + +func MergeEpochAndRound(epoch uint32, round uint8) uint64 { + return uint64(epoch)<<8 + uint64(round) +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipcalc/calc_test.go b/core/services/ocr2/plugins/ccip/internal/ccipcalc/calc_test.go new file mode 100644 index 00000000000..83384eca481 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipcalc/calc_test.go @@ -0,0 +1,220 @@ +package ccipcalc + +import ( + "math" + "math/big" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/smartcontractkit/chainlink/v2/core/logger" +) + +func TestMergeEpochAndRound(t *testing.T) { + type args struct { + epoch uint32 + round uint8 + } + tests := []struct { + name string + args args + want uint64 + }{ + { + name: "zero round and epoch", + args: args{epoch: 0, round: 0}, + want: 0, + }, + { + name: "avg case", + args: args{ + epoch: 243, + round: 15, + }, + want: 62223, + }, + { + name: "largest epoch and round", + args: args{ + epoch: math.MaxUint32, + round: math.MaxUint8, + }, + want: 1099511627775, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equalf(t, tt.want, + MergeEpochAndRound(tt.args.epoch, tt.args.round), + "mergeEpochAndRound(%v, %v)", tt.args.epoch, tt.args.round) + }) + } +} + +func TestContiguousReqs(t *testing.T) { + testCases := []struct { + min uint64 + max uint64 + seqNrs []uint64 + exp bool + }{ + {min: 5, max: 10, seqNrs: []uint64{5, 6, 7, 8, 9, 10}, exp: true}, + {min: 5, max: 10, seqNrs: []uint64{5, 7, 8, 9, 10}, exp: false}, + {min: 5, max: 10, seqNrs: []uint64{5, 6, 7, 8, 9, 10, 11}, exp: false}, + {min: 5, max: 10, seqNrs: []uint64{}, exp: false}, + {min: 1, max: 1, seqNrs: []uint64{1}, exp: true}, + {min: 6, max: 10, seqNrs: []uint64{5, 7, 8, 9, 10}, exp: false}, + } + + for _, tc := range testCases { + res := ContiguousReqs(logger.NullLogger, tc.min, tc.max, tc.seqNrs) + assert.Equal(t, tc.exp, res) + } +} + +func TestCalculateUsdPerUnitGas(t *testing.T) { + testCases := []struct { + name string + sourceGasPrice *big.Int + usdPerFeeCoin *big.Int + exp *big.Int + }{ + { + name: "base case", + sourceGasPrice: big.NewInt(2e18), + usdPerFeeCoin: big.NewInt(3e18), + exp: big.NewInt(6e18), + }, + { + name: "small numbers", + sourceGasPrice: big.NewInt(1000), + usdPerFeeCoin: big.NewInt(2000), + exp: big.NewInt(0), + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + res := 
CalculateUsdPerUnitGas(tc.sourceGasPrice, tc.usdPerFeeCoin) + assert.Zero(t, tc.exp.Cmp(res)) + }) + } +} + +func TestBigIntSortedMiddle(t *testing.T) { + tests := []struct { + name string + vals []*big.Int + want *big.Int + }{ + { + name: "base case", + vals: []*big.Int{big.NewInt(1), big.NewInt(2), big.NewInt(4), big.NewInt(5)}, + want: big.NewInt(4), + }, + { + name: "not sorted", + vals: []*big.Int{big.NewInt(100), big.NewInt(50), big.NewInt(30), big.NewInt(110)}, + want: big.NewInt(100), + }, + { + name: "empty slice", + vals: []*big.Int{}, + want: nil, + }, + { + name: "one item", + vals: []*big.Int{big.NewInt(123)}, + want: big.NewInt(123), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equalf(t, tt.want, BigIntSortedMiddle(tt.vals), "BigIntSortedMiddle(%v)", tt.vals) + }) + } +} + +func TestDeviates(t *testing.T) { + type args struct { + x1 *big.Int + x2 *big.Int + ppb int64 + } + tests := []struct { + name string + args args + want bool + }{ + { + name: "base case", + args: args{x1: big.NewInt(1e9), x2: big.NewInt(2e9), ppb: 1}, + want: true, + }, + { + name: "x1 is zero and x1 neq x2", + args: args{x1: big.NewInt(0), x2: big.NewInt(1), ppb: 999}, + want: true, + }, + { + name: "x2 is zero and x1 neq x2", + args: args{x1: big.NewInt(1), x2: big.NewInt(0), ppb: 999}, + want: true, + }, + { + name: "x1 and x2 are both zero", + args: args{x1: big.NewInt(0), x2: big.NewInt(0), ppb: 999}, + want: false, + }, + { + name: "deviates when ppb is 0", + args: args{x1: big.NewInt(0), x2: big.NewInt(1), ppb: 0}, + want: true, + }, + { + name: "does not deviate when x1 eq x2", + args: args{x1: big.NewInt(5), x2: big.NewInt(5), ppb: 1}, + want: false, + }, + { + name: "does not deviate with high ppb when x2 is greater", + args: args{x1: big.NewInt(5), x2: big.NewInt(10), ppb: 2e9}, + want: false, + }, + { + name: "does not deviate with high ppb when x1 is greater", + args: args{x1: big.NewInt(10), x2: big.NewInt(5), ppb: 2e9}, + want: false, + }, + { + name: "deviates with low ppb when x2 is greater", + args: args{x1: big.NewInt(5), x2: big.NewInt(10), ppb: 9e8}, + want: true, + }, + { + name: "deviates with low ppb when x1 is greater", + args: args{x1: big.NewInt(10), x2: big.NewInt(5), ppb: 9e8}, + want: true, + }, + { + name: "near deviation limit but deviates", + args: args{x1: big.NewInt(10), x2: big.NewInt(5), ppb: 1e9 - 1}, + want: true, + }, + { + name: "at deviation limit but does not deviate", + args: args{x1: big.NewInt(10), x2: big.NewInt(5), ppb: 1e9}, + want: false, + }, + { + name: "near deviation limit but does not deviate", + args: args{x1: big.NewInt(10), x2: big.NewInt(5), ppb: 1e9 + 1}, + want: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equalf(t, tt.want, Deviates(tt.args.x1, tt.args.x2, tt.args.ppb), "Deviates(%v, %v, %v)", tt.args.x1, tt.args.x2, tt.args.ppb) + }) + } +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipcommon/shortcuts.go b/core/services/ocr2/plugins/ccip/internal/ccipcommon/shortcuts.go new file mode 100644 index 00000000000..4f5ba6cfaea --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipcommon/shortcuts.go @@ -0,0 +1,140 @@ +package ccipcommon + +import ( + "context" + "encoding/binary" + "encoding/hex" + "fmt" + "sort" + "strings" + "time" + + "github.com/avast/retry-go/v4" + + "golang.org/x/sync/errgroup" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + + 
"github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata" +) + +func GetMessageIDsAsHexString(messages []cciptypes.EVM2EVMMessage) []string { + messageIDs := make([]string, 0, len(messages)) + for _, m := range messages { + messageIDs = append(messageIDs, "0x"+hex.EncodeToString(m.MessageID[:])) + } + return messageIDs +} + +type BackfillArgs struct { + SourceLP, DestLP logpoller.LogPoller + SourceStartBlock, DestStartBlock uint64 +} + +// GetFilteredSortedLaneTokens returns union of tokens supported on this lane, including fee tokens from the provided price registry +// and the bridgeable tokens from offRamp. Bridgeable tokens are only included if they are configured on the pricegetter +// Fee tokens are not filtered as they must always be priced +func GetFilteredSortedLaneTokens(ctx context.Context, offRamp ccipdata.OffRampReader, priceRegistry cciptypes.PriceRegistryReader, priceGetter cciptypes.PriceGetter) (laneTokens []cciptypes.Address, excludedTokens []cciptypes.Address, err error) { + destFeeTokens, destBridgeableTokens, err := GetDestinationTokens(ctx, offRamp, priceRegistry) + if err != nil { + return nil, nil, fmt.Errorf("get tokens with batch limit: %w", err) + } + + destTokensWithPrice, destTokensWithoutPrice, err := priceGetter.FilterConfiguredTokens(ctx, destBridgeableTokens) + if err != nil { + return nil, nil, fmt.Errorf("filter for priced tokens: %w", err) + } + + return flattenedAndSortedTokens(destFeeTokens, destTokensWithPrice), destTokensWithoutPrice, nil +} + +func flattenedAndSortedTokens(slices ...[]cciptypes.Address) (tokens []cciptypes.Address) { + // fee token can overlap with bridgeable tokens, we need to dedup them to arrive at lane token set + tokens = FlattenUniqueSlice(slices...) + + // return the tokens in deterministic order to aid with testing and debugging + sort.Slice(tokens, func(i, j int) bool { + return tokens[i] < tokens[j] + }) + + return tokens +} + +// GetDestinationTokens returns the destination chain fee tokens from the provided price registry +// and the bridgeable tokens from the offramp. +func GetDestinationTokens(ctx context.Context, offRamp ccipdata.OffRampReader, priceRegistry cciptypes.PriceRegistryReader) (fee, bridged []cciptypes.Address, err error) { + eg := new(errgroup.Group) + + var destFeeTokens []cciptypes.Address + var destBridgeableTokens []cciptypes.Address + + eg.Go(func() error { + tokens, err := priceRegistry.GetFeeTokens(ctx) + if err != nil { + return fmt.Errorf("get dest fee tokens: %w", err) + } + destFeeTokens = tokens + return nil + }) + + eg.Go(func() error { + tokens, err := offRamp.GetTokens(ctx) + if err != nil { + return fmt.Errorf("get dest bridgeable tokens: %w", err) + } + destBridgeableTokens = tokens.DestinationTokens + return nil + }) + + if err := eg.Wait(); err != nil { + return nil, nil, err + } + + return destFeeTokens, destBridgeableTokens, nil +} + +// FlattenUniqueSlice returns a flattened slice that contains unique elements by preserving their order. 
+func FlattenUniqueSlice[T comparable](slices ...[]T) []T { + seen := make(map[T]struct{}) + flattened := make([]T, 0) + + for _, sl := range slices { + for _, el := range sl { + if _, exists := seen[el]; !exists { + flattened = append(flattened, el) + seen[el] = struct{}{} + } + } + } + return flattened +} + +func IsTxRevertError(err error) bool { + if err == nil { + return false + } + + // Geth eth_call reverts with "execution reverted" + // Nethermind, Parity, OpenEthereum eth_call reverts with "VM execution error" + // See: https://github.com/ethereum/go-ethereum/issues/21886 + return strings.Contains(err.Error(), "execution reverted") || strings.Contains(err.Error(), "VM execution error") +} + +func SelectorToBytes(chainSelector uint64) [16]byte { + var b [16]byte + binary.BigEndian.PutUint64(b[:], chainSelector) + return b +} + +// RetryUntilSuccess repeatedly calls fn until it returns a nil error. After each failed call there is an exponential +// backoff applied, between initialDelay and maxDelay. +func RetryUntilSuccess[T any](fn func() (T, error), initialDelay time.Duration, maxDelay time.Duration) (T, error) { + return retry.DoWithData( + fn, + retry.Delay(initialDelay), + retry.MaxDelay(maxDelay), + retry.DelayType(retry.BackOffDelay), + retry.UntilSucceeded(), + ) +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipcommon/shortcuts_test.go b/core/services/ocr2/plugins/ccip/internal/ccipcommon/shortcuts_test.go new file mode 100644 index 00000000000..73a3b834956 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipcommon/shortcuts_test.go @@ -0,0 +1,196 @@ +package ccipcommon + +import ( + "fmt" + "math/rand" + "sort" + "strconv" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils" + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc" + ccipdatamocks "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/mocks" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/pricegetter" +) + +func TestGetMessageIDsAsHexString(t *testing.T) { + t.Run("base", func(t *testing.T) { + hashes := make([]cciptypes.Hash, 10) + for i := range hashes { + hashes[i] = cciptypes.Hash(common.HexToHash(strconv.Itoa(rand.Intn(100000)))) + } + + msgs := make([]cciptypes.EVM2EVMMessage, len(hashes)) + for i := range msgs { + msgs[i] = cciptypes.EVM2EVMMessage{MessageID: hashes[i]} + } + + messageIDs := GetMessageIDsAsHexString(msgs) + for i := range messageIDs { + assert.Equal(t, hashes[i].String(), messageIDs[i]) + } + }) + + t.Run("empty", func(t *testing.T) { + messageIDs := GetMessageIDsAsHexString(nil) + assert.Empty(t, messageIDs) + }) +} + +func TestFlattenUniqueSlice(t *testing.T) { + testCases := []struct { + name string + inputSlices [][]int + expectedOutput []int + }{ + {name: "empty", inputSlices: nil, expectedOutput: []int{}}, + {name: "empty 2", inputSlices: [][]int{}, expectedOutput: []int{}}, + {name: "single", inputSlices: [][]int{{1, 2, 3, 3, 3, 4}}, expectedOutput: []int{1, 2, 3, 4}}, + {name: "simple", inputSlices: [][]int{{1, 2, 3}, {2, 3, 4}}, expectedOutput: []int{1, 2, 3, 4}}, + { + name: "more complex case", + inputSlices: [][]int{{1, 3}, {2, 4, 3}, {5, 2, 
-1, 7, 10}}, + expectedOutput: []int{1, 3, 2, 4, 5, -1, 7, 10}, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + res := FlattenUniqueSlice(tc.inputSlices...) + assert.Equal(t, tc.expectedOutput, res) + }) + } +} + +func TestGetFilteredChainTokens(t *testing.T) { + const numTokens = 6 + var tokens []cciptypes.Address + for i := 0; i < numTokens; i++ { + tokens = append(tokens, ccipcalc.EvmAddrToGeneric(utils.RandomAddress())) + } + + testCases := []struct { + name string + feeTokens []cciptypes.Address + destTokens []cciptypes.Address + expectedChainTokens []cciptypes.Address + expectedFilteredTokens []cciptypes.Address + }{ + { + name: "empty", + feeTokens: []cciptypes.Address{}, + destTokens: []cciptypes.Address{}, + expectedChainTokens: []cciptypes.Address{}, + expectedFilteredTokens: []cciptypes.Address{}, + }, + { + name: "unique tokens", + feeTokens: []cciptypes.Address{tokens[0]}, + destTokens: []cciptypes.Address{tokens[1], tokens[2], tokens[3]}, + expectedChainTokens: []cciptypes.Address{tokens[0], tokens[1], tokens[2], tokens[3]}, + expectedFilteredTokens: []cciptypes.Address{tokens[4], tokens[5]}, + }, + { + name: "all tokens", + feeTokens: []cciptypes.Address{tokens[0]}, + destTokens: []cciptypes.Address{tokens[1], tokens[2], tokens[3], tokens[4], tokens[5]}, + expectedChainTokens: []cciptypes.Address{tokens[0], tokens[1], tokens[2], tokens[3], tokens[4], tokens[5]}, + expectedFilteredTokens: []cciptypes.Address{}, + }, + { + name: "overlapping tokens", + feeTokens: []cciptypes.Address{tokens[0]}, + destTokens: []cciptypes.Address{tokens[1], tokens[2], tokens[5], tokens[3], tokens[0], tokens[2], tokens[3], tokens[4], tokens[5], tokens[5]}, + expectedChainTokens: []cciptypes.Address{tokens[0], tokens[1], tokens[2], tokens[3], tokens[4], tokens[5]}, + expectedFilteredTokens: []cciptypes.Address{}, + }, + { + name: "unconfigured tokens", + feeTokens: []cciptypes.Address{tokens[0]}, + destTokens: []cciptypes.Address{tokens[0], tokens[1], tokens[2], tokens[3], tokens[0], tokens[2], tokens[3], tokens[4], tokens[5], tokens[5]}, + expectedChainTokens: []cciptypes.Address{tokens[0], tokens[1], tokens[2], tokens[3], tokens[4]}, + expectedFilteredTokens: []cciptypes.Address{tokens[5]}, + }, + } + + ctx := testutils.Context(t) + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + priceRegistry := ccipdatamocks.NewPriceRegistryReader(t) + priceRegistry.On("GetFeeTokens", ctx).Return(tc.feeTokens, nil).Once() + + priceGet := pricegetter.NewMockPriceGetter(t) + priceGet.On("FilterConfiguredTokens", mock.Anything, mock.Anything).Return(tc.expectedChainTokens, tc.expectedFilteredTokens, nil) + + offRamp := ccipdatamocks.NewOffRampReader(t) + offRamp.On("GetTokens", ctx).Return(cciptypes.OffRampTokens{DestinationTokens: tc.destTokens}, nil).Once() + + chainTokens, filteredTokens, err := GetFilteredSortedLaneTokens(ctx, offRamp, priceRegistry, priceGet) + assert.NoError(t, err) + + sort.Slice(tc.expectedChainTokens, func(i, j int) bool { + return tc.expectedChainTokens[i] < tc.expectedChainTokens[j] + }) + assert.Equal(t, tc.expectedChainTokens, chainTokens) + assert.Equal(t, tc.expectedFilteredTokens, filteredTokens) + }) + } +} + +func TestIsTxRevertError(t *testing.T) { + testCases := []struct { + name string + inputError error + expectedOutput bool + }{ + {name: "empty", inputError: nil, expectedOutput: false}, + {name: "non-revert error", inputError: fmt.Errorf("nothing"), expectedOutput: false}, + {name: "geth error", inputError: 
fmt.Errorf("execution reverted"), expectedOutput: true}, + {name: "nethermind error", inputError: fmt.Errorf("VM execution error"), expectedOutput: true}, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + assert.Equal(t, tc.expectedOutput, IsTxRevertError(tc.inputError)) + }) + } +} + +func TestRetryUntilSuccess(t *testing.T) { + // Set delays to 0 for tests + initialDelay := 0 * time.Nanosecond + maxDelay := 0 * time.Nanosecond + + numAttempts := 5 + numCalls := 0 + // A function that returns success only after numAttempts calls. RetryUntilSuccess will repeatedly call this + // function until it succeeds. + fn := func() (int, error) { + numCalls++ + numAttempts-- + if numAttempts > 0 { + return numCalls, fmt.Errorf("") + } + return numCalls, nil + } + + // Assert that RetryUntilSuccess returns the expected value when fn returns success on the 5th attempt + numCalls, err := RetryUntilSuccess(fn, initialDelay, maxDelay) + assert.Nil(t, err) + assert.Equal(t, 5, numCalls) + + // Assert that RetryUntilSuccess returns the expected value when fn returns success on the 8th attempt + numAttempts = 8 + numCalls = 0 + numCalls, err = RetryUntilSuccess(fn, initialDelay, maxDelay) + assert.Nil(t, err) + assert.Equal(t, 8, numCalls) +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/batchreader/mocks/token_pool_batched_reader_mock.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/batchreader/mocks/token_pool_batched_reader_mock.go new file mode 100644 index 00000000000..551cd7c6a68 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/batchreader/mocks/token_pool_batched_reader_mock.go @@ -0,0 +1,142 @@ +// Code generated by mockery v2.43.2. DO NOT EDIT. + +package mocks + +import ( + context "context" + + ccip "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + + mock "github.com/stretchr/testify/mock" +) + +// TokenPoolBatchedReader is an autogenerated mock type for the TokenPoolBatchedReader type +type TokenPoolBatchedReader struct { + mock.Mock +} + +type TokenPoolBatchedReader_Expecter struct { + mock *mock.Mock +} + +func (_m *TokenPoolBatchedReader) EXPECT() *TokenPoolBatchedReader_Expecter { + return &TokenPoolBatchedReader_Expecter{mock: &_m.Mock} +} + +// Close provides a mock function with given fields: +func (_m *TokenPoolBatchedReader) Close() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Close") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// TokenPoolBatchedReader_Close_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Close' +type TokenPoolBatchedReader_Close_Call struct { + *mock.Call +} + +// Close is a helper method to define mock.On call +func (_e *TokenPoolBatchedReader_Expecter) Close() *TokenPoolBatchedReader_Close_Call { + return &TokenPoolBatchedReader_Close_Call{Call: _e.mock.On("Close")} +} + +func (_c *TokenPoolBatchedReader_Close_Call) Run(run func()) *TokenPoolBatchedReader_Close_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *TokenPoolBatchedReader_Close_Call) Return(_a0 error) *TokenPoolBatchedReader_Close_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *TokenPoolBatchedReader_Close_Call) RunAndReturn(run func() error) *TokenPoolBatchedReader_Close_Call { + _c.Call.Return(run) + return _c +} + +// GetInboundTokenPoolRateLimits provides a mock function with given fields: 
ctx, tokenPoolReaders +func (_m *TokenPoolBatchedReader) GetInboundTokenPoolRateLimits(ctx context.Context, tokenPoolReaders []ccip.Address) ([]ccip.TokenBucketRateLimit, error) { + ret := _m.Called(ctx, tokenPoolReaders) + + if len(ret) == 0 { + panic("no return value specified for GetInboundTokenPoolRateLimits") + } + + var r0 []ccip.TokenBucketRateLimit + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, []ccip.Address) ([]ccip.TokenBucketRateLimit, error)); ok { + return rf(ctx, tokenPoolReaders) + } + if rf, ok := ret.Get(0).(func(context.Context, []ccip.Address) []ccip.TokenBucketRateLimit); ok { + r0 = rf(ctx, tokenPoolReaders) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]ccip.TokenBucketRateLimit) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, []ccip.Address) error); ok { + r1 = rf(ctx, tokenPoolReaders) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// TokenPoolBatchedReader_GetInboundTokenPoolRateLimits_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetInboundTokenPoolRateLimits' +type TokenPoolBatchedReader_GetInboundTokenPoolRateLimits_Call struct { + *mock.Call +} + +// GetInboundTokenPoolRateLimits is a helper method to define mock.On call +// - ctx context.Context +// - tokenPoolReaders []ccip.Address +func (_e *TokenPoolBatchedReader_Expecter) GetInboundTokenPoolRateLimits(ctx interface{}, tokenPoolReaders interface{}) *TokenPoolBatchedReader_GetInboundTokenPoolRateLimits_Call { + return &TokenPoolBatchedReader_GetInboundTokenPoolRateLimits_Call{Call: _e.mock.On("GetInboundTokenPoolRateLimits", ctx, tokenPoolReaders)} +} + +func (_c *TokenPoolBatchedReader_GetInboundTokenPoolRateLimits_Call) Run(run func(ctx context.Context, tokenPoolReaders []ccip.Address)) *TokenPoolBatchedReader_GetInboundTokenPoolRateLimits_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].([]ccip.Address)) + }) + return _c +} + +func (_c *TokenPoolBatchedReader_GetInboundTokenPoolRateLimits_Call) Return(_a0 []ccip.TokenBucketRateLimit, _a1 error) *TokenPoolBatchedReader_GetInboundTokenPoolRateLimits_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *TokenPoolBatchedReader_GetInboundTokenPoolRateLimits_Call) RunAndReturn(run func(context.Context, []ccip.Address) ([]ccip.TokenBucketRateLimit, error)) *TokenPoolBatchedReader_GetInboundTokenPoolRateLimits_Call { + _c.Call.Return(run) + return _c +} + +// NewTokenPoolBatchedReader creates a new instance of TokenPoolBatchedReader. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewTokenPoolBatchedReader(t interface { + mock.TestingT + Cleanup(func()) +}) *TokenPoolBatchedReader { + mock := &TokenPoolBatchedReader{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/batchreader/token_pool_batch_reader.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/batchreader/token_pool_batch_reader.go new file mode 100644 index 00000000000..57e8df1bde3 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/batchreader/token_pool_batch_reader.go @@ -0,0 +1,192 @@ +package batchreader + +import ( + "context" + "errors" + "fmt" + "sync" + + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/rpclib" + + "github.com/ethereum/go-ethereum/common" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + + type_and_version "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/type_and_version_interface_wrapper" + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers" + ccipconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcommon" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_4_0" +) + +var ( + typeAndVersionABI = abihelpers.MustParseABI(type_and_version.TypeAndVersionInterfaceABI) +) + +type EVMTokenPoolBatchedReader struct { + lggr logger.Logger + remoteChainSelector uint64 + offRampAddress common.Address + evmBatchCaller rpclib.EvmBatchCaller + + tokenPoolReaders map[cciptypes.Address]ccipdata.TokenPoolReader + tokenPoolReaderMu sync.RWMutex +} + +type TokenPoolBatchedReader interface { + cciptypes.TokenPoolBatchedReader +} + +var _ TokenPoolBatchedReader = (*EVMTokenPoolBatchedReader)(nil) + +func NewEVMTokenPoolBatchedReader(lggr logger.Logger, remoteChainSelector uint64, offRampAddress cciptypes.Address, evmBatchCaller rpclib.EvmBatchCaller) (*EVMTokenPoolBatchedReader, error) { + offRampAddrEvm, err := ccipcalc.GenericAddrToEvm(offRampAddress) + if err != nil { + return nil, err + } + + return &EVMTokenPoolBatchedReader{ + lggr: lggr, + remoteChainSelector: remoteChainSelector, + offRampAddress: offRampAddrEvm, + evmBatchCaller: evmBatchCaller, + tokenPoolReaders: make(map[cciptypes.Address]ccipdata.TokenPoolReader), + }, nil +} + +func (br *EVMTokenPoolBatchedReader) GetInboundTokenPoolRateLimits(ctx context.Context, tokenPools []cciptypes.Address) ([]cciptypes.TokenBucketRateLimit, error) { + if len(tokenPools) == 0 { + return []cciptypes.TokenBucketRateLimit{}, nil + } + + err := br.loadTokenPoolReaders(ctx, tokenPools) + if err != nil { + return nil, err + } + + tokenPoolReaders := make([]ccipdata.TokenPoolReader, 0, len(tokenPools)) + for _, poolAddress := range tokenPools { + br.tokenPoolReaderMu.RLock() + tokenPoolReader, exists := br.tokenPoolReaders[poolAddress] + br.tokenPoolReaderMu.RUnlock() + if !exists { + return nil, fmt.Errorf("token pool %s not found", poolAddress) + } + tokenPoolReaders = append(tokenPoolReaders, 
tokenPoolReader) + } + + evmCalls := make([]rpclib.EvmCall, 0, len(tokenPoolReaders)) + for _, poolReader := range tokenPoolReaders { + switch v := poolReader.(type) { + case *v1_2_0.TokenPool: + evmCalls = append(evmCalls, v1_2_0.GetInboundTokenPoolRateLimitCall(v.Address(), v.OffRampAddress)) + case *v1_4_0.TokenPool: + evmCalls = append(evmCalls, v1_4_0.GetInboundTokenPoolRateLimitCall(v.Address(), v.RemoteChainSelector)) + default: + return nil, fmt.Errorf("unsupported token pool version %T", v) + } + } + + results, err := br.evmBatchCaller.BatchCall(ctx, 0, evmCalls) + if err != nil { + return nil, fmt.Errorf("batch call limit: %w", err) + } + + resultsParsed, err := rpclib.ParseOutputs[cciptypes.TokenBucketRateLimit](results, func(d rpclib.DataAndErr) (cciptypes.TokenBucketRateLimit, error) { + return rpclib.ParseOutput[cciptypes.TokenBucketRateLimit](d, 0) + }) + if err != nil { + return nil, fmt.Errorf("parse outputs: %w", err) + } + return resultsParsed, nil +} + +// loadTokenPoolReaders loads the token pools into the factory's cache +func (br *EVMTokenPoolBatchedReader) loadTokenPoolReaders(ctx context.Context, tokenPoolAddresses []cciptypes.Address) error { + var missingTokens []common.Address + + br.tokenPoolReaderMu.RLock() + for _, poolAddress := range tokenPoolAddresses { + if _, exists := br.tokenPoolReaders[poolAddress]; !exists { + evmPoolAddr, err := ccipcalc.GenericAddrToEvm(poolAddress) + if err != nil { + return err + } + missingTokens = append(missingTokens, evmPoolAddr) + } + } + br.tokenPoolReaderMu.RUnlock() + + // Only continue if there are missing tokens + if len(missingTokens) == 0 { + return nil + } + + typeAndVersions, err := getBatchedTypeAndVersion(ctx, br.evmBatchCaller, missingTokens) + if err != nil { + return err + } + + br.tokenPoolReaderMu.Lock() + defer br.tokenPoolReaderMu.Unlock() + for i, tokenPoolAddress := range missingTokens { + typeAndVersion := typeAndVersions[i] + poolType, version, err := ccipconfig.ParseTypeAndVersion(typeAndVersion) + if err != nil { + return err + } + switch version { + case ccipdata.V1_0_0, ccipdata.V1_1_0, ccipdata.V1_2_0: + br.tokenPoolReaders[ccipcalc.EvmAddrToGeneric(tokenPoolAddress)] = v1_2_0.NewTokenPool(poolType, tokenPoolAddress, br.offRampAddress) + case ccipdata.V1_4_0: + br.tokenPoolReaders[ccipcalc.EvmAddrToGeneric(tokenPoolAddress)] = v1_4_0.NewTokenPool(poolType, tokenPoolAddress, br.remoteChainSelector) + default: + return fmt.Errorf("unsupported token pool version %v", version) + } + } + return nil +} + +func getBatchedTypeAndVersion(ctx context.Context, evmBatchCaller rpclib.EvmBatchCaller, poolAddresses []common.Address) ([]string, error) { + var evmCalls []rpclib.EvmCall + + for _, poolAddress := range poolAddresses { + // Add the typeAndVersion call to the batch + evmCalls = append(evmCalls, rpclib.NewEvmCall( + typeAndVersionABI, + "typeAndVersion", + poolAddress, + )) + } + + results, err := evmBatchCaller.BatchCall(ctx, 0, evmCalls) + if err != nil { + return nil, fmt.Errorf("batch call limit: %w", err) + } + + result, err := rpclib.ParseOutputs[string](results, func(d rpclib.DataAndErr) (string, error) { + tAndV, err1 := rpclib.ParseOutput[string](d, 0) + if err1 != nil { + // typeAndVersion method do not exist for 1.0 pools. We are going to get an ErrEmptyOutput in that case. 
+ // Some chains, like the simulated chains, will simply revert with "execution reverted" + if errors.Is(err1, rpclib.ErrEmptyOutput) || ccipcommon.IsTxRevertError(err1) { + return "LegacyPool " + ccipdata.V1_0_0, nil + } + return "", err1 + } + + return tAndV, nil + }) + if err != nil { + return nil, fmt.Errorf("parse outputs: %w", err) + } + return result, nil +} + +func (br *EVMTokenPoolBatchedReader) Close() error { + return nil +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/batchreader/token_pool_batch_reader_test.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/batchreader/token_pool_batch_reader_test.go new file mode 100644 index 00000000000..c67c3c15276 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/batchreader/token_pool_batch_reader_test.go @@ -0,0 +1,86 @@ +package batchreader + +import ( + "context" + "fmt" + "math/big" + "testing" + + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/rpclib" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/rpclib/rpclibmocks" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils" + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata" +) + +func TestTokenPoolFactory(t *testing.T) { + lggr := logger.TestLogger(t) + offRamp := utils.RandomAddress() + ctx := context.Background() + remoteChainSelector := uint64(2000) + batchCallerMock := rpclibmocks.NewEvmBatchCaller(t) + + tokenPoolBatchReader, err := NewEVMTokenPoolBatchedReader(lggr, remoteChainSelector, ccipcalc.EvmAddrToGeneric(offRamp), batchCallerMock) + assert.NoError(t, err) + + poolTypes := []string{"BurnMint", "LockRelease"} + + rateLimits := cciptypes.TokenBucketRateLimit{ + Tokens: big.NewInt(333333), + LastUpdated: 33, + IsEnabled: true, + Capacity: big.NewInt(666666), + Rate: big.NewInt(444444), + } + + for _, versionStr := range []string{ccipdata.V1_0_0, ccipdata.V1_1_0, ccipdata.V1_2_0, ccipdata.V1_4_0} { + gotRateLimits, err := tokenPoolBatchReader.GetInboundTokenPoolRateLimits(ctx, []cciptypes.Address{}) + require.NoError(t, err) + assert.Empty(t, gotRateLimits) + + var batchCallResult []rpclib.DataAndErr + for _, poolType := range poolTypes { + if versionStr == ccipdata.V1_0_0 { + // simulating the behaviour for 1.0.0 pools where typeAndVersion method does not exist + batchCallResult = append(batchCallResult, rpclib.DataAndErr{ + Err: fmt.Errorf("unpack result: %w", rpclib.ErrEmptyOutput), + }) + } else { + batchCallResult = append(batchCallResult, rpclib.DataAndErr{ + Outputs: []any{poolType + " " + versionStr}, + Err: nil, + }) + } + } + + batchCallerMock.On("BatchCall", ctx, uint64(0), mock.Anything).Return(batchCallResult, nil).Once() + batchCallerMock.On("BatchCall", ctx, uint64(0), mock.Anything).Return([]rpclib.DataAndErr{{ + Outputs: []any{rateLimits}, + Err: nil, + }, { + Outputs: []any{rateLimits}, + Err: nil, + }}, nil).Once() + + var poolAddresses []cciptypes.Address + + for i := 0; i < len(poolTypes); i++ { + poolAddresses = append(poolAddresses, ccipcalc.EvmAddrToGeneric(utils.RandomAddress())) + } + + gotRateLimits, err = 
tokenPoolBatchReader.GetInboundTokenPoolRateLimits(ctx, poolAddresses) + require.NoError(t, err) + assert.Len(t, gotRateLimits, len(poolTypes)) + + for _, gotRateLimit := range gotRateLimits { + assert.Equal(t, rateLimits, gotRateLimit) + } + } +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/ccipdataprovider/mocks/price_registry_mock.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/ccipdataprovider/mocks/price_registry_mock.go new file mode 100644 index 00000000000..59588a25d17 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/ccipdataprovider/mocks/price_registry_mock.go @@ -0,0 +1,97 @@ +// Code generated by mockery v2.43.2. DO NOT EDIT. + +package mocks + +import ( + ccip "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + + context "context" + + mock "github.com/stretchr/testify/mock" +) + +// PriceRegistry is an autogenerated mock type for the PriceRegistry type +type PriceRegistry struct { + mock.Mock +} + +type PriceRegistry_Expecter struct { + mock *mock.Mock +} + +func (_m *PriceRegistry) EXPECT() *PriceRegistry_Expecter { + return &PriceRegistry_Expecter{mock: &_m.Mock} +} + +// NewPriceRegistryReader provides a mock function with given fields: ctx, addr +func (_m *PriceRegistry) NewPriceRegistryReader(ctx context.Context, addr ccip.Address) (ccip.PriceRegistryReader, error) { + ret := _m.Called(ctx, addr) + + if len(ret) == 0 { + panic("no return value specified for NewPriceRegistryReader") + } + + var r0 ccip.PriceRegistryReader + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, ccip.Address) (ccip.PriceRegistryReader, error)); ok { + return rf(ctx, addr) + } + if rf, ok := ret.Get(0).(func(context.Context, ccip.Address) ccip.PriceRegistryReader); ok { + r0 = rf(ctx, addr) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(ccip.PriceRegistryReader) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, ccip.Address) error); ok { + r1 = rf(ctx, addr) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// PriceRegistry_NewPriceRegistryReader_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'NewPriceRegistryReader' +type PriceRegistry_NewPriceRegistryReader_Call struct { + *mock.Call +} + +// NewPriceRegistryReader is a helper method to define mock.On call +// - ctx context.Context +// - addr ccip.Address +func (_e *PriceRegistry_Expecter) NewPriceRegistryReader(ctx interface{}, addr interface{}) *PriceRegistry_NewPriceRegistryReader_Call { + return &PriceRegistry_NewPriceRegistryReader_Call{Call: _e.mock.On("NewPriceRegistryReader", ctx, addr)} +} + +func (_c *PriceRegistry_NewPriceRegistryReader_Call) Run(run func(ctx context.Context, addr ccip.Address)) *PriceRegistry_NewPriceRegistryReader_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(ccip.Address)) + }) + return _c +} + +func (_c *PriceRegistry_NewPriceRegistryReader_Call) Return(_a0 ccip.PriceRegistryReader, _a1 error) *PriceRegistry_NewPriceRegistryReader_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *PriceRegistry_NewPriceRegistryReader_Call) RunAndReturn(run func(context.Context, ccip.Address) (ccip.PriceRegistryReader, error)) *PriceRegistry_NewPriceRegistryReader_Call { + _c.Call.Return(run) + return _c +} + +// NewPriceRegistry creates a new instance of PriceRegistry. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewPriceRegistry(t interface { + mock.TestingT + Cleanup(func()) +}) *PriceRegistry { + mock := &PriceRegistry{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/ccipdataprovider/provider.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/ccipdataprovider/provider.go new file mode 100644 index 00000000000..d1666d548ae --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/ccipdataprovider/provider.go @@ -0,0 +1,40 @@ +package ccipdataprovider + +import ( + "context" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/factory" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/observability" +) + +type PriceRegistry interface { + NewPriceRegistryReader(ctx context.Context, addr cciptypes.Address) (cciptypes.PriceRegistryReader, error) +} + +type EvmPriceRegistry struct { + lp logpoller.LogPoller + ec client.Client + lggr logger.Logger + pluginLabel string +} + +func NewEvmPriceRegistry(lp logpoller.LogPoller, ec client.Client, lggr logger.Logger, pluginLabel string) *EvmPriceRegistry { + return &EvmPriceRegistry{ + lp: lp, + ec: ec, + lggr: lggr, + pluginLabel: pluginLabel, + } +} + +func (p *EvmPriceRegistry) NewPriceRegistryReader(ctx context.Context, addr cciptypes.Address) (cciptypes.PriceRegistryReader, error) { + destPriceRegistryReader, err := factory.NewPriceRegistryReader(ctx, p.lggr, factory.NewEvmVersionFinder(), addr, p.lp, p.ec) + if err != nil { + return nil, err + } + return observability.NewPriceRegistryReader(destPriceRegistryReader, p.ec.ConfiguredChainID().Int64(), p.pluginLabel), nil +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/commit_store_reader.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/commit_store_reader.go new file mode 100644 index 00000000000..2b144b765ed --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/commit_store_reader.go @@ -0,0 +1,81 @@ +package ccipdata + +import ( + "context" + "math/big" + "time" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/commit_store" + ccipconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config" +) + +// Common to all versions +type CommitOnchainConfig commit_store.CommitStoreDynamicConfig + +func (d CommitOnchainConfig) AbiString() string { + return ` + [ + { + "components": [ + {"name": "priceRegistry", "type": "address"} + ], + "type": "tuple" + } + ]` +} + +func (d CommitOnchainConfig) Validate() error { + if d.PriceRegistry == (common.Address{}) { + return errors.New("must set Price Registry address") + } + return nil +} + +func NewCommitOffchainConfig( + gasPriceDeviationPPB uint32, + gasPriceHeartBeat time.Duration, + tokenPriceDeviationPPB uint32, + tokenPriceHeartBeat time.Duration, + inflightCacheExpiry 
time.Duration, + priceReportingDisabled bool, +) cciptypes.CommitOffchainConfig { + return cciptypes.CommitOffchainConfig{ + GasPriceDeviationPPB: gasPriceDeviationPPB, + GasPriceHeartBeat: gasPriceHeartBeat, + TokenPriceDeviationPPB: tokenPriceDeviationPPB, + TokenPriceHeartBeat: tokenPriceHeartBeat, + InflightCacheExpiry: inflightCacheExpiry, + PriceReportingDisabled: priceReportingDisabled, + } +} + +type CommitStoreReader interface { + cciptypes.CommitStoreReader + SetGasEstimator(ctx context.Context, gpe gas.EvmFeeEstimator) error + SetSourceMaxGasPrice(ctx context.Context, sourceMaxGasPrice *big.Int) error +} + +// FetchCommitStoreStaticConfig provides access to a commitStore's static config, which is required to access the source chain ID. +func FetchCommitStoreStaticConfig(address common.Address, ec client.Client) (commit_store.CommitStoreStaticConfig, error) { + commitStore, err := loadCommitStore(address, ec) + if err != nil { + return commit_store.CommitStoreStaticConfig{}, err + } + return commitStore.GetStaticConfig(&bind.CallOpts{}) +} + +func loadCommitStore(commitStoreAddress common.Address, client client.Client) (commit_store.CommitStoreInterface, error) { + _, err := ccipconfig.VerifyTypeAndVersion(commitStoreAddress, client, ccipconfig.CommitStore) + if err != nil { + return nil, errors.Wrap(err, "Invalid commitStore contract") + } + return commit_store.NewCommitStore(commitStoreAddress, client) +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/commit_store_reader_test.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/commit_store_reader_test.go new file mode 100644 index 00000000000..4e134b1f175 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/commit_store_reader_test.go @@ -0,0 +1,423 @@ +package ccipdata_test + +import ( + "context" + "math/big" + "reflect" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/chainlink-common/pkg/config" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/assets" + evmclientmocks "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client/mocks" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas" + gasmocks "github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas/mocks" + rollupMocks "github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas/rollups/mocks" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/headtracker" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" + lpmocks "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller/mocks" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/commit_store_helper_1_0_0" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/commit_store_helper_1_2_0" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/mock_arm_contract" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/price_registry_1_0_0" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/price_registry_1_2_0" + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest" + 
"github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers" + ccipconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/factory" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0" +) + +func TestCommitOffchainConfig_Encoding(t *testing.T) { + tests := map[string]struct { + want v1_2_0.JSONCommitOffchainConfig + expectErr bool + }{ + "encodes and decodes config with all fields set": { + want: v1_2_0.JSONCommitOffchainConfig{ + SourceFinalityDepth: 3, + DestFinalityDepth: 3, + GasPriceHeartBeat: *config.MustNewDuration(1 * time.Hour), + DAGasPriceDeviationPPB: 5e7, + ExecGasPriceDeviationPPB: 5e7, + TokenPriceHeartBeat: *config.MustNewDuration(1 * time.Hour), + TokenPriceDeviationPPB: 5e7, + InflightCacheExpiry: *config.MustNewDuration(23456 * time.Second), + }, + }, + "fails decoding when all fields present but with 0 values": { + want: v1_2_0.JSONCommitOffchainConfig{ + SourceFinalityDepth: 0, + DestFinalityDepth: 0, + GasPriceHeartBeat: *config.MustNewDuration(0), + DAGasPriceDeviationPPB: 0, + ExecGasPriceDeviationPPB: 0, + TokenPriceHeartBeat: *config.MustNewDuration(0), + TokenPriceDeviationPPB: 0, + InflightCacheExpiry: *config.MustNewDuration(0), + }, + expectErr: true, + }, + "fails decoding when all fields are missing": { + want: v1_2_0.JSONCommitOffchainConfig{}, + expectErr: true, + }, + "fails decoding when some fields are missing": { + want: v1_2_0.JSONCommitOffchainConfig{ + SourceFinalityDepth: 3, + GasPriceHeartBeat: *config.MustNewDuration(1 * time.Hour), + DAGasPriceDeviationPPB: 5e7, + ExecGasPriceDeviationPPB: 5e7, + TokenPriceHeartBeat: *config.MustNewDuration(1 * time.Hour), + TokenPriceDeviationPPB: 5e7, + }, + expectErr: true, + }, + } + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + encode, err := ccipconfig.EncodeOffchainConfig(tc.want) + require.NoError(t, err) + got, err := ccipconfig.DecodeOffchainConfig[v1_2_0.JSONCommitOffchainConfig](encode) + + if tc.expectErr { + require.ErrorContains(t, err, "must set") + } else { + require.NoError(t, err) + require.Equal(t, tc.want, got) + } + }) + } +} + +func TestCommitOnchainConfig(t *testing.T) { + tests := []struct { + name string + want ccipdata.CommitOnchainConfig + expectErr bool + }{ + { + name: "encodes and decodes config with all fields set", + want: ccipdata.CommitOnchainConfig{ + PriceRegistry: utils.RandomAddress(), + }, + expectErr: false, + }, + { + name: "encodes and fails decoding config with missing fields", + want: ccipdata.CommitOnchainConfig{}, + expectErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + encoded, err := abihelpers.EncodeAbiStruct(tt.want) + require.NoError(t, err) + + decoded, err := abihelpers.DecodeAbiStruct[ccipdata.CommitOnchainConfig](encoded) + if tt.expectErr { + require.ErrorContains(t, err, "must set") + } else { + require.NoError(t, err) + require.Equal(t, tt.want, decoded) + } + }) + } +} + +func TestCommitStoreReaders(t *testing.T) { + user, ec := newSim(t) + ctx := 
testutils.Context(t) + lggr := logger.TestLogger(t) + lpOpts := logpoller.Opts{ + PollPeriod: 100 * time.Millisecond, + FinalityDepth: 2, + BackfillBatchSize: 3, + RpcBatchSize: 2, + KeepFinalizedBlocksDepth: 1000, + } + headTracker := headtracker.NewSimulatedHeadTracker(ec, lpOpts.UseFinalityTag, lpOpts.FinalityDepth) + if lpOpts.PollPeriod == 0 { + lpOpts.PollPeriod = 1 * time.Hour + } + lp := logpoller.NewLogPoller(logpoller.NewORM(testutils.SimulatedChainID, pgtest.NewSqlxDB(t), lggr), ec, lggr, headTracker, lpOpts) + + // Deploy 2 commit store versions + onramp1 := utils.RandomAddress() + onramp2 := utils.RandomAddress() + // Report + rep := cciptypes.CommitStoreReport{ + TokenPrices: []cciptypes.TokenPrice{{Token: ccipcalc.EvmAddrToGeneric(utils.RandomAddress()), Value: big.NewInt(1)}}, + GasPrices: []cciptypes.GasPrice{{DestChainSelector: 1, Value: big.NewInt(1)}}, + Interval: cciptypes.CommitStoreInterval{Min: 1, Max: 10}, + MerkleRoot: common.HexToHash("0x1"), + } + er := big.NewInt(1) + armAddr, _, arm, err := mock_arm_contract.DeployMockARMContract(user, ec) + require.NoError(t, err) + addr, _, ch, err := commit_store_helper_1_0_0.DeployCommitStoreHelper(user, ec, commit_store_helper_1_0_0.CommitStoreStaticConfig{ + ChainSelector: testutils.SimulatedChainID.Uint64(), + SourceChainSelector: testutils.SimulatedChainID.Uint64(), + OnRamp: onramp1, + ArmProxy: armAddr, + }) + require.NoError(t, err) + addr2, _, ch2, err := commit_store_helper_1_2_0.DeployCommitStoreHelper(user, ec, commit_store_helper_1_2_0.CommitStoreStaticConfig{ + ChainSelector: testutils.SimulatedChainID.Uint64(), + SourceChainSelector: testutils.SimulatedChainID.Uint64(), + OnRamp: onramp2, + ArmProxy: armAddr, + }) + require.NoError(t, err) + commitAndGetBlockTs(ec) // Deploy these + pr, _, _, err := price_registry_1_0_0.DeployPriceRegistry(user, ec, []common.Address{addr}, nil, 1e6) + require.NoError(t, err) + pr2, _, _, err := price_registry_1_2_0.DeployPriceRegistry(user, ec, []common.Address{addr2}, nil, 1e6) + require.NoError(t, err) + commitAndGetBlockTs(ec) // Deploy these + ge := new(gasmocks.EvmFeeEstimator) + lm := new(rollupMocks.L1Oracle) + ge.On("L1Oracle").Return(lm) + + maxGasPrice := big.NewInt(1e8) + c10r, err := factory.NewCommitStoreReader(lggr, factory.NewEvmVersionFinder(), ccipcalc.EvmAddrToGeneric(addr), ec, lp) // ge, maxGasPrice + require.NoError(t, err) + err = c10r.SetGasEstimator(ctx, ge) + require.NoError(t, err) + err = c10r.SetSourceMaxGasPrice(ctx, maxGasPrice) + require.NoError(t, err) + assert.Equal(t, reflect.TypeOf(c10r).String(), reflect.TypeOf(&v1_0_0.CommitStore{}).String()) + c12r, err := factory.NewCommitStoreReader(lggr, factory.NewEvmVersionFinder(), ccipcalc.EvmAddrToGeneric(addr2), ec, lp) + require.NoError(t, err) + err = c12r.SetGasEstimator(ctx, ge) + require.NoError(t, err) + err = c12r.SetSourceMaxGasPrice(ctx, maxGasPrice) + require.NoError(t, err) + assert.Equal(t, reflect.TypeOf(c12r).String(), reflect.TypeOf(&v1_2_0.CommitStore{}).String()) + + // Apply config + signers := []common.Address{utils.RandomAddress(), utils.RandomAddress(), utils.RandomAddress(), utils.RandomAddress()} + transmitters := []common.Address{utils.RandomAddress(), utils.RandomAddress(), utils.RandomAddress(), utils.RandomAddress()} + onchainConfig, err := abihelpers.EncodeAbiStruct[ccipdata.CommitOnchainConfig](ccipdata.CommitOnchainConfig{ + PriceRegistry: pr, + }) + require.NoError(t, err) + + sourceFinalityDepth := uint32(1) + destFinalityDepth := uint32(2) + commonOffchain := 
cciptypes.CommitOffchainConfig{ + GasPriceDeviationPPB: 1e6, + GasPriceHeartBeat: 1 * time.Hour, + TokenPriceDeviationPPB: 1e6, + TokenPriceHeartBeat: 1 * time.Hour, + InflightCacheExpiry: 3 * time.Hour, + PriceReportingDisabled: false, + } + offchainConfig, err := ccipconfig.EncodeOffchainConfig[v1_0_0.CommitOffchainConfig](v1_0_0.CommitOffchainConfig{ + SourceFinalityDepth: sourceFinalityDepth, + DestFinalityDepth: destFinalityDepth, + FeeUpdateHeartBeat: *config.MustNewDuration(commonOffchain.GasPriceHeartBeat), + FeeUpdateDeviationPPB: commonOffchain.GasPriceDeviationPPB, + InflightCacheExpiry: *config.MustNewDuration(commonOffchain.InflightCacheExpiry), + }) + require.NoError(t, err) + _, err = ch.SetOCR2Config(user, signers, transmitters, 1, onchainConfig, 1, []byte{}) + require.NoError(t, err) + onchainConfig2, err := abihelpers.EncodeAbiStruct[ccipdata.CommitOnchainConfig](ccipdata.CommitOnchainConfig{ + PriceRegistry: pr2, + }) + require.NoError(t, err) + offchainConfig2, err := ccipconfig.EncodeOffchainConfig[v1_2_0.JSONCommitOffchainConfig](v1_2_0.JSONCommitOffchainConfig{ + SourceFinalityDepth: sourceFinalityDepth, + DestFinalityDepth: destFinalityDepth, + GasPriceHeartBeat: *config.MustNewDuration(commonOffchain.GasPriceHeartBeat), + DAGasPriceDeviationPPB: 1e7, + ExecGasPriceDeviationPPB: commonOffchain.GasPriceDeviationPPB, + TokenPriceDeviationPPB: commonOffchain.TokenPriceDeviationPPB, + TokenPriceHeartBeat: *config.MustNewDuration(commonOffchain.TokenPriceHeartBeat), + InflightCacheExpiry: *config.MustNewDuration(commonOffchain.InflightCacheExpiry), + }) + require.NoError(t, err) + _, err = ch2.SetOCR2Config(user, signers, transmitters, 1, onchainConfig2, 1, []byte{}) + require.NoError(t, err) + commitAndGetBlockTs(ec) + + // Apply report + b, err := c10r.EncodeCommitReport(ctx, rep) + require.NoError(t, err) + _, err = ch.Report(user, b, er) + require.NoError(t, err) + b, err = c12r.EncodeCommitReport(ctx, rep) + require.NoError(t, err) + _, err = ch2.Report(user, b, er) + require.NoError(t, err) + commitAndGetBlockTs(ec) + + // Capture all logs. + lp.PollAndSaveLogs(context.Background(), 1) + + configs := map[string][][]byte{ + ccipdata.V1_0_0: {onchainConfig, offchainConfig}, + ccipdata.V1_2_0: {onchainConfig2, offchainConfig2}, + } + crs := map[string]ccipdata.CommitStoreReader{ + ccipdata.V1_0_0: c10r, + ccipdata.V1_2_0: c12r, + } + prs := map[string]common.Address{ + ccipdata.V1_0_0: pr, + ccipdata.V1_2_0: pr2, + } + gasPrice := big.NewInt(10) + daPrice := big.NewInt(20) + ge.On("GetFee", mock.Anything, mock.Anything, mock.Anything, assets.NewWei(maxGasPrice)).Return(gas.EvmFee{Legacy: assets.NewWei(gasPrice)}, uint64(0), nil) + lm.On("GasPrice", mock.Anything).Return(assets.NewWei(daPrice), nil) + + for v, cr := range crs { + cr := cr + t.Run("CommitStoreReader "+v, func(t *testing.T) { + // Static config. 
+ cfg, err := cr.GetCommitStoreStaticConfig(context.Background()) + require.NoError(t, err) + require.NotNil(t, cfg) + + // Assert encoding + b, err := cr.EncodeCommitReport(ctx, rep) + require.NoError(t, err) + d, err := cr.DecodeCommitReport(ctx, b) + require.NoError(t, err) + assert.Equal(t, d, rep) + + // Assert reading + latest, err := cr.GetLatestPriceEpochAndRound(context.Background()) + require.NoError(t, err) + assert.Equal(t, er.Uint64(), latest) + + // Assert cursing + down, err := cr.IsDown(context.Background()) + require.NoError(t, err) + assert.False(t, down) + _, err = arm.VoteToCurse(user, [32]byte{}) + require.NoError(t, err) + ec.Commit() + down, err = cr.IsDown(context.Background()) + require.NoError(t, err) + assert.True(t, down) + _, err = arm.OwnerUnvoteToCurse0(user, nil) + require.NoError(t, err) + ec.Commit() + + seqNr, err := cr.GetExpectedNextSequenceNumber(context.Background()) + require.NoError(t, err) + assert.Equal(t, rep.Interval.Max+1, seqNr) + + reps, err := cr.GetCommitReportMatchingSeqNum(context.Background(), rep.Interval.Max+1, 0) + require.NoError(t, err) + assert.Len(t, reps, 0) + + reps, err = cr.GetCommitReportMatchingSeqNum(context.Background(), rep.Interval.Max, 0) + require.NoError(t, err) + require.Len(t, reps, 1) + assert.Equal(t, reps[0].Interval, rep.Interval) + assert.Equal(t, reps[0].MerkleRoot, rep.MerkleRoot) + assert.Equal(t, reps[0].GasPrices, rep.GasPrices) + assert.Equal(t, reps[0].TokenPrices, rep.TokenPrices) + + reps, err = cr.GetCommitReportMatchingSeqNum(context.Background(), rep.Interval.Min, 0) + require.NoError(t, err) + require.Len(t, reps, 1) + assert.Equal(t, reps[0].Interval, rep.Interval) + assert.Equal(t, reps[0].MerkleRoot, rep.MerkleRoot) + assert.Equal(t, reps[0].GasPrices, rep.GasPrices) + assert.Equal(t, reps[0].TokenPrices, rep.TokenPrices) + + reps, err = cr.GetCommitReportMatchingSeqNum(context.Background(), rep.Interval.Min-1, 0) + require.NoError(t, err) + require.Len(t, reps, 0) + + // Sanity + reps, err = cr.GetAcceptedCommitReportsGteTimestamp(context.Background(), time.Unix(0, 0), 0) + require.NoError(t, err) + require.Len(t, reps, 1) + assert.Equal(t, reps[0].Interval, rep.Interval) + assert.Equal(t, reps[0].MerkleRoot, rep.MerkleRoot) + assert.Equal(t, reps[0].GasPrices, rep.GasPrices) + assert.Equal(t, reps[0].TokenPrices, rep.TokenPrices) + + // Until we detect the config, we'll have empty offchain config + c1, err := cr.OffchainConfig(ctx) + require.NoError(t, err) + assert.Equal(t, c1, cciptypes.CommitOffchainConfig{}) + newPr, err := cr.ChangeConfig(ctx, configs[v][0], configs[v][1]) + require.NoError(t, err) + assert.Equal(t, ccipcalc.EvmAddrToGeneric(prs[v]), newPr) + + c2, err := cr.OffchainConfig(ctx) + require.NoError(t, err) + assert.Equal(t, commonOffchain, c2) + // We should be able to query for gas prices now. 
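+			// With the offchain config applied, constructing the gas price estimator should succeed, and the
+			// mocked fee estimator (legacy gas price 10, DA gas price 20) should surface as a positive price.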
+ gpe, err := cr.GasPriceEstimator(ctx) + require.NoError(t, err) + gp, err := gpe.GetGasPrice(context.Background()) + require.NoError(t, err) + assert.True(t, gp.Cmp(big.NewInt(0)) > 0) + }) + } +} + +func TestNewCommitStoreReader(t *testing.T) { + var tt = []struct { + typeAndVersion string + expectedErr string + }{ + { + typeAndVersion: "blah", + expectedErr: "unable to read type and version: invalid type and version blah", + }, + { + typeAndVersion: "EVM2EVMOffRamp 1.0.0", + expectedErr: "expected CommitStore got EVM2EVMOffRamp", + }, + { + typeAndVersion: "CommitStore 1.2.0", + expectedErr: "", + }, + { + typeAndVersion: "CommitStore 2.0.0", + expectedErr: "unsupported commit store version 2.0.0", + }, + } + for _, tc := range tt { + t.Run(tc.typeAndVersion, func(t *testing.T) { + b, err := utils.ABIEncode(`[{"type":"string"}]`, tc.typeAndVersion) + require.NoError(t, err) + c := evmclientmocks.NewClient(t) + c.On("CallContract", mock.Anything, mock.Anything, mock.Anything).Return(b, nil) + addr := ccipcalc.EvmAddrToGeneric(utils.RandomAddress()) + lp := lpmocks.NewLogPoller(t) + if tc.expectedErr == "" { + lp.On("RegisterFilter", mock.Anything, mock.Anything).Return(nil) + } + _, err = factory.NewCommitStoreReader(logger.TestLogger(t), factory.NewEvmVersionFinder(), addr, c, lp) + if tc.expectedErr != "" { + require.EqualError(t, err, tc.expectedErr) + } else { + require.NoError(t, err) + } + }) + } +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/factory/commit_store.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/factory/commit_store.go new file mode 100644 index 00000000000..d431d2863a0 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/factory/commit_store.go @@ -0,0 +1,121 @@ +package factory + +import ( + "github.com/Masterminds/semver/v3" + "github.com/pkg/errors" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/txmgr" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/commit_store" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/commit_store_1_0_0" + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers" + ccipconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_5_0" +) + +func NewCommitStoreReader(lggr logger.Logger, versionFinder VersionFinder, address cciptypes.Address, ec client.Client, lp logpoller.LogPoller) (ccipdata.CommitStoreReader, error) { + return initOrCloseCommitStoreReader(lggr, versionFinder, address, ec, lp, false) +} + +func CloseCommitStoreReader(lggr logger.Logger, versionFinder VersionFinder, address cciptypes.Address, ec client.Client, lp logpoller.LogPoller) error { + _, err := initOrCloseCommitStoreReader(lggr, versionFinder, 
address, ec, lp, true) + return err +} + +func initOrCloseCommitStoreReader(lggr logger.Logger, versionFinder VersionFinder, address cciptypes.Address, ec client.Client, lp logpoller.LogPoller, closeReader bool) (ccipdata.CommitStoreReader, error) { + contractType, version, err := versionFinder.TypeAndVersion(address, ec) + if err != nil { + return nil, errors.Wrapf(err, "unable to read type and version") + } + if contractType != ccipconfig.CommitStore { + return nil, errors.Errorf("expected %v got %v", ccipconfig.CommitStore, contractType) + } + + evmAddr, err := ccipcalc.GenericAddrToEvm(address) + if err != nil { + return nil, err + } + + lggr.Infow("Initializing CommitStore Reader", "version", version.String()) + + switch version.String() { + case ccipdata.V1_0_0, ccipdata.V1_1_0: // Versions are identical + cs, err := v1_0_0.NewCommitStore(lggr, evmAddr, ec, lp) + if err != nil { + return nil, err + } + if closeReader { + return nil, cs.Close() + } + return cs, cs.RegisterFilters() + case ccipdata.V1_2_0: + cs, err := v1_2_0.NewCommitStore(lggr, evmAddr, ec, lp) + if err != nil { + return nil, err + } + if closeReader { + return nil, cs.Close() + } + return cs, cs.RegisterFilters() + case ccipdata.V1_5_0: + cs, err := v1_5_0.NewCommitStore(lggr, evmAddr, ec, lp) + if err != nil { + return nil, err + } + if closeReader { + return nil, cs.Close() + } + return cs, cs.RegisterFilters() + default: + return nil, errors.Errorf("unsupported commit store version %v", version.String()) + } +} + +func CommitReportToEthTxMeta(typ ccipconfig.ContractType, ver semver.Version) (func(report []byte) (*txmgr.TxMeta, error), error) { + if typ != ccipconfig.CommitStore { + return nil, errors.Errorf("expected %v got %v", ccipconfig.CommitStore, typ) + } + switch ver.String() { + case ccipdata.V1_0_0, ccipdata.V1_1_0: + commitStoreABI := abihelpers.MustParseABI(commit_store_1_0_0.CommitStoreABI) + return func(report []byte) (*txmgr.TxMeta, error) { + commitReport, err := v1_0_0.DecodeCommitReport(abihelpers.MustGetEventInputs(v1_0_0.ReportAccepted, commitStoreABI), report) + if err != nil { + return nil, err + } + return commitReportToEthTxMeta(commitReport) + }, nil + case ccipdata.V1_2_0, ccipdata.V1_5_0: + commitStoreABI := abihelpers.MustParseABI(commit_store.CommitStoreABI) + return func(report []byte) (*txmgr.TxMeta, error) { + commitReport, err := v1_2_0.DecodeCommitReport(abihelpers.MustGetEventInputs(v1_0_0.ReportAccepted, commitStoreABI), report) + if err != nil { + return nil, err + } + return commitReportToEthTxMeta(commitReport) + }, nil + default: + return nil, errors.Errorf("got unexpected version %v", ver.String()) + } +} + +// CommitReportToEthTxMeta generates a txmgr.EthTxMeta from the given commit report. 
+// sequence numbers of the committed messages will be added to tx metadata +func commitReportToEthTxMeta(commitReport cciptypes.CommitStoreReport) (*txmgr.TxMeta, error) { + n := (commitReport.Interval.Max - commitReport.Interval.Min) + 1 + seqRange := make([]uint64, n) + for i := uint64(0); i < n; i++ { + seqRange[i] = i + commitReport.Interval.Min + } + return &txmgr.TxMeta{ + SeqNumbers: seqRange, + }, nil +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/factory/commit_store_test.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/factory/commit_store_test.go new file mode 100644 index 00000000000..e1b8ff929c3 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/factory/commit_store_test.go @@ -0,0 +1,37 @@ +package factory + +import ( + "testing" + + "github.com/Masterminds/semver/v3" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" + mocks2 "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller/mocks" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils" + "github.com/smartcontractkit/chainlink/v2/core/logger" + ccipconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0" +) + +func TestCommitStore(t *testing.T) { + for _, versionStr := range []string{ccipdata.V1_0_0, ccipdata.V1_2_0} { + lggr := logger.TestLogger(t) + addr := cciptypes.Address(utils.RandomAddress().String()) + lp := mocks2.NewLogPoller(t) + + lp.On("RegisterFilter", mock.Anything, mock.Anything).Return(nil) + versionFinder := newMockVersionFinder(ccipconfig.CommitStore, *semver.MustParse(versionStr), nil) + _, err := NewCommitStoreReader(lggr, versionFinder, addr, nil, lp) + assert.NoError(t, err) + + expFilterName := logpoller.FilterName(v1_0_0.EXEC_REPORT_ACCEPTS, addr) + lp.On("UnregisterFilter", mock.Anything, expFilterName).Return(nil) + err = CloseCommitStoreReader(lggr, versionFinder, addr, nil, lp) + assert.NoError(t, err) + } +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/factory/offramp.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/factory/offramp.go new file mode 100644 index 00000000000..c6fa63ee820 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/factory/offramp.go @@ -0,0 +1,125 @@ +package factory + +import ( + "context" + "math/big" + + "github.com/Masterminds/semver/v3" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/pkg/errors" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/txmgr" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_offramp" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_offramp_1_0_0" + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers" + ccipconfig 
"github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_5_0" +) + +func NewOffRampReader(lggr logger.Logger, versionFinder VersionFinder, addr cciptypes.Address, destClient client.Client, lp logpoller.LogPoller, estimator gas.EvmFeeEstimator, destMaxGasPrice *big.Int, registerFilters bool) (ccipdata.OffRampReader, error) { + return initOrCloseOffRampReader(lggr, versionFinder, addr, destClient, lp, estimator, destMaxGasPrice, false, registerFilters) +} + +func CloseOffRampReader(lggr logger.Logger, versionFinder VersionFinder, addr cciptypes.Address, destClient client.Client, lp logpoller.LogPoller, estimator gas.EvmFeeEstimator, destMaxGasPrice *big.Int) error { + _, err := initOrCloseOffRampReader(lggr, versionFinder, addr, destClient, lp, estimator, destMaxGasPrice, true, false) + return err +} + +func initOrCloseOffRampReader(lggr logger.Logger, versionFinder VersionFinder, addr cciptypes.Address, destClient client.Client, lp logpoller.LogPoller, estimator gas.EvmFeeEstimator, destMaxGasPrice *big.Int, closeReader bool, registerFilters bool) (ccipdata.OffRampReader, error) { + contractType, version, err := versionFinder.TypeAndVersion(addr, destClient) + if err != nil { + return nil, errors.Wrapf(err, "unable to read type and version") + } + if contractType != ccipconfig.EVM2EVMOffRamp { + return nil, errors.Errorf("expected %v got %v", ccipconfig.EVM2EVMOffRamp, contractType) + } + + evmAddr, err := ccipcalc.GenericAddrToEvm(addr) + if err != nil { + return nil, err + } + + lggr.Infow("Initializing OffRamp Reader", "version", version.String(), "destMaxGasPrice", destMaxGasPrice.String()) + + switch version.String() { + case ccipdata.V1_0_0, ccipdata.V1_1_0: + offRamp, err := v1_0_0.NewOffRamp(lggr, evmAddr, destClient, lp, estimator, destMaxGasPrice) + if err != nil { + return nil, err + } + if closeReader { + return nil, offRamp.Close() + } + return offRamp, offRamp.RegisterFilters() + case ccipdata.V1_2_0: + offRamp, err := v1_2_0.NewOffRamp(lggr, evmAddr, destClient, lp, estimator, destMaxGasPrice) + if err != nil { + return nil, err + } + if closeReader { + return nil, offRamp.Close() + } + return offRamp, offRamp.RegisterFilters() + case ccipdata.V1_5_0: + offRamp, err := v1_5_0.NewOffRamp(lggr, evmAddr, destClient, lp, estimator, destMaxGasPrice) + if err != nil { + return nil, err + } + if closeReader { + return nil, offRamp.Close() + } + return offRamp, offRamp.RegisterFilters() + default: + return nil, errors.Errorf("unsupported offramp version %v", version.String()) + } + // TODO can validate it pointing to the correct version +} + +func ExecReportToEthTxMeta(ctx context.Context, typ ccipconfig.ContractType, ver semver.Version) (func(report []byte) (*txmgr.TxMeta, error), error) { + if typ != ccipconfig.EVM2EVMOffRamp { + return nil, errors.Errorf("expected %v got %v", ccipconfig.EVM2EVMOffRamp, typ) + } + switch ver.String() { + case ccipdata.V1_0_0, ccipdata.V1_1_0: + offRampABI := abihelpers.MustParseABI(evm_2_evm_offramp_1_0_0.EVM2EVMOffRampABI) + return 
func(report []byte) (*txmgr.TxMeta, error) { + execReport, err := v1_0_0.DecodeExecReport(ctx, abihelpers.MustGetMethodInputs(ccipdata.ManuallyExecute, offRampABI)[:1], report) + if err != nil { + return nil, err + } + return execReportToEthTxMeta(execReport) + }, nil + case ccipdata.V1_2_0, ccipdata.V1_5_0: + offRampABI := abihelpers.MustParseABI(evm_2_evm_offramp.EVM2EVMOffRampABI) + return func(report []byte) (*txmgr.TxMeta, error) { + execReport, err := v1_2_0.DecodeExecReport(ctx, abihelpers.MustGetMethodInputs(ccipdata.ManuallyExecute, offRampABI)[:1], report) + if err != nil { + return nil, err + } + return execReportToEthTxMeta(execReport) + }, nil + default: + return nil, errors.Errorf("got unexpected version %v", ver.String()) + } +} + +func execReportToEthTxMeta(execReport cciptypes.ExecReport) (*txmgr.TxMeta, error) { + msgIDs := make([]string, len(execReport.Messages)) + for i, msg := range execReport.Messages { + msgIDs[i] = hexutil.Encode(msg.MessageID[:]) + } + + return &txmgr.TxMeta{ + MessageIDs: msgIDs, + }, nil +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/factory/offramp_test.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/factory/offramp_test.go new file mode 100644 index 00000000000..4b9e57ecfbd --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/factory/offramp_test.go @@ -0,0 +1,44 @@ +package factory + +import ( + "testing" + + "github.com/Masterminds/semver/v3" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" + mocks2 "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller/mocks" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils" + "github.com/smartcontractkit/chainlink/v2/core/logger" + ccipconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0" +) + +func TestOffRamp(t *testing.T) { + for _, versionStr := range []string{ccipdata.V1_0_0, ccipdata.V1_2_0} { + lggr := logger.TestLogger(t) + addr := cciptypes.Address(utils.RandomAddress().String()) + lp := mocks2.NewLogPoller(t) + + expFilterNames := []string{ + logpoller.FilterName(v1_0_0.EXEC_EXECUTION_STATE_CHANGES, addr), + logpoller.FilterName(v1_0_0.EXEC_TOKEN_POOL_ADDED, addr), + logpoller.FilterName(v1_0_0.EXEC_TOKEN_POOL_REMOVED, addr), + } + versionFinder := newMockVersionFinder(ccipconfig.EVM2EVMOffRamp, *semver.MustParse(versionStr), nil) + + lp.On("RegisterFilter", mock.Anything, mock.Anything).Return(nil).Times(len(expFilterNames)) + _, err := NewOffRampReader(lggr, versionFinder, addr, nil, lp, nil, nil, true) + assert.NoError(t, err) + + for _, f := range expFilterNames { + lp.On("UnregisterFilter", mock.Anything, f).Return(nil) + } + err = CloseOffRampReader(lggr, versionFinder, addr, nil, lp, nil, nil) + assert.NoError(t, err) + } +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/factory/onramp.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/factory/onramp.go new file mode 100644 index 00000000000..e82584ac7cc --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/factory/onramp.go @@ -0,0 +1,88 @@ +package factory + +import ( + "github.com/pkg/errors" + + cciptypes 
"github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" + "github.com/smartcontractkit/chainlink/v2/core/logger" + ccipconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_1_0" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_5_0" +) + +// NewOnRampReader determines the appropriate version of the onramp and returns a reader for it +func NewOnRampReader(lggr logger.Logger, versionFinder VersionFinder, sourceSelector, destSelector uint64, onRampAddress cciptypes.Address, sourceLP logpoller.LogPoller, source client.Client) (ccipdata.OnRampReader, error) { + return initOrCloseOnRampReader(lggr, versionFinder, sourceSelector, destSelector, onRampAddress, sourceLP, source, false) +} + +func CloseOnRampReader(lggr logger.Logger, versionFinder VersionFinder, sourceSelector, destSelector uint64, onRampAddress cciptypes.Address, sourceLP logpoller.LogPoller, source client.Client) error { + _, err := initOrCloseOnRampReader(lggr, versionFinder, sourceSelector, destSelector, onRampAddress, sourceLP, source, true) + return err +} + +func initOrCloseOnRampReader(lggr logger.Logger, versionFinder VersionFinder, sourceSelector, destSelector uint64, onRampAddress cciptypes.Address, sourceLP logpoller.LogPoller, source client.Client, closeReader bool) (ccipdata.OnRampReader, error) { + contractType, version, err := versionFinder.TypeAndVersion(onRampAddress, source) + if err != nil { + return nil, errors.Wrapf(err, "unable to read type and version") + } + if contractType != ccipconfig.EVM2EVMOnRamp { + return nil, errors.Errorf("expected %v got %v", ccipconfig.EVM2EVMOnRamp, contractType) + } + + onRampAddrEvm, err := ccipcalc.GenericAddrToEvm(onRampAddress) + if err != nil { + return nil, err + } + + lggr.Infof("Initializing onRamp for version %v", version.String()) + + switch version.String() { + case ccipdata.V1_0_0: + onRamp, err := v1_0_0.NewOnRamp(lggr, sourceSelector, destSelector, onRampAddrEvm, sourceLP, source) + if err != nil { + return nil, err + } + if closeReader { + return nil, onRamp.Close() + } + return onRamp, onRamp.RegisterFilters() + case ccipdata.V1_1_0: + onRamp, err := v1_1_0.NewOnRamp(lggr, sourceSelector, destSelector, onRampAddrEvm, sourceLP, source) + if err != nil { + return nil, err + } + if closeReader { + return nil, onRamp.Close() + } + return onRamp, onRamp.RegisterFilters() + case ccipdata.V1_2_0: + onRamp, err := v1_2_0.NewOnRamp(lggr, sourceSelector, destSelector, onRampAddrEvm, sourceLP, source) + if err != nil { + return nil, err + } + if closeReader { + return nil, onRamp.Close() + } + return onRamp, onRamp.RegisterFilters() + case ccipdata.V1_5_0: + onRamp, err := v1_5_0.NewOnRamp(lggr, sourceSelector, destSelector, onRampAddrEvm, sourceLP, source) + if err != nil { + return nil, err + } + if closeReader { + return nil, onRamp.Close() + } + return onRamp, 
onRamp.RegisterFilters() + // Adding a new version? + // Please update the public factory function in leafer.go if the new version updates the leaf hash function. + default: + return nil, errors.Errorf("unsupported onramp version %v", version.String()) + } +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/factory/onramp_test.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/factory/onramp_test.go new file mode 100644 index 00000000000..8cf47ddc7be --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/factory/onramp_test.go @@ -0,0 +1,45 @@ +package factory + +import ( + "testing" + + "github.com/Masterminds/semver/v3" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" + mocks2 "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller/mocks" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils" + "github.com/smartcontractkit/chainlink/v2/core/logger" + ccipconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata" +) + +func TestOnRamp(t *testing.T) { + for _, versionStr := range []string{ccipdata.V1_0_0, ccipdata.V1_1_0, ccipdata.V1_2_0, ccipdata.V1_5_0} { + lggr := logger.TestLogger(t) + addr := cciptypes.Address(utils.RandomAddress().String()) + lp := mocks2.NewLogPoller(t) + + sourceSelector := uint64(1000) + destSelector := uint64(2000) + + expFilterNames := []string{ + logpoller.FilterName(ccipdata.COMMIT_CCIP_SENDS, addr), + logpoller.FilterName(ccipdata.CONFIG_CHANGED, addr), + } + versionFinder := newMockVersionFinder(ccipconfig.EVM2EVMOnRamp, *semver.MustParse(versionStr), nil) + + lp.On("RegisterFilter", mock.Anything, mock.Anything).Return(nil).Times(len(expFilterNames)) + _, err := NewOnRampReader(lggr, versionFinder, sourceSelector, destSelector, addr, lp, nil) + assert.NoError(t, err) + + for _, f := range expFilterNames { + lp.On("UnregisterFilter", mock.Anything, f).Return(nil) + } + err = CloseOnRampReader(lggr, versionFinder, sourceSelector, destSelector, addr, lp, nil) + assert.NoError(t, err) + } +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/factory/price_registry.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/factory/price_registry.go new file mode 100644 index 00000000000..f1fa7c4e81a --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/factory/price_registry.go @@ -0,0 +1,82 @@ +package factory + +import ( + "context" + + "github.com/pkg/errors" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" + "github.com/smartcontractkit/chainlink/v2/core/logger" + ccipconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcommon" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0" + 
"github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0" +) + +// NewPriceRegistryReader determines the appropriate version of the price registry and returns a reader for it. +func NewPriceRegistryReader(ctx context.Context, lggr logger.Logger, versionFinder VersionFinder, priceRegistryAddress cciptypes.Address, lp logpoller.LogPoller, cl client.Client) (ccipdata.PriceRegistryReader, error) { + return initOrClosePriceRegistryReader(ctx, lggr, versionFinder, priceRegistryAddress, lp, cl, false) +} + +func ClosePriceRegistryReader(ctx context.Context, lggr logger.Logger, versionFinder VersionFinder, priceRegistryAddress cciptypes.Address, lp logpoller.LogPoller, cl client.Client) error { + _, err := initOrClosePriceRegistryReader(ctx, lggr, versionFinder, priceRegistryAddress, lp, cl, true) + return err +} + +func initOrClosePriceRegistryReader(ctx context.Context, lggr logger.Logger, versionFinder VersionFinder, priceRegistryAddress cciptypes.Address, lp logpoller.LogPoller, cl client.Client, closeReader bool) (ccipdata.PriceRegistryReader, error) { + registerFilters := !closeReader + + priceRegistryEvmAddr, err := ccipcalc.GenericAddrToEvm(priceRegistryAddress) + if err != nil { + return nil, err + } + + contractType, version, err := versionFinder.TypeAndVersion(priceRegistryAddress, cl) + isV1_0_0 := ccipcommon.IsTxRevertError(err) || (contractType == ccipconfig.PriceRegistry && version.String() == ccipdata.V1_0_0) + if isV1_0_0 { + lggr.Infof("Assuming %v is 1.0.0 price registry, got %v", priceRegistryEvmAddr, err) + // Unfortunately the v1 price registry doesn't have a method to get the version so assume if it reverts its v1. + pr, err2 := v1_0_0.NewPriceRegistry(lggr, priceRegistryEvmAddr, lp, cl, registerFilters) + if err2 != nil { + return nil, err2 + } + if closeReader { + return nil, pr.Close() + } + return pr, nil + } + if err != nil { + return nil, errors.Wrapf(err, "unable to read type and version") + } + + if contractType != ccipconfig.PriceRegistry { + return nil, errors.Errorf("expected %v got %v", ccipconfig.PriceRegistry, contractType) + } + switch version.String() { + case ccipdata.V1_2_0: + pr, err := v1_2_0.NewPriceRegistry(lggr, priceRegistryEvmAddr, lp, cl, registerFilters) + if err != nil { + return nil, err + } + if closeReader { + return nil, pr.Close() + } + return pr, nil + case ccipdata.V1_6_0: + pr, err := v1_2_0.NewPriceRegistry(lggr, priceRegistryEvmAddr, lp, cl, registerFilters) + if err != nil { + return nil, err + } + if closeReader { + return nil, pr.Close() + } + return pr, nil + default: + return nil, errors.Errorf("unsupported price registry version %v", version.String()) + } +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/factory/price_registry_test.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/factory/price_registry_test.go new file mode 100644 index 00000000000..b4a9d307147 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/factory/price_registry_test.go @@ -0,0 +1,46 @@ +package factory + +import ( + "testing" + + "github.com/Masterminds/semver/v3" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" + mocks2 "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller/mocks" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils" + 
"github.com/smartcontractkit/chainlink/v2/core/internal/testutils" + "github.com/smartcontractkit/chainlink/v2/core/logger" + ccipconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata" +) + +func TestPriceRegistry(t *testing.T) { + ctx := testutils.Context(t) + + for _, versionStr := range []string{ccipdata.V1_0_0, ccipdata.V1_2_0} { + lggr := logger.TestLogger(t) + addr := cciptypes.Address(utils.RandomAddress().String()) + lp := mocks2.NewLogPoller(t) + + expFilterNames := []string{ + logpoller.FilterName(ccipdata.COMMIT_PRICE_UPDATES, addr), + logpoller.FilterName(ccipdata.FEE_TOKEN_ADDED, addr), + logpoller.FilterName(ccipdata.FEE_TOKEN_REMOVED, addr), + } + versionFinder := newMockVersionFinder(ccipconfig.PriceRegistry, *semver.MustParse(versionStr), nil) + + lp.On("RegisterFilter", mock.Anything, mock.Anything).Return(nil).Times(len(expFilterNames)) + _, err := NewPriceRegistryReader(ctx, lggr, versionFinder, addr, lp, nil) + assert.NoError(t, err) + + for _, f := range expFilterNames { + lp.On("UnregisterFilter", mock.Anything, f).Return(nil) + } + err = ClosePriceRegistryReader(ctx, lggr, versionFinder, addr, lp, nil) + assert.NoError(t, err) + } +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/factory/versionfinder.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/factory/versionfinder.go new file mode 100644 index 00000000000..ac16fc4df2f --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/factory/versionfinder.go @@ -0,0 +1,44 @@ +package factory + +import ( + "github.com/Masterminds/semver/v3" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc" +) + +// VersionFinder accepts a contract address and a client and performs an on-chain call to +// determine the contract type. 
+type VersionFinder interface { + TypeAndVersion(addr cciptypes.Address, client bind.ContractBackend) (config.ContractType, semver.Version, error) +} + +type EvmVersionFinder struct{} + +func NewEvmVersionFinder() EvmVersionFinder { + return EvmVersionFinder{} +} + +func (e EvmVersionFinder) TypeAndVersion(addr cciptypes.Address, client bind.ContractBackend) (config.ContractType, semver.Version, error) { + evmAddr, err := ccipcalc.GenericAddrToEvm(addr) + if err != nil { + return "", semver.Version{}, err + } + return config.TypeAndVersion(evmAddr, client) +} + +type mockVersionFinder struct { + typ config.ContractType + version semver.Version + err error +} + +func newMockVersionFinder(typ config.ContractType, version semver.Version, err error) *mockVersionFinder { + return &mockVersionFinder{typ: typ, version: version, err: err} +} + +func (m mockVersionFinder) TypeAndVersion(addr cciptypes.Address, client bind.ContractBackend) (config.ContractType, semver.Version, error) { + return m.typ, m.version, m.err +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/mocks/commit_store_reader_mock.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/mocks/commit_store_reader_mock.go new file mode 100644 index 00000000000..f383a87a8a9 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/mocks/commit_store_reader_mock.go @@ -0,0 +1,985 @@ +// Code generated by mockery v2.43.2. DO NOT EDIT. + +package mocks + +import ( + big "math/big" + + ccip "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + + context "context" + + gas "github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas" + + mock "github.com/stretchr/testify/mock" + + time "time" +) + +// CommitStoreReader is an autogenerated mock type for the CommitStoreReader type +type CommitStoreReader struct { + mock.Mock +} + +type CommitStoreReader_Expecter struct { + mock *mock.Mock +} + +func (_m *CommitStoreReader) EXPECT() *CommitStoreReader_Expecter { + return &CommitStoreReader_Expecter{mock: &_m.Mock} +} + +// ChangeConfig provides a mock function with given fields: ctx, onchainConfig, offchainConfig +func (_m *CommitStoreReader) ChangeConfig(ctx context.Context, onchainConfig []byte, offchainConfig []byte) (ccip.Address, error) { + ret := _m.Called(ctx, onchainConfig, offchainConfig) + + if len(ret) == 0 { + panic("no return value specified for ChangeConfig") + } + + var r0 ccip.Address + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, []byte, []byte) (ccip.Address, error)); ok { + return rf(ctx, onchainConfig, offchainConfig) + } + if rf, ok := ret.Get(0).(func(context.Context, []byte, []byte) ccip.Address); ok { + r0 = rf(ctx, onchainConfig, offchainConfig) + } else { + r0 = ret.Get(0).(ccip.Address) + } + + if rf, ok := ret.Get(1).(func(context.Context, []byte, []byte) error); ok { + r1 = rf(ctx, onchainConfig, offchainConfig) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CommitStoreReader_ChangeConfig_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ChangeConfig' +type CommitStoreReader_ChangeConfig_Call struct { + *mock.Call +} + +// ChangeConfig is a helper method to define mock.On call +// - ctx context.Context +// - onchainConfig []byte +// - offchainConfig []byte +func (_e *CommitStoreReader_Expecter) ChangeConfig(ctx interface{}, onchainConfig interface{}, offchainConfig interface{}) *CommitStoreReader_ChangeConfig_Call { + return &CommitStoreReader_ChangeConfig_Call{Call: _e.mock.On("ChangeConfig", ctx, 
onchainConfig, offchainConfig)} +} + +func (_c *CommitStoreReader_ChangeConfig_Call) Run(run func(ctx context.Context, onchainConfig []byte, offchainConfig []byte)) *CommitStoreReader_ChangeConfig_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].([]byte), args[2].([]byte)) + }) + return _c +} + +func (_c *CommitStoreReader_ChangeConfig_Call) Return(_a0 ccip.Address, _a1 error) *CommitStoreReader_ChangeConfig_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *CommitStoreReader_ChangeConfig_Call) RunAndReturn(run func(context.Context, []byte, []byte) (ccip.Address, error)) *CommitStoreReader_ChangeConfig_Call { + _c.Call.Return(run) + return _c +} + +// Close provides a mock function with given fields: +func (_m *CommitStoreReader) Close() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Close") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// CommitStoreReader_Close_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Close' +type CommitStoreReader_Close_Call struct { + *mock.Call +} + +// Close is a helper method to define mock.On call +func (_e *CommitStoreReader_Expecter) Close() *CommitStoreReader_Close_Call { + return &CommitStoreReader_Close_Call{Call: _e.mock.On("Close")} +} + +func (_c *CommitStoreReader_Close_Call) Run(run func()) *CommitStoreReader_Close_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *CommitStoreReader_Close_Call) Return(_a0 error) *CommitStoreReader_Close_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *CommitStoreReader_Close_Call) RunAndReturn(run func() error) *CommitStoreReader_Close_Call { + _c.Call.Return(run) + return _c +} + +// DecodeCommitReport provides a mock function with given fields: ctx, report +func (_m *CommitStoreReader) DecodeCommitReport(ctx context.Context, report []byte) (ccip.CommitStoreReport, error) { + ret := _m.Called(ctx, report) + + if len(ret) == 0 { + panic("no return value specified for DecodeCommitReport") + } + + var r0 ccip.CommitStoreReport + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, []byte) (ccip.CommitStoreReport, error)); ok { + return rf(ctx, report) + } + if rf, ok := ret.Get(0).(func(context.Context, []byte) ccip.CommitStoreReport); ok { + r0 = rf(ctx, report) + } else { + r0 = ret.Get(0).(ccip.CommitStoreReport) + } + + if rf, ok := ret.Get(1).(func(context.Context, []byte) error); ok { + r1 = rf(ctx, report) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CommitStoreReader_DecodeCommitReport_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DecodeCommitReport' +type CommitStoreReader_DecodeCommitReport_Call struct { + *mock.Call +} + +// DecodeCommitReport is a helper method to define mock.On call +// - ctx context.Context +// - report []byte +func (_e *CommitStoreReader_Expecter) DecodeCommitReport(ctx interface{}, report interface{}) *CommitStoreReader_DecodeCommitReport_Call { + return &CommitStoreReader_DecodeCommitReport_Call{Call: _e.mock.On("DecodeCommitReport", ctx, report)} +} + +func (_c *CommitStoreReader_DecodeCommitReport_Call) Run(run func(ctx context.Context, report []byte)) *CommitStoreReader_DecodeCommitReport_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].([]byte)) + }) + return _c +} + +func (_c 
*CommitStoreReader_DecodeCommitReport_Call) Return(_a0 ccip.CommitStoreReport, _a1 error) *CommitStoreReader_DecodeCommitReport_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *CommitStoreReader_DecodeCommitReport_Call) RunAndReturn(run func(context.Context, []byte) (ccip.CommitStoreReport, error)) *CommitStoreReader_DecodeCommitReport_Call { + _c.Call.Return(run) + return _c +} + +// EncodeCommitReport provides a mock function with given fields: ctx, report +func (_m *CommitStoreReader) EncodeCommitReport(ctx context.Context, report ccip.CommitStoreReport) ([]byte, error) { + ret := _m.Called(ctx, report) + + if len(ret) == 0 { + panic("no return value specified for EncodeCommitReport") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, ccip.CommitStoreReport) ([]byte, error)); ok { + return rf(ctx, report) + } + if rf, ok := ret.Get(0).(func(context.Context, ccip.CommitStoreReport) []byte); ok { + r0 = rf(ctx, report) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, ccip.CommitStoreReport) error); ok { + r1 = rf(ctx, report) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CommitStoreReader_EncodeCommitReport_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'EncodeCommitReport' +type CommitStoreReader_EncodeCommitReport_Call struct { + *mock.Call +} + +// EncodeCommitReport is a helper method to define mock.On call +// - ctx context.Context +// - report ccip.CommitStoreReport +func (_e *CommitStoreReader_Expecter) EncodeCommitReport(ctx interface{}, report interface{}) *CommitStoreReader_EncodeCommitReport_Call { + return &CommitStoreReader_EncodeCommitReport_Call{Call: _e.mock.On("EncodeCommitReport", ctx, report)} +} + +func (_c *CommitStoreReader_EncodeCommitReport_Call) Run(run func(ctx context.Context, report ccip.CommitStoreReport)) *CommitStoreReader_EncodeCommitReport_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(ccip.CommitStoreReport)) + }) + return _c +} + +func (_c *CommitStoreReader_EncodeCommitReport_Call) Return(_a0 []byte, _a1 error) *CommitStoreReader_EncodeCommitReport_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *CommitStoreReader_EncodeCommitReport_Call) RunAndReturn(run func(context.Context, ccip.CommitStoreReport) ([]byte, error)) *CommitStoreReader_EncodeCommitReport_Call { + _c.Call.Return(run) + return _c +} + +// GasPriceEstimator provides a mock function with given fields: ctx +func (_m *CommitStoreReader) GasPriceEstimator(ctx context.Context) (ccip.GasPriceEstimatorCommit, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for GasPriceEstimator") + } + + var r0 ccip.GasPriceEstimatorCommit + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (ccip.GasPriceEstimatorCommit, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) ccip.GasPriceEstimatorCommit); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(ccip.GasPriceEstimatorCommit) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CommitStoreReader_GasPriceEstimator_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GasPriceEstimator' +type CommitStoreReader_GasPriceEstimator_Call struct { + *mock.Call +} + +// GasPriceEstimator is a 
helper method to define mock.On call +// - ctx context.Context +func (_e *CommitStoreReader_Expecter) GasPriceEstimator(ctx interface{}) *CommitStoreReader_GasPriceEstimator_Call { + return &CommitStoreReader_GasPriceEstimator_Call{Call: _e.mock.On("GasPriceEstimator", ctx)} +} + +func (_c *CommitStoreReader_GasPriceEstimator_Call) Run(run func(ctx context.Context)) *CommitStoreReader_GasPriceEstimator_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *CommitStoreReader_GasPriceEstimator_Call) Return(_a0 ccip.GasPriceEstimatorCommit, _a1 error) *CommitStoreReader_GasPriceEstimator_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *CommitStoreReader_GasPriceEstimator_Call) RunAndReturn(run func(context.Context) (ccip.GasPriceEstimatorCommit, error)) *CommitStoreReader_GasPriceEstimator_Call { + _c.Call.Return(run) + return _c +} + +// GetAcceptedCommitReportsGteTimestamp provides a mock function with given fields: ctx, ts, confirmations +func (_m *CommitStoreReader) GetAcceptedCommitReportsGteTimestamp(ctx context.Context, ts time.Time, confirmations int) ([]ccip.CommitStoreReportWithTxMeta, error) { + ret := _m.Called(ctx, ts, confirmations) + + if len(ret) == 0 { + panic("no return value specified for GetAcceptedCommitReportsGteTimestamp") + } + + var r0 []ccip.CommitStoreReportWithTxMeta + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, time.Time, int) ([]ccip.CommitStoreReportWithTxMeta, error)); ok { + return rf(ctx, ts, confirmations) + } + if rf, ok := ret.Get(0).(func(context.Context, time.Time, int) []ccip.CommitStoreReportWithTxMeta); ok { + r0 = rf(ctx, ts, confirmations) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]ccip.CommitStoreReportWithTxMeta) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, time.Time, int) error); ok { + r1 = rf(ctx, ts, confirmations) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CommitStoreReader_GetAcceptedCommitReportsGteTimestamp_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetAcceptedCommitReportsGteTimestamp' +type CommitStoreReader_GetAcceptedCommitReportsGteTimestamp_Call struct { + *mock.Call +} + +// GetAcceptedCommitReportsGteTimestamp is a helper method to define mock.On call +// - ctx context.Context +// - ts time.Time +// - confirmations int +func (_e *CommitStoreReader_Expecter) GetAcceptedCommitReportsGteTimestamp(ctx interface{}, ts interface{}, confirmations interface{}) *CommitStoreReader_GetAcceptedCommitReportsGteTimestamp_Call { + return &CommitStoreReader_GetAcceptedCommitReportsGteTimestamp_Call{Call: _e.mock.On("GetAcceptedCommitReportsGteTimestamp", ctx, ts, confirmations)} +} + +func (_c *CommitStoreReader_GetAcceptedCommitReportsGteTimestamp_Call) Run(run func(ctx context.Context, ts time.Time, confirmations int)) *CommitStoreReader_GetAcceptedCommitReportsGteTimestamp_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(time.Time), args[2].(int)) + }) + return _c +} + +func (_c *CommitStoreReader_GetAcceptedCommitReportsGteTimestamp_Call) Return(_a0 []ccip.CommitStoreReportWithTxMeta, _a1 error) *CommitStoreReader_GetAcceptedCommitReportsGteTimestamp_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *CommitStoreReader_GetAcceptedCommitReportsGteTimestamp_Call) RunAndReturn(run func(context.Context, time.Time, int) ([]ccip.CommitStoreReportWithTxMeta, error)) 
*CommitStoreReader_GetAcceptedCommitReportsGteTimestamp_Call { + _c.Call.Return(run) + return _c +} + +// GetCommitReportMatchingSeqNum provides a mock function with given fields: ctx, seqNum, confirmations +func (_m *CommitStoreReader) GetCommitReportMatchingSeqNum(ctx context.Context, seqNum uint64, confirmations int) ([]ccip.CommitStoreReportWithTxMeta, error) { + ret := _m.Called(ctx, seqNum, confirmations) + + if len(ret) == 0 { + panic("no return value specified for GetCommitReportMatchingSeqNum") + } + + var r0 []ccip.CommitStoreReportWithTxMeta + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, int) ([]ccip.CommitStoreReportWithTxMeta, error)); ok { + return rf(ctx, seqNum, confirmations) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64, int) []ccip.CommitStoreReportWithTxMeta); ok { + r0 = rf(ctx, seqNum, confirmations) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]ccip.CommitStoreReportWithTxMeta) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64, int) error); ok { + r1 = rf(ctx, seqNum, confirmations) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CommitStoreReader_GetCommitReportMatchingSeqNum_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetCommitReportMatchingSeqNum' +type CommitStoreReader_GetCommitReportMatchingSeqNum_Call struct { + *mock.Call +} + +// GetCommitReportMatchingSeqNum is a helper method to define mock.On call +// - ctx context.Context +// - seqNum uint64 +// - confirmations int +func (_e *CommitStoreReader_Expecter) GetCommitReportMatchingSeqNum(ctx interface{}, seqNum interface{}, confirmations interface{}) *CommitStoreReader_GetCommitReportMatchingSeqNum_Call { + return &CommitStoreReader_GetCommitReportMatchingSeqNum_Call{Call: _e.mock.On("GetCommitReportMatchingSeqNum", ctx, seqNum, confirmations)} +} + +func (_c *CommitStoreReader_GetCommitReportMatchingSeqNum_Call) Run(run func(ctx context.Context, seqNum uint64, confirmations int)) *CommitStoreReader_GetCommitReportMatchingSeqNum_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint64), args[2].(int)) + }) + return _c +} + +func (_c *CommitStoreReader_GetCommitReportMatchingSeqNum_Call) Return(_a0 []ccip.CommitStoreReportWithTxMeta, _a1 error) *CommitStoreReader_GetCommitReportMatchingSeqNum_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *CommitStoreReader_GetCommitReportMatchingSeqNum_Call) RunAndReturn(run func(context.Context, uint64, int) ([]ccip.CommitStoreReportWithTxMeta, error)) *CommitStoreReader_GetCommitReportMatchingSeqNum_Call { + _c.Call.Return(run) + return _c +} + +// GetCommitStoreStaticConfig provides a mock function with given fields: ctx +func (_m *CommitStoreReader) GetCommitStoreStaticConfig(ctx context.Context) (ccip.CommitStoreStaticConfig, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for GetCommitStoreStaticConfig") + } + + var r0 ccip.CommitStoreStaticConfig + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (ccip.CommitStoreStaticConfig, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) ccip.CommitStoreStaticConfig); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(ccip.CommitStoreStaticConfig) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CommitStoreReader_GetCommitStoreStaticConfig_Call is a *mock.Call that 
shadows Run/Return methods with type explicit version for method 'GetCommitStoreStaticConfig' +type CommitStoreReader_GetCommitStoreStaticConfig_Call struct { + *mock.Call +} + +// GetCommitStoreStaticConfig is a helper method to define mock.On call +// - ctx context.Context +func (_e *CommitStoreReader_Expecter) GetCommitStoreStaticConfig(ctx interface{}) *CommitStoreReader_GetCommitStoreStaticConfig_Call { + return &CommitStoreReader_GetCommitStoreStaticConfig_Call{Call: _e.mock.On("GetCommitStoreStaticConfig", ctx)} +} + +func (_c *CommitStoreReader_GetCommitStoreStaticConfig_Call) Run(run func(ctx context.Context)) *CommitStoreReader_GetCommitStoreStaticConfig_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *CommitStoreReader_GetCommitStoreStaticConfig_Call) Return(_a0 ccip.CommitStoreStaticConfig, _a1 error) *CommitStoreReader_GetCommitStoreStaticConfig_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *CommitStoreReader_GetCommitStoreStaticConfig_Call) RunAndReturn(run func(context.Context) (ccip.CommitStoreStaticConfig, error)) *CommitStoreReader_GetCommitStoreStaticConfig_Call { + _c.Call.Return(run) + return _c +} + +// GetExpectedNextSequenceNumber provides a mock function with given fields: ctx +func (_m *CommitStoreReader) GetExpectedNextSequenceNumber(ctx context.Context) (uint64, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for GetExpectedNextSequenceNumber") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (uint64, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) uint64); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CommitStoreReader_GetExpectedNextSequenceNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetExpectedNextSequenceNumber' +type CommitStoreReader_GetExpectedNextSequenceNumber_Call struct { + *mock.Call +} + +// GetExpectedNextSequenceNumber is a helper method to define mock.On call +// - ctx context.Context +func (_e *CommitStoreReader_Expecter) GetExpectedNextSequenceNumber(ctx interface{}) *CommitStoreReader_GetExpectedNextSequenceNumber_Call { + return &CommitStoreReader_GetExpectedNextSequenceNumber_Call{Call: _e.mock.On("GetExpectedNextSequenceNumber", ctx)} +} + +func (_c *CommitStoreReader_GetExpectedNextSequenceNumber_Call) Run(run func(ctx context.Context)) *CommitStoreReader_GetExpectedNextSequenceNumber_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *CommitStoreReader_GetExpectedNextSequenceNumber_Call) Return(_a0 uint64, _a1 error) *CommitStoreReader_GetExpectedNextSequenceNumber_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *CommitStoreReader_GetExpectedNextSequenceNumber_Call) RunAndReturn(run func(context.Context) (uint64, error)) *CommitStoreReader_GetExpectedNextSequenceNumber_Call { + _c.Call.Return(run) + return _c +} + +// GetLatestPriceEpochAndRound provides a mock function with given fields: ctx +func (_m *CommitStoreReader) GetLatestPriceEpochAndRound(ctx context.Context) (uint64, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for GetLatestPriceEpochAndRound") + } + + var r0 uint64 + var r1 error + if rf, ok := 
ret.Get(0).(func(context.Context) (uint64, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) uint64); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CommitStoreReader_GetLatestPriceEpochAndRound_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLatestPriceEpochAndRound' +type CommitStoreReader_GetLatestPriceEpochAndRound_Call struct { + *mock.Call +} + +// GetLatestPriceEpochAndRound is a helper method to define mock.On call +// - ctx context.Context +func (_e *CommitStoreReader_Expecter) GetLatestPriceEpochAndRound(ctx interface{}) *CommitStoreReader_GetLatestPriceEpochAndRound_Call { + return &CommitStoreReader_GetLatestPriceEpochAndRound_Call{Call: _e.mock.On("GetLatestPriceEpochAndRound", ctx)} +} + +func (_c *CommitStoreReader_GetLatestPriceEpochAndRound_Call) Run(run func(ctx context.Context)) *CommitStoreReader_GetLatestPriceEpochAndRound_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *CommitStoreReader_GetLatestPriceEpochAndRound_Call) Return(_a0 uint64, _a1 error) *CommitStoreReader_GetLatestPriceEpochAndRound_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *CommitStoreReader_GetLatestPriceEpochAndRound_Call) RunAndReturn(run func(context.Context) (uint64, error)) *CommitStoreReader_GetLatestPriceEpochAndRound_Call { + _c.Call.Return(run) + return _c +} + +// IsBlessed provides a mock function with given fields: ctx, root +func (_m *CommitStoreReader) IsBlessed(ctx context.Context, root [32]byte) (bool, error) { + ret := _m.Called(ctx, root) + + if len(ret) == 0 { + panic("no return value specified for IsBlessed") + } + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, [32]byte) (bool, error)); ok { + return rf(ctx, root) + } + if rf, ok := ret.Get(0).(func(context.Context, [32]byte) bool); ok { + r0 = rf(ctx, root) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(context.Context, [32]byte) error); ok { + r1 = rf(ctx, root) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CommitStoreReader_IsBlessed_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'IsBlessed' +type CommitStoreReader_IsBlessed_Call struct { + *mock.Call +} + +// IsBlessed is a helper method to define mock.On call +// - ctx context.Context +// - root [32]byte +func (_e *CommitStoreReader_Expecter) IsBlessed(ctx interface{}, root interface{}) *CommitStoreReader_IsBlessed_Call { + return &CommitStoreReader_IsBlessed_Call{Call: _e.mock.On("IsBlessed", ctx, root)} +} + +func (_c *CommitStoreReader_IsBlessed_Call) Run(run func(ctx context.Context, root [32]byte)) *CommitStoreReader_IsBlessed_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].([32]byte)) + }) + return _c +} + +func (_c *CommitStoreReader_IsBlessed_Call) Return(_a0 bool, _a1 error) *CommitStoreReader_IsBlessed_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *CommitStoreReader_IsBlessed_Call) RunAndReturn(run func(context.Context, [32]byte) (bool, error)) *CommitStoreReader_IsBlessed_Call { + _c.Call.Return(run) + return _c +} + +// IsDestChainHealthy provides a mock function with given fields: ctx +func (_m *CommitStoreReader) IsDestChainHealthy(ctx context.Context) (bool, error) { + ret := 
_m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for IsDestChainHealthy") + } + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (bool, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) bool); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CommitStoreReader_IsDestChainHealthy_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'IsDestChainHealthy' +type CommitStoreReader_IsDestChainHealthy_Call struct { + *mock.Call +} + +// IsDestChainHealthy is a helper method to define mock.On call +// - ctx context.Context +func (_e *CommitStoreReader_Expecter) IsDestChainHealthy(ctx interface{}) *CommitStoreReader_IsDestChainHealthy_Call { + return &CommitStoreReader_IsDestChainHealthy_Call{Call: _e.mock.On("IsDestChainHealthy", ctx)} +} + +func (_c *CommitStoreReader_IsDestChainHealthy_Call) Run(run func(ctx context.Context)) *CommitStoreReader_IsDestChainHealthy_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *CommitStoreReader_IsDestChainHealthy_Call) Return(_a0 bool, _a1 error) *CommitStoreReader_IsDestChainHealthy_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *CommitStoreReader_IsDestChainHealthy_Call) RunAndReturn(run func(context.Context) (bool, error)) *CommitStoreReader_IsDestChainHealthy_Call { + _c.Call.Return(run) + return _c +} + +// IsDown provides a mock function with given fields: ctx +func (_m *CommitStoreReader) IsDown(ctx context.Context) (bool, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for IsDown") + } + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (bool, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) bool); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CommitStoreReader_IsDown_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'IsDown' +type CommitStoreReader_IsDown_Call struct { + *mock.Call +} + +// IsDown is a helper method to define mock.On call +// - ctx context.Context +func (_e *CommitStoreReader_Expecter) IsDown(ctx interface{}) *CommitStoreReader_IsDown_Call { + return &CommitStoreReader_IsDown_Call{Call: _e.mock.On("IsDown", ctx)} +} + +func (_c *CommitStoreReader_IsDown_Call) Run(run func(ctx context.Context)) *CommitStoreReader_IsDown_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *CommitStoreReader_IsDown_Call) Return(_a0 bool, _a1 error) *CommitStoreReader_IsDown_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *CommitStoreReader_IsDown_Call) RunAndReturn(run func(context.Context) (bool, error)) *CommitStoreReader_IsDown_Call { + _c.Call.Return(run) + return _c +} + +// OffchainConfig provides a mock function with given fields: ctx +func (_m *CommitStoreReader) OffchainConfig(ctx context.Context) (ccip.CommitOffchainConfig, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for OffchainConfig") + } + + var r0 ccip.CommitOffchainConfig + var r1 error + if rf, ok := 
ret.Get(0).(func(context.Context) (ccip.CommitOffchainConfig, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) ccip.CommitOffchainConfig); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(ccip.CommitOffchainConfig) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CommitStoreReader_OffchainConfig_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'OffchainConfig' +type CommitStoreReader_OffchainConfig_Call struct { + *mock.Call +} + +// OffchainConfig is a helper method to define mock.On call +// - ctx context.Context +func (_e *CommitStoreReader_Expecter) OffchainConfig(ctx interface{}) *CommitStoreReader_OffchainConfig_Call { + return &CommitStoreReader_OffchainConfig_Call{Call: _e.mock.On("OffchainConfig", ctx)} +} + +func (_c *CommitStoreReader_OffchainConfig_Call) Run(run func(ctx context.Context)) *CommitStoreReader_OffchainConfig_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *CommitStoreReader_OffchainConfig_Call) Return(_a0 ccip.CommitOffchainConfig, _a1 error) *CommitStoreReader_OffchainConfig_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *CommitStoreReader_OffchainConfig_Call) RunAndReturn(run func(context.Context) (ccip.CommitOffchainConfig, error)) *CommitStoreReader_OffchainConfig_Call { + _c.Call.Return(run) + return _c +} + +// SetGasEstimator provides a mock function with given fields: ctx, gpe +func (_m *CommitStoreReader) SetGasEstimator(ctx context.Context, gpe gas.EvmFeeEstimator) error { + ret := _m.Called(ctx, gpe) + + if len(ret) == 0 { + panic("no return value specified for SetGasEstimator") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, gas.EvmFeeEstimator) error); ok { + r0 = rf(ctx, gpe) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// CommitStoreReader_SetGasEstimator_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetGasEstimator' +type CommitStoreReader_SetGasEstimator_Call struct { + *mock.Call +} + +// SetGasEstimator is a helper method to define mock.On call +// - ctx context.Context +// - gpe gas.EvmFeeEstimator +func (_e *CommitStoreReader_Expecter) SetGasEstimator(ctx interface{}, gpe interface{}) *CommitStoreReader_SetGasEstimator_Call { + return &CommitStoreReader_SetGasEstimator_Call{Call: _e.mock.On("SetGasEstimator", ctx, gpe)} +} + +func (_c *CommitStoreReader_SetGasEstimator_Call) Run(run func(ctx context.Context, gpe gas.EvmFeeEstimator)) *CommitStoreReader_SetGasEstimator_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(gas.EvmFeeEstimator)) + }) + return _c +} + +func (_c *CommitStoreReader_SetGasEstimator_Call) Return(_a0 error) *CommitStoreReader_SetGasEstimator_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *CommitStoreReader_SetGasEstimator_Call) RunAndReturn(run func(context.Context, gas.EvmFeeEstimator) error) *CommitStoreReader_SetGasEstimator_Call { + _c.Call.Return(run) + return _c +} + +// SetSourceMaxGasPrice provides a mock function with given fields: ctx, sourceMaxGasPrice +func (_m *CommitStoreReader) SetSourceMaxGasPrice(ctx context.Context, sourceMaxGasPrice *big.Int) error { + ret := _m.Called(ctx, sourceMaxGasPrice) + + if len(ret) == 0 { + panic("no return value specified for SetSourceMaxGasPrice") + } + + var r0 error + if rf, ok := 
ret.Get(0).(func(context.Context, *big.Int) error); ok { + r0 = rf(ctx, sourceMaxGasPrice) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// CommitStoreReader_SetSourceMaxGasPrice_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetSourceMaxGasPrice' +type CommitStoreReader_SetSourceMaxGasPrice_Call struct { + *mock.Call +} + +// SetSourceMaxGasPrice is a helper method to define mock.On call +// - ctx context.Context +// - sourceMaxGasPrice *big.Int +func (_e *CommitStoreReader_Expecter) SetSourceMaxGasPrice(ctx interface{}, sourceMaxGasPrice interface{}) *CommitStoreReader_SetSourceMaxGasPrice_Call { + return &CommitStoreReader_SetSourceMaxGasPrice_Call{Call: _e.mock.On("SetSourceMaxGasPrice", ctx, sourceMaxGasPrice)} +} + +func (_c *CommitStoreReader_SetSourceMaxGasPrice_Call) Run(run func(ctx context.Context, sourceMaxGasPrice *big.Int)) *CommitStoreReader_SetSourceMaxGasPrice_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*big.Int)) + }) + return _c +} + +func (_c *CommitStoreReader_SetSourceMaxGasPrice_Call) Return(_a0 error) *CommitStoreReader_SetSourceMaxGasPrice_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *CommitStoreReader_SetSourceMaxGasPrice_Call) RunAndReturn(run func(context.Context, *big.Int) error) *CommitStoreReader_SetSourceMaxGasPrice_Call { + _c.Call.Return(run) + return _c +} + +// VerifyExecutionReport provides a mock function with given fields: ctx, report +func (_m *CommitStoreReader) VerifyExecutionReport(ctx context.Context, report ccip.ExecReport) (bool, error) { + ret := _m.Called(ctx, report) + + if len(ret) == 0 { + panic("no return value specified for VerifyExecutionReport") + } + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, ccip.ExecReport) (bool, error)); ok { + return rf(ctx, report) + } + if rf, ok := ret.Get(0).(func(context.Context, ccip.ExecReport) bool); ok { + r0 = rf(ctx, report) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(context.Context, ccip.ExecReport) error); ok { + r1 = rf(ctx, report) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CommitStoreReader_VerifyExecutionReport_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'VerifyExecutionReport' +type CommitStoreReader_VerifyExecutionReport_Call struct { + *mock.Call +} + +// VerifyExecutionReport is a helper method to define mock.On call +// - ctx context.Context +// - report ccip.ExecReport +func (_e *CommitStoreReader_Expecter) VerifyExecutionReport(ctx interface{}, report interface{}) *CommitStoreReader_VerifyExecutionReport_Call { + return &CommitStoreReader_VerifyExecutionReport_Call{Call: _e.mock.On("VerifyExecutionReport", ctx, report)} +} + +func (_c *CommitStoreReader_VerifyExecutionReport_Call) Run(run func(ctx context.Context, report ccip.ExecReport)) *CommitStoreReader_VerifyExecutionReport_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(ccip.ExecReport)) + }) + return _c +} + +func (_c *CommitStoreReader_VerifyExecutionReport_Call) Return(_a0 bool, _a1 error) *CommitStoreReader_VerifyExecutionReport_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *CommitStoreReader_VerifyExecutionReport_Call) RunAndReturn(run func(context.Context, ccip.ExecReport) (bool, error)) *CommitStoreReader_VerifyExecutionReport_Call { + _c.Call.Return(run) + return _c +} + +// NewCommitStoreReader creates a new instance of 
CommitStoreReader. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewCommitStoreReader(t interface { + mock.TestingT + Cleanup(func()) +}) *CommitStoreReader { + mock := &CommitStoreReader{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/mocks/offramp_reader_mock.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/mocks/offramp_reader_mock.go new file mode 100644 index 00000000000..f383ccdc0ba --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/mocks/offramp_reader_mock.go @@ -0,0 +1,949 @@ +// Code generated by mockery v2.43.2. DO NOT EDIT. + +package mocks + +import ( + ccip "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + + context "context" + + mock "github.com/stretchr/testify/mock" +) + +// OffRampReader is an autogenerated mock type for the OffRampReader type +type OffRampReader struct { + mock.Mock +} + +type OffRampReader_Expecter struct { + mock *mock.Mock +} + +func (_m *OffRampReader) EXPECT() *OffRampReader_Expecter { + return &OffRampReader_Expecter{mock: &_m.Mock} +} + +// Address provides a mock function with given fields: ctx +func (_m *OffRampReader) Address(ctx context.Context) (ccip.Address, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for Address") + } + + var r0 ccip.Address + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (ccip.Address, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) ccip.Address); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(ccip.Address) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// OffRampReader_Address_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Address' +type OffRampReader_Address_Call struct { + *mock.Call +} + +// Address is a helper method to define mock.On call +// - ctx context.Context +func (_e *OffRampReader_Expecter) Address(ctx interface{}) *OffRampReader_Address_Call { + return &OffRampReader_Address_Call{Call: _e.mock.On("Address", ctx)} +} + +func (_c *OffRampReader_Address_Call) Run(run func(ctx context.Context)) *OffRampReader_Address_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *OffRampReader_Address_Call) Return(_a0 ccip.Address, _a1 error) *OffRampReader_Address_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *OffRampReader_Address_Call) RunAndReturn(run func(context.Context) (ccip.Address, error)) *OffRampReader_Address_Call { + _c.Call.Return(run) + return _c +} + +// ChangeConfig provides a mock function with given fields: ctx, onchainConfig, offchainConfig +func (_m *OffRampReader) ChangeConfig(ctx context.Context, onchainConfig []byte, offchainConfig []byte) (ccip.Address, ccip.Address, error) { + ret := _m.Called(ctx, onchainConfig, offchainConfig) + + if len(ret) == 0 { + panic("no return value specified for ChangeConfig") + } + + var r0 ccip.Address + var r1 ccip.Address + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, []byte, []byte) (ccip.Address, ccip.Address, error)); ok { + return rf(ctx, onchainConfig, offchainConfig) + } + if rf, ok := ret.Get(0).(func(context.Context, []byte, []byte) ccip.Address); ok { + r0 = 
rf(ctx, onchainConfig, offchainConfig) + } else { + r0 = ret.Get(0).(ccip.Address) + } + + if rf, ok := ret.Get(1).(func(context.Context, []byte, []byte) ccip.Address); ok { + r1 = rf(ctx, onchainConfig, offchainConfig) + } else { + r1 = ret.Get(1).(ccip.Address) + } + + if rf, ok := ret.Get(2).(func(context.Context, []byte, []byte) error); ok { + r2 = rf(ctx, onchainConfig, offchainConfig) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// OffRampReader_ChangeConfig_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ChangeConfig' +type OffRampReader_ChangeConfig_Call struct { + *mock.Call +} + +// ChangeConfig is a helper method to define mock.On call +// - ctx context.Context +// - onchainConfig []byte +// - offchainConfig []byte +func (_e *OffRampReader_Expecter) ChangeConfig(ctx interface{}, onchainConfig interface{}, offchainConfig interface{}) *OffRampReader_ChangeConfig_Call { + return &OffRampReader_ChangeConfig_Call{Call: _e.mock.On("ChangeConfig", ctx, onchainConfig, offchainConfig)} +} + +func (_c *OffRampReader_ChangeConfig_Call) Run(run func(ctx context.Context, onchainConfig []byte, offchainConfig []byte)) *OffRampReader_ChangeConfig_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].([]byte), args[2].([]byte)) + }) + return _c +} + +func (_c *OffRampReader_ChangeConfig_Call) Return(_a0 ccip.Address, _a1 ccip.Address, _a2 error) *OffRampReader_ChangeConfig_Call { + _c.Call.Return(_a0, _a1, _a2) + return _c +} + +func (_c *OffRampReader_ChangeConfig_Call) RunAndReturn(run func(context.Context, []byte, []byte) (ccip.Address, ccip.Address, error)) *OffRampReader_ChangeConfig_Call { + _c.Call.Return(run) + return _c +} + +// Close provides a mock function with given fields: +func (_m *OffRampReader) Close() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Close") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// OffRampReader_Close_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Close' +type OffRampReader_Close_Call struct { + *mock.Call +} + +// Close is a helper method to define mock.On call +func (_e *OffRampReader_Expecter) Close() *OffRampReader_Close_Call { + return &OffRampReader_Close_Call{Call: _e.mock.On("Close")} +} + +func (_c *OffRampReader_Close_Call) Run(run func()) *OffRampReader_Close_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *OffRampReader_Close_Call) Return(_a0 error) *OffRampReader_Close_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *OffRampReader_Close_Call) RunAndReturn(run func() error) *OffRampReader_Close_Call { + _c.Call.Return(run) + return _c +} + +// CurrentRateLimiterState provides a mock function with given fields: ctx +func (_m *OffRampReader) CurrentRateLimiterState(ctx context.Context) (ccip.TokenBucketRateLimit, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for CurrentRateLimiterState") + } + + var r0 ccip.TokenBucketRateLimit + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (ccip.TokenBucketRateLimit, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) ccip.TokenBucketRateLimit); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(ccip.TokenBucketRateLimit) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = 
rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// OffRampReader_CurrentRateLimiterState_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CurrentRateLimiterState' +type OffRampReader_CurrentRateLimiterState_Call struct { + *mock.Call +} + +// CurrentRateLimiterState is a helper method to define mock.On call +// - ctx context.Context +func (_e *OffRampReader_Expecter) CurrentRateLimiterState(ctx interface{}) *OffRampReader_CurrentRateLimiterState_Call { + return &OffRampReader_CurrentRateLimiterState_Call{Call: _e.mock.On("CurrentRateLimiterState", ctx)} +} + +func (_c *OffRampReader_CurrentRateLimiterState_Call) Run(run func(ctx context.Context)) *OffRampReader_CurrentRateLimiterState_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *OffRampReader_CurrentRateLimiterState_Call) Return(_a0 ccip.TokenBucketRateLimit, _a1 error) *OffRampReader_CurrentRateLimiterState_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *OffRampReader_CurrentRateLimiterState_Call) RunAndReturn(run func(context.Context) (ccip.TokenBucketRateLimit, error)) *OffRampReader_CurrentRateLimiterState_Call { + _c.Call.Return(run) + return _c +} + +// DecodeExecutionReport provides a mock function with given fields: ctx, report +func (_m *OffRampReader) DecodeExecutionReport(ctx context.Context, report []byte) (ccip.ExecReport, error) { + ret := _m.Called(ctx, report) + + if len(ret) == 0 { + panic("no return value specified for DecodeExecutionReport") + } + + var r0 ccip.ExecReport + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, []byte) (ccip.ExecReport, error)); ok { + return rf(ctx, report) + } + if rf, ok := ret.Get(0).(func(context.Context, []byte) ccip.ExecReport); ok { + r0 = rf(ctx, report) + } else { + r0 = ret.Get(0).(ccip.ExecReport) + } + + if rf, ok := ret.Get(1).(func(context.Context, []byte) error); ok { + r1 = rf(ctx, report) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// OffRampReader_DecodeExecutionReport_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DecodeExecutionReport' +type OffRampReader_DecodeExecutionReport_Call struct { + *mock.Call +} + +// DecodeExecutionReport is a helper method to define mock.On call +// - ctx context.Context +// - report []byte +func (_e *OffRampReader_Expecter) DecodeExecutionReport(ctx interface{}, report interface{}) *OffRampReader_DecodeExecutionReport_Call { + return &OffRampReader_DecodeExecutionReport_Call{Call: _e.mock.On("DecodeExecutionReport", ctx, report)} +} + +func (_c *OffRampReader_DecodeExecutionReport_Call) Run(run func(ctx context.Context, report []byte)) *OffRampReader_DecodeExecutionReport_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].([]byte)) + }) + return _c +} + +func (_c *OffRampReader_DecodeExecutionReport_Call) Return(_a0 ccip.ExecReport, _a1 error) *OffRampReader_DecodeExecutionReport_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *OffRampReader_DecodeExecutionReport_Call) RunAndReturn(run func(context.Context, []byte) (ccip.ExecReport, error)) *OffRampReader_DecodeExecutionReport_Call { + _c.Call.Return(run) + return _c +} + +// EncodeExecutionReport provides a mock function with given fields: ctx, report +func (_m *OffRampReader) EncodeExecutionReport(ctx context.Context, report ccip.ExecReport) ([]byte, error) { + ret := _m.Called(ctx, report) + + if len(ret) == 0 { + panic("no 
return value specified for EncodeExecutionReport") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, ccip.ExecReport) ([]byte, error)); ok { + return rf(ctx, report) + } + if rf, ok := ret.Get(0).(func(context.Context, ccip.ExecReport) []byte); ok { + r0 = rf(ctx, report) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, ccip.ExecReport) error); ok { + r1 = rf(ctx, report) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// OffRampReader_EncodeExecutionReport_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'EncodeExecutionReport' +type OffRampReader_EncodeExecutionReport_Call struct { + *mock.Call +} + +// EncodeExecutionReport is a helper method to define mock.On call +// - ctx context.Context +// - report ccip.ExecReport +func (_e *OffRampReader_Expecter) EncodeExecutionReport(ctx interface{}, report interface{}) *OffRampReader_EncodeExecutionReport_Call { + return &OffRampReader_EncodeExecutionReport_Call{Call: _e.mock.On("EncodeExecutionReport", ctx, report)} +} + +func (_c *OffRampReader_EncodeExecutionReport_Call) Run(run func(ctx context.Context, report ccip.ExecReport)) *OffRampReader_EncodeExecutionReport_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(ccip.ExecReport)) + }) + return _c +} + +func (_c *OffRampReader_EncodeExecutionReport_Call) Return(_a0 []byte, _a1 error) *OffRampReader_EncodeExecutionReport_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *OffRampReader_EncodeExecutionReport_Call) RunAndReturn(run func(context.Context, ccip.ExecReport) ([]byte, error)) *OffRampReader_EncodeExecutionReport_Call { + _c.Call.Return(run) + return _c +} + +// GasPriceEstimator provides a mock function with given fields: ctx +func (_m *OffRampReader) GasPriceEstimator(ctx context.Context) (ccip.GasPriceEstimatorExec, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for GasPriceEstimator") + } + + var r0 ccip.GasPriceEstimatorExec + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (ccip.GasPriceEstimatorExec, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) ccip.GasPriceEstimatorExec); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(ccip.GasPriceEstimatorExec) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// OffRampReader_GasPriceEstimator_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GasPriceEstimator' +type OffRampReader_GasPriceEstimator_Call struct { + *mock.Call +} + +// GasPriceEstimator is a helper method to define mock.On call +// - ctx context.Context +func (_e *OffRampReader_Expecter) GasPriceEstimator(ctx interface{}) *OffRampReader_GasPriceEstimator_Call { + return &OffRampReader_GasPriceEstimator_Call{Call: _e.mock.On("GasPriceEstimator", ctx)} +} + +func (_c *OffRampReader_GasPriceEstimator_Call) Run(run func(ctx context.Context)) *OffRampReader_GasPriceEstimator_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *OffRampReader_GasPriceEstimator_Call) Return(_a0 ccip.GasPriceEstimatorExec, _a1 error) *OffRampReader_GasPriceEstimator_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *OffRampReader_GasPriceEstimator_Call) 
RunAndReturn(run func(context.Context) (ccip.GasPriceEstimatorExec, error)) *OffRampReader_GasPriceEstimator_Call { + _c.Call.Return(run) + return _c +} + +// GetExecutionState provides a mock function with given fields: ctx, sequenceNumber +func (_m *OffRampReader) GetExecutionState(ctx context.Context, sequenceNumber uint64) (uint8, error) { + ret := _m.Called(ctx, sequenceNumber) + + if len(ret) == 0 { + panic("no return value specified for GetExecutionState") + } + + var r0 uint8 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64) (uint8, error)); ok { + return rf(ctx, sequenceNumber) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64) uint8); ok { + r0 = rf(ctx, sequenceNumber) + } else { + r0 = ret.Get(0).(uint8) + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64) error); ok { + r1 = rf(ctx, sequenceNumber) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// OffRampReader_GetExecutionState_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetExecutionState' +type OffRampReader_GetExecutionState_Call struct { + *mock.Call +} + +// GetExecutionState is a helper method to define mock.On call +// - ctx context.Context +// - sequenceNumber uint64 +func (_e *OffRampReader_Expecter) GetExecutionState(ctx interface{}, sequenceNumber interface{}) *OffRampReader_GetExecutionState_Call { + return &OffRampReader_GetExecutionState_Call{Call: _e.mock.On("GetExecutionState", ctx, sequenceNumber)} +} + +func (_c *OffRampReader_GetExecutionState_Call) Run(run func(ctx context.Context, sequenceNumber uint64)) *OffRampReader_GetExecutionState_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint64)) + }) + return _c +} + +func (_c *OffRampReader_GetExecutionState_Call) Return(_a0 uint8, _a1 error) *OffRampReader_GetExecutionState_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *OffRampReader_GetExecutionState_Call) RunAndReturn(run func(context.Context, uint64) (uint8, error)) *OffRampReader_GetExecutionState_Call { + _c.Call.Return(run) + return _c +} + +// GetExecutionStateChangesBetweenSeqNums provides a mock function with given fields: ctx, seqNumMin, seqNumMax, confirmations +func (_m *OffRampReader) GetExecutionStateChangesBetweenSeqNums(ctx context.Context, seqNumMin uint64, seqNumMax uint64, confirmations int) ([]ccip.ExecutionStateChangedWithTxMeta, error) { + ret := _m.Called(ctx, seqNumMin, seqNumMax, confirmations) + + if len(ret) == 0 { + panic("no return value specified for GetExecutionStateChangesBetweenSeqNums") + } + + var r0 []ccip.ExecutionStateChangedWithTxMeta + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64, int) ([]ccip.ExecutionStateChangedWithTxMeta, error)); ok { + return rf(ctx, seqNumMin, seqNumMax, confirmations) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64, int) []ccip.ExecutionStateChangedWithTxMeta); ok { + r0 = rf(ctx, seqNumMin, seqNumMax, confirmations) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]ccip.ExecutionStateChangedWithTxMeta) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64, uint64, int) error); ok { + r1 = rf(ctx, seqNumMin, seqNumMax, confirmations) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// OffRampReader_GetExecutionStateChangesBetweenSeqNums_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetExecutionStateChangesBetweenSeqNums' +type 
OffRampReader_GetExecutionStateChangesBetweenSeqNums_Call struct { + *mock.Call +} + +// GetExecutionStateChangesBetweenSeqNums is a helper method to define mock.On call +// - ctx context.Context +// - seqNumMin uint64 +// - seqNumMax uint64 +// - confirmations int +func (_e *OffRampReader_Expecter) GetExecutionStateChangesBetweenSeqNums(ctx interface{}, seqNumMin interface{}, seqNumMax interface{}, confirmations interface{}) *OffRampReader_GetExecutionStateChangesBetweenSeqNums_Call { + return &OffRampReader_GetExecutionStateChangesBetweenSeqNums_Call{Call: _e.mock.On("GetExecutionStateChangesBetweenSeqNums", ctx, seqNumMin, seqNumMax, confirmations)} +} + +func (_c *OffRampReader_GetExecutionStateChangesBetweenSeqNums_Call) Run(run func(ctx context.Context, seqNumMin uint64, seqNumMax uint64, confirmations int)) *OffRampReader_GetExecutionStateChangesBetweenSeqNums_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint64), args[2].(uint64), args[3].(int)) + }) + return _c +} + +func (_c *OffRampReader_GetExecutionStateChangesBetweenSeqNums_Call) Return(_a0 []ccip.ExecutionStateChangedWithTxMeta, _a1 error) *OffRampReader_GetExecutionStateChangesBetweenSeqNums_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *OffRampReader_GetExecutionStateChangesBetweenSeqNums_Call) RunAndReturn(run func(context.Context, uint64, uint64, int) ([]ccip.ExecutionStateChangedWithTxMeta, error)) *OffRampReader_GetExecutionStateChangesBetweenSeqNums_Call { + _c.Call.Return(run) + return _c +} + +// GetRouter provides a mock function with given fields: ctx +func (_m *OffRampReader) GetRouter(ctx context.Context) (ccip.Address, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for GetRouter") + } + + var r0 ccip.Address + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (ccip.Address, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) ccip.Address); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(ccip.Address) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// OffRampReader_GetRouter_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetRouter' +type OffRampReader_GetRouter_Call struct { + *mock.Call +} + +// GetRouter is a helper method to define mock.On call +// - ctx context.Context +func (_e *OffRampReader_Expecter) GetRouter(ctx interface{}) *OffRampReader_GetRouter_Call { + return &OffRampReader_GetRouter_Call{Call: _e.mock.On("GetRouter", ctx)} +} + +func (_c *OffRampReader_GetRouter_Call) Run(run func(ctx context.Context)) *OffRampReader_GetRouter_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *OffRampReader_GetRouter_Call) Return(_a0 ccip.Address, _a1 error) *OffRampReader_GetRouter_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *OffRampReader_GetRouter_Call) RunAndReturn(run func(context.Context) (ccip.Address, error)) *OffRampReader_GetRouter_Call { + _c.Call.Return(run) + return _c +} + +// GetSourceToDestTokensMapping provides a mock function with given fields: ctx +func (_m *OffRampReader) GetSourceToDestTokensMapping(ctx context.Context) (map[ccip.Address]ccip.Address, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for GetSourceToDestTokensMapping") + } + + var r0 map[ccip.Address]ccip.Address + var r1 
error + if rf, ok := ret.Get(0).(func(context.Context) (map[ccip.Address]ccip.Address, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) map[ccip.Address]ccip.Address); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[ccip.Address]ccip.Address) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// OffRampReader_GetSourceToDestTokensMapping_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetSourceToDestTokensMapping' +type OffRampReader_GetSourceToDestTokensMapping_Call struct { + *mock.Call +} + +// GetSourceToDestTokensMapping is a helper method to define mock.On call +// - ctx context.Context +func (_e *OffRampReader_Expecter) GetSourceToDestTokensMapping(ctx interface{}) *OffRampReader_GetSourceToDestTokensMapping_Call { + return &OffRampReader_GetSourceToDestTokensMapping_Call{Call: _e.mock.On("GetSourceToDestTokensMapping", ctx)} +} + +func (_c *OffRampReader_GetSourceToDestTokensMapping_Call) Run(run func(ctx context.Context)) *OffRampReader_GetSourceToDestTokensMapping_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *OffRampReader_GetSourceToDestTokensMapping_Call) Return(_a0 map[ccip.Address]ccip.Address, _a1 error) *OffRampReader_GetSourceToDestTokensMapping_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *OffRampReader_GetSourceToDestTokensMapping_Call) RunAndReturn(run func(context.Context) (map[ccip.Address]ccip.Address, error)) *OffRampReader_GetSourceToDestTokensMapping_Call { + _c.Call.Return(run) + return _c +} + +// GetStaticConfig provides a mock function with given fields: ctx +func (_m *OffRampReader) GetStaticConfig(ctx context.Context) (ccip.OffRampStaticConfig, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for GetStaticConfig") + } + + var r0 ccip.OffRampStaticConfig + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (ccip.OffRampStaticConfig, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) ccip.OffRampStaticConfig); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(ccip.OffRampStaticConfig) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// OffRampReader_GetStaticConfig_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetStaticConfig' +type OffRampReader_GetStaticConfig_Call struct { + *mock.Call +} + +// GetStaticConfig is a helper method to define mock.On call +// - ctx context.Context +func (_e *OffRampReader_Expecter) GetStaticConfig(ctx interface{}) *OffRampReader_GetStaticConfig_Call { + return &OffRampReader_GetStaticConfig_Call{Call: _e.mock.On("GetStaticConfig", ctx)} +} + +func (_c *OffRampReader_GetStaticConfig_Call) Run(run func(ctx context.Context)) *OffRampReader_GetStaticConfig_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *OffRampReader_GetStaticConfig_Call) Return(_a0 ccip.OffRampStaticConfig, _a1 error) *OffRampReader_GetStaticConfig_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *OffRampReader_GetStaticConfig_Call) RunAndReturn(run func(context.Context) (ccip.OffRampStaticConfig, error)) *OffRampReader_GetStaticConfig_Call { + _c.Call.Return(run) + return _c +} + 
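For reviewers unfamiliar with the mockery Expecter pattern used throughout these generated files, the sketch below shows roughly how a test would drive one of them. It is illustrative only and not part of the generated code in this diff; the import path for the mocks package (and the `chainlink/v2` module prefix) is an assumption, and it only calls helpers that appear in this patch (NewOffRampReader, EXPECT().GetStaticConfig).

package ccipdata_test

import (
	"context"
	"testing"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	ccip "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"

	// Assumed import path for the generated mocks added in this diff.
	"github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/mocks"
)

// Sketch: exercises the generated OffRampReader mock through its typed
// EXPECT() helpers. NewOffRampReader registers a cleanup function that
// asserts the expectations when the test finishes.
func TestOffRampReaderMockSketch(t *testing.T) {
	reader := mocks.NewOffRampReader(t)

	// Expect exactly one GetStaticConfig call with any context and stub its return.
	reader.EXPECT().
		GetStaticConfig(mock.Anything).
		Return(ccip.OffRampStaticConfig{}, nil).
		Once()

	cfg, err := reader.GetStaticConfig(context.Background())
	require.NoError(t, err)
	require.Equal(t, ccip.OffRampStaticConfig{}, cfg)
}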
+// GetTokens provides a mock function with given fields: ctx +func (_m *OffRampReader) GetTokens(ctx context.Context) (ccip.OffRampTokens, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for GetTokens") + } + + var r0 ccip.OffRampTokens + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (ccip.OffRampTokens, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) ccip.OffRampTokens); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(ccip.OffRampTokens) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// OffRampReader_GetTokens_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetTokens' +type OffRampReader_GetTokens_Call struct { + *mock.Call +} + +// GetTokens is a helper method to define mock.On call +// - ctx context.Context +func (_e *OffRampReader_Expecter) GetTokens(ctx interface{}) *OffRampReader_GetTokens_Call { + return &OffRampReader_GetTokens_Call{Call: _e.mock.On("GetTokens", ctx)} +} + +func (_c *OffRampReader_GetTokens_Call) Run(run func(ctx context.Context)) *OffRampReader_GetTokens_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *OffRampReader_GetTokens_Call) Return(_a0 ccip.OffRampTokens, _a1 error) *OffRampReader_GetTokens_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *OffRampReader_GetTokens_Call) RunAndReturn(run func(context.Context) (ccip.OffRampTokens, error)) *OffRampReader_GetTokens_Call { + _c.Call.Return(run) + return _c +} + +// ListSenderNonces provides a mock function with given fields: ctx, senders +func (_m *OffRampReader) ListSenderNonces(ctx context.Context, senders []ccip.Address) (map[ccip.Address]uint64, error) { + ret := _m.Called(ctx, senders) + + if len(ret) == 0 { + panic("no return value specified for ListSenderNonces") + } + + var r0 map[ccip.Address]uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, []ccip.Address) (map[ccip.Address]uint64, error)); ok { + return rf(ctx, senders) + } + if rf, ok := ret.Get(0).(func(context.Context, []ccip.Address) map[ccip.Address]uint64); ok { + r0 = rf(ctx, senders) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[ccip.Address]uint64) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, []ccip.Address) error); ok { + r1 = rf(ctx, senders) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// OffRampReader_ListSenderNonces_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListSenderNonces' +type OffRampReader_ListSenderNonces_Call struct { + *mock.Call +} + +// ListSenderNonces is a helper method to define mock.On call +// - ctx context.Context +// - senders []ccip.Address +func (_e *OffRampReader_Expecter) ListSenderNonces(ctx interface{}, senders interface{}) *OffRampReader_ListSenderNonces_Call { + return &OffRampReader_ListSenderNonces_Call{Call: _e.mock.On("ListSenderNonces", ctx, senders)} +} + +func (_c *OffRampReader_ListSenderNonces_Call) Run(run func(ctx context.Context, senders []ccip.Address)) *OffRampReader_ListSenderNonces_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].([]ccip.Address)) + }) + return _c +} + +func (_c *OffRampReader_ListSenderNonces_Call) Return(_a0 map[ccip.Address]uint64, _a1 error) *OffRampReader_ListSenderNonces_Call { + _c.Call.Return(_a0, _a1) + return _c 
+} + +func (_c *OffRampReader_ListSenderNonces_Call) RunAndReturn(run func(context.Context, []ccip.Address) (map[ccip.Address]uint64, error)) *OffRampReader_ListSenderNonces_Call { + _c.Call.Return(run) + return _c +} + +// OffchainConfig provides a mock function with given fields: ctx +func (_m *OffRampReader) OffchainConfig(ctx context.Context) (ccip.ExecOffchainConfig, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for OffchainConfig") + } + + var r0 ccip.ExecOffchainConfig + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (ccip.ExecOffchainConfig, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) ccip.ExecOffchainConfig); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(ccip.ExecOffchainConfig) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// OffRampReader_OffchainConfig_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'OffchainConfig' +type OffRampReader_OffchainConfig_Call struct { + *mock.Call +} + +// OffchainConfig is a helper method to define mock.On call +// - ctx context.Context +func (_e *OffRampReader_Expecter) OffchainConfig(ctx interface{}) *OffRampReader_OffchainConfig_Call { + return &OffRampReader_OffchainConfig_Call{Call: _e.mock.On("OffchainConfig", ctx)} +} + +func (_c *OffRampReader_OffchainConfig_Call) Run(run func(ctx context.Context)) *OffRampReader_OffchainConfig_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *OffRampReader_OffchainConfig_Call) Return(_a0 ccip.ExecOffchainConfig, _a1 error) *OffRampReader_OffchainConfig_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *OffRampReader_OffchainConfig_Call) RunAndReturn(run func(context.Context) (ccip.ExecOffchainConfig, error)) *OffRampReader_OffchainConfig_Call { + _c.Call.Return(run) + return _c +} + +// OnchainConfig provides a mock function with given fields: ctx +func (_m *OffRampReader) OnchainConfig(ctx context.Context) (ccip.ExecOnchainConfig, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for OnchainConfig") + } + + var r0 ccip.ExecOnchainConfig + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (ccip.ExecOnchainConfig, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) ccip.ExecOnchainConfig); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(ccip.ExecOnchainConfig) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// OffRampReader_OnchainConfig_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'OnchainConfig' +type OffRampReader_OnchainConfig_Call struct { + *mock.Call +} + +// OnchainConfig is a helper method to define mock.On call +// - ctx context.Context +func (_e *OffRampReader_Expecter) OnchainConfig(ctx interface{}) *OffRampReader_OnchainConfig_Call { + return &OffRampReader_OnchainConfig_Call{Call: _e.mock.On("OnchainConfig", ctx)} +} + +func (_c *OffRampReader_OnchainConfig_Call) Run(run func(ctx context.Context)) *OffRampReader_OnchainConfig_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *OffRampReader_OnchainConfig_Call) Return(_a0 ccip.ExecOnchainConfig, _a1 error) *OffRampReader_OnchainConfig_Call { + 
_c.Call.Return(_a0, _a1) + return _c +} + +func (_c *OffRampReader_OnchainConfig_Call) RunAndReturn(run func(context.Context) (ccip.ExecOnchainConfig, error)) *OffRampReader_OnchainConfig_Call { + _c.Call.Return(run) + return _c +} + +// NewOffRampReader creates a new instance of OffRampReader. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewOffRampReader(t interface { + mock.TestingT + Cleanup(func()) +}) *OffRampReader { + mock := &OffRampReader{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/mocks/onramp_reader_mock.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/mocks/onramp_reader_mock.go new file mode 100644 index 00000000000..ccf5bd78463 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/mocks/onramp_reader_mock.go @@ -0,0 +1,480 @@ +// Code generated by mockery v2.43.2. DO NOT EDIT. + +package mocks + +import ( + ccip "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + + context "context" + + mock "github.com/stretchr/testify/mock" +) + +// OnRampReader is an autogenerated mock type for the OnRampReader type +type OnRampReader struct { + mock.Mock +} + +type OnRampReader_Expecter struct { + mock *mock.Mock +} + +func (_m *OnRampReader) EXPECT() *OnRampReader_Expecter { + return &OnRampReader_Expecter{mock: &_m.Mock} +} + +// Address provides a mock function with given fields: ctx +func (_m *OnRampReader) Address(ctx context.Context) (ccip.Address, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for Address") + } + + var r0 ccip.Address + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (ccip.Address, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) ccip.Address); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(ccip.Address) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// OnRampReader_Address_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Address' +type OnRampReader_Address_Call struct { + *mock.Call +} + +// Address is a helper method to define mock.On call +// - ctx context.Context +func (_e *OnRampReader_Expecter) Address(ctx interface{}) *OnRampReader_Address_Call { + return &OnRampReader_Address_Call{Call: _e.mock.On("Address", ctx)} +} + +func (_c *OnRampReader_Address_Call) Run(run func(ctx context.Context)) *OnRampReader_Address_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *OnRampReader_Address_Call) Return(_a0 ccip.Address, _a1 error) *OnRampReader_Address_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *OnRampReader_Address_Call) RunAndReturn(run func(context.Context) (ccip.Address, error)) *OnRampReader_Address_Call { + _c.Call.Return(run) + return _c +} + +// Close provides a mock function with given fields: +func (_m *OnRampReader) Close() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Close") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// OnRampReader_Close_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Close' +type 
OnRampReader_Close_Call struct { + *mock.Call +} + +// Close is a helper method to define mock.On call +func (_e *OnRampReader_Expecter) Close() *OnRampReader_Close_Call { + return &OnRampReader_Close_Call{Call: _e.mock.On("Close")} +} + +func (_c *OnRampReader_Close_Call) Run(run func()) *OnRampReader_Close_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *OnRampReader_Close_Call) Return(_a0 error) *OnRampReader_Close_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *OnRampReader_Close_Call) RunAndReturn(run func() error) *OnRampReader_Close_Call { + _c.Call.Return(run) + return _c +} + +// GetDynamicConfig provides a mock function with given fields: ctx +func (_m *OnRampReader) GetDynamicConfig(ctx context.Context) (ccip.OnRampDynamicConfig, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for GetDynamicConfig") + } + + var r0 ccip.OnRampDynamicConfig + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (ccip.OnRampDynamicConfig, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) ccip.OnRampDynamicConfig); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(ccip.OnRampDynamicConfig) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// OnRampReader_GetDynamicConfig_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetDynamicConfig' +type OnRampReader_GetDynamicConfig_Call struct { + *mock.Call +} + +// GetDynamicConfig is a helper method to define mock.On call +// - ctx context.Context +func (_e *OnRampReader_Expecter) GetDynamicConfig(ctx interface{}) *OnRampReader_GetDynamicConfig_Call { + return &OnRampReader_GetDynamicConfig_Call{Call: _e.mock.On("GetDynamicConfig", ctx)} +} + +func (_c *OnRampReader_GetDynamicConfig_Call) Run(run func(ctx context.Context)) *OnRampReader_GetDynamicConfig_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *OnRampReader_GetDynamicConfig_Call) Return(_a0 ccip.OnRampDynamicConfig, _a1 error) *OnRampReader_GetDynamicConfig_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *OnRampReader_GetDynamicConfig_Call) RunAndReturn(run func(context.Context) (ccip.OnRampDynamicConfig, error)) *OnRampReader_GetDynamicConfig_Call { + _c.Call.Return(run) + return _c +} + +// GetSendRequestsBetweenSeqNums provides a mock function with given fields: ctx, seqNumMin, seqNumMax, finalized +func (_m *OnRampReader) GetSendRequestsBetweenSeqNums(ctx context.Context, seqNumMin uint64, seqNumMax uint64, finalized bool) ([]ccip.EVM2EVMMessageWithTxMeta, error) { + ret := _m.Called(ctx, seqNumMin, seqNumMax, finalized) + + if len(ret) == 0 { + panic("no return value specified for GetSendRequestsBetweenSeqNums") + } + + var r0 []ccip.EVM2EVMMessageWithTxMeta + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64, bool) ([]ccip.EVM2EVMMessageWithTxMeta, error)); ok { + return rf(ctx, seqNumMin, seqNumMax, finalized) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64, bool) []ccip.EVM2EVMMessageWithTxMeta); ok { + r0 = rf(ctx, seqNumMin, seqNumMax, finalized) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]ccip.EVM2EVMMessageWithTxMeta) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64, uint64, bool) error); ok { + r1 = rf(ctx, seqNumMin, seqNumMax, finalized) + } else { + r1 = 
ret.Error(1) + } + + return r0, r1 +} + +// OnRampReader_GetSendRequestsBetweenSeqNums_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetSendRequestsBetweenSeqNums' +type OnRampReader_GetSendRequestsBetweenSeqNums_Call struct { + *mock.Call +} + +// GetSendRequestsBetweenSeqNums is a helper method to define mock.On call +// - ctx context.Context +// - seqNumMin uint64 +// - seqNumMax uint64 +// - finalized bool +func (_e *OnRampReader_Expecter) GetSendRequestsBetweenSeqNums(ctx interface{}, seqNumMin interface{}, seqNumMax interface{}, finalized interface{}) *OnRampReader_GetSendRequestsBetweenSeqNums_Call { + return &OnRampReader_GetSendRequestsBetweenSeqNums_Call{Call: _e.mock.On("GetSendRequestsBetweenSeqNums", ctx, seqNumMin, seqNumMax, finalized)} +} + +func (_c *OnRampReader_GetSendRequestsBetweenSeqNums_Call) Run(run func(ctx context.Context, seqNumMin uint64, seqNumMax uint64, finalized bool)) *OnRampReader_GetSendRequestsBetweenSeqNums_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint64), args[2].(uint64), args[3].(bool)) + }) + return _c +} + +func (_c *OnRampReader_GetSendRequestsBetweenSeqNums_Call) Return(_a0 []ccip.EVM2EVMMessageWithTxMeta, _a1 error) *OnRampReader_GetSendRequestsBetweenSeqNums_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *OnRampReader_GetSendRequestsBetweenSeqNums_Call) RunAndReturn(run func(context.Context, uint64, uint64, bool) ([]ccip.EVM2EVMMessageWithTxMeta, error)) *OnRampReader_GetSendRequestsBetweenSeqNums_Call { + _c.Call.Return(run) + return _c +} + +// IsSourceChainHealthy provides a mock function with given fields: ctx +func (_m *OnRampReader) IsSourceChainHealthy(ctx context.Context) (bool, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for IsSourceChainHealthy") + } + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (bool, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) bool); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// OnRampReader_IsSourceChainHealthy_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'IsSourceChainHealthy' +type OnRampReader_IsSourceChainHealthy_Call struct { + *mock.Call +} + +// IsSourceChainHealthy is a helper method to define mock.On call +// - ctx context.Context +func (_e *OnRampReader_Expecter) IsSourceChainHealthy(ctx interface{}) *OnRampReader_IsSourceChainHealthy_Call { + return &OnRampReader_IsSourceChainHealthy_Call{Call: _e.mock.On("IsSourceChainHealthy", ctx)} +} + +func (_c *OnRampReader_IsSourceChainHealthy_Call) Run(run func(ctx context.Context)) *OnRampReader_IsSourceChainHealthy_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *OnRampReader_IsSourceChainHealthy_Call) Return(_a0 bool, _a1 error) *OnRampReader_IsSourceChainHealthy_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *OnRampReader_IsSourceChainHealthy_Call) RunAndReturn(run func(context.Context) (bool, error)) *OnRampReader_IsSourceChainHealthy_Call { + _c.Call.Return(run) + return _c +} + +// IsSourceCursed provides a mock function with given fields: ctx +func (_m *OnRampReader) IsSourceCursed(ctx context.Context) (bool, error) { + ret := _m.Called(ctx) + + if 
len(ret) == 0 { + panic("no return value specified for IsSourceCursed") + } + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (bool, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) bool); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// OnRampReader_IsSourceCursed_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'IsSourceCursed' +type OnRampReader_IsSourceCursed_Call struct { + *mock.Call +} + +// IsSourceCursed is a helper method to define mock.On call +// - ctx context.Context +func (_e *OnRampReader_Expecter) IsSourceCursed(ctx interface{}) *OnRampReader_IsSourceCursed_Call { + return &OnRampReader_IsSourceCursed_Call{Call: _e.mock.On("IsSourceCursed", ctx)} +} + +func (_c *OnRampReader_IsSourceCursed_Call) Run(run func(ctx context.Context)) *OnRampReader_IsSourceCursed_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *OnRampReader_IsSourceCursed_Call) Return(_a0 bool, _a1 error) *OnRampReader_IsSourceCursed_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *OnRampReader_IsSourceCursed_Call) RunAndReturn(run func(context.Context) (bool, error)) *OnRampReader_IsSourceCursed_Call { + _c.Call.Return(run) + return _c +} + +// RouterAddress provides a mock function with given fields: _a0 +func (_m *OnRampReader) RouterAddress(_a0 context.Context) (ccip.Address, error) { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for RouterAddress") + } + + var r0 ccip.Address + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (ccip.Address, error)); ok { + return rf(_a0) + } + if rf, ok := ret.Get(0).(func(context.Context) ccip.Address); ok { + r0 = rf(_a0) + } else { + r0 = ret.Get(0).(ccip.Address) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// OnRampReader_RouterAddress_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RouterAddress' +type OnRampReader_RouterAddress_Call struct { + *mock.Call +} + +// RouterAddress is a helper method to define mock.On call +// - _a0 context.Context +func (_e *OnRampReader_Expecter) RouterAddress(_a0 interface{}) *OnRampReader_RouterAddress_Call { + return &OnRampReader_RouterAddress_Call{Call: _e.mock.On("RouterAddress", _a0)} +} + +func (_c *OnRampReader_RouterAddress_Call) Run(run func(_a0 context.Context)) *OnRampReader_RouterAddress_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *OnRampReader_RouterAddress_Call) Return(_a0 ccip.Address, _a1 error) *OnRampReader_RouterAddress_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *OnRampReader_RouterAddress_Call) RunAndReturn(run func(context.Context) (ccip.Address, error)) *OnRampReader_RouterAddress_Call { + _c.Call.Return(run) + return _c +} + +// SourcePriceRegistryAddress provides a mock function with given fields: ctx +func (_m *OnRampReader) SourcePriceRegistryAddress(ctx context.Context) (ccip.Address, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for SourcePriceRegistryAddress") + } + + var r0 ccip.Address + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) 
(ccip.Address, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) ccip.Address); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(ccip.Address) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// OnRampReader_SourcePriceRegistryAddress_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SourcePriceRegistryAddress' +type OnRampReader_SourcePriceRegistryAddress_Call struct { + *mock.Call +} + +// SourcePriceRegistryAddress is a helper method to define mock.On call +// - ctx context.Context +func (_e *OnRampReader_Expecter) SourcePriceRegistryAddress(ctx interface{}) *OnRampReader_SourcePriceRegistryAddress_Call { + return &OnRampReader_SourcePriceRegistryAddress_Call{Call: _e.mock.On("SourcePriceRegistryAddress", ctx)} +} + +func (_c *OnRampReader_SourcePriceRegistryAddress_Call) Run(run func(ctx context.Context)) *OnRampReader_SourcePriceRegistryAddress_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *OnRampReader_SourcePriceRegistryAddress_Call) Return(_a0 ccip.Address, _a1 error) *OnRampReader_SourcePriceRegistryAddress_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *OnRampReader_SourcePriceRegistryAddress_Call) RunAndReturn(run func(context.Context) (ccip.Address, error)) *OnRampReader_SourcePriceRegistryAddress_Call { + _c.Call.Return(run) + return _c +} + +// NewOnRampReader creates a new instance of OnRampReader. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewOnRampReader(t interface { + mock.TestingT + Cleanup(func()) +}) *OnRampReader { + mock := &OnRampReader{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/mocks/price_registry_reader_mock.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/mocks/price_registry_reader_mock.go new file mode 100644 index 00000000000..94e354acb25 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/mocks/price_registry_reader_mock.go @@ -0,0 +1,498 @@ +// Code generated by mockery v2.43.2. DO NOT EDIT. 
+ +package mocks + +import ( + ccip "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + + context "context" + + mock "github.com/stretchr/testify/mock" + + time "time" +) + +// PriceRegistryReader is an autogenerated mock type for the PriceRegistryReader type +type PriceRegistryReader struct { + mock.Mock +} + +type PriceRegistryReader_Expecter struct { + mock *mock.Mock +} + +func (_m *PriceRegistryReader) EXPECT() *PriceRegistryReader_Expecter { + return &PriceRegistryReader_Expecter{mock: &_m.Mock} +} + +// Address provides a mock function with given fields: ctx +func (_m *PriceRegistryReader) Address(ctx context.Context) (ccip.Address, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for Address") + } + + var r0 ccip.Address + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (ccip.Address, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) ccip.Address); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(ccip.Address) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// PriceRegistryReader_Address_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Address' +type PriceRegistryReader_Address_Call struct { + *mock.Call +} + +// Address is a helper method to define mock.On call +// - ctx context.Context +func (_e *PriceRegistryReader_Expecter) Address(ctx interface{}) *PriceRegistryReader_Address_Call { + return &PriceRegistryReader_Address_Call{Call: _e.mock.On("Address", ctx)} +} + +func (_c *PriceRegistryReader_Address_Call) Run(run func(ctx context.Context)) *PriceRegistryReader_Address_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *PriceRegistryReader_Address_Call) Return(_a0 ccip.Address, _a1 error) *PriceRegistryReader_Address_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *PriceRegistryReader_Address_Call) RunAndReturn(run func(context.Context) (ccip.Address, error)) *PriceRegistryReader_Address_Call { + _c.Call.Return(run) + return _c +} + +// Close provides a mock function with given fields: +func (_m *PriceRegistryReader) Close() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Close") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// PriceRegistryReader_Close_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Close' +type PriceRegistryReader_Close_Call struct { + *mock.Call +} + +// Close is a helper method to define mock.On call +func (_e *PriceRegistryReader_Expecter) Close() *PriceRegistryReader_Close_Call { + return &PriceRegistryReader_Close_Call{Call: _e.mock.On("Close")} +} + +func (_c *PriceRegistryReader_Close_Call) Run(run func()) *PriceRegistryReader_Close_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *PriceRegistryReader_Close_Call) Return(_a0 error) *PriceRegistryReader_Close_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *PriceRegistryReader_Close_Call) RunAndReturn(run func() error) *PriceRegistryReader_Close_Call { + _c.Call.Return(run) + return _c +} + +// GetAllGasPriceUpdatesCreatedAfter provides a mock function with given fields: ctx, ts, confirmations +func (_m *PriceRegistryReader) GetAllGasPriceUpdatesCreatedAfter(ctx 
context.Context, ts time.Time, confirmations int) ([]ccip.GasPriceUpdateWithTxMeta, error) { + ret := _m.Called(ctx, ts, confirmations) + + if len(ret) == 0 { + panic("no return value specified for GetAllGasPriceUpdatesCreatedAfter") + } + + var r0 []ccip.GasPriceUpdateWithTxMeta + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, time.Time, int) ([]ccip.GasPriceUpdateWithTxMeta, error)); ok { + return rf(ctx, ts, confirmations) + } + if rf, ok := ret.Get(0).(func(context.Context, time.Time, int) []ccip.GasPriceUpdateWithTxMeta); ok { + r0 = rf(ctx, ts, confirmations) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]ccip.GasPriceUpdateWithTxMeta) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, time.Time, int) error); ok { + r1 = rf(ctx, ts, confirmations) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// PriceRegistryReader_GetAllGasPriceUpdatesCreatedAfter_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetAllGasPriceUpdatesCreatedAfter' +type PriceRegistryReader_GetAllGasPriceUpdatesCreatedAfter_Call struct { + *mock.Call +} + +// GetAllGasPriceUpdatesCreatedAfter is a helper method to define mock.On call +// - ctx context.Context +// - ts time.Time +// - confirmations int +func (_e *PriceRegistryReader_Expecter) GetAllGasPriceUpdatesCreatedAfter(ctx interface{}, ts interface{}, confirmations interface{}) *PriceRegistryReader_GetAllGasPriceUpdatesCreatedAfter_Call { + return &PriceRegistryReader_GetAllGasPriceUpdatesCreatedAfter_Call{Call: _e.mock.On("GetAllGasPriceUpdatesCreatedAfter", ctx, ts, confirmations)} +} + +func (_c *PriceRegistryReader_GetAllGasPriceUpdatesCreatedAfter_Call) Run(run func(ctx context.Context, ts time.Time, confirmations int)) *PriceRegistryReader_GetAllGasPriceUpdatesCreatedAfter_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(time.Time), args[2].(int)) + }) + return _c +} + +func (_c *PriceRegistryReader_GetAllGasPriceUpdatesCreatedAfter_Call) Return(_a0 []ccip.GasPriceUpdateWithTxMeta, _a1 error) *PriceRegistryReader_GetAllGasPriceUpdatesCreatedAfter_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *PriceRegistryReader_GetAllGasPriceUpdatesCreatedAfter_Call) RunAndReturn(run func(context.Context, time.Time, int) ([]ccip.GasPriceUpdateWithTxMeta, error)) *PriceRegistryReader_GetAllGasPriceUpdatesCreatedAfter_Call { + _c.Call.Return(run) + return _c +} + +// GetFeeTokens provides a mock function with given fields: ctx +func (_m *PriceRegistryReader) GetFeeTokens(ctx context.Context) ([]ccip.Address, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for GetFeeTokens") + } + + var r0 []ccip.Address + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) ([]ccip.Address, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) []ccip.Address); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]ccip.Address) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// PriceRegistryReader_GetFeeTokens_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetFeeTokens' +type PriceRegistryReader_GetFeeTokens_Call struct { + *mock.Call +} + +// GetFeeTokens is a helper method to define mock.On call +// - ctx context.Context +func (_e *PriceRegistryReader_Expecter) GetFeeTokens(ctx interface{}) 
*PriceRegistryReader_GetFeeTokens_Call { + return &PriceRegistryReader_GetFeeTokens_Call{Call: _e.mock.On("GetFeeTokens", ctx)} +} + +func (_c *PriceRegistryReader_GetFeeTokens_Call) Run(run func(ctx context.Context)) *PriceRegistryReader_GetFeeTokens_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *PriceRegistryReader_GetFeeTokens_Call) Return(_a0 []ccip.Address, _a1 error) *PriceRegistryReader_GetFeeTokens_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *PriceRegistryReader_GetFeeTokens_Call) RunAndReturn(run func(context.Context) ([]ccip.Address, error)) *PriceRegistryReader_GetFeeTokens_Call { + _c.Call.Return(run) + return _c +} + +// GetGasPriceUpdatesCreatedAfter provides a mock function with given fields: ctx, chainSelector, ts, confirmations +func (_m *PriceRegistryReader) GetGasPriceUpdatesCreatedAfter(ctx context.Context, chainSelector uint64, ts time.Time, confirmations int) ([]ccip.GasPriceUpdateWithTxMeta, error) { + ret := _m.Called(ctx, chainSelector, ts, confirmations) + + if len(ret) == 0 { + panic("no return value specified for GetGasPriceUpdatesCreatedAfter") + } + + var r0 []ccip.GasPriceUpdateWithTxMeta + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, time.Time, int) ([]ccip.GasPriceUpdateWithTxMeta, error)); ok { + return rf(ctx, chainSelector, ts, confirmations) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64, time.Time, int) []ccip.GasPriceUpdateWithTxMeta); ok { + r0 = rf(ctx, chainSelector, ts, confirmations) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]ccip.GasPriceUpdateWithTxMeta) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64, time.Time, int) error); ok { + r1 = rf(ctx, chainSelector, ts, confirmations) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// PriceRegistryReader_GetGasPriceUpdatesCreatedAfter_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetGasPriceUpdatesCreatedAfter' +type PriceRegistryReader_GetGasPriceUpdatesCreatedAfter_Call struct { + *mock.Call +} + +// GetGasPriceUpdatesCreatedAfter is a helper method to define mock.On call +// - ctx context.Context +// - chainSelector uint64 +// - ts time.Time +// - confirmations int +func (_e *PriceRegistryReader_Expecter) GetGasPriceUpdatesCreatedAfter(ctx interface{}, chainSelector interface{}, ts interface{}, confirmations interface{}) *PriceRegistryReader_GetGasPriceUpdatesCreatedAfter_Call { + return &PriceRegistryReader_GetGasPriceUpdatesCreatedAfter_Call{Call: _e.mock.On("GetGasPriceUpdatesCreatedAfter", ctx, chainSelector, ts, confirmations)} +} + +func (_c *PriceRegistryReader_GetGasPriceUpdatesCreatedAfter_Call) Run(run func(ctx context.Context, chainSelector uint64, ts time.Time, confirmations int)) *PriceRegistryReader_GetGasPriceUpdatesCreatedAfter_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint64), args[2].(time.Time), args[3].(int)) + }) + return _c +} + +func (_c *PriceRegistryReader_GetGasPriceUpdatesCreatedAfter_Call) Return(_a0 []ccip.GasPriceUpdateWithTxMeta, _a1 error) *PriceRegistryReader_GetGasPriceUpdatesCreatedAfter_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *PriceRegistryReader_GetGasPriceUpdatesCreatedAfter_Call) RunAndReturn(run func(context.Context, uint64, time.Time, int) ([]ccip.GasPriceUpdateWithTxMeta, error)) *PriceRegistryReader_GetGasPriceUpdatesCreatedAfter_Call { + _c.Call.Return(run) + return _c +} + +// 
GetTokenPriceUpdatesCreatedAfter provides a mock function with given fields: ctx, ts, confirmations +func (_m *PriceRegistryReader) GetTokenPriceUpdatesCreatedAfter(ctx context.Context, ts time.Time, confirmations int) ([]ccip.TokenPriceUpdateWithTxMeta, error) { + ret := _m.Called(ctx, ts, confirmations) + + if len(ret) == 0 { + panic("no return value specified for GetTokenPriceUpdatesCreatedAfter") + } + + var r0 []ccip.TokenPriceUpdateWithTxMeta + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, time.Time, int) ([]ccip.TokenPriceUpdateWithTxMeta, error)); ok { + return rf(ctx, ts, confirmations) + } + if rf, ok := ret.Get(0).(func(context.Context, time.Time, int) []ccip.TokenPriceUpdateWithTxMeta); ok { + r0 = rf(ctx, ts, confirmations) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]ccip.TokenPriceUpdateWithTxMeta) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, time.Time, int) error); ok { + r1 = rf(ctx, ts, confirmations) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// PriceRegistryReader_GetTokenPriceUpdatesCreatedAfter_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetTokenPriceUpdatesCreatedAfter' +type PriceRegistryReader_GetTokenPriceUpdatesCreatedAfter_Call struct { + *mock.Call +} + +// GetTokenPriceUpdatesCreatedAfter is a helper method to define mock.On call +// - ctx context.Context +// - ts time.Time +// - confirmations int +func (_e *PriceRegistryReader_Expecter) GetTokenPriceUpdatesCreatedAfter(ctx interface{}, ts interface{}, confirmations interface{}) *PriceRegistryReader_GetTokenPriceUpdatesCreatedAfter_Call { + return &PriceRegistryReader_GetTokenPriceUpdatesCreatedAfter_Call{Call: _e.mock.On("GetTokenPriceUpdatesCreatedAfter", ctx, ts, confirmations)} +} + +func (_c *PriceRegistryReader_GetTokenPriceUpdatesCreatedAfter_Call) Run(run func(ctx context.Context, ts time.Time, confirmations int)) *PriceRegistryReader_GetTokenPriceUpdatesCreatedAfter_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(time.Time), args[2].(int)) + }) + return _c +} + +func (_c *PriceRegistryReader_GetTokenPriceUpdatesCreatedAfter_Call) Return(_a0 []ccip.TokenPriceUpdateWithTxMeta, _a1 error) *PriceRegistryReader_GetTokenPriceUpdatesCreatedAfter_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *PriceRegistryReader_GetTokenPriceUpdatesCreatedAfter_Call) RunAndReturn(run func(context.Context, time.Time, int) ([]ccip.TokenPriceUpdateWithTxMeta, error)) *PriceRegistryReader_GetTokenPriceUpdatesCreatedAfter_Call { + _c.Call.Return(run) + return _c +} + +// GetTokenPrices provides a mock function with given fields: ctx, wantedTokens +func (_m *PriceRegistryReader) GetTokenPrices(ctx context.Context, wantedTokens []ccip.Address) ([]ccip.TokenPriceUpdate, error) { + ret := _m.Called(ctx, wantedTokens) + + if len(ret) == 0 { + panic("no return value specified for GetTokenPrices") + } + + var r0 []ccip.TokenPriceUpdate + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, []ccip.Address) ([]ccip.TokenPriceUpdate, error)); ok { + return rf(ctx, wantedTokens) + } + if rf, ok := ret.Get(0).(func(context.Context, []ccip.Address) []ccip.TokenPriceUpdate); ok { + r0 = rf(ctx, wantedTokens) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]ccip.TokenPriceUpdate) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, []ccip.Address) error); ok { + r1 = rf(ctx, wantedTokens) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// 
PriceRegistryReader_GetTokenPrices_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetTokenPrices' +type PriceRegistryReader_GetTokenPrices_Call struct { + *mock.Call +} + +// GetTokenPrices is a helper method to define mock.On call +// - ctx context.Context +// - wantedTokens []ccip.Address +func (_e *PriceRegistryReader_Expecter) GetTokenPrices(ctx interface{}, wantedTokens interface{}) *PriceRegistryReader_GetTokenPrices_Call { + return &PriceRegistryReader_GetTokenPrices_Call{Call: _e.mock.On("GetTokenPrices", ctx, wantedTokens)} +} + +func (_c *PriceRegistryReader_GetTokenPrices_Call) Run(run func(ctx context.Context, wantedTokens []ccip.Address)) *PriceRegistryReader_GetTokenPrices_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].([]ccip.Address)) + }) + return _c +} + +func (_c *PriceRegistryReader_GetTokenPrices_Call) Return(_a0 []ccip.TokenPriceUpdate, _a1 error) *PriceRegistryReader_GetTokenPrices_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *PriceRegistryReader_GetTokenPrices_Call) RunAndReturn(run func(context.Context, []ccip.Address) ([]ccip.TokenPriceUpdate, error)) *PriceRegistryReader_GetTokenPrices_Call { + _c.Call.Return(run) + return _c +} + +// GetTokensDecimals provides a mock function with given fields: ctx, tokenAddresses +func (_m *PriceRegistryReader) GetTokensDecimals(ctx context.Context, tokenAddresses []ccip.Address) ([]uint8, error) { + ret := _m.Called(ctx, tokenAddresses) + + if len(ret) == 0 { + panic("no return value specified for GetTokensDecimals") + } + + var r0 []uint8 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, []ccip.Address) ([]uint8, error)); ok { + return rf(ctx, tokenAddresses) + } + if rf, ok := ret.Get(0).(func(context.Context, []ccip.Address) []uint8); ok { + r0 = rf(ctx, tokenAddresses) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]uint8) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, []ccip.Address) error); ok { + r1 = rf(ctx, tokenAddresses) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// PriceRegistryReader_GetTokensDecimals_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetTokensDecimals' +type PriceRegistryReader_GetTokensDecimals_Call struct { + *mock.Call +} + +// GetTokensDecimals is a helper method to define mock.On call +// - ctx context.Context +// - tokenAddresses []ccip.Address +func (_e *PriceRegistryReader_Expecter) GetTokensDecimals(ctx interface{}, tokenAddresses interface{}) *PriceRegistryReader_GetTokensDecimals_Call { + return &PriceRegistryReader_GetTokensDecimals_Call{Call: _e.mock.On("GetTokensDecimals", ctx, tokenAddresses)} +} + +func (_c *PriceRegistryReader_GetTokensDecimals_Call) Run(run func(ctx context.Context, tokenAddresses []ccip.Address)) *PriceRegistryReader_GetTokensDecimals_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].([]ccip.Address)) + }) + return _c +} + +func (_c *PriceRegistryReader_GetTokensDecimals_Call) Return(_a0 []uint8, _a1 error) *PriceRegistryReader_GetTokensDecimals_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *PriceRegistryReader_GetTokensDecimals_Call) RunAndReturn(run func(context.Context, []ccip.Address) ([]uint8, error)) *PriceRegistryReader_GetTokensDecimals_Call { + _c.Call.Return(run) + return _c +} + +// NewPriceRegistryReader creates a new instance of PriceRegistryReader. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewPriceRegistryReader(t interface { + mock.TestingT + Cleanup(func()) +}) *PriceRegistryReader { + mock := &PriceRegistryReader{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/mocks/token_pool_reader_mock.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/mocks/token_pool_reader_mock.go new file mode 100644 index 00000000000..0bb23b9cc23 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/mocks/token_pool_reader_mock.go @@ -0,0 +1,127 @@ +// Code generated by mockery v2.43.2. DO NOT EDIT. + +package mocks + +import ( + common "github.com/ethereum/go-ethereum/common" + mock "github.com/stretchr/testify/mock" +) + +// TokenPoolReader is an autogenerated mock type for the TokenPoolReader type +type TokenPoolReader struct { + mock.Mock +} + +type TokenPoolReader_Expecter struct { + mock *mock.Mock +} + +func (_m *TokenPoolReader) EXPECT() *TokenPoolReader_Expecter { + return &TokenPoolReader_Expecter{mock: &_m.Mock} +} + +// Address provides a mock function with given fields: +func (_m *TokenPoolReader) Address() common.Address { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Address") + } + + var r0 common.Address + if rf, ok := ret.Get(0).(func() common.Address); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(common.Address) + } + } + + return r0 +} + +// TokenPoolReader_Address_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Address' +type TokenPoolReader_Address_Call struct { + *mock.Call +} + +// Address is a helper method to define mock.On call +func (_e *TokenPoolReader_Expecter) Address() *TokenPoolReader_Address_Call { + return &TokenPoolReader_Address_Call{Call: _e.mock.On("Address")} +} + +func (_c *TokenPoolReader_Address_Call) Run(run func()) *TokenPoolReader_Address_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *TokenPoolReader_Address_Call) Return(_a0 common.Address) *TokenPoolReader_Address_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *TokenPoolReader_Address_Call) RunAndReturn(run func() common.Address) *TokenPoolReader_Address_Call { + _c.Call.Return(run) + return _c +} + +// Type provides a mock function with given fields: +func (_m *TokenPoolReader) Type() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Type") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// TokenPoolReader_Type_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Type' +type TokenPoolReader_Type_Call struct { + *mock.Call +} + +// Type is a helper method to define mock.On call +func (_e *TokenPoolReader_Expecter) Type() *TokenPoolReader_Type_Call { + return &TokenPoolReader_Type_Call{Call: _e.mock.On("Type")} +} + +func (_c *TokenPoolReader_Type_Call) Run(run func()) *TokenPoolReader_Type_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *TokenPoolReader_Type_Call) Return(_a0 string) *TokenPoolReader_Type_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *TokenPoolReader_Type_Call) RunAndReturn(run func() string) 
*TokenPoolReader_Type_Call { + _c.Call.Return(run) + return _c +} + +// NewTokenPoolReader creates a new instance of TokenPoolReader. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewTokenPoolReader(t interface { + mock.TestingT + Cleanup(func()) +}) *TokenPoolReader { + mock := &TokenPoolReader{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/mocks/usdc_reader_mock.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/mocks/usdc_reader_mock.go new file mode 100644 index 00000000000..ac72d599923 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/mocks/usdc_reader_mock.go @@ -0,0 +1,97 @@ +// Code generated by mockery v2.43.2. DO NOT EDIT. + +package mocks + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" +) + +// USDCReader is an autogenerated mock type for the USDCReader type +type USDCReader struct { + mock.Mock +} + +type USDCReader_Expecter struct { + mock *mock.Mock +} + +func (_m *USDCReader) EXPECT() *USDCReader_Expecter { + return &USDCReader_Expecter{mock: &_m.Mock} +} + +// GetUSDCMessagePriorToLogIndexInTx provides a mock function with given fields: ctx, logIndex, usdcTokenIndexOffset, txHash +func (_m *USDCReader) GetUSDCMessagePriorToLogIndexInTx(ctx context.Context, logIndex int64, usdcTokenIndexOffset int, txHash string) ([]byte, error) { + ret := _m.Called(ctx, logIndex, usdcTokenIndexOffset, txHash) + + if len(ret) == 0 { + panic("no return value specified for GetUSDCMessagePriorToLogIndexInTx") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, int64, int, string) ([]byte, error)); ok { + return rf(ctx, logIndex, usdcTokenIndexOffset, txHash) + } + if rf, ok := ret.Get(0).(func(context.Context, int64, int, string) []byte); ok { + r0 = rf(ctx, logIndex, usdcTokenIndexOffset, txHash) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, int64, int, string) error); ok { + r1 = rf(ctx, logIndex, usdcTokenIndexOffset, txHash) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// USDCReader_GetUSDCMessagePriorToLogIndexInTx_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetUSDCMessagePriorToLogIndexInTx' +type USDCReader_GetUSDCMessagePriorToLogIndexInTx_Call struct { + *mock.Call +} + +// GetUSDCMessagePriorToLogIndexInTx is a helper method to define mock.On call +// - ctx context.Context +// - logIndex int64 +// - usdcTokenIndexOffset int +// - txHash string +func (_e *USDCReader_Expecter) GetUSDCMessagePriorToLogIndexInTx(ctx interface{}, logIndex interface{}, usdcTokenIndexOffset interface{}, txHash interface{}) *USDCReader_GetUSDCMessagePriorToLogIndexInTx_Call { + return &USDCReader_GetUSDCMessagePriorToLogIndexInTx_Call{Call: _e.mock.On("GetUSDCMessagePriorToLogIndexInTx", ctx, logIndex, usdcTokenIndexOffset, txHash)} +} + +func (_c *USDCReader_GetUSDCMessagePriorToLogIndexInTx_Call) Run(run func(ctx context.Context, logIndex int64, usdcTokenIndexOffset int, txHash string)) *USDCReader_GetUSDCMessagePriorToLogIndexInTx_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(int64), args[2].(int), args[3].(string)) + }) + return _c +} + +func (_c *USDCReader_GetUSDCMessagePriorToLogIndexInTx_Call) Return(_a0 
[]byte, _a1 error) *USDCReader_GetUSDCMessagePriorToLogIndexInTx_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *USDCReader_GetUSDCMessagePriorToLogIndexInTx_Call) RunAndReturn(run func(context.Context, int64, int, string) ([]byte, error)) *USDCReader_GetUSDCMessagePriorToLogIndexInTx_Call { + _c.Call.Return(run) + return _c +} + +// NewUSDCReader creates a new instance of USDCReader. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewUSDCReader(t interface { + mock.TestingT + Cleanup(func()) +}) *USDCReader { + mock := &USDCReader{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/offramp_reader.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/offramp_reader.go new file mode 100644 index 00000000000..c3bad6235b3 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/offramp_reader.go @@ -0,0 +1,13 @@ +package ccipdata + +import ( + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" +) + +const ( + ManuallyExecute = "manuallyExecute" +) + +type OffRampReader interface { + cciptypes.OffRampReader +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/offramp_reader_test.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/offramp_reader_test.go new file mode 100644 index 00000000000..6f14fb8559c --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/offramp_reader_test.go @@ -0,0 +1,416 @@ +package ccipdata_test + +import ( + "math/big" + "math/rand" + "testing" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" + evmclientmocks "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client/mocks" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/headtracker" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" + lpmocks "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller/mocks" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/commit_store_helper" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_offramp" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_offramp_1_0_0" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_offramp_1_2_0" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/mock_arm_contract" + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest" + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/factory" + 
"github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0" +) + +type offRampReaderTH struct { + user *bind.TransactOpts + reader ccipdata.OffRampReader +} + +func TestExecOnchainConfig100(t *testing.T) { + tests := []struct { + name string + want v1_0_0.ExecOnchainConfig + expectErr bool + }{ + { + name: "encodes and decodes config with all fields set", + want: v1_0_0.ExecOnchainConfig{ + PermissionLessExecutionThresholdSeconds: rand.Uint32(), + Router: utils.RandomAddress(), + PriceRegistry: utils.RandomAddress(), + MaxTokensLength: uint16(rand.Uint32()), + MaxDataSize: rand.Uint32(), + }, + }, + { + name: "encodes and fails decoding config with missing fields", + want: v1_0_0.ExecOnchainConfig{ + PermissionLessExecutionThresholdSeconds: rand.Uint32(), + MaxDataSize: rand.Uint32(), + }, + expectErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + encoded, err := abihelpers.EncodeAbiStruct(tt.want) + require.NoError(t, err) + + decoded, err := abihelpers.DecodeAbiStruct[v1_0_0.ExecOnchainConfig](encoded) + if tt.expectErr { + require.ErrorContains(t, err, "must set") + } else { + require.NoError(t, err) + require.Equal(t, tt.want, decoded) + } + }) + } +} + +func TestExecOnchainConfig120(t *testing.T) { + tests := []struct { + name string + want v1_2_0.ExecOnchainConfig + expectErr bool + }{ + { + name: "encodes and decodes config with all fields set", + want: v1_2_0.ExecOnchainConfig{ + PermissionLessExecutionThresholdSeconds: rand.Uint32(), + Router: utils.RandomAddress(), + PriceRegistry: utils.RandomAddress(), + MaxNumberOfTokensPerMsg: uint16(rand.Uint32()), + MaxDataBytes: rand.Uint32(), + MaxPoolReleaseOrMintGas: rand.Uint32(), + }, + }, + { + name: "encodes and fails decoding config with missing fields", + want: v1_2_0.ExecOnchainConfig{ + PermissionLessExecutionThresholdSeconds: rand.Uint32(), + MaxDataBytes: rand.Uint32(), + }, + expectErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + encoded, err := abihelpers.EncodeAbiStruct(tt.want) + require.NoError(t, err) + + decoded, err := abihelpers.DecodeAbiStruct[v1_2_0.ExecOnchainConfig](encoded) + if tt.expectErr { + require.ErrorContains(t, err, "must set") + } else { + require.NoError(t, err) + require.Equal(t, tt.want, decoded) + } + }) + } +} + +func TestOffRampReaderInit(t *testing.T) { + tests := []struct { + name string + version string + }{ + { + name: "OffRampReader_V1_0_0", + version: ccipdata.V1_0_0, + }, + { + name: "OffRampReader_V1_1_0", + version: ccipdata.V1_1_0, + }, + { + name: "OffRampReader_V1_2_0", + version: ccipdata.V1_2_0, + }, + { + name: "OffRampReader_V1_5_0", + version: ccipdata.V1_5_0, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + th := setupOffRampReaderTH(t, test.version) + testOffRampReader(t, th) + }) + } +} + +func setupOffRampReaderTH(t *testing.T, version string) offRampReaderTH { + ctx := testutils.Context(t) + user, bc := ccipdata.NewSimulation(t) + log := logger.TestLogger(t) + orm := logpoller.NewORM(testutils.SimulatedChainID, pgtest.NewSqlxDB(t), log) + lpOpts := logpoller.Opts{ + PollPeriod: 100 * time.Millisecond, + FinalityDepth: 2, + BackfillBatchSize: 3, + RpcBatchSize: 2, + KeepFinalizedBlocksDepth: 1000, + } + headTracker := headtracker.NewSimulatedHeadTracker(bc, lpOpts.UseFinalityTag, lpOpts.FinalityDepth) + if lpOpts.PollPeriod == 
0 { + lpOpts.PollPeriod = 1 * time.Hour + } + lp := logpoller.NewLogPoller( + orm, + bc, + log, + headTracker, + lpOpts) + assert.NoError(t, orm.InsertBlock(ctx, common.Hash{}, 1, time.Now(), 1)) + // Setup offRamp. + var offRampAddress common.Address + switch version { + case ccipdata.V1_0_0: + offRampAddress = setupOffRampV1_0_0(t, user, bc) + case ccipdata.V1_1_0: + // Version 1.1.0 uses the same contracts as 1.0.0. + offRampAddress = setupOffRampV1_0_0(t, user, bc) + case ccipdata.V1_2_0: + offRampAddress = setupOffRampV1_2_0(t, user, bc) + case ccipdata.V1_5_0: + offRampAddress = setupOffRampV1_5_0(t, user, bc) + default: + require.Fail(t, "Unknown version: ", version) + } + + // Create the version-specific reader. + reader, err := factory.NewOffRampReader(log, factory.NewEvmVersionFinder(), ccipcalc.EvmAddrToGeneric(offRampAddress), bc, lp, nil, nil, true) + require.NoError(t, err) + addr, err := reader.Address(ctx) + require.NoError(t, err) + require.Equal(t, ccipcalc.EvmAddrToGeneric(offRampAddress), addr) + + return offRampReaderTH{ + user: user, + reader: reader, + } +} + +func setupOffRampV1_0_0(t *testing.T, user *bind.TransactOpts, bc *client.SimulatedBackendClient) common.Address { + onRampAddr := utils.RandomAddress() + armAddr := deployMockArm(t, user, bc) + csAddr := deployCommitStore(t, user, bc, onRampAddr, armAddr) + + // Deploy the OffRamp. + staticConfig := evm_2_evm_offramp_1_0_0.EVM2EVMOffRampStaticConfig{ + CommitStore: csAddr, + ChainSelector: testutils.SimulatedChainID.Uint64(), + SourceChainSelector: testutils.SimulatedChainID.Uint64(), + OnRamp: onRampAddr, + PrevOffRamp: common.Address{}, + ArmProxy: armAddr, + } + sourceTokens := []common.Address{} + pools := []common.Address{} + rateLimiterConfig := evm_2_evm_offramp_1_0_0.RateLimiterConfig{ + IsEnabled: false, + Capacity: big.NewInt(0), + Rate: big.NewInt(0), + } + + offRampAddr, tx, offRamp, err := evm_2_evm_offramp_1_0_0.DeployEVM2EVMOffRamp(user, bc, staticConfig, sourceTokens, pools, rateLimiterConfig) + bc.Commit() + require.NoError(t, err) + ccipdata.AssertNonRevert(t, tx, bc, user) + + // Verify the deployed OffRamp. + tav, err := offRamp.TypeAndVersion(&bind.CallOpts{ + Context: testutils.Context(t), + }) + require.NoError(t, err) + require.Equal(t, "EVM2EVMOffRamp 1.0.0", tav) + return offRampAddr +} + +func setupOffRampV1_2_0(t *testing.T, user *bind.TransactOpts, bc *client.SimulatedBackendClient) common.Address { + onRampAddr := utils.RandomAddress() + armAddr := deployMockArm(t, user, bc) + csAddr := deployCommitStore(t, user, bc, onRampAddr, armAddr) + + // Deploy the OffRamp. + staticConfig := evm_2_evm_offramp_1_2_0.EVM2EVMOffRampStaticConfig{ + CommitStore: csAddr, + ChainSelector: testutils.SimulatedChainID.Uint64(), + SourceChainSelector: testutils.SimulatedChainID.Uint64(), + OnRamp: onRampAddr, + PrevOffRamp: common.Address{}, + ArmProxy: armAddr, + } + sourceTokens := []common.Address{} + pools := []common.Address{} + rateLimiterConfig := evm_2_evm_offramp_1_2_0.RateLimiterConfig{ + IsEnabled: false, + Capacity: big.NewInt(0), + Rate: big.NewInt(0), + } + + offRampAddr, tx, offRamp, err := evm_2_evm_offramp_1_2_0.DeployEVM2EVMOffRamp(user, bc, staticConfig, sourceTokens, pools, rateLimiterConfig) + bc.Commit() + require.NoError(t, err) + ccipdata.AssertNonRevert(t, tx, bc, user) + + // Verify the deployed OffRamp. 
+ tav, err := offRamp.TypeAndVersion(&bind.CallOpts{ + Context: testutils.Context(t), + }) + require.NoError(t, err) + require.Equal(t, "EVM2EVMOffRamp 1.2.0", tav) + return offRampAddr +} + +func setupOffRampV1_5_0(t *testing.T, user *bind.TransactOpts, bc *client.SimulatedBackendClient) common.Address { + onRampAddr := utils.RandomAddress() + tokenAdminRegAddr := utils.RandomAddress() + rmnAddr := deployMockArm(t, user, bc) + csAddr := deployCommitStore(t, user, bc, onRampAddr, rmnAddr) + + // Deploy the OffRamp. + staticConfig := evm_2_evm_offramp.EVM2EVMOffRampStaticConfig{ + CommitStore: csAddr, + ChainSelector: testutils.SimulatedChainID.Uint64(), + SourceChainSelector: testutils.SimulatedChainID.Uint64(), + OnRamp: onRampAddr, + PrevOffRamp: common.Address{}, + RmnProxy: rmnAddr, + TokenAdminRegistry: tokenAdminRegAddr, + } + rateLimiterConfig := evm_2_evm_offramp.RateLimiterConfig{ + IsEnabled: false, + Capacity: big.NewInt(0), + Rate: big.NewInt(0), + } + + offRampAddr, tx, offRamp, err := evm_2_evm_offramp.DeployEVM2EVMOffRamp(user, bc, staticConfig, rateLimiterConfig) + bc.Commit() + require.NoError(t, err) + ccipdata.AssertNonRevert(t, tx, bc, user) + + // Verify the deployed OffRamp. + tav, err := offRamp.TypeAndVersion(&bind.CallOpts{ + Context: testutils.Context(t), + }) + require.NoError(t, err) + require.Equal(t, "EVM2EVMOffRamp 1.5.0-dev", tav) + return offRampAddr +} + +func deployMockArm( + t *testing.T, + user *bind.TransactOpts, + bc *client.SimulatedBackendClient, +) common.Address { + armAddr, tx, _, err := mock_arm_contract.DeployMockARMContract(user, bc) + require.NoError(t, err) + bc.Commit() + ccipdata.AssertNonRevert(t, tx, bc, user) + require.NotEqual(t, common.Address{}, armAddr) + return armAddr +} + +// Deploy the CommitStore. We use the same CommitStore version for all versions of OffRamp tested. +func deployCommitStore( + t *testing.T, + user *bind.TransactOpts, + bc *client.SimulatedBackendClient, + onRampAddress common.Address, + armAddress common.Address, +) common.Address { + // Deploy the CommitStore using the helper. + csAddr, tx, cs, err := commit_store_helper.DeployCommitStoreHelper(user, bc, commit_store_helper.CommitStoreStaticConfig{ + ChainSelector: testutils.SimulatedChainID.Uint64(), + SourceChainSelector: testutils.SimulatedChainID.Uint64(), + OnRamp: onRampAddress, + RmnProxy: armAddress, + }) + require.NoError(t, err) + bc.Commit() + ccipdata.AssertNonRevert(t, tx, bc, user) + + // Test the deployed CommitStore. 
+ callOpts := &bind.CallOpts{ + Context: testutils.Context(t), + } + tav, err := cs.TypeAndVersion(callOpts) + require.NoError(t, err) + require.Equal(t, "CommitStore 1.5.0-dev", tav) + return csAddr +} + +func testOffRampReader(t *testing.T, th offRampReaderTH) { + ctx := th.user.Context + tokens, err := th.reader.GetTokens(ctx) + require.NoError(t, err) + require.Equal(t, []cciptypes.Address{}, tokens.DestinationTokens) + + events, err := th.reader.GetExecutionStateChangesBetweenSeqNums(ctx, 0, 10, 0) + require.NoError(t, err) + require.Equal(t, []cciptypes.ExecutionStateChangedWithTxMeta{}, events) + + sourceToDestTokens, err := th.reader.GetSourceToDestTokensMapping(ctx) + require.NoError(t, err) + require.Empty(t, sourceToDestTokens) + + require.NoError(t, err) +} + +func TestNewOffRampReader(t *testing.T) { + var tt = []struct { + typeAndVersion string + expectedErr string + }{ + { + typeAndVersion: "blah", + expectedErr: "unable to read type and version: invalid type and version blah", + }, + { + typeAndVersion: "CommitStore 1.0.0", + expectedErr: "expected EVM2EVMOffRamp got CommitStore", + }, + { + typeAndVersion: "EVM2EVMOffRamp 1.2.0", + expectedErr: "", + }, + { + typeAndVersion: "EVM2EVMOffRamp 2.0.0", + expectedErr: "unsupported offramp version 2.0.0", + }, + } + for _, tc := range tt { + t.Run(tc.typeAndVersion, func(t *testing.T) { + b, err := utils.ABIEncode(`[{"type":"string"}]`, tc.typeAndVersion) + require.NoError(t, err) + c := evmclientmocks.NewClient(t) + c.On("CallContract", mock.Anything, mock.Anything, mock.Anything).Return(b, nil) + addr := ccipcalc.EvmAddrToGeneric(utils.RandomAddress()) + lp := lpmocks.NewLogPoller(t) + lp.On("RegisterFilter", mock.Anything, mock.Anything).Return(nil).Maybe() + _, err = factory.NewOffRampReader(logger.TestLogger(t), factory.NewEvmVersionFinder(), addr, c, lp, nil, nil, true) + if tc.expectedErr != "" { + assert.EqualError(t, err, tc.expectedErr) + } else { + assert.NoError(t, err) + } + }) + } +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/onramp_reader.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/onramp_reader.go new file mode 100644 index 00000000000..e2571de57f6 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/onramp_reader.go @@ -0,0 +1,21 @@ +package ccipdata + +import ( + "github.com/ethereum/go-ethereum/core/types" + + "github.com/smartcontractkit/chainlink-common/pkg/hashutil" + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" +) + +type LeafHasherInterface[H hashutil.Hash] interface { + HashLeaf(log types.Log) (H, error) +} + +const ( + COMMIT_CCIP_SENDS = "Commit ccip sends" + CONFIG_CHANGED = "Dynamic config changed" +) + +type OnRampReader interface { + cciptypes.OnRampReader +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/onramp_reader_test.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/onramp_reader_test.go new file mode 100644 index 00000000000..9cfe3f628c0 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/onramp_reader_test.go @@ -0,0 +1,479 @@ +package ccipdata_test + +import ( + "fmt" + "math/big" + "testing" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" + evmclientmocks 
"github.com/smartcontractkit/chainlink/v2/core/chains/evm/client/mocks" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/headtracker" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" + lpmocks "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller/mocks" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_onramp" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_onramp_1_0_0" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_onramp_1_1_0" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_onramp_1_2_0" + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest" + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/factory" +) + +type onRampReaderTH struct { + user *bind.TransactOpts + reader ccipdata.OnRampReader +} + +func TestNewOnRampReader_noContractAtAddress(t *testing.T) { + _, bc := ccipdata.NewSimulation(t) + addr := ccipcalc.EvmAddrToGeneric(utils.RandomAddress()) + _, err := factory.NewOnRampReader(logger.TestLogger(t), factory.NewEvmVersionFinder(), testutils.SimulatedChainID.Uint64(), testutils.SimulatedChainID.Uint64(), addr, lpmocks.NewLogPoller(t), bc) + assert.EqualError(t, err, fmt.Sprintf("unable to read type and version: error calling typeAndVersion on addr: %s no contract code at given address", addr)) +} + +func TestOnRampReaderInit(t *testing.T) { + tests := []struct { + name string + version string + }{ + { + name: "OnRampReader_V1_0_0", + version: ccipdata.V1_0_0, + }, + { + name: "OnRampReader_V1_1_0", + version: ccipdata.V1_1_0, + }, + { + name: "OnRampReader_V1_2_0", + version: ccipdata.V1_2_0, + }, + { + name: "OnRampReader_V1_5_0", + version: ccipdata.V1_5_0, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + th := setupOnRampReaderTH(t, test.version) + testVersionSpecificOnRampReader(t, th, test.version) + }) + } +} + +func setupOnRampReaderTH(t *testing.T, version string) onRampReaderTH { + user, bc := ccipdata.NewSimulation(t) + log := logger.TestLogger(t) + orm := logpoller.NewORM(testutils.SimulatedChainID, pgtest.NewSqlxDB(t), log) + lpOpts := logpoller.Opts{ + PollPeriod: 100 * time.Millisecond, + FinalityDepth: 2, + BackfillBatchSize: 3, + RpcBatchSize: 2, + KeepFinalizedBlocksDepth: 1000, + } + headTracker := headtracker.NewSimulatedHeadTracker(bc, lpOpts.UseFinalityTag, lpOpts.FinalityDepth) + if lpOpts.PollPeriod == 0 { + lpOpts.PollPeriod = 1 * time.Hour + } + lp := logpoller.NewLogPoller( + orm, + bc, + log, + headTracker, + lpOpts) + + // Setup onRamp. + var onRampAddress common.Address + switch version { + case ccipdata.V1_0_0: + onRampAddress = setupOnRampV1_0_0(t, user, bc) + case ccipdata.V1_1_0: + onRampAddress = setupOnRampV1_1_0(t, user, bc) + case ccipdata.V1_2_0: + onRampAddress = setupOnRampV1_2_0(t, user, bc) + case ccipdata.V1_5_0: + onRampAddress = setupOnRampV1_5_0(t, user, bc) + default: + require.Fail(t, "Unknown version: ", version) + } + + // Create the version-specific reader. 
+ reader, err := factory.NewOnRampReader(log, factory.NewEvmVersionFinder(), testutils.SimulatedChainID.Uint64(), testutils.SimulatedChainID.Uint64(), ccipcalc.EvmAddrToGeneric(onRampAddress), lp, bc) + require.NoError(t, err) + + return onRampReaderTH{ + user: user, + reader: reader, + } +} + +func setupOnRampV1_0_0(t *testing.T, user *bind.TransactOpts, bc *client.SimulatedBackendClient) common.Address { + linkTokenAddress := common.HexToAddress("0x000011") + staticConfig := evm_2_evm_onramp_1_0_0.EVM2EVMOnRampStaticConfig{ + LinkToken: linkTokenAddress, + ChainSelector: testutils.SimulatedChainID.Uint64(), + DestChainSelector: testutils.SimulatedChainID.Uint64(), + DefaultTxGasLimit: 30000, + MaxNopFeesJuels: big.NewInt(1000000), + PrevOnRamp: common.Address{}, + ArmProxy: utils.RandomAddress(), + } + dynamicConfig := evm_2_evm_onramp_1_0_0.EVM2EVMOnRampDynamicConfig{ + Router: common.HexToAddress("0x000100"), + MaxTokensLength: 4, + PriceRegistry: utils.RandomAddress(), + MaxDataSize: 100000, + MaxGasLimit: 100000, + } + rateLimiterConfig := evm_2_evm_onramp_1_0_0.RateLimiterConfig{ + IsEnabled: false, + Capacity: big.NewInt(5), + Rate: big.NewInt(5), + } + allowList := []common.Address{user.From} + feeTokenConfigs := []evm_2_evm_onramp_1_0_0.EVM2EVMOnRampFeeTokenConfigArgs{ + { + Token: linkTokenAddress, + GasMultiplier: 1, + NetworkFeeAmountUSD: big.NewInt(0), + DestGasOverhead: 50, + DestGasPerPayloadByte: 60, + Enabled: false, + }, + } + tokenTransferConfigArgs := []evm_2_evm_onramp_1_0_0.EVM2EVMOnRampTokenTransferFeeConfigArgs{ + { + Token: utils.RandomAddress(), + MinFee: 10, + MaxFee: 1000, + Ratio: 1, + }, + } + nopsAndWeights := []evm_2_evm_onramp_1_0_0.EVM2EVMOnRampNopAndWeight{ + { + Nop: utils.RandomAddress(), + Weight: 1, + }, + } + tokenAndPool := []evm_2_evm_onramp_1_0_0.InternalPoolUpdate{} + onRampAddress, transaction, _, err := evm_2_evm_onramp_1_0_0.DeployEVM2EVMOnRamp( + user, + bc, + staticConfig, + dynamicConfig, + tokenAndPool, + allowList, + rateLimiterConfig, + feeTokenConfigs, + tokenTransferConfigArgs, + nopsAndWeights, + ) + bc.Commit() + require.NoError(t, err) + ccipdata.AssertNonRevert(t, transaction, bc, user) + return onRampAddress +} + +func setupOnRampV1_1_0(t *testing.T, user *bind.TransactOpts, bc *client.SimulatedBackendClient) common.Address { + linkTokenAddress := common.HexToAddress("0x000011") + staticConfig := evm_2_evm_onramp_1_1_0.EVM2EVMOnRampStaticConfig{ + LinkToken: linkTokenAddress, + ChainSelector: testutils.SimulatedChainID.Uint64(), + DestChainSelector: testutils.SimulatedChainID.Uint64(), + DefaultTxGasLimit: 30000, + MaxNopFeesJuels: big.NewInt(1000000), + PrevOnRamp: common.Address{}, + ArmProxy: utils.RandomAddress(), + } + dynamicConfig := evm_2_evm_onramp_1_1_0.EVM2EVMOnRampDynamicConfig{ + Router: common.HexToAddress("0x000110"), + MaxTokensLength: 4, + PriceRegistry: common.HexToAddress("0x000066"), + MaxDataSize: 100000, + MaxGasLimit: 100000, + } + rateLimiterConfig := evm_2_evm_onramp_1_1_0.RateLimiterConfig{ + IsEnabled: false, + Capacity: big.NewInt(5), + Rate: big.NewInt(5), + } + allowList := []common.Address{user.From} + feeTokenConfigs := []evm_2_evm_onramp_1_1_0.EVM2EVMOnRampFeeTokenConfigArgs{ + { + Token: linkTokenAddress, + NetworkFeeUSD: 0, + MinTokenTransferFeeUSD: 0, + MaxTokenTransferFeeUSD: 0, + GasMultiplier: 0, + PremiumMultiplier: 0, + Enabled: false, + }, + } + tokenTransferConfigArgs := []evm_2_evm_onramp_1_1_0.EVM2EVMOnRampTokenTransferFeeConfigArgs{ + { + Token: linkTokenAddress, + Ratio: 0, + 
DestGasOverhead: 0, + }, + } + nopsAndWeights := []evm_2_evm_onramp_1_1_0.EVM2EVMOnRampNopAndWeight{ + { + Nop: common.HexToAddress("0x222222222"), + Weight: 1, + }, + } + tokenAndPool := []evm_2_evm_onramp_1_1_0.InternalPoolUpdate{} + onRampAddress, transaction, _, err := evm_2_evm_onramp_1_1_0.DeployEVM2EVMOnRamp( + user, + bc, + staticConfig, + dynamicConfig, + tokenAndPool, + allowList, + rateLimiterConfig, + feeTokenConfigs, + tokenTransferConfigArgs, + nopsAndWeights, + ) + bc.Commit() + require.NoError(t, err) + ccipdata.AssertNonRevert(t, transaction, bc, user) + return onRampAddress +} + +func setupOnRampV1_2_0(t *testing.T, user *bind.TransactOpts, bc *client.SimulatedBackendClient) common.Address { + linkTokenAddress := common.HexToAddress("0x000011") + staticConfig := evm_2_evm_onramp_1_2_0.EVM2EVMOnRampStaticConfig{ + LinkToken: linkTokenAddress, + ChainSelector: testutils.SimulatedChainID.Uint64(), + DestChainSelector: testutils.SimulatedChainID.Uint64(), + DefaultTxGasLimit: 30000, + MaxNopFeesJuels: big.NewInt(1000000), + PrevOnRamp: common.Address{}, + ArmProxy: utils.RandomAddress(), + } + dynamicConfig := evm_2_evm_onramp_1_2_0.EVM2EVMOnRampDynamicConfig{ + Router: common.HexToAddress("0x0000000000000000000000000000000000000120"), + MaxNumberOfTokensPerMsg: 0, + DestGasOverhead: 0, + DestGasPerPayloadByte: 0, + DestDataAvailabilityOverheadGas: 0, + DestGasPerDataAvailabilityByte: 0, + DestDataAvailabilityMultiplierBps: 0, + PriceRegistry: utils.RandomAddress(), + MaxDataBytes: 0, + MaxPerMsgGasLimit: 0, + } + rateLimiterConfig := evm_2_evm_onramp_1_2_0.RateLimiterConfig{ + IsEnabled: false, + Capacity: big.NewInt(5), + Rate: big.NewInt(5), + } + feeTokenConfigs := []evm_2_evm_onramp_1_2_0.EVM2EVMOnRampFeeTokenConfigArgs{ + { + Token: linkTokenAddress, + NetworkFeeUSDCents: 0, + GasMultiplierWeiPerEth: 0, + PremiumMultiplierWeiPerEth: 0, + Enabled: false, + }, + } + tokenTransferConfigArgs := []evm_2_evm_onramp_1_2_0.EVM2EVMOnRampTokenTransferFeeConfigArgs{ + { + Token: linkTokenAddress, + MinFeeUSDCents: 0, + MaxFeeUSDCents: 0, + DeciBps: 0, + DestGasOverhead: 0, + DestBytesOverhead: 0, + }, + } + nopsAndWeights := []evm_2_evm_onramp_1_2_0.EVM2EVMOnRampNopAndWeight{ + { + Nop: utils.RandomAddress(), + Weight: 1, + }, + } + tokenAndPool := []evm_2_evm_onramp_1_2_0.InternalPoolUpdate{} + onRampAddress, transaction, _, err := evm_2_evm_onramp_1_2_0.DeployEVM2EVMOnRamp( + user, + bc, + staticConfig, + dynamicConfig, + tokenAndPool, + rateLimiterConfig, + feeTokenConfigs, + tokenTransferConfigArgs, + nopsAndWeights, + ) + bc.Commit() + require.NoError(t, err) + ccipdata.AssertNonRevert(t, transaction, bc, user) + return onRampAddress +} + +func setupOnRampV1_5_0(t *testing.T, user *bind.TransactOpts, bc *client.SimulatedBackendClient) common.Address { + linkTokenAddress := common.HexToAddress("0x000011") + staticConfig := evm_2_evm_onramp.EVM2EVMOnRampStaticConfig{ + LinkToken: linkTokenAddress, + ChainSelector: testutils.SimulatedChainID.Uint64(), + DestChainSelector: testutils.SimulatedChainID.Uint64(), + DefaultTxGasLimit: 30000, + MaxNopFeesJuels: big.NewInt(1000000), + PrevOnRamp: common.Address{}, + RmnProxy: utils.RandomAddress(), + TokenAdminRegistry: utils.RandomAddress(), + } + dynamicConfig := evm_2_evm_onramp.EVM2EVMOnRampDynamicConfig{ + Router: common.HexToAddress("0x0000000000000000000000000000000000000150"), + MaxNumberOfTokensPerMsg: 0, + DestGasOverhead: 0, + DestGasPerPayloadByte: 0, + DestDataAvailabilityOverheadGas: 0, + DestGasPerDataAvailabilityByte: 0, 
+ DestDataAvailabilityMultiplierBps: 0, + PriceRegistry: utils.RandomAddress(), + MaxDataBytes: 0, + MaxPerMsgGasLimit: 0, + DefaultTokenFeeUSDCents: 50, + DefaultTokenDestGasOverhead: 34_000, + DefaultTokenDestBytesOverhead: 500, + } + rateLimiterConfig := evm_2_evm_onramp.RateLimiterConfig{ + IsEnabled: false, + Capacity: big.NewInt(5), + Rate: big.NewInt(5), + } + feeTokenConfigs := []evm_2_evm_onramp.EVM2EVMOnRampFeeTokenConfigArgs{ + { + Token: linkTokenAddress, + NetworkFeeUSDCents: 0, + GasMultiplierWeiPerEth: 0, + PremiumMultiplierWeiPerEth: 0, + Enabled: false, + }, + } + tokenTransferConfigArgs := []evm_2_evm_onramp.EVM2EVMOnRampTokenTransferFeeConfigArgs{ + { + Token: linkTokenAddress, + MinFeeUSDCents: 0, + MaxFeeUSDCents: 0, + DeciBps: 0, + DestGasOverhead: 0, + DestBytesOverhead: 64, + AggregateRateLimitEnabled: true, + }, + } + nopsAndWeights := []evm_2_evm_onramp.EVM2EVMOnRampNopAndWeight{ + { + Nop: utils.RandomAddress(), + Weight: 1, + }, + } + onRampAddress, transaction, _, err := evm_2_evm_onramp.DeployEVM2EVMOnRamp( + user, + bc, + staticConfig, + dynamicConfig, + rateLimiterConfig, + feeTokenConfigs, + tokenTransferConfigArgs, + nopsAndWeights, + ) + bc.Commit() + require.NoError(t, err) + ccipdata.AssertNonRevert(t, transaction, bc, user) + return onRampAddress +} + +func testVersionSpecificOnRampReader(t *testing.T, th onRampReaderTH, version string) { + switch version { + case ccipdata.V1_0_0: + testOnRampReader(t, th, common.HexToAddress("0x0000000000000000000000000000000000000100")) + case ccipdata.V1_1_0: + testOnRampReader(t, th, common.HexToAddress("0x0000000000000000000000000000000000000110")) + case ccipdata.V1_2_0: + testOnRampReader(t, th, common.HexToAddress("0x0000000000000000000000000000000000000120")) + case ccipdata.V1_5_0: + testOnRampReader(t, th, common.HexToAddress("0x0000000000000000000000000000000000000150")) + default: + require.Fail(t, "Unknown version: ", version) + } +} + +func testOnRampReader(t *testing.T, th onRampReaderTH, expectedRouterAddress common.Address) { + ctx := th.user.Context + res, err := th.reader.RouterAddress(ctx) + require.NoError(t, err) + require.Equal(t, ccipcalc.EvmAddrToGeneric(expectedRouterAddress), res) + + msg, err := th.reader.GetSendRequestsBetweenSeqNums(ctx, 0, 10, true) + require.NoError(t, err) + require.NotNil(t, msg) + require.Equal(t, []cciptypes.EVM2EVMMessageWithTxMeta{}, msg) + + address, err := th.reader.Address(ctx) + require.NoError(t, err) + require.NotNil(t, address) + + cfg, err := th.reader.GetDynamicConfig(ctx) + require.NoError(t, err) + require.NotNil(t, cfg) + require.Equal(t, ccipcalc.EvmAddrToGeneric(expectedRouterAddress), cfg.Router) +} + +func TestNewOnRampReader(t *testing.T) { + var tt = []struct { + typeAndVersion string + expectedErr string + }{ + { + typeAndVersion: "blah", + expectedErr: "unable to read type and version: invalid type and version blah", + }, + { + typeAndVersion: "EVM2EVMOffRamp 1.0.0", + expectedErr: "expected EVM2EVMOnRamp got EVM2EVMOffRamp", + }, + { + typeAndVersion: "EVM2EVMOnRamp 1.2.0", + expectedErr: "", + }, + { + typeAndVersion: "EVM2EVMOnRamp 2.0.0", + expectedErr: "unsupported onramp version 2.0.0", + }, + } + for _, tc := range tt { + t.Run(tc.typeAndVersion, func(t *testing.T) { + b, err := utils.ABIEncode(`[{"type":"string"}]`, tc.typeAndVersion) + require.NoError(t, err) + c := evmclientmocks.NewClient(t) + c.On("CallContract", mock.Anything, mock.Anything, mock.Anything).Return(b, nil) + addr := ccipcalc.EvmAddrToGeneric(utils.RandomAddress()) + 
lp := lpmocks.NewLogPoller(t) + lp.On("RegisterFilter", mock.Anything, mock.Anything).Return(nil).Maybe() + _, err = factory.NewOnRampReader(logger.TestLogger(t), factory.NewEvmVersionFinder(), 1, 2, addr, lp, c) + if tc.expectedErr != "" { + require.EqualError(t, err, tc.expectedErr) + } else { + require.NoError(t, err) + } + }) + } +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/price_registry_reader.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/price_registry_reader.go new file mode 100644 index 00000000000..02aef5e9efc --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/price_registry_reader.go @@ -0,0 +1,14 @@ +package ccipdata + +import cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + +const ( + COMMIT_PRICE_UPDATES = "Commit price updates" + FEE_TOKEN_ADDED = "Fee token added" + FEE_TOKEN_REMOVED = "Fee token removed" + ExecPluginLabel = "exec" +) + +type PriceRegistryReader interface { + cciptypes.PriceRegistryReader +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/price_registry_reader_test.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/price_registry_reader_test.go new file mode 100644 index 00000000000..e17b885cff2 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/price_registry_reader_test.go @@ -0,0 +1,296 @@ +package ccipdata_test + +import ( + "context" + "math/big" + "reflect" + "testing" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" + evmclientmocks "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client/mocks" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/headtracker" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" + lpmocks "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller/mocks" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/price_registry_1_0_0" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/price_registry_1_2_0" + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest" + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/factory" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0" +) + +type priceRegReaderTH struct { + lp logpoller.LogPollerTest + ec client.Client + lggr logger.Logger + user *bind.TransactOpts + readers map[string]ccipdata.PriceRegistryReader + + // Expected state + blockTs []uint64 + expectedFeeTokens []common.Address + expectedGasUpdates map[uint64][]cciptypes.GasPrice + expectedTokenUpdates 
map[uint64][]cciptypes.TokenPrice + destSelectors []uint64 +} + +func commitAndGetBlockTs(ec *client.SimulatedBackendClient) uint64 { + h := ec.Commit() + b, _ := ec.BlockByHash(context.Background(), h) + return b.Time() +} + +func newSim(t *testing.T) (*bind.TransactOpts, *client.SimulatedBackendClient) { + user := testutils.MustNewSimTransactor(t) + sim := backends.NewSimulatedBackend(map[common.Address]core.GenesisAccount{ + user.From: { + Balance: big.NewInt(0).Mul(big.NewInt(10), big.NewInt(1e18)), + }, + }, 10e6) + ec := client.NewSimulatedBackendClient(t, sim, testutils.SimulatedChainID) + return user, ec +} + +// setupPriceRegistryReaderTH instantiates all versions of the price registry reader +// with a snapshot of data so reader tests can do multi-version assertions. +func setupPriceRegistryReaderTH(t *testing.T) priceRegReaderTH { + user, ec := newSim(t) + lggr := logger.TestLogger(t) + lpOpts := logpoller.Opts{ + PollPeriod: 100 * time.Millisecond, + FinalityDepth: 2, + BackfillBatchSize: 3, + RpcBatchSize: 2, + KeepFinalizedBlocksDepth: 1000, + } + headTracker := headtracker.NewSimulatedHeadTracker(ec, lpOpts.UseFinalityTag, lpOpts.FinalityDepth) + if lpOpts.PollPeriod == 0 { + lpOpts.PollPeriod = 1 * time.Hour + } + // TODO: We should be able to use an in memory log poller ORM here to speed up the tests. + lp := logpoller.NewLogPoller(logpoller.NewORM(testutils.SimulatedChainID, pgtest.NewSqlxDB(t), lggr), ec, lggr, headTracker, lpOpts) + + feeTokens := []common.Address{utils.RandomAddress(), utils.RandomAddress()} + dest1 := uint64(10) + dest2 := uint64(11) + gasPriceUpdatesBlock1 := []cciptypes.GasPrice{ + { + DestChainSelector: dest1, + Value: big.NewInt(11), + }, + } + gasPriceUpdatesBlock2 := []cciptypes.GasPrice{ + { + DestChainSelector: dest1, // Reset same gas price + Value: big.NewInt(12), // Intentionally different from block1 + }, + { + DestChainSelector: dest2, // Set gas price for different chain + Value: big.NewInt(12), + }, + } + token1 := ccipcalc.EvmAddrToGeneric(utils.RandomAddress()) + token2 := ccipcalc.EvmAddrToGeneric(utils.RandomAddress()) + tokenPriceUpdatesBlock1 := []cciptypes.TokenPrice{ + { + Token: token1, + Value: big.NewInt(12), + }, + } + tokenPriceUpdatesBlock2 := []cciptypes.TokenPrice{ + { + Token: token1, + Value: big.NewInt(13), // Intentionally change token1 value + }, + { + Token: token2, + Value: big.NewInt(12), // Intentionally set a same value different token + }, + } + ctx := testutils.Context(t) + addr, _, _, err := price_registry_1_0_0.DeployPriceRegistry(user, ec, nil, feeTokens, 1000) + require.NoError(t, err) + addr2, _, _, err := price_registry_1_2_0.DeployPriceRegistry(user, ec, nil, feeTokens, 1000) + require.NoError(t, err) + commitAndGetBlockTs(ec) // Deploy these + pr10r, err := factory.NewPriceRegistryReader(ctx, lggr, factory.NewEvmVersionFinder(), ccipcalc.EvmAddrToGeneric(addr), lp, ec) + require.NoError(t, err) + assert.Equal(t, reflect.TypeOf(pr10r).String(), reflect.TypeOf(&v1_0_0.PriceRegistry{}).String()) + pr12r, err := factory.NewPriceRegistryReader(ctx, lggr, factory.NewEvmVersionFinder(), ccipcalc.EvmAddrToGeneric(addr2), lp, ec) + require.NoError(t, err) + assert.Equal(t, reflect.TypeOf(pr12r).String(), reflect.TypeOf(&v1_2_0.PriceRegistry{}).String()) + // Apply block1. 
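+	// Each batch is committed separately so the harness ends up with two distinct block
+	// timestamps (b1, b2); the reader assertions below use them as per-timestamp windows
+	// for the *CreatedAfter queries.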
+ v1_0_0.ApplyPriceRegistryUpdate(t, user, addr, ec, gasPriceUpdatesBlock1, tokenPriceUpdatesBlock1) + v1_2_0.ApplyPriceRegistryUpdate(t, user, addr2, ec, gasPriceUpdatesBlock1, tokenPriceUpdatesBlock1) + b1 := commitAndGetBlockTs(ec) + // Apply block2 + v1_0_0.ApplyPriceRegistryUpdate(t, user, addr, ec, gasPriceUpdatesBlock2, tokenPriceUpdatesBlock2) + v1_2_0.ApplyPriceRegistryUpdate(t, user, addr2, ec, gasPriceUpdatesBlock2, tokenPriceUpdatesBlock2) + b2 := commitAndGetBlockTs(ec) + + // Capture all lp data. + lp.PollAndSaveLogs(context.Background(), 1) + + return priceRegReaderTH{ + lp: lp, + ec: ec, + lggr: lggr, + user: user, + readers: map[string]ccipdata.PriceRegistryReader{ + ccipdata.V1_0_0: pr10r, ccipdata.V1_2_0: pr12r, + }, + expectedFeeTokens: feeTokens, + expectedGasUpdates: map[uint64][]cciptypes.GasPrice{ + b1: gasPriceUpdatesBlock1, + b2: gasPriceUpdatesBlock2, + }, + expectedTokenUpdates: map[uint64][]cciptypes.TokenPrice{ + b1: tokenPriceUpdatesBlock1, + b2: tokenPriceUpdatesBlock2, + }, + blockTs: []uint64{b1, b2}, + destSelectors: []uint64{dest1, dest2}, + } +} + +func testPriceRegistryReader(t *testing.T, th priceRegReaderTH, pr ccipdata.PriceRegistryReader) { + // Assert have expected fee tokens. + gotFeeTokens, err := pr.GetFeeTokens(context.Background()) + require.NoError(t, err) + evmAddrs, err := ccipcalc.GenericAddrsToEvm(gotFeeTokens...) + require.NoError(t, err) + assert.Equal(t, th.expectedFeeTokens, evmAddrs) + + // Note unsupported chain selector simply returns an empty set not an error + gasUpdates, err := pr.GetGasPriceUpdatesCreatedAfter(context.Background(), 1e6, time.Unix(0, 0), 0) + require.NoError(t, err) + assert.Len(t, gasUpdates, 0) + + for i, ts := range th.blockTs { + // Should see all updates >= ts. + var expectedGas []cciptypes.GasPrice + var expectedDest0Gas []cciptypes.GasPrice + var expectedToken []cciptypes.TokenPrice + for j := i; j < len(th.blockTs); j++ { + expectedGas = append(expectedGas, th.expectedGasUpdates[th.blockTs[j]]...) + for _, g := range th.expectedGasUpdates[th.blockTs[j]] { + if g.DestChainSelector == th.destSelectors[0] { + expectedDest0Gas = append(expectedDest0Gas, g) + } + } + expectedToken = append(expectedToken, th.expectedTokenUpdates[th.blockTs[j]]...) + } + gasUpdates, err = pr.GetAllGasPriceUpdatesCreatedAfter(context.Background(), time.Unix(int64(ts-1), 0), 0) + require.NoError(t, err) + assert.Len(t, gasUpdates, len(expectedGas)) + + gasUpdates, err = pr.GetGasPriceUpdatesCreatedAfter(context.Background(), th.destSelectors[0], time.Unix(int64(ts-1), 0), 0) + require.NoError(t, err) + assert.Len(t, gasUpdates, len(expectedDest0Gas)) + + tokenUpdates, err2 := pr.GetTokenPriceUpdatesCreatedAfter(context.Background(), time.Unix(int64(ts-1), 0), 0) + require.NoError(t, err2) + assert.Len(t, tokenUpdates, len(expectedToken)) + } + + // Empty token set should return empty set no error. 
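+	// The non-empty case follows below, where the latest update per token is expected to win.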
+ gotEmpty, err := pr.GetTokenPrices(context.Background(), []cciptypes.Address{}) + require.NoError(t, err) + assert.Len(t, gotEmpty, 0) + + // We expect latest token prices to apply + allTokenUpdates, err := pr.GetTokenPriceUpdatesCreatedAfter(context.Background(), time.Unix(0, 0), 0) + require.NoError(t, err) + // Build latest map + latest := make(map[cciptypes.Address]*big.Int) + // Comes back in ascending order (oldest first) + var allTokens []cciptypes.Address + for i := len(allTokenUpdates) - 1; i >= 0; i-- { + assert.NoError(t, err) + _, have := latest[allTokenUpdates[i].Token] + if have { + continue + } + latest[allTokenUpdates[i].Token] = allTokenUpdates[i].Value + allTokens = append(allTokens, allTokenUpdates[i].Token) + } + tokenPrices, err := pr.GetTokenPrices(context.Background(), allTokens) + require.NoError(t, err) + require.Len(t, tokenPrices, len(allTokens)) + for _, p := range tokenPrices { + assert.Equal(t, p.Value, latest[p.Token]) + } +} + +func TestPriceRegistryReader(t *testing.T) { + th := setupPriceRegistryReaderTH(t) + // Assert all readers produce the same expected results. + for version, pr := range th.readers { + pr := pr + t.Run("PriceRegistryReader"+version, func(t *testing.T) { + testPriceRegistryReader(t, th, pr) + }) + } +} + +func TestNewPriceRegistryReader(t *testing.T) { + var tt = []struct { + typeAndVersion string + expectedErr string + }{ + { + typeAndVersion: "blah", + expectedErr: "unable to read type and version: invalid type and version blah", + }, + { + typeAndVersion: "EVM2EVMOffRamp 1.0.0", + expectedErr: "expected PriceRegistry got EVM2EVMOffRamp", + }, + { + typeAndVersion: "PriceRegistry 1.2.0", + expectedErr: "", + }, + { + typeAndVersion: "PriceRegistry 1.6.0-dev", + expectedErr: "", + }, + { + typeAndVersion: "PriceRegistry 2.0.0", + expectedErr: "unsupported price registry version 2.0.0", + }, + } + ctx := testutils.Context(t) + for _, tc := range tt { + t.Run(tc.typeAndVersion, func(t *testing.T) { + b, err := utils.ABIEncode(`[{"type":"string"}]`, tc.typeAndVersion) + require.NoError(t, err) + c := evmclientmocks.NewClient(t) + c.On("CallContract", mock.Anything, mock.Anything, mock.Anything).Return(b, nil) + addr := ccipcalc.EvmAddrToGeneric(utils.RandomAddress()) + lp := lpmocks.NewLogPoller(t) + lp.On("RegisterFilter", mock.Anything, mock.Anything).Return(nil).Maybe() + _, err = factory.NewPriceRegistryReader(ctx, logger.TestLogger(t), factory.NewEvmVersionFinder(), addr, lp, c) + if tc.expectedErr != "" { + require.EqualError(t, err, tc.expectedErr) + } else { + require.NoError(t, err) + } + }) + } +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/reader.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/reader.go new file mode 100644 index 00000000000..a9a07f0879b --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/reader.go @@ -0,0 +1,78 @@ +package ccipdata + +import ( + "fmt" + "time" + + "github.com/ethereum/go-ethereum/core/types" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" + evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types" + "github.com/smartcontractkit/chainlink/v2/core/logger" +) + +const ( + V1_0_0 = "1.0.0" + V1_1_0 = "1.1.0" + V1_2_0 = "1.2.0" + V1_4_0 = "1.4.0" + V1_5_0 = "1.5.0-dev" + V1_6_0 = "1.6.0-dev" +) + +const ( + // CommitExecLogsRetention defines the duration for which logs critical for Commit/Exec plugins processing are retained. 
+ // Although Exec relies on permissionlessExecThreshold which is lower than 24hours for picking eligible CommitRoots, + // Commit still can reach to older logs because it filters them by sequence numbers. For instance, in case of RMN curse on chain, + // we might have logs waiting in OnRamp to be committed first. When outage takes days we still would + // be able to bring back processing without replaying any logs from chain. You can read that param as + // "how long CCIP can be down and still be able to process all the messages after getting back to life". + // Breaching this threshold would require replaying chain using LogPoller from the beginning of the outage. + CommitExecLogsRetention = 30 * 24 * time.Hour // 30 days + // CacheEvictionLogsRetention defines the duration for which logs used for caching on-chain data are kept. + // Restarting node clears the cache entirely and rebuilds it from scratch by fetching data from chain, + // so we don't need to keep these logs for very long. All events relying on cache.NewLogpollerEventsBased should use this retention. + CacheEvictionLogsRetention = 7 * 24 * time.Hour // 7 days + // PriceUpdatesLogsRetention defines the duration for which logs with price updates are kept. + // These logs are emitted whenever the token price or gas price is updated and Commit scans very small time windows (e.g. 2 hours) + PriceUpdatesLogsRetention = 1 * 24 * time.Hour // 1 day +) + +type Event[T any] struct { + Data T + cciptypes.TxMeta +} + +func LogsConfirmations(finalized bool) evmtypes.Confirmations { + if finalized { + return evmtypes.Finalized + } + return evmtypes.Unconfirmed +} + +func ParseLogs[T any](logs []logpoller.Log, lggr logger.Logger, parseFunc func(log types.Log) (*T, error)) ([]Event[T], error) { + reqs := make([]Event[T], 0, len(logs)) + + for _, log := range logs { + data, err := parseFunc(log.ToGethLog()) + if err != nil { + lggr.Errorw("Unable to parse log", "err", err) + continue + } + reqs = append(reqs, Event[T]{ + Data: *data, + TxMeta: cciptypes.TxMeta{ + BlockTimestampUnixMilli: log.BlockTimestamp.UnixMilli(), + BlockNumber: uint64(log.BlockNumber), + TxHash: log.TxHash.String(), + LogIndex: uint64(log.LogIndex), + }, + }) + } + + if len(logs) != len(reqs) { + return nil, fmt.Errorf("%d logs were not parsed", len(logs)-len(reqs)) + } + return reqs, nil +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/reader_test.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/reader_test.go new file mode 100644 index 00000000000..06766be81ee --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/reader_test.go @@ -0,0 +1,72 @@ +package ccipdata + +import ( + "fmt" + "testing" + "time" + + "github.com/ethereum/go-ethereum/core/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap/zapcore" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" + "github.com/smartcontractkit/chainlink/v2/core/logger" +) + +func Test_parseLogs(t *testing.T) { + // generate 100 logs + logs := make([]logpoller.Log, 100) + for i := range logs { + logs[i].LogIndex = int64(i + 1) + logs[i].BlockNumber = int64(i) * 1000 + logs[i].BlockTimestamp = time.Now() + } + + parseFn := func(log types.Log) (*uint, error) { + return &log.Index, nil + } + + parsedEvents, err := ParseLogs[uint](logs, logger.TestLogger(t), parseFn) + require.NoError(t, err) + assert.Len(t, parsedEvents, 100) + + // Make sure everything is parsed according to the parse func + for i, ev := range 
parsedEvents { + assert.Equal(t, i+1, int(ev.Data)) + assert.Equal(t, i*1000, int(ev.BlockNumber)) + assert.Greater(t, ev.BlockTimestampUnixMilli, time.Now().Add(-time.Minute).UnixMilli()) + } +} + +func Test_parseLogs_withErrors(t *testing.T) { + // generate 50 valid logs and 50 errors + actualErrorCount := 50 + logs := make([]logpoller.Log, actualErrorCount*2) + for i := range logs { + logs[i].LogIndex = int64(i + 1) + } + + // return an error for half of the logs. + parseFn := func(log types.Log) (*uint, error) { + if log.Index%2 == 0 { + return nil, fmt.Errorf("cannot parse %d", log.Index) + } + return &log.Index, nil + } + + log, observed := logger.TestLoggerObserved(t, zapcore.DebugLevel) + parsedEvents, err := ParseLogs[uint](logs, log, parseFn) + assert.ErrorContains(t, err, fmt.Sprintf("%d logs were not parsed", len(logs)/2)) + assert.Nil(t, parsedEvents, "No events are returned if there was an error.") + + // logs are written for errors. + require.Equal(t, actualErrorCount, observed.Len(), "Expect 51 warnings: one for each error and a summary.") + for i, entry := range observed.All() { + assert.Equal(t, zapcore.ErrorLevel, entry.Level) + assert.Contains(t, entry.Message, "Unable to parse log") + contextMap := entry.ContextMap() + require.Contains(t, contextMap, "err") + assert.Contains(t, contextMap["err"], fmt.Sprintf("cannot parse %d", (i+1)*2), "each error should be logged as a warning") + } +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/retry_config.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/retry_config.go new file mode 100644 index 00000000000..41161ee9388 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/retry_config.go @@ -0,0 +1,9 @@ +package ccipdata + +import "time" + +// RetryConfig configures an initial delay between retries and a max delay between retries +type RetryConfig struct { + InitialDelay time.Duration + MaxDelay time.Duration +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/test_utils.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/test_utils.go new file mode 100644 index 00000000000..6dc51b888ed --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/test_utils.go @@ -0,0 +1,36 @@ +package ccipdata + +import ( + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/types" + "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" +) + +// NewSimulation returns a client and a simulated backend. +func NewSimulation(t testing.TB) (*bind.TransactOpts, *client.SimulatedBackendClient) { + user := testutils.MustNewSimTransactor(t) + simulatedBackend := backends.NewSimulatedBackend(map[common.Address]core.GenesisAccount{ + user.From: { + Balance: big.NewInt(0).Mul(big.NewInt(3), big.NewInt(1e18)), + }, + }, 10e6) + simulatedBackendClient := client.NewSimulatedBackendClient(t, simulatedBackend, testutils.SimulatedChainID) + return user, simulatedBackendClient +} + +// AssertNonRevert Verify that a transaction was not reverted. 
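+//
+// A typical pairing with NewSimulation looks roughly like this (sketch adapted from the
+// onramp reader test; the deploy call stands in for any bound-contract transaction):
+//
+//	user, bc := ccipdata.NewSimulation(t)
+//	_, tx, _, err := evm_2_evm_onramp.DeployEVM2EVMOnRamp(user, bc /*, config args... */)
+//	bc.Commit()
+//	require.NoError(t, err)
+//	ccipdata.AssertNonRevert(t, tx, bc, user)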
+func AssertNonRevert(t testing.TB, tx *types.Transaction, bc *client.SimulatedBackendClient, user *bind.TransactOpts) { + require.NotNil(t, tx, "Transaction should not be nil") + receipt, err := bc.TransactionReceipt(user.Context, tx.Hash()) + require.NoError(t, err) + require.NotEqual(t, uint64(0), receipt.Status, "Transaction should not have reverted") +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/token_pool_reader.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/token_pool_reader.go new file mode 100644 index 00000000000..999061f4913 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/token_pool_reader.go @@ -0,0 +1,10 @@ +package ccipdata + +import ( + "github.com/ethereum/go-ethereum/common" +) + +type TokenPoolReader interface { + Address() common.Address + Type() string +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/usdc_reader.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/usdc_reader.go new file mode 100644 index 00000000000..51ce0db7c04 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/usdc_reader.go @@ -0,0 +1,169 @@ +package ccipdata + +import ( + "context" + "fmt" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/patrickmn/go-cache" + "github.com/pkg/errors" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils" + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers" +) + +var ( + // shortLivedInMemLogsCacheExpiration is used for the short-lived in meme logs cache. + // Value should usually be set to just a few seconds, a larger duration will not increase performance and might + // cause performance issues on re-orged logs. + shortLivedInMemLogsCacheExpiration = 20 * time.Second +) + +const ( + MESSAGE_SENT_FILTER_NAME = "USDC message sent" +) + +var _ USDCReader = &USDCReaderImpl{} + +type USDCReader interface { + // GetUSDCMessagePriorToLogIndexInTx returns the specified USDC message data. + // e.g. if msg contains 3 tokens: [usdc1, wETH, usdc2] ignoring non-usdc tokens + // if usdcTokenIndexOffset is 0 we select usdc2 + // if usdcTokenIndexOffset is 1 we select usdc1 + // The message logs are found using the provided transaction hash. 
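+	// For example (illustrative): if the transaction holds MessageSent logs at log indices 5, 9 and 12
+	// and logIndex is 15, usdcTokenIndexOffset 0 selects the log at index 12 and offset 1 the one at index 9.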
+ GetUSDCMessagePriorToLogIndexInTx(ctx context.Context, logIndex int64, usdcTokenIndexOffset int, txHash string) ([]byte, error) +} + +type USDCReaderImpl struct { + usdcMessageSent common.Hash + lp logpoller.LogPoller + filter logpoller.Filter + lggr logger.Logger + transmitterAddress common.Address + + // shortLivedInMemLogs is a short-lived cache (items expire every few seconds) + // used to prevent frequent log fetching from the log poller + shortLivedInMemLogs *cache.Cache +} + +func (u *USDCReaderImpl) Close() error { + // FIXME Dim pgOpts removed from LogPoller + return u.lp.UnregisterFilter(context.Background(), u.filter.Name) +} + +func (u *USDCReaderImpl) RegisterFilters() error { + // FIXME Dim pgOpts removed from LogPoller + return u.lp.RegisterFilter(context.Background(), u.filter) +} + +// usdcPayload has to match the onchain event emitted by the USDC message transmitter +type usdcPayload []byte + +func (d usdcPayload) AbiString() string { + return `[{"type": "bytes"}]` +} + +func (d usdcPayload) Validate() error { + if len(d) == 0 { + return errors.New("must be non-empty") + } + return nil +} + +func parseUSDCMessageSent(logData []byte) ([]byte, error) { + decodeAbiStruct, err := abihelpers.DecodeAbiStruct[usdcPayload](logData) + if err != nil { + return nil, err + } + return decodeAbiStruct, nil +} + +func (u *USDCReaderImpl) GetUSDCMessagePriorToLogIndexInTx(ctx context.Context, logIndex int64, usdcTokenIndexOffset int, txHash string) ([]byte, error) { + var lpLogs []logpoller.Log + + // fetch all the usdc logs for the provided tx hash + k := fmt.Sprintf("usdc-%s", txHash) // custom prefix to avoid key collision if someone re-uses the cache + if rawLogs, foundInMem := u.shortLivedInMemLogs.Get(k); foundInMem { + inMemLogs, ok := rawLogs.([]logpoller.Log) + if !ok { + return nil, errors.Errorf("unexpected in-mem logs type %T", rawLogs) + } + u.lggr.Debugw("found logs in memory", "k", k, "len", len(inMemLogs)) + lpLogs = inMemLogs + } + + if len(lpLogs) == 0 { + u.lggr.Debugw("fetching logs from lp", "k", k) + logs, err := u.lp.IndexedLogsByTxHash( + ctx, + u.usdcMessageSent, + u.transmitterAddress, + common.HexToHash(txHash), + ) + if err != nil { + return nil, err + } + lpLogs = logs + u.shortLivedInMemLogs.Set(k, logs, cache.DefaultExpiration) + u.lggr.Debugw("fetched logs from lp", "logs", len(lpLogs)) + } + + // collect the logs with log index less than the provided log index + allUsdcTokensData := make([][]byte, 0) + for _, current := range lpLogs { + if current.LogIndex < logIndex { + u.lggr.Infow("Found USDC message", "logIndex", current.LogIndex, "txHash", current.TxHash.Hex(), "data", hexutil.Encode(current.Data)) + allUsdcTokensData = append(allUsdcTokensData, current.Data) + } + } + + usdcTokenIndex := (len(allUsdcTokensData) - 1) - usdcTokenIndexOffset + + if usdcTokenIndex < 0 || usdcTokenIndex >= len(allUsdcTokensData) { + u.lggr.Errorw("usdc message not found", + "logIndex", logIndex, + "allUsdcTokenData", len(allUsdcTokensData), + "txHash", txHash, + "usdcTokenIndex", usdcTokenIndex, + ) + return nil, errors.Errorf("usdc token index %d is not valid", usdcTokenIndex) + } + return parseUSDCMessageSent(allUsdcTokensData[usdcTokenIndex]) +} + +func NewUSDCReader(lggr logger.Logger, jobID string, transmitter common.Address, lp logpoller.LogPoller, registerFilters bool) (*USDCReaderImpl, error) { + eventSig := utils.Keccak256Fixed([]byte("MessageSent(bytes)")) + + r := &USDCReaderImpl{ + lggr: lggr, + lp: lp, + usdcMessageSent: eventSig, + filter: 
logpoller.Filter{ + Name: logpoller.FilterName(MESSAGE_SENT_FILTER_NAME, jobID, transmitter.Hex()), + EventSigs: []common.Hash{eventSig}, + Addresses: []common.Address{transmitter}, + Retention: CommitExecLogsRetention, + }, + transmitterAddress: transmitter, + shortLivedInMemLogs: cache.New(shortLivedInMemLogsCacheExpiration, 2*shortLivedInMemLogsCacheExpiration), + } + + if registerFilters { + if err := r.RegisterFilters(); err != nil { + return nil, fmt.Errorf("register filters: %w", err) + } + } + return r, nil +} + +func CloseUSDCReader(lggr logger.Logger, jobID string, transmitter common.Address, lp logpoller.LogPoller) error { + r, err := NewUSDCReader(lggr, jobID, transmitter, lp, false) + if err != nil { + return err + } + return r.Close() +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/usdc_reader_internal_test.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/usdc_reader_internal_test.go new file mode 100644 index 00000000000..a5f0a1ffd06 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/usdc_reader_internal_test.go @@ -0,0 +1,178 @@ +package ccipdata + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/headtracker" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" + lpmocks "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller/mocks" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils" + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest" + "github.com/smartcontractkit/chainlink/v2/core/logger" +) + +func TestLogPollerClient_GetUSDCMessagePriorToLogIndexInTx(t *testing.T) { + addr := utils.RandomAddress() + txHash := common.BytesToHash(addr[:]) + ccipLogIndex := int64(100) + + expectedData := "0x000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000f80000000000000001000000020000000000048d71000000000000000000000000eb08f243e5d3fcff26a9e38ae5520a669f4019d000000000000000000000000023a04d5935ed8bc8e3eb78db3541f0abfb001c6e0000000000000000000000006cb3ed9b441eb674b58495c8b3324b59faff5243000000000000000000000000000000005425890298aed601595a70ab815c96711a31bc65000000000000000000000000ab4f961939bfe6a93567cc57c59eed7084ce2131000000000000000000000000000000000000000000000000000000000000271000000000000000000000000035e08285cfed1ef159236728f843286c55fc08610000000000000000" + expectedPostParse := "0x0000000000000001000000020000000000048d71000000000000000000000000eb08f243e5d3fcff26a9e38ae5520a669f4019d000000000000000000000000023a04d5935ed8bc8e3eb78db3541f0abfb001c6e0000000000000000000000006cb3ed9b441eb674b58495c8b3324b59faff5243000000000000000000000000000000005425890298aed601595a70ab815c96711a31bc65000000000000000000000000ab4f961939bfe6a93567cc57c59eed7084ce2131000000000000000000000000000000000000000000000000000000000000271000000000000000000000000035e08285cfed1ef159236728f843286c55fc0861" + lggr := logger.TestLogger(t) + + t.Run("multiple found - selected last", func(t *testing.T) { + lp := 
lpmocks.NewLogPoller(t) + u, _ := NewUSDCReader(lggr, "job_123", utils.RandomAddress(), lp, false) + + lp.On("IndexedLogsByTxHash", + mock.Anything, + u.usdcMessageSent, + u.transmitterAddress, + txHash, + ).Return([]logpoller.Log{ + {LogIndex: ccipLogIndex - 2, Data: []byte("-2")}, + {LogIndex: ccipLogIndex - 1, Data: hexutil.MustDecode(expectedData)}, + {LogIndex: ccipLogIndex, Data: []byte("0")}, + {LogIndex: ccipLogIndex + 1, Data: []byte("1")}, + }, nil) + usdcMessageData, err := u.GetUSDCMessagePriorToLogIndexInTx(context.Background(), ccipLogIndex, 0, txHash.String()) + assert.NoError(t, err) + assert.Equal(t, expectedPostParse, hexutil.Encode(usdcMessageData)) + lp.AssertExpectations(t) + }) + + t.Run("multiple found - selected first", func(t *testing.T) { + lp := lpmocks.NewLogPoller(t) + u, _ := NewUSDCReader(lggr, "job_123", utils.RandomAddress(), lp, false) + + lp.On("IndexedLogsByTxHash", + mock.Anything, + u.usdcMessageSent, + u.transmitterAddress, + txHash, + ).Return([]logpoller.Log{ + {LogIndex: ccipLogIndex - 2, Data: hexutil.MustDecode(expectedData)}, + {LogIndex: ccipLogIndex - 1, Data: []byte("-2")}, + {LogIndex: ccipLogIndex, Data: []byte("0")}, + {LogIndex: ccipLogIndex + 1, Data: []byte("1")}, + }, nil) + usdcMessageData, err := u.GetUSDCMessagePriorToLogIndexInTx(context.Background(), ccipLogIndex, 1, txHash.String()) + assert.NoError(t, err) + assert.Equal(t, expectedPostParse, hexutil.Encode(usdcMessageData)) + lp.AssertExpectations(t) + }) + + t.Run("logs fetched from memory in subsequent calls", func(t *testing.T) { + lp := lpmocks.NewLogPoller(t) + u, _ := NewUSDCReader(lggr, "job_123", utils.RandomAddress(), lp, false) + + lp.On("IndexedLogsByTxHash", + mock.Anything, + u.usdcMessageSent, + u.transmitterAddress, + txHash, + ).Return([]logpoller.Log{ + {LogIndex: ccipLogIndex - 2, Data: hexutil.MustDecode(expectedData)}, + {LogIndex: ccipLogIndex - 1, Data: []byte("-2")}, + {LogIndex: ccipLogIndex, Data: []byte("0")}, + {LogIndex: ccipLogIndex + 1, Data: []byte("1")}, + }, nil).Once() + + // first call logs must be fetched from lp + usdcMessageData, err := u.GetUSDCMessagePriorToLogIndexInTx(context.Background(), ccipLogIndex, 1, txHash.String()) + assert.NoError(t, err) + assert.Equal(t, expectedPostParse, hexutil.Encode(usdcMessageData)) + + // subsequent call, logs must be fetched from memory + usdcMessageData, err = u.GetUSDCMessagePriorToLogIndexInTx(context.Background(), ccipLogIndex, 1, txHash.String()) + assert.NoError(t, err) + assert.Equal(t, expectedPostParse, hexutil.Encode(usdcMessageData)) + + lp.AssertExpectations(t) + }) + + t.Run("none found", func(t *testing.T) { + lp := lpmocks.NewLogPoller(t) + u, _ := NewUSDCReader(lggr, "job_123", utils.RandomAddress(), lp, false) + lp.On("IndexedLogsByTxHash", + mock.Anything, + u.usdcMessageSent, + u.transmitterAddress, + txHash, + ).Return([]logpoller.Log{}, nil) + + usdcMessageData, err := u.GetUSDCMessagePriorToLogIndexInTx(context.Background(), ccipLogIndex, 0, txHash.String()) + assert.Errorf(t, err, fmt.Sprintf("no USDC message found prior to log index %d in tx %s", ccipLogIndex, txHash.Hex())) + assert.Nil(t, usdcMessageData) + + lp.AssertExpectations(t) + }) +} + +func TestParse(t *testing.T) { + expectedBody, err := 
hexutil.Decode("0x000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000f80000000000000001000000020000000000048d71000000000000000000000000eb08f243e5d3fcff26a9e38ae5520a669f4019d000000000000000000000000023a04d5935ed8bc8e3eb78db3541f0abfb001c6e0000000000000000000000006cb3ed9b441eb674b58495c8b3324b59faff5243000000000000000000000000000000005425890298aed601595a70ab815c96711a31bc65000000000000000000000000ab4f961939bfe6a93567cc57c59eed7084ce2131000000000000000000000000000000000000000000000000000000000000271000000000000000000000000035e08285cfed1ef159236728f843286c55fc08610000000000000000") + require.NoError(t, err) + + parsedBody, err := parseUSDCMessageSent(expectedBody) + require.NoError(t, err) + + expectedPostParse := "0x0000000000000001000000020000000000048d71000000000000000000000000eb08f243e5d3fcff26a9e38ae5520a669f4019d000000000000000000000000023a04d5935ed8bc8e3eb78db3541f0abfb001c6e0000000000000000000000006cb3ed9b441eb674b58495c8b3324b59faff5243000000000000000000000000000000005425890298aed601595a70ab815c96711a31bc65000000000000000000000000ab4f961939bfe6a93567cc57c59eed7084ce2131000000000000000000000000000000000000000000000000000000000000271000000000000000000000000035e08285cfed1ef159236728f843286c55fc0861" + + require.Equal(t, expectedPostParse, hexutil.Encode(parsedBody)) +} + +func TestFilters(t *testing.T) { + t.Run("filters of different jobs should be distinct", func(t *testing.T) { + lggr := logger.TestLogger(t) + chainID := testutils.NewRandomEVMChainID() + db := pgtest.NewSqlxDB(t) + o := logpoller.NewORM(chainID, db, lggr) + ec := backends.NewSimulatedBackend(map[common.Address]core.GenesisAccount{}, 10e6) + esc := client.NewSimulatedBackendClient(t, ec, chainID) + lpOpts := logpoller.Opts{ + PollPeriod: 1 * time.Hour, + FinalityDepth: 1, + BackfillBatchSize: 1, + RpcBatchSize: 1, + KeepFinalizedBlocksDepth: 100, + } + headTracker := headtracker.NewSimulatedHeadTracker(esc, lpOpts.UseFinalityTag, lpOpts.FinalityDepth) + if lpOpts.PollPeriod == 0 { + lpOpts.PollPeriod = 1 * time.Hour + } + lp := logpoller.NewLogPoller(o, esc, lggr, headTracker, lpOpts) + + jobID1 := "job-1" + jobID2 := "job-2" + transmitter := utils.RandomAddress() + + f1 := logpoller.FilterName("USDC message sent", jobID1, transmitter.Hex()) + f2 := logpoller.FilterName("USDC message sent", jobID2, transmitter.Hex()) + + _, err := NewUSDCReader(lggr, jobID1, transmitter, lp, true) + assert.NoError(t, err) + assert.True(t, lp.HasFilter(f1)) + + _, err = NewUSDCReader(lggr, jobID2, transmitter, lp, true) + assert.NoError(t, err) + assert.True(t, lp.HasFilter(f2)) + + err = CloseUSDCReader(lggr, jobID2, transmitter, lp) + assert.NoError(t, err) + assert.True(t, lp.HasFilter(f1)) + assert.False(t, lp.HasFilter(f2)) + }) +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/commit_store.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/commit_store.go new file mode 100644 index 00000000000..3e58143a284 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/commit_store.go @@ -0,0 +1,456 @@ +package v1_0_0 + +import ( + "context" + "fmt" + "math/big" + "sync" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/pkg/errors" + + "github.com/smartcontractkit/chainlink-common/pkg/config" + cciptypes 
"github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + "github.com/smartcontractkit/chainlink-common/pkg/types/query" + "github.com/smartcontractkit/chainlink-common/pkg/types/query/primitives" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" + evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/commit_store_1_0_0" + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers" + ccipconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/logpollerutil" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/prices" +) + +const ( + EXEC_REPORT_ACCEPTS = "Exec report accepts" + ReportAccepted = "ReportAccepted" +) + +var _ ccipdata.CommitStoreReader = &CommitStore{} + +type CommitStore struct { + // Static config + commitStore *commit_store_1_0_0.CommitStore + lggr logger.Logger + lp logpoller.LogPoller + address common.Address + estimator *gas.EvmFeeEstimator + sourceMaxGasPrice *big.Int + filters []logpoller.Filter + reportAcceptedSig common.Hash + reportAcceptedMaxSeqIndex int + commitReportArgs abi.Arguments + + // Dynamic config + configMu sync.RWMutex + gasPriceEstimator prices.ExecGasPriceEstimator + offchainConfig cciptypes.CommitOffchainConfig +} + +func (c *CommitStore) GetCommitStoreStaticConfig(ctx context.Context) (cciptypes.CommitStoreStaticConfig, error) { + legacyConfig, err := c.commitStore.GetStaticConfig(&bind.CallOpts{Context: ctx}) + if err != nil { + return cciptypes.CommitStoreStaticConfig{}, errors.New("Could not get commitStore static config") + } + return cciptypes.CommitStoreStaticConfig{ + ChainSelector: legacyConfig.ChainSelector, + SourceChainSelector: legacyConfig.SourceChainSelector, + OnRamp: ccipcalc.EvmAddrToGeneric(legacyConfig.OnRamp), + ArmProxy: ccipcalc.EvmAddrToGeneric(legacyConfig.ArmProxy), + }, nil +} + +func (c *CommitStore) EncodeCommitReport(_ context.Context, report cciptypes.CommitStoreReport) ([]byte, error) { + return encodeCommitReport(c.commitReportArgs, report) +} + +func encodeCommitReport(commitReportArgs abi.Arguments, report cciptypes.CommitStoreReport) ([]byte, error) { + var tokenPriceUpdates []commit_store_1_0_0.InternalTokenPriceUpdate + for _, tokenPriceUpdate := range report.TokenPrices { + sourceTokenEvmAddr, err := ccipcalc.GenericAddrToEvm(tokenPriceUpdate.Token) + if err != nil { + return nil, err + } + tokenPriceUpdates = append(tokenPriceUpdates, commit_store_1_0_0.InternalTokenPriceUpdate{ + SourceToken: sourceTokenEvmAddr, + UsdPerToken: tokenPriceUpdate.Value, + }) + } + var usdPerUnitGas = big.NewInt(0) + var destChainSelector = uint64(0) + if len(report.GasPrices) > 1 { + return []byte{}, errors.Errorf("CommitStore V1_0_0 can only accept 1 gas price, received: %d", len(report.GasPrices)) + } + if len(report.GasPrices) > 0 { + usdPerUnitGas = report.GasPrices[0].Value + destChainSelector = report.GasPrices[0].DestChainSelector + } + rep := 
commit_store_1_0_0.CommitStoreCommitReport{ + PriceUpdates: commit_store_1_0_0.InternalPriceUpdates{ + TokenPriceUpdates: tokenPriceUpdates, + UsdPerUnitGas: usdPerUnitGas, + DestChainSelector: destChainSelector, + }, + Interval: commit_store_1_0_0.CommitStoreInterval{Min: report.Interval.Min, Max: report.Interval.Max}, + MerkleRoot: report.MerkleRoot, + } + return commitReportArgs.PackValues([]interface{}{rep}) +} + +func DecodeCommitReport(commitReportArgs abi.Arguments, report []byte) (cciptypes.CommitStoreReport, error) { + unpacked, err := commitReportArgs.Unpack(report) + if err != nil { + return cciptypes.CommitStoreReport{}, err + } + if len(unpacked) != 1 { + return cciptypes.CommitStoreReport{}, errors.New("expected single struct value") + } + + commitReport, ok := unpacked[0].(struct { + PriceUpdates struct { + TokenPriceUpdates []struct { + SourceToken common.Address `json:"sourceToken"` + UsdPerToken *big.Int `json:"usdPerToken"` + } `json:"tokenPriceUpdates"` + DestChainSelector uint64 `json:"destChainSelector"` + UsdPerUnitGas *big.Int `json:"usdPerUnitGas"` + } `json:"priceUpdates"` + Interval struct { + Min uint64 `json:"min"` + Max uint64 `json:"max"` + } `json:"interval"` + MerkleRoot [32]byte `json:"merkleRoot"` + }) + if !ok { + return cciptypes.CommitStoreReport{}, errors.Errorf("invalid commit report got %T", unpacked[0]) + } + + var tokenPriceUpdates []cciptypes.TokenPrice + for _, u := range commitReport.PriceUpdates.TokenPriceUpdates { + tokenPriceUpdates = append(tokenPriceUpdates, cciptypes.TokenPrice{ + Token: cciptypes.Address(u.SourceToken.String()), + Value: u.UsdPerToken, + }) + } + + var gasPrices []cciptypes.GasPrice + if commitReport.PriceUpdates.DestChainSelector != 0 { + // No gas price update { + gasPrices = append(gasPrices, cciptypes.GasPrice{ + DestChainSelector: commitReport.PriceUpdates.DestChainSelector, + Value: commitReport.PriceUpdates.UsdPerUnitGas, + }) + } + + return cciptypes.CommitStoreReport{ + TokenPrices: tokenPriceUpdates, + GasPrices: gasPrices, + Interval: cciptypes.CommitStoreInterval{ + Min: commitReport.Interval.Min, + Max: commitReport.Interval.Max, + }, + MerkleRoot: commitReport.MerkleRoot, + }, nil +} + +func (c *CommitStore) DecodeCommitReport(_ context.Context, report []byte) (cciptypes.CommitStoreReport, error) { + return DecodeCommitReport(c.commitReportArgs, report) +} + +func (c *CommitStore) IsBlessed(ctx context.Context, root [32]byte) (bool, error) { + return c.commitStore.IsBlessed(&bind.CallOpts{Context: ctx}, root) +} + +func (c *CommitStore) OffchainConfig(context.Context) (cciptypes.CommitOffchainConfig, error) { + c.configMu.RLock() + defer c.configMu.RUnlock() + return c.offchainConfig, nil +} + +func (c *CommitStore) GasPriceEstimator(context.Context) (cciptypes.GasPriceEstimatorCommit, error) { + c.configMu.RLock() + defer c.configMu.RUnlock() + return c.gasPriceEstimator, nil +} + +func (c *CommitStore) SetGasEstimator(ctx context.Context, gpe gas.EvmFeeEstimator) error { + c.configMu.RLock() + defer c.configMu.RUnlock() + c.estimator = &gpe + return nil +} + +func (c *CommitStore) SetSourceMaxGasPrice(ctx context.Context, sourceMaxGasPrice *big.Int) error { + c.configMu.RLock() + defer c.configMu.RUnlock() + c.sourceMaxGasPrice = sourceMaxGasPrice + return nil +} + +// CommitOffchainConfig is a legacy version of CommitOffchainConfig, used for CommitStore version 1.0.0 and 1.1.0 +type CommitOffchainConfig struct { + SourceFinalityDepth uint32 + DestFinalityDepth uint32 + FeeUpdateHeartBeat config.Duration 
+ FeeUpdateDeviationPPB uint32 + InflightCacheExpiry config.Duration + PriceReportingDisabled bool +} + +func (c CommitOffchainConfig) Validate() error { + if c.SourceFinalityDepth == 0 { + return errors.New("must set SourceFinalityDepth") + } + if c.DestFinalityDepth == 0 { + return errors.New("must set DestFinalityDepth") + } + if c.FeeUpdateHeartBeat.Duration() == 0 { + return errors.New("must set FeeUpdateHeartBeat") + } + if c.FeeUpdateDeviationPPB == 0 { + return errors.New("must set FeeUpdateDeviationPPB") + } + if c.InflightCacheExpiry.Duration() == 0 { + return errors.New("must set InflightCacheExpiry") + } + + return nil +} + +func (c *CommitStore) ChangeConfig(_ context.Context, onchainConfig []byte, offchainConfig []byte) (cciptypes.Address, error) { + onchainConfigParsed, err := abihelpers.DecodeAbiStruct[ccipdata.CommitOnchainConfig](onchainConfig) + if err != nil { + return "", err + } + + offchainConfigV1, err := ccipconfig.DecodeOffchainConfig[CommitOffchainConfig](offchainConfig) + if err != nil { + return "", err + } + c.configMu.Lock() + defer c.configMu.Unlock() + + if c.estimator == nil { + return "", fmt.Errorf("this CommitStore estimator is nil. SetGasEstimator should be called before ChangeConfig") + } + + if c.sourceMaxGasPrice == nil { + return "", fmt.Errorf("this CommitStore sourceMaxGasPrice is nil. SetSourceMaxGasPrice should be called before ChangeConfig") + } + + c.gasPriceEstimator = prices.NewExecGasPriceEstimator( + *c.estimator, + c.sourceMaxGasPrice, + int64(offchainConfigV1.FeeUpdateDeviationPPB)) + c.offchainConfig = ccipdata.NewCommitOffchainConfig( + offchainConfigV1.FeeUpdateDeviationPPB, + offchainConfigV1.FeeUpdateHeartBeat.Duration(), + offchainConfigV1.FeeUpdateDeviationPPB, + offchainConfigV1.FeeUpdateHeartBeat.Duration(), + offchainConfigV1.InflightCacheExpiry.Duration(), + offchainConfigV1.PriceReportingDisabled) + c.lggr.Infow("ChangeConfig", + "offchainConfig", offchainConfigV1, + "onchainConfig", onchainConfigParsed, + ) + return cciptypes.Address(onchainConfigParsed.PriceRegistry.String()), nil +} + +func (c *CommitStore) Close() error { + return logpollerutil.UnregisterLpFilters(c.lp, c.filters) +} + +func (c *CommitStore) parseReport(log types.Log) (*cciptypes.CommitStoreReport, error) { + repAccepted, err := c.commitStore.ParseReportAccepted(log) + if err != nil { + return nil, err + } + // Translate to common struct. 
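+	// A 1.0.0 ReportAccepted event carries a single UsdPerUnitGas/DestChainSelector pair, so it is
+	// lifted into a one-element GasPrices slice on the version-agnostic cciptypes.CommitStoreReport.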
+ var tokenPrices []cciptypes.TokenPrice + for _, tpu := range repAccepted.Report.PriceUpdates.TokenPriceUpdates { + tokenPrices = append(tokenPrices, cciptypes.TokenPrice{ + Token: cciptypes.Address(tpu.SourceToken.String()), + Value: tpu.UsdPerToken, + }) + } + return &cciptypes.CommitStoreReport{ + TokenPrices: tokenPrices, + GasPrices: []cciptypes.GasPrice{{DestChainSelector: repAccepted.Report.PriceUpdates.DestChainSelector, Value: repAccepted.Report.PriceUpdates.UsdPerUnitGas}}, + MerkleRoot: repAccepted.Report.MerkleRoot, + Interval: cciptypes.CommitStoreInterval{Min: repAccepted.Report.Interval.Min, Max: repAccepted.Report.Interval.Max}, + }, nil +} + +func (c *CommitStore) GetCommitReportMatchingSeqNum(ctx context.Context, seqNr uint64, confs int) ([]cciptypes.CommitStoreReportWithTxMeta, error) { + logs, err := c.lp.LogsDataWordBetween( + ctx, + c.reportAcceptedSig, + c.address, + c.reportAcceptedMaxSeqIndex-1, + c.reportAcceptedMaxSeqIndex, + logpoller.EvmWord(seqNr), + evmtypes.Confirmations(confs), + ) + if err != nil { + return nil, err + } + + parsedLogs, err := ccipdata.ParseLogs[cciptypes.CommitStoreReport]( + logs, + c.lggr, + c.parseReport, + ) + if err != nil { + return nil, err + } + + res := make([]cciptypes.CommitStoreReportWithTxMeta, 0, len(parsedLogs)) + for _, log := range parsedLogs { + res = append(res, cciptypes.CommitStoreReportWithTxMeta{ + TxMeta: log.TxMeta, + CommitStoreReport: log.Data, + }) + } + + if len(res) > 1 { + c.lggr.Errorw("More than one report found for seqNr", "seqNr", seqNr, "commitReports", parsedLogs) + return res[:1], nil + } + return res, nil +} + +func (c *CommitStore) GetAcceptedCommitReportsGteTimestamp(ctx context.Context, ts time.Time, confs int) ([]cciptypes.CommitStoreReportWithTxMeta, error) { + latestBlock, err := c.lp.LatestBlock(ctx) + if err != nil { + return nil, err + } + + reportsQuery, err := query.Where( + c.address.String(), + logpoller.NewAddressFilter(c.address), + logpoller.NewEventSigFilter(c.reportAcceptedSig), + query.Timestamp(uint64(ts.Unix()), primitives.Gte), + logpoller.NewConfirmationsFilter(evmtypes.Confirmations(confs)), + ) + if err != nil { + return nil, err + } + + logs, err := c.lp.FilteredLogs( + ctx, + reportsQuery, + query.NewLimitAndSort(query.Limit{}, query.NewSortBySequence(query.Asc)), + "GetAcceptedCommitReportsGteTimestamp", + ) + if err != nil { + return nil, err + } + + parsedLogs, err := ccipdata.ParseLogs[cciptypes.CommitStoreReport](logs, c.lggr, c.parseReport) + if err != nil { + return nil, fmt.Errorf("parse logs: %w", err) + } + + parsedReports := make([]cciptypes.CommitStoreReportWithTxMeta, 0, len(parsedLogs)) + for _, log := range parsedLogs { + parsedReports = append(parsedReports, cciptypes.CommitStoreReportWithTxMeta{ + TxMeta: log.TxMeta.WithFinalityStatus(uint64(latestBlock.FinalizedBlockNumber)), + CommitStoreReport: log.Data, + }) + } + + return parsedReports, nil +} + +func (c *CommitStore) GetExpectedNextSequenceNumber(ctx context.Context) (uint64, error) { + return c.commitStore.GetExpectedNextSequenceNumber(&bind.CallOpts{Context: ctx}) +} + +func (c *CommitStore) GetLatestPriceEpochAndRound(ctx context.Context) (uint64, error) { + return c.commitStore.GetLatestPriceEpochAndRound(&bind.CallOpts{Context: ctx}) +} + +func (c *CommitStore) IsDestChainHealthy(context.Context) (bool, error) { + if err := c.lp.Healthy(); err != nil { + return false, nil + } + return true, nil +} + +func (c *CommitStore) IsDown(ctx context.Context) (bool, error) { + unPausedAndHealthy, err := 
c.commitStore.IsUnpausedAndARMHealthy(&bind.CallOpts{Context: ctx}) + if err != nil { + return true, err + } + return !unPausedAndHealthy, nil +} + +func (c *CommitStore) VerifyExecutionReport(ctx context.Context, report cciptypes.ExecReport) (bool, error) { + var hashes [][32]byte + for _, msg := range report.Messages { + hashes = append(hashes, msg.Hash) + } + res, err := c.commitStore.Verify(&bind.CallOpts{Context: ctx}, hashes, report.Proofs, report.ProofFlagBits) + if err != nil { + c.lggr.Errorw("Unable to call verify", "messages", report.Messages, "err", err) + return false, nil + } + // No timestamp, means failed to verify root. + if res.Cmp(big.NewInt(0)) == 0 { + c.lggr.Errorw("Root does not verify", "messages", report.Messages) + return false, nil + } + return true, nil +} + +func (c *CommitStore) RegisterFilters() error { + return logpollerutil.RegisterLpFilters(c.lp, c.filters) +} + +func NewCommitStore(lggr logger.Logger, addr common.Address, ec client.Client, lp logpoller.LogPoller) (*CommitStore, error) { + commitStore, err := commit_store_1_0_0.NewCommitStore(addr, ec) + if err != nil { + return nil, err + } + commitStoreABI := abihelpers.MustParseABI(commit_store_1_0_0.CommitStoreABI) + eventSig := abihelpers.MustGetEventID(ReportAccepted, commitStoreABI) + commitReportArgs := abihelpers.MustGetEventInputs(ReportAccepted, commitStoreABI) + filters := []logpoller.Filter{ + { + Name: logpoller.FilterName(EXEC_REPORT_ACCEPTS, addr.String()), + EventSigs: []common.Hash{eventSig}, + Addresses: []common.Address{addr}, + Retention: ccipdata.CommitExecLogsRetention, + }, + } + return &CommitStore{ + commitStore: commitStore, + address: addr, + lggr: lggr, + lp: lp, + + // Note that sourceMaxGasPrice and estimator now have explicit setters (CCIP-2493) + + filters: filters, + commitReportArgs: commitReportArgs, + reportAcceptedSig: eventSig, + // offset || priceUpdatesOffset || minSeqNum || maxSeqNum || merkleRoot + reportAcceptedMaxSeqIndex: 3, + configMu: sync.RWMutex{}, + + // The fields below are initially empty and set on ChangeConfig method + offchainConfig: cciptypes.CommitOffchainConfig{}, + gasPriceEstimator: prices.ExecGasPriceEstimator{}, + }, nil +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/commit_store_test.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/commit_store_test.go new file mode 100644 index 00000000000..31bcaf8a187 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/commit_store_test.go @@ -0,0 +1,49 @@ +package v1_0_0 + +import ( + "math/big" + "math/rand" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller/mocks" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils" + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" + "github.com/smartcontractkit/chainlink/v2/core/logger" +) + +func TestCommitReportEncoding(t *testing.T) { + ctx := testutils.Context(t) + report := cciptypes.CommitStoreReport{ + TokenPrices: []cciptypes.TokenPrice{ + { + Token: cciptypes.Address(utils.RandomAddress().String()), + Value: big.NewInt(9e18), + }, + }, + GasPrices: []cciptypes.GasPrice{ + { + DestChainSelector: rand.Uint64(), + Value: big.NewInt(2000e9), + }, + }, + MerkleRoot: [32]byte{123}, + Interval: cciptypes.CommitStoreInterval{Min: 1, Max: 10}, + } + + c, err := 
NewCommitStore(logger.TestLogger(t), utils.RandomAddress(), nil, mocks.NewLogPoller(t)) + assert.NoError(t, err) + + encodedReport, err := c.EncodeCommitReport(ctx, report) + require.NoError(t, err) + assert.Greater(t, len(encodedReport), 0) + + decodedReport, err := c.DecodeCommitReport(ctx, encodedReport) + require.NoError(t, err) + require.Equal(t, report.TokenPrices, decodedReport.TokenPrices) + require.Equal(t, report, decodedReport) +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/hasher.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/hasher.go new file mode 100644 index 00000000000..0d1b7f736f6 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/hasher.go @@ -0,0 +1,85 @@ +package v1_0_0 + +import ( + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/math" + "github.com/ethereum/go-ethereum/core/types" + + "github.com/smartcontractkit/chainlink-common/pkg/hashutil" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_onramp_1_0_0" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers" +) + +const ( + MetaDataHashPrefix = "EVM2EVMMessageEvent" +) + +var LeafDomainSeparator = [1]byte{0x00} + +type LeafHasher struct { + metaDataHash [32]byte + ctx hashutil.Hasher[[32]byte] + onRamp *evm_2_evm_onramp_1_0_0.EVM2EVMOnRamp +} + +func GetMetaDataHash[H hashutil.Hash](ctx hashutil.Hasher[H], prefix [32]byte, sourceChainSelector uint64, onRampId common.Address, destChainSelector uint64) H { + paddedOnRamp := common.BytesToHash(onRampId[:]) + return ctx.Hash(utils.ConcatBytes(prefix[:], math.U256Bytes(big.NewInt(0).SetUint64(sourceChainSelector)), math.U256Bytes(big.NewInt(0).SetUint64(destChainSelector)), paddedOnRamp[:])) +} + +func NewLeafHasher(sourceChainSelector uint64, destChainSelector uint64, onRampId common.Address, ctx hashutil.Hasher[[32]byte], onRamp *evm_2_evm_onramp_1_0_0.EVM2EVMOnRamp) *LeafHasher { + return &LeafHasher{ + metaDataHash: GetMetaDataHash(ctx, ctx.Hash([]byte(MetaDataHashPrefix)), sourceChainSelector, onRampId, destChainSelector), + ctx: ctx, + onRamp: onRamp, + } +} + +func (t *LeafHasher) HashLeaf(log types.Log) ([32]byte, error) { + message, err := t.onRamp.ParseCCIPSendRequested(log) + if err != nil { + return [32]byte{}, err + } + encodedTokens, err := abihelpers.ABIEncode( + `[ +{"components": [{"name":"token","type":"address"},{"name":"amount","type":"uint256"}], "type":"tuple[]"}]`, message.Message.TokenAmounts) + if err != nil { + return [32]byte{}, err + } + + packedValues, err := abihelpers.ABIEncode( + `[ +{"name": "leafDomainSeparator","type":"bytes1"}, +{"name": "metadataHash", "type":"bytes32"}, +{"name": "sequenceNumber", "type":"uint64"}, +{"name": "nonce", "type":"uint64"}, +{"name": "sender", "type":"address"}, +{"name": "receiver", "type":"address"}, +{"name": "dataHash", "type":"bytes32"}, +{"name": "tokenAmountsHash", "type":"bytes32"}, +{"name": "gasLimit", "type":"uint256"}, +{"name": "strict", "type":"bool"}, +{"name": "feeToken","type": "address"}, +{"name": "feeTokenAmount","type": "uint256"} +]`, + LeafDomainSeparator, + t.metaDataHash, + message.Message.SequenceNumber, + message.Message.Nonce, + message.Message.Sender, + message.Message.Receiver, + t.ctx.Hash(message.Message.Data), + t.ctx.Hash(encodedTokens), + message.Message.GasLimit, + message.Message.Strict, + message.Message.FeeToken, + 
message.Message.FeeTokenAmount, + ) + if err != nil { + return [32]byte{}, err + } + return t.ctx.Hash(packedValues), nil +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/hasher_test.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/hasher_test.go new file mode 100644 index 00000000000..b1219a27dfa --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/hasher_test.go @@ -0,0 +1,84 @@ +package v1_0_0 + +import ( + "encoding/hex" + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/chainlink-common/pkg/hashutil" + + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_onramp_1_0_0" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers" +) + +func TestHasherV1_0_0(t *testing.T) { + sourceChainSelector, destChainSelector := uint64(1), uint64(4) + onRampAddress := common.HexToAddress("0x5550000000000000000000000000000000000001") + onRampABI := abihelpers.MustParseABI(evm_2_evm_onramp_1_0_0.EVM2EVMOnRampABI) + + ramp, err := evm_2_evm_onramp_1_0_0.NewEVM2EVMOnRamp(onRampAddress, nil) + require.NoError(t, err) + hashingCtx := hashutil.NewKeccak() + hasher := NewLeafHasher(sourceChainSelector, destChainSelector, onRampAddress, hashingCtx, ramp) + + message := evm_2_evm_onramp_1_0_0.InternalEVM2EVMMessage{ + SourceChainSelector: sourceChainSelector, + Sender: common.HexToAddress("0x1110000000000000000000000000000000000001"), + Receiver: common.HexToAddress("0x2220000000000000000000000000000000000001"), + SequenceNumber: 1337, + GasLimit: big.NewInt(100), + Strict: false, + Nonce: 1337, + FeeToken: common.Address{}, + FeeTokenAmount: big.NewInt(1), + Data: []byte{}, + TokenAmounts: []evm_2_evm_onramp_1_0_0.ClientEVMTokenAmount{{Token: common.HexToAddress("0x4440000000000000000000000000000000000001"), Amount: big.NewInt(12345678900)}}, + MessageId: [32]byte{}, + } + + data, err := onRampABI.Events[CCIPSendRequestedEventName].Inputs.Pack(message) + require.NoError(t, err) + hash, err := hasher.HashLeaf(types.Log{Topics: []common.Hash{abihelpers.MustGetEventID("CCIPSendRequested", onRampABI)}, Data: data}) + require.NoError(t, err) + + // NOTE: Must match spec + require.Equal(t, "26f282c6ac8231933b1799648d01ff6cec792a33fb37408b4d135968f9168ace", hex.EncodeToString(hash[:])) + + message = evm_2_evm_onramp_1_0_0.InternalEVM2EVMMessage{ + SourceChainSelector: sourceChainSelector, + Sender: common.HexToAddress("0x1110000000000000000000000000000000000001"), + Receiver: common.HexToAddress("0x2220000000000000000000000000000000000001"), + SequenceNumber: 1337, + GasLimit: big.NewInt(100), + Strict: false, + Nonce: 1337, + FeeToken: common.Address{}, + FeeTokenAmount: big.NewInt(1e12), + Data: []byte("foo bar baz"), + TokenAmounts: []evm_2_evm_onramp_1_0_0.ClientEVMTokenAmount{ + {Token: common.HexToAddress("0x4440000000000000000000000000000000000001"), Amount: big.NewInt(12345678900)}, + {Token: common.HexToAddress("0x6660000000000000000000000000000000000001"), Amount: big.NewInt(4204242)}, + }, + MessageId: [32]byte{}, + } + + data, err = onRampABI.Events[CCIPSendRequestedEventName].Inputs.Pack(message) + require.NoError(t, err) + hash, err = hasher.HashLeaf(types.Log{Topics: []common.Hash{abihelpers.MustGetEventID("CCIPSendRequested", onRampABI)}, Data: data}) + require.NoError(t, err) + + // NOTE: Must match spec + require.Equal(t, 
"05cee92e7cb86a37b6536554828a5b21ff20ac3d4ef821ec47056f1d963313de", hex.EncodeToString(hash[:])) +} + +func TestMetaDataHash(t *testing.T) { + sourceChainSelector, destChainSelector := uint64(1), uint64(4) + onRampAddress := common.HexToAddress("0x5550000000000000000000000000000000000001") + ctx := hashutil.NewKeccak() + hash := GetMetaDataHash(ctx, ctx.Hash([]byte(MetaDataHashPrefix)), sourceChainSelector, onRampAddress, destChainSelector) + require.Equal(t, "1409948abde219f43870c3d6d1c16beabd8878eb5039a3fa765eb56e4b8ded9e", hex.EncodeToString(hash[:])) +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/offramp.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/offramp.go new file mode 100644 index 00000000000..137cbaf451d --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/offramp.go @@ -0,0 +1,689 @@ +package v1_0_0 + +import ( + "context" + "fmt" + "math/big" + "sync" + "time" + + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/rpclib" + + mapset "github.com/deckarep/golang-set/v2" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/pkg/errors" + + "github.com/smartcontractkit/chainlink-common/pkg/config" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/logpollerutil" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" + evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_offramp_1_0_0" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/router" + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers" + ccipconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/cache" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/prices" +) + +const ( + EXEC_EXECUTION_STATE_CHANGES = "Exec execution state changes" + EXEC_TOKEN_POOL_ADDED = "Token pool added" + EXEC_TOKEN_POOL_REMOVED = "Token pool removed" +) + +var ( + abiOffRamp = abihelpers.MustParseABI(evm_2_evm_offramp_1_0_0.EVM2EVMOffRampABI) + _ ccipdata.OffRampReader = &OffRamp{} + ExecutionStateChangedEvent = abihelpers.MustGetEventID("ExecutionStateChanged", abiOffRamp) + PoolAddedEvent = abihelpers.MustGetEventID("PoolAdded", abiOffRamp) + PoolRemovedEvent = abihelpers.MustGetEventID("PoolRemoved", abiOffRamp) + ExecutionStateChangedSeqNrIndex = 1 +) + +var offRamp_poolAddedPoolRemovedEvents = []common.Hash{PoolAddedEvent, PoolRemovedEvent} + +type ExecOnchainConfig evm_2_evm_offramp_1_0_0.EVM2EVMOffRampDynamicConfig + +func (d ExecOnchainConfig) AbiString() string { + return ` + [ + { + "components": [ + {"name": "permissionLessExecutionThresholdSeconds", "type": "uint32"}, + {"name": 
"router", "type": "address"}, + {"name": "priceRegistry", "type": "address"}, + {"name": "maxTokensLength", "type": "uint16"}, + {"name": "maxDataSize", "type": "uint32"} + ], + "type": "tuple" + } + ]` +} + +func (d ExecOnchainConfig) Validate() error { + if d.PermissionLessExecutionThresholdSeconds == 0 { + return errors.New("must set PermissionLessExecutionThresholdSeconds") + } + if d.Router == (common.Address{}) { + return errors.New("must set Router address") + } + if d.PriceRegistry == (common.Address{}) { + return errors.New("must set PriceRegistry address") + } + if d.MaxTokensLength == 0 { + return errors.New("must set MaxTokensLength") + } + if d.MaxDataSize == 0 { + return errors.New("must set MaxDataSize") + } + return nil +} + +// ExecOffchainConfig is the configuration for nodes executing committed CCIP messages (v1.0–v1.2). +// It comes from the OffchainConfig field of the corresponding OCR2 plugin configuration. +// NOTE: do not change the JSON format of this struct without consulting with the RDD people first. +type ExecOffchainConfig struct { + // SourceFinalityDepth indicates how many confirmations a transaction should get on the source chain event before we consider it finalized. + SourceFinalityDepth uint32 + // See [ccipdata.ExecOffchainConfig.DestOptimisticConfirmations] + DestOptimisticConfirmations uint32 + // DestFinalityDepth indicates how many confirmations a transaction should get on the destination chain event before we consider it finalized. + DestFinalityDepth uint32 + // See [ccipdata.ExecOffchainConfig.BatchGasLimit] + BatchGasLimit uint32 + // See [ccipdata.ExecOffchainConfig.RelativeBoostPerWaitHour] + RelativeBoostPerWaitHour float64 + // See [ccipdata.ExecOffchainConfig.InflightCacheExpiry] + InflightCacheExpiry config.Duration + // See [ccipdata.ExecOffchainConfig.RootSnoozeTime] + RootSnoozeTime config.Duration + // See [ccipdata.ExecOffchainConfig.BatchingStrategyID] + BatchingStrategyID uint32 + // See [ccipdata.ExecOffchainConfig.MessageVisibilityInterval] + MessageVisibilityInterval config.Duration +} + +func (c ExecOffchainConfig) Validate() error { + if c.SourceFinalityDepth == 0 { + return errors.New("must set SourceFinalityDepth") + } + if c.DestFinalityDepth == 0 { + return errors.New("must set DestFinalityDepth") + } + if c.DestOptimisticConfirmations == 0 { + return errors.New("must set DestOptimisticConfirmations") + } + if c.BatchGasLimit == 0 { + return errors.New("must set BatchGasLimit") + } + if c.RelativeBoostPerWaitHour == 0 { + return errors.New("must set RelativeBoostPerWaitHour") + } + if c.InflightCacheExpiry.Duration() == 0 { + return errors.New("must set InflightCacheExpiry") + } + if c.RootSnoozeTime.Duration() == 0 { + return errors.New("must set RootSnoozeTime") + } + + return nil +} + +type OffRamp struct { + offRampV100 evm_2_evm_offramp_1_0_0.EVM2EVMOffRampInterface + addr common.Address + lp logpoller.LogPoller + Logger logger.Logger + Client client.Client + evmBatchCaller rpclib.EvmBatchCaller + filters []logpoller.Filter + Estimator gas.EvmFeeEstimator + DestMaxGasPrice *big.Int + ExecutionReportArgs abi.Arguments + eventIndex int + eventSig common.Hash + cachedOffRampTokens cache.AutoSync[cciptypes.OffRampTokens] + sourceToDestTokensCache sync.Map + + // Dynamic config + // configMu guards all the dynamic config fields. 
+ configMu sync.RWMutex + gasPriceEstimator prices.GasPriceEstimatorExec + offchainConfig cciptypes.ExecOffchainConfig + onchainConfig cciptypes.ExecOnchainConfig +} + +func (o *OffRamp) GetStaticConfig(ctx context.Context) (cciptypes.OffRampStaticConfig, error) { + if o.offRampV100 == nil { + return cciptypes.OffRampStaticConfig{}, fmt.Errorf("offramp not initialized") + } + c, err := o.offRampV100.GetStaticConfig(&bind.CallOpts{Context: ctx}) + if err != nil { + return cciptypes.OffRampStaticConfig{}, fmt.Errorf("error while retrieving offramp config: %w", err) + } + return cciptypes.OffRampStaticConfig{ + CommitStore: cciptypes.Address(c.CommitStore.String()), + ChainSelector: c.ChainSelector, + SourceChainSelector: c.SourceChainSelector, + OnRamp: cciptypes.Address(c.OnRamp.String()), + PrevOffRamp: cciptypes.Address(c.PrevOffRamp.String()), + ArmProxy: cciptypes.Address(c.ArmProxy.String()), + }, nil +} + +func (o *OffRamp) GetExecutionState(ctx context.Context, sequenceNumber uint64) (uint8, error) { + return o.offRampV100.GetExecutionState(&bind.CallOpts{Context: ctx}, sequenceNumber) +} + +func (o *OffRamp) GetSenderNonce(ctx context.Context, sender cciptypes.Address) (uint64, error) { + evmAddr, err := ccipcalc.GenericAddrToEvm(sender) + if err != nil { + return 0, err + } + return o.offRampV100.GetSenderNonce(&bind.CallOpts{Context: ctx}, evmAddr) +} + +func (o *OffRamp) ListSenderNonces(ctx context.Context, senders []cciptypes.Address) (map[cciptypes.Address]uint64, error) { + if len(senders) == 0 { + return make(map[cciptypes.Address]uint64), nil + } + + evmSenders, err := ccipcalc.GenericAddrsToEvm(senders...) + if err != nil { + return nil, errors.Wrap(err, "failed to convert generic addresses to evm addresses") + } + + evmCalls := make([]rpclib.EvmCall, 0, len(evmSenders)) + for _, evmAddr := range evmSenders { + evmCalls = append(evmCalls, rpclib.NewEvmCall( + abiOffRamp, + "getSenderNonce", + o.addr, + evmAddr, + )) + } + + results, err := o.evmBatchCaller.BatchCall(ctx, 0, evmCalls) + if err != nil { + o.Logger.Errorw("error while batch fetching sender nonces", "err", err, "senders", evmSenders) + return nil, err + } + + nonces, err := rpclib.ParseOutputs[uint64](results, func(d rpclib.DataAndErr) (uint64, error) { + return rpclib.ParseOutput[uint64](d, 0) + }) + if err != nil { + o.Logger.Errorw("error while parsing sender nonces", "err", err, "senders", evmSenders) + return nil, err + } + + if len(senders) != len(nonces) { + o.Logger.Errorw("unexpected number of nonces returned", "senders", evmSenders, "nonces", nonces) + return nil, errors.New("unexpected number of nonces returned") + } + + senderNonce := make(map[cciptypes.Address]uint64, len(senders)) + for i, sender := range senders { + senderNonce[sender] = nonces[i] + } + return senderNonce, nil +} + +func (o *OffRamp) CurrentRateLimiterState(ctx context.Context) (cciptypes.TokenBucketRateLimit, error) { + state, err := o.offRampV100.CurrentRateLimiterState(&bind.CallOpts{Context: ctx}) + if err != nil { + return cciptypes.TokenBucketRateLimit{}, err + } + return cciptypes.TokenBucketRateLimit{ + Tokens: state.Tokens, + LastUpdated: state.LastUpdated, + IsEnabled: state.IsEnabled, + Capacity: state.Capacity, + Rate: state.Rate, + }, nil +} + +func (o *OffRamp) getDestinationTokensFromSourceTokens(ctx context.Context, tokenAddresses []cciptypes.Address) ([]cciptypes.Address, error) { + destTokens := make([]cciptypes.Address, len(tokenAddresses)) + found := make(map[cciptypes.Address]bool) + + for i, tokenAddress := 
range tokenAddresses { + if v, exists := o.sourceToDestTokensCache.Load(tokenAddress); exists { + if destToken, isAddr := v.(cciptypes.Address); isAddr { + destTokens[i] = destToken + found[tokenAddress] = true + } else { + o.Logger.Errorf("source to dest cache contains invalid type %T", v) + } + } + } + + if len(found) == len(tokenAddresses) { + return destTokens, nil + } + + evmAddrs, err := ccipcalc.GenericAddrsToEvm(tokenAddresses...) + if err != nil { + return nil, err + } + + evmCalls := make([]rpclib.EvmCall, 0, len(tokenAddresses)) + for i, sourceTk := range tokenAddresses { + if !found[sourceTk] { + evmCalls = append(evmCalls, rpclib.NewEvmCall(abiOffRamp, "getDestinationToken", o.addr, evmAddrs[i])) + } + } + + results, err := o.evmBatchCaller.BatchCall(ctx, 0, evmCalls) + if err != nil { + return nil, fmt.Errorf("batch call limit: %w", err) + } + + destTokensFromRpc, err := rpclib.ParseOutputs[common.Address](results, func(d rpclib.DataAndErr) (common.Address, error) { + return rpclib.ParseOutput[common.Address](d, 0) + }) + if err != nil { + return nil, fmt.Errorf("parse outputs: %w", err) + } + + j := 0 + for i, sourceToken := range tokenAddresses { + if !found[sourceToken] { + destTokens[i] = cciptypes.Address(destTokensFromRpc[j].String()) + o.sourceToDestTokensCache.Store(sourceToken, destTokens[i]) + j++ + } + } + + seenDestTokens := mapset.NewSet[cciptypes.Address]() + for _, destToken := range destTokens { + if seenDestTokens.Contains(destToken) { + return nil, fmt.Errorf("offRamp misconfig, destination token %s already exists", destToken) + } + seenDestTokens.Add(destToken) + } + + return destTokens, nil +} + +func (o *OffRamp) GetSourceToDestTokensMapping(ctx context.Context) (map[cciptypes.Address]cciptypes.Address, error) { + tokens, err := o.GetTokens(ctx) + if err != nil { + return nil, err + } + + destTokens, err := o.getDestinationTokensFromSourceTokens(ctx, tokens.SourceTokens) + if err != nil { + return nil, fmt.Errorf("get destination tokens from source tokens: %w", err) + } + + srcToDstTokenMapping := make(map[cciptypes.Address]cciptypes.Address, len(tokens.SourceTokens)) + for i, sourceToken := range tokens.SourceTokens { + srcToDstTokenMapping[sourceToken] = destTokens[i] + } + return srcToDstTokenMapping, nil +} + +func (o *OffRamp) GetTokens(ctx context.Context) (cciptypes.OffRampTokens, error) { + return o.cachedOffRampTokens.Get(ctx, func(ctx context.Context) (cciptypes.OffRampTokens, error) { + destTokens, err := o.offRampV100.GetDestinationTokens(&bind.CallOpts{Context: ctx}) + if err != nil { + return cciptypes.OffRampTokens{}, fmt.Errorf("get destination tokens: %w", err) + } + sourceTokens, err := o.offRampV100.GetSupportedTokens(&bind.CallOpts{Context: ctx}) + if err != nil { + return cciptypes.OffRampTokens{}, err + } + + return cciptypes.OffRampTokens{ + DestinationTokens: ccipcalc.EvmAddrsToGeneric(destTokens...), + SourceTokens: ccipcalc.EvmAddrsToGeneric(sourceTokens...), + }, nil + }) +} + +func (o *OffRamp) GetRouter(ctx context.Context) (cciptypes.Address, error) { + dynamicConfig, err := o.offRampV100.GetDynamicConfig(&bind.CallOpts{Context: ctx}) + if err != nil { + return "", err + } + return ccipcalc.EvmAddrToGeneric(dynamicConfig.Router), nil +} + +func (o *OffRamp) OffchainConfig(ctx context.Context) (cciptypes.ExecOffchainConfig, error) { + o.configMu.RLock() + defer o.configMu.RUnlock() + return o.offchainConfig, nil +} + +func (o *OffRamp) OnchainConfig(ctx context.Context) (cciptypes.ExecOnchainConfig, error) { + 
	o.configMu.RLock()
+	defer o.configMu.RUnlock()
+	return o.onchainConfig, nil
+}
+
+func (o *OffRamp) GasPriceEstimator(ctx context.Context) (cciptypes.GasPriceEstimatorExec, error) {
+	o.configMu.RLock()
+	defer o.configMu.RUnlock()
+	return o.gasPriceEstimator, nil
+}
+
+func (o *OffRamp) Address(ctx context.Context) (cciptypes.Address, error) {
+	return cciptypes.Address(o.addr.String()), nil
+}
+
+func (o *OffRamp) UpdateDynamicConfig(onchainConfig cciptypes.ExecOnchainConfig, offchainConfig cciptypes.ExecOffchainConfig, gasPriceEstimator prices.GasPriceEstimatorExec) {
+	o.configMu.Lock()
+	o.onchainConfig = onchainConfig
+	o.offchainConfig = offchainConfig
+	o.gasPriceEstimator = gasPriceEstimator
+	o.configMu.Unlock()
+}
+
+func (o *OffRamp) ChangeConfig(ctx context.Context, onchainConfigBytes []byte, offchainConfigBytes []byte) (cciptypes.Address, cciptypes.Address, error) {
+	onchainConfigParsed, err := abihelpers.DecodeAbiStruct[ExecOnchainConfig](onchainConfigBytes)
+	if err != nil {
+		return "", "", err
+	}
+
+	offchainConfigParsed, err := ccipconfig.DecodeOffchainConfig[ExecOffchainConfig](offchainConfigBytes)
+	if err != nil {
+		return "", "", err
+	}
+	destRouter, err := router.NewRouter(onchainConfigParsed.Router, o.Client)
+	if err != nil {
+		return "", "", err
+	}
+	destWrappedNative, err := destRouter.GetWrappedNative(nil)
+	if err != nil {
+		return "", "", err
+	}
+
+	offchainConfig := cciptypes.ExecOffchainConfig{
+		DestOptimisticConfirmations: offchainConfigParsed.DestOptimisticConfirmations,
+		BatchGasLimit:               offchainConfigParsed.BatchGasLimit,
+		RelativeBoostPerWaitHour:    offchainConfigParsed.RelativeBoostPerWaitHour,
+		InflightCacheExpiry:         offchainConfigParsed.InflightCacheExpiry,
+		RootSnoozeTime:              offchainConfigParsed.RootSnoozeTime,
+		MessageVisibilityInterval:   offchainConfigParsed.MessageVisibilityInterval,
+		BatchingStrategyID:          offchainConfigParsed.BatchingStrategyID,
+	}
+	onchainConfig := cciptypes.ExecOnchainConfig{
+		PermissionLessExecutionThresholdSeconds: time.Second * time.Duration(onchainConfigParsed.PermissionLessExecutionThresholdSeconds),
+		Router:                                  cciptypes.Address(onchainConfigParsed.Router.String()),
+	}
+	gasPriceEstimator := prices.NewExecGasPriceEstimator(o.Estimator, o.DestMaxGasPrice, 0)
+
+	o.UpdateDynamicConfig(onchainConfig, offchainConfig, gasPriceEstimator)
+
+	o.Logger.Infow("Starting exec plugin",
+		"onchainConfig", onchainConfigParsed,
+		"offchainConfig", offchainConfigParsed)
+	return cciptypes.Address(onchainConfigParsed.PriceRegistry.String()),
+		cciptypes.Address(destWrappedNative.String()), nil
+}
+
+func (o *OffRamp) Close() error {
+	return logpollerutil.UnregisterLpFilters(o.lp, o.filters)
+}
+
+func (o *OffRamp) GetExecutionStateChangesBetweenSeqNums(ctx context.Context, seqNumMin, seqNumMax uint64, confs int) ([]cciptypes.ExecutionStateChangedWithTxMeta, error) {
+	latestBlock, err := o.lp.LatestBlock(ctx)
+	if err != nil {
+		return nil, fmt.Errorf("get lp latest block: %w", err)
+	}
+
+	logs, err := o.lp.IndexedLogsTopicRange(
+		ctx,
+		o.eventSig,
+		o.addr,
+		o.eventIndex,
+		logpoller.EvmWord(seqNumMin),
+		logpoller.EvmWord(seqNumMax),
+		evmtypes.Confirmations(confs),
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	parsedLogs, err := ccipdata.ParseLogs[cciptypes.ExecutionStateChanged](
+		logs,
+		o.Logger,
+		func(log types.Log) (*cciptypes.ExecutionStateChanged, error) {
+			sc, err1 := o.offRampV100.ParseExecutionStateChanged(log)
+			if err1 != nil {
+				return nil, err1
+			}
+
+			return &cciptypes.ExecutionStateChanged{
SequenceNumber: sc.SequenceNumber, + }, nil + }, + ) + if err != nil { + return nil, fmt.Errorf("parse logs: %w", err) + } + + res := make([]cciptypes.ExecutionStateChangedWithTxMeta, 0, len(parsedLogs)) + for _, log := range parsedLogs { + res = append(res, cciptypes.ExecutionStateChangedWithTxMeta{ + TxMeta: log.TxMeta.WithFinalityStatus(uint64(latestBlock.FinalizedBlockNumber)), + ExecutionStateChanged: log.Data, + }) + } + return res, nil +} + +func encodeExecutionReport(args abi.Arguments, report cciptypes.ExecReport) ([]byte, error) { + var msgs []evm_2_evm_offramp_1_0_0.InternalEVM2EVMMessage + for _, msg := range report.Messages { + var ta []evm_2_evm_offramp_1_0_0.ClientEVMTokenAmount + for _, tokenAndAmount := range msg.TokenAmounts { + evmTokenAddr, err := ccipcalc.GenericAddrToEvm(tokenAndAmount.Token) + if err != nil { + return nil, err + } + + ta = append(ta, evm_2_evm_offramp_1_0_0.ClientEVMTokenAmount{ + Token: evmTokenAddr, + Amount: tokenAndAmount.Amount, + }) + } + + senderEvmAddr, err := ccipcalc.GenericAddrToEvm(msg.Sender) + if err != nil { + return nil, fmt.Errorf("msg sender is not evm addr: %w", err) + } + + receiverEvmAddr, err := ccipcalc.GenericAddrToEvm(msg.Receiver) + if err != nil { + return nil, fmt.Errorf("msg receiver is not evm addr: %w", err) + } + + feeTokenEvmAddr, err := ccipcalc.GenericAddrToEvm(msg.FeeToken) + if err != nil { + return nil, fmt.Errorf("fee token is not evm addr: %w", err) + } + + msgs = append(msgs, evm_2_evm_offramp_1_0_0.InternalEVM2EVMMessage{ + SourceChainSelector: msg.SourceChainSelector, + Sender: senderEvmAddr, + Receiver: receiverEvmAddr, + SequenceNumber: msg.SequenceNumber, + GasLimit: msg.GasLimit, + Strict: msg.Strict, + Nonce: msg.Nonce, + FeeToken: feeTokenEvmAddr, + FeeTokenAmount: msg.FeeTokenAmount, + Data: msg.Data, + TokenAmounts: ta, + MessageId: msg.MessageID, + }) + } + + rep := evm_2_evm_offramp_1_0_0.InternalExecutionReport{ + Messages: msgs, + OffchainTokenData: report.OffchainTokenData, + Proofs: report.Proofs, + ProofFlagBits: report.ProofFlagBits, + } + return args.PackValues([]interface{}{&rep}) +} + +func (o *OffRamp) EncodeExecutionReport(ctx context.Context, report cciptypes.ExecReport) ([]byte, error) { + return encodeExecutionReport(o.ExecutionReportArgs, report) +} + +func DecodeExecReport(ctx context.Context, args abi.Arguments, report []byte) (cciptypes.ExecReport, error) { + unpacked, err := args.Unpack(report) + if err != nil { + return cciptypes.ExecReport{}, err + } + if len(unpacked) == 0 { + return cciptypes.ExecReport{}, errors.New("assumptionViolation: expected at least one element") + } + + erStruct, ok := unpacked[0].(struct { + Messages []struct { + SourceChainSelector uint64 `json:"sourceChainSelector"` + SequenceNumber uint64 `json:"sequenceNumber"` + FeeTokenAmount *big.Int `json:"feeTokenAmount"` + Sender common.Address `json:"sender"` + Nonce uint64 `json:"nonce"` + GasLimit *big.Int `json:"gasLimit"` + Strict bool `json:"strict"` + Receiver common.Address `json:"receiver"` + Data []uint8 `json:"data"` + TokenAmounts []struct { + Token common.Address `json:"token"` + Amount *big.Int `json:"amount"` + } `json:"tokenAmounts"` + FeeToken common.Address `json:"feeToken"` + MessageId [32]uint8 `json:"messageId"` + } `json:"messages"` + OffchainTokenData [][][]uint8 `json:"offchainTokenData"` + Proofs [][32]uint8 `json:"proofs"` + ProofFlagBits *big.Int `json:"proofFlagBits"` + }) + + if !ok { + return cciptypes.ExecReport{}, fmt.Errorf("got %T", unpacked[0]) + } + messages := 
make([]cciptypes.EVM2EVMMessage, 0, len(erStruct.Messages)) + for _, msg := range erStruct.Messages { + var tokensAndAmounts []cciptypes.TokenAmount + for _, tokenAndAmount := range msg.TokenAmounts { + tokensAndAmounts = append(tokensAndAmounts, cciptypes.TokenAmount{ + Token: cciptypes.Address(tokenAndAmount.Token.String()), + Amount: tokenAndAmount.Amount, + }) + } + messages = append(messages, cciptypes.EVM2EVMMessage{ + SequenceNumber: msg.SequenceNumber, + GasLimit: msg.GasLimit, + Nonce: msg.Nonce, + MessageID: msg.MessageId, + SourceChainSelector: msg.SourceChainSelector, + Sender: cciptypes.Address(msg.Sender.String()), + Receiver: cciptypes.Address(msg.Receiver.String()), + Strict: msg.Strict, + FeeToken: cciptypes.Address(msg.FeeToken.String()), + FeeTokenAmount: msg.FeeTokenAmount, + Data: msg.Data, + TokenAmounts: tokensAndAmounts, + // TODO: Not needed for plugins, but should be recomputed for consistency. + // Requires the offramp knowing about onramp version + Hash: [32]byte{}, + }) + } + + // Unpack will populate with big.Int{false, } for 0 values, + // which is different from the expected big.NewInt(0). Rebuild to the expected value for this case. + return cciptypes.ExecReport{ + Messages: messages, + OffchainTokenData: erStruct.OffchainTokenData, + Proofs: erStruct.Proofs, + ProofFlagBits: new(big.Int).SetBytes(erStruct.ProofFlagBits.Bytes()), + }, nil +} + +func (o *OffRamp) DecodeExecutionReport(ctx context.Context, report []byte) (cciptypes.ExecReport, error) { + return DecodeExecReport(ctx, o.ExecutionReportArgs, report) +} + +func (o *OffRamp) RegisterFilters() error { + return logpollerutil.RegisterLpFilters(o.lp, o.filters) +} + +func NewOffRamp(lggr logger.Logger, addr common.Address, ec client.Client, lp logpoller.LogPoller, estimator gas.EvmFeeEstimator, destMaxGasPrice *big.Int) (*OffRamp, error) { + offRamp, err := evm_2_evm_offramp_1_0_0.NewEVM2EVMOffRamp(addr, ec) + if err != nil { + return nil, err + } + + executionStateChangedSequenceNumberIndex := 1 + executionReportArgs := abihelpers.MustGetMethodInputs("manuallyExecute", abiOffRamp)[:1] + filters := []logpoller.Filter{ + { + Name: logpoller.FilterName(EXEC_EXECUTION_STATE_CHANGES, addr.String()), + EventSigs: []common.Hash{ExecutionStateChangedEvent}, + Addresses: []common.Address{addr}, + Retention: ccipdata.CommitExecLogsRetention, + }, + { + Name: logpoller.FilterName(EXEC_TOKEN_POOL_ADDED, addr.String()), + EventSigs: []common.Hash{PoolAddedEvent}, + Addresses: []common.Address{addr}, + Retention: ccipdata.CacheEvictionLogsRetention, + }, + { + Name: logpoller.FilterName(EXEC_TOKEN_POOL_REMOVED, addr.String()), + EventSigs: []common.Hash{PoolRemovedEvent}, + Addresses: []common.Address{addr}, + Retention: ccipdata.CacheEvictionLogsRetention, + }, + } + + return &OffRamp{ + offRampV100: offRamp, + Client: ec, + addr: addr, + Logger: lggr, + lp: lp, + filters: filters, + Estimator: estimator, + DestMaxGasPrice: destMaxGasPrice, + ExecutionReportArgs: executionReportArgs, + eventSig: ExecutionStateChangedEvent, + eventIndex: executionStateChangedSequenceNumberIndex, + configMu: sync.RWMutex{}, + evmBatchCaller: rpclib.NewDynamicLimitedBatchCaller( + lggr, + ec, + rpclib.DefaultRpcBatchSizeLimit, + rpclib.DefaultRpcBatchBackOffMultiplier, + rpclib.DefaultMaxParallelRpcCalls, + ), + cachedOffRampTokens: cache.NewLogpollerEventsBased[cciptypes.OffRampTokens]( + lp, + offRamp_poolAddedPoolRemovedEvents, + offRamp.Address(), + ), + // values set on the fly after ChangeConfig is called + gasPriceEstimator: 
prices.ExecGasPriceEstimator{}, + offchainConfig: cciptypes.ExecOffchainConfig{}, + onchainConfig: cciptypes.ExecOnchainConfig{}, + }, nil +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/offramp_reader_test.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/offramp_reader_test.go new file mode 100644 index 00000000000..d834b792ce4 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/offramp_reader_test.go @@ -0,0 +1,38 @@ +package v1_0_0_test + +import ( + "math/big" + "testing" + + "github.com/stretchr/testify/require" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + lpmocks "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller/mocks" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils" + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0" +) + +func TestExecutionReportEncodingV100(t *testing.T) { + // Note could consider some fancier testing here (fuzz/property) + // but I think that would essentially be testing geth's abi library + // as our encode/decode is a thin wrapper around that. + report := cciptypes.ExecReport{ + Messages: []cciptypes.EVM2EVMMessage{}, + OffchainTokenData: [][][]byte{{}}, + Proofs: [][32]byte{testutils.Random32Byte()}, + ProofFlagBits: big.NewInt(133), + } + + offRamp, err := v1_0_0.NewOffRamp(logger.TestLogger(t), utils.RandomAddress(), nil, lpmocks.NewLogPoller(t), nil, nil) + require.NoError(t, err) + + ctx := testutils.Context(t) + encodeExecutionReport, err := offRamp.EncodeExecutionReport(ctx, report) + require.NoError(t, err) + decodeCommitReport, err := offRamp.DecodeExecutionReport(ctx, encodeExecutionReport) + require.NoError(t, err) + require.Equal(t, report.Proofs, decodeCommitReport.Proofs) + require.Equal(t, report, decodeCommitReport) +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/offramp_reader_unit_test.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/offramp_reader_unit_test.go new file mode 100644 index 00000000000..f8b1dc4e615 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/offramp_reader_unit_test.go @@ -0,0 +1,231 @@ +package v1_0_0 + +import ( + "fmt" + "math/rand" + "slices" + "testing" + + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/rpclib" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/rpclib/rpclibmocks" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + + evmclimocks "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client/mocks" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller/mocks" + evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_offramp_1_0_0" + mock_contracts "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/mocks/v1_0_0" + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" + 
"github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/cache" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc" +) + +func TestOffRampGetDestinationTokensFromSourceTokens(t *testing.T) { + ctx := testutils.Context(t) + const numSrcTokens = 20 + + testCases := []struct { + name string + outputChangeFn func(outputs []rpclib.DataAndErr) []rpclib.DataAndErr + expErr bool + }{ + { + name: "happy path", + outputChangeFn: func(outputs []rpclib.DataAndErr) []rpclib.DataAndErr { return outputs }, + expErr: false, + }, + { + name: "rpc error", + outputChangeFn: func(outputs []rpclib.DataAndErr) []rpclib.DataAndErr { + outputs[2].Err = fmt.Errorf("some error") + return outputs + }, + expErr: true, + }, + { + name: "unexpected outputs length should be fine if the type is correct", + outputChangeFn: func(outputs []rpclib.DataAndErr) []rpclib.DataAndErr { + outputs[0].Outputs = append(outputs[0].Outputs, "unexpected", 123) + return outputs + }, + expErr: false, + }, + { + name: "different compatible type", + outputChangeFn: func(outputs []rpclib.DataAndErr) []rpclib.DataAndErr { + outputs[0].Outputs = []any{outputs[0].Outputs[0].(common.Address)} + return outputs + }, + expErr: false, + }, + { + name: "different incompatible type", + outputChangeFn: func(outputs []rpclib.DataAndErr) []rpclib.DataAndErr { + outputs[0].Outputs = []any{outputs[0].Outputs[0].(common.Address).Bytes()} + return outputs + }, + expErr: true, + }, + } + + lp := mocks.NewLogPoller(t) + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + batchCaller := rpclibmocks.NewEvmBatchCaller(t) + o := &OffRamp{evmBatchCaller: batchCaller, lp: lp} + srcTks, dstTks, outputs := generateTokensAndOutputs(numSrcTokens) + outputs = tc.outputChangeFn(outputs) + batchCaller.On("BatchCall", mock.Anything, mock.Anything, mock.Anything).Return(outputs, nil) + genericAddrs := ccipcalc.EvmAddrsToGeneric(srcTks...) + actualDstTokens, err := o.getDestinationTokensFromSourceTokens(ctx, genericAddrs) + + if tc.expErr { + assert.Error(t, err) + return + } + + assert.NoError(t, err) + assert.Equal(t, ccipcalc.EvmAddrsToGeneric(dstTks...), actualDstTokens) + }) + } +} + +func TestCachedOffRampTokens(t *testing.T) { + // Test data. + srcTks, dstTks, _ := generateTokensAndOutputs(3) + + // Mock contract wrapper. + mockOffRamp := mock_contracts.NewEVM2EVMOffRampInterface(t) + mockOffRamp.On("GetDestinationTokens", mock.Anything).Return(dstTks, nil) + mockOffRamp.On("GetSupportedTokens", mock.Anything).Return(srcTks, nil) + mockOffRamp.On("Address").Return(utils.RandomAddress()) + + lp := mocks.NewLogPoller(t) + lp.On("LatestBlock", mock.Anything).Return(logpoller.LogPollerBlock{BlockNumber: rand.Int63()}, nil) + + offRamp := OffRamp{ + offRampV100: mockOffRamp, + lp: lp, + Logger: logger.TestLogger(t), + Client: evmclimocks.NewClient(t), + evmBatchCaller: rpclibmocks.NewEvmBatchCaller(t), + cachedOffRampTokens: cache.NewLogpollerEventsBased[cciptypes.OffRampTokens]( + lp, + offRamp_poolAddedPoolRemovedEvents, + mockOffRamp.Address(), + ), + } + + ctx := testutils.Context(t) + tokens, err := offRamp.GetTokens(ctx) + require.NoError(t, err) + + // Verify data is properly loaded in the cache. 
+ expectedPools := make(map[cciptypes.Address]cciptypes.Address) + for i := range dstTks { + expectedPools[cciptypes.Address(dstTks[i].String())] = cciptypes.Address(dstTks[i].String()) + } + require.Equal(t, cciptypes.OffRampTokens{ + DestinationTokens: ccipcalc.EvmAddrsToGeneric(dstTks...), + SourceTokens: ccipcalc.EvmAddrsToGeneric(srcTks...), + }, tokens) +} + +func generateTokensAndOutputs(nbTokens uint) ([]common.Address, []common.Address, []rpclib.DataAndErr) { + srcTks := make([]common.Address, nbTokens) + dstTks := make([]common.Address, nbTokens) + outputs := make([]rpclib.DataAndErr, nbTokens) + for i := range srcTks { + srcTks[i] = utils.RandomAddress() + dstTks[i] = utils.RandomAddress() + outputs[i] = rpclib.DataAndErr{ + Outputs: []any{dstTks[i]}, Err: nil, + } + } + return srcTks, dstTks, outputs +} + +func Test_LogsAreProperlyMarkedAsFinalized(t *testing.T) { + minSeqNr := uint64(10) + maxSeqNr := uint64(14) + inputLogs := []logpoller.Log{ + CreateExecutionStateChangeEventLog(t, 10, 2, utils.RandomBytes32()), + CreateExecutionStateChangeEventLog(t, 11, 3, utils.RandomBytes32()), + CreateExecutionStateChangeEventLog(t, 12, 5, utils.RandomBytes32()), + CreateExecutionStateChangeEventLog(t, 14, 7, utils.RandomBytes32()), + } + + tests := []struct { + name string + lastFinalizedBlock uint64 + expectedFinalizedSequenceNr []uint64 + }{ + { + "all logs are finalized", + 10, + []uint64{10, 11, 12, 14}, + }, + { + "some logs are finalized", + 5, + []uint64{10, 11, 12}, + }, + { + "no logs are finalized", + 1, + []uint64{}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + offrampAddress := utils.RandomAddress() + + lp := mocks.NewLogPoller(t) + lp.On("LatestBlock", mock.Anything). + Return(logpoller.LogPollerBlock{FinalizedBlockNumber: int64(tt.lastFinalizedBlock)}, nil) + lp.On("IndexedLogsTopicRange", mock.Anything, ExecutionStateChangedEvent, offrampAddress, 1, logpoller.EvmWord(minSeqNr), logpoller.EvmWord(maxSeqNr), evmtypes.Confirmations(0)). 
+ Return(inputLogs, nil) + + offRamp, err := NewOffRamp(logger.TestLogger(t), offrampAddress, evmclimocks.NewClient(t), lp, nil, nil) + require.NoError(t, err) + logs, err := offRamp.GetExecutionStateChangesBetweenSeqNums(testutils.Context(t), minSeqNr, maxSeqNr, 0) + require.NoError(t, err) + assert.Len(t, logs, len(inputLogs)) + + for _, log := range logs { + assert.Equal(t, slices.Contains(tt.expectedFinalizedSequenceNr, log.SequenceNumber), log.IsFinalized()) + } + }) + } +} + +func TestGetRouter(t *testing.T) { + routerAddr := utils.RandomAddress() + + mockOffRamp := mock_contracts.NewEVM2EVMOffRampInterface(t) + mockOffRamp.On("GetDynamicConfig", mock.Anything).Return(evm_2_evm_offramp_1_0_0.EVM2EVMOffRampDynamicConfig{ + Router: routerAddr, + }, nil) + + offRamp := OffRamp{ + offRampV100: mockOffRamp, + } + + ctx := testutils.Context(t) + gotRouterAddr, err := offRamp.GetRouter(ctx) + require.NoError(t, err) + + gotRouterEvmAddr, err := ccipcalc.GenericAddrToEvm(gotRouterAddr) + require.NoError(t, err) + assert.Equal(t, routerAddr, gotRouterEvmAddr) +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/offramp_test.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/offramp_test.go new file mode 100644 index 00000000000..44fb6ca0630 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/offramp_test.go @@ -0,0 +1,232 @@ +package v1_0_0 + +import ( + "encoding/json" + "testing" + "time" + + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/rpclib" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/rpclib/rpclibmocks" + + "github.com/pkg/errors" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/chainlink-common/pkg/config" + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils" + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" + "github.com/smartcontractkit/chainlink/v2/core/logger" + ccipconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config" +) + +func TestExecOffchainConfig100_Encoding(t *testing.T) { + tests := []struct { + name string + want ExecOffchainConfig + expectErr bool + }{ + { + name: "encodes and decodes config with all fields set", + want: ExecOffchainConfig{ + SourceFinalityDepth: 3, + DestOptimisticConfirmations: 6, + DestFinalityDepth: 3, + BatchGasLimit: 5_000_000, + RelativeBoostPerWaitHour: 0.07, + InflightCacheExpiry: *config.MustNewDuration(64 * time.Second), + RootSnoozeTime: *config.MustNewDuration(128 * time.Minute), + MessageVisibilityInterval: *config.MustNewDuration(6 * time.Hour), + }, + }, + { + name: "fails decoding when all fields present but with 0 values", + want: ExecOffchainConfig{ + SourceFinalityDepth: 0, + DestFinalityDepth: 0, + DestOptimisticConfirmations: 0, + BatchGasLimit: 0, + RelativeBoostPerWaitHour: 0, + InflightCacheExpiry: *config.MustNewDuration(0), + RootSnoozeTime: *config.MustNewDuration(0), + MessageVisibilityInterval: *config.MustNewDuration(0), + }, + expectErr: true, + }, + { + name: "fails decoding when all fields are missing", + want: ExecOffchainConfig{}, + expectErr: true, + }, + { + name: "fails decoding when some fields are missing", + want: ExecOffchainConfig{ + SourceFinalityDepth: 99999999, + InflightCacheExpiry: *config.MustNewDuration(64 * time.Second), + }, + expectErr: true, + }, + } + for _, tc := 
range tests { + t.Run(tc.name, func(t *testing.T) { + exp := tc.want + encode, err := ccipconfig.EncodeOffchainConfig(&exp) + require.NoError(t, err) + got, err := ccipconfig.DecodeOffchainConfig[ExecOffchainConfig](encode) + + if tc.expectErr { + require.ErrorContains(t, err, "must set") + } else { + require.NoError(t, err) + require.Equal(t, tc.want, got) + } + }) + } +} + +func TestExecOffchainConfig100_AllFieldsRequired(t *testing.T) { + cfg := ExecOffchainConfig{ + SourceFinalityDepth: 3, + DestOptimisticConfirmations: 6, + DestFinalityDepth: 3, + BatchGasLimit: 5_000_000, + RelativeBoostPerWaitHour: 0.07, + InflightCacheExpiry: *config.MustNewDuration(64 * time.Second), + RootSnoozeTime: *config.MustNewDuration(128 * time.Minute), + BatchingStrategyID: 0, + } + encoded, err := ccipconfig.EncodeOffchainConfig(&cfg) + require.NoError(t, err) + + var configAsMap map[string]any + err = json.Unmarshal(encoded, &configAsMap) + require.NoError(t, err) + for keyToDelete := range configAsMap { + if keyToDelete == "MessageVisibilityInterval" { + continue // this field is optional + } + + partialConfig := make(map[string]any) + for k, v := range configAsMap { + if k != keyToDelete { + partialConfig[k] = v + } + } + encodedPartialConfig, err := json.Marshal(partialConfig) + require.NoError(t, err) + _, err = ccipconfig.DecodeOffchainConfig[ExecOffchainConfig](encodedPartialConfig) + if keyToDelete == "BatchingStrategyID" { + require.NoError(t, err) + } else { + require.ErrorContains(t, err, keyToDelete) + } + } +} + +func Test_GetSendersNonce(t *testing.T) { + sender1 := cciptypes.Address(utils.RandomAddress().String()) + sender2 := cciptypes.Address(utils.RandomAddress().String()) + + tests := []struct { + name string + addresses []cciptypes.Address + batchCaller *rpclibmocks.EvmBatchCaller + expectedResult map[cciptypes.Address]uint64 + expectedError bool + }{ + { + name: "return empty map when input is empty", + addresses: []cciptypes.Address{}, + batchCaller: rpclibmocks.NewEvmBatchCaller(t), + expectedResult: map[cciptypes.Address]uint64{}, + }, + { + name: "return error when batch call fails", + addresses: []cciptypes.Address{sender1}, + batchCaller: func() *rpclibmocks.EvmBatchCaller { + mockBatchCaller := rpclibmocks.NewEvmBatchCaller(t) + mockBatchCaller.On("BatchCall", mock.Anything, mock.Anything, mock.Anything). + Return(nil, errors.New("batch call error")) + return mockBatchCaller + }(), + expectedError: true, + }, + { + name: "return error when nonces dont match senders", + addresses: []cciptypes.Address{sender1, sender2}, + batchCaller: func() *rpclibmocks.EvmBatchCaller { + mockBatchCaller := rpclibmocks.NewEvmBatchCaller(t) + results := []rpclib.DataAndErr{ + { + Outputs: []any{uint64(1)}, + Err: nil, + }, + } + mockBatchCaller.On("BatchCall", mock.Anything, mock.Anything, mock.Anything). + Return(results, nil) + return mockBatchCaller + }(), + expectedError: true, + }, + { + name: "return error when single request from batch fails", + addresses: []cciptypes.Address{sender1, sender2}, + batchCaller: func() *rpclibmocks.EvmBatchCaller { + mockBatchCaller := rpclibmocks.NewEvmBatchCaller(t) + results := []rpclib.DataAndErr{ + { + Outputs: []any{uint64(1)}, + Err: nil, + }, + { + Outputs: []any{}, + Err: errors.New("request failed"), + }, + } + mockBatchCaller.On("BatchCall", mock.Anything, mock.Anything, mock.Anything). 
+ Return(results, nil) + return mockBatchCaller + }(), + expectedError: true, + }, + { + name: "return map of nonce per sender", + addresses: []cciptypes.Address{sender1, sender2}, + batchCaller: func() *rpclibmocks.EvmBatchCaller { + mockBatchCaller := rpclibmocks.NewEvmBatchCaller(t) + results := []rpclib.DataAndErr{ + { + Outputs: []any{uint64(1)}, + Err: nil, + }, + { + Outputs: []any{uint64(2)}, + Err: nil, + }, + } + mockBatchCaller.On("BatchCall", mock.Anything, mock.Anything, mock.Anything). + Return(results, nil) + return mockBatchCaller + }(), + expectedResult: map[cciptypes.Address]uint64{ + sender1: uint64(1), + sender2: uint64(2), + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + offramp := OffRamp{evmBatchCaller: test.batchCaller, Logger: logger.TestLogger(t)} + nonce, err := offramp.ListSenderNonces(testutils.Context(t), test.addresses) + + if test.expectedError { + require.Error(t, err) + } else { + require.NoError(t, err) + require.Equal(t, test.expectedResult, nonce) + } + }) + } +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/onramp.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/onramp.go new file mode 100644 index 00000000000..29cb357223b --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/onramp.go @@ -0,0 +1,240 @@ +package v1_0_0 + +import ( + "context" + "errors" + "fmt" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + + "github.com/smartcontractkit/chainlink-common/pkg/hashutil" + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/arm_contract" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_onramp_1_0_0" + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/cache" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/logpollerutil" +) + +const ( + CCIPSendRequestedEventName = "CCIPSendRequested" + ConfigSetEventName = "ConfigSet" +) + +var _ ccipdata.OnRampReader = &OnRamp{} + +type OnRamp struct { + address common.Address + onRamp *evm_2_evm_onramp_1_0_0.EVM2EVMOnRamp + lp logpoller.LogPoller + lggr logger.Logger + client client.Client + leafHasher ccipdata.LeafHasherInterface[[32]byte] + sendRequestedEventSig common.Hash + sendRequestedSeqNumberWord int + filters []logpoller.Filter + cachedSourcePriceRegistryAddress cache.AutoSync[cciptypes.Address] + // Static config can be cached, because it's never expected to change. 
+ // The only way to change that is through the contract's constructor (redeployment) + cachedStaticConfig cache.OnceCtxFunction[evm_2_evm_onramp_1_0_0.EVM2EVMOnRampStaticConfig] + cachedRmnContract cache.OnceCtxFunction[*arm_contract.ARMContract] +} + +func NewOnRamp(lggr logger.Logger, sourceSelector, destSelector uint64, onRampAddress common.Address, sourceLP logpoller.LogPoller, source client.Client) (*OnRamp, error) { + onRamp, err := evm_2_evm_onramp_1_0_0.NewEVM2EVMOnRamp(onRampAddress, source) + if err != nil { + return nil, err + } + onRampABI := abihelpers.MustParseABI(evm_2_evm_onramp_1_0_0.EVM2EVMOnRampABI) + eventSig := abihelpers.MustGetEventID(CCIPSendRequestedEventName, onRampABI) + configSetEventSig := abihelpers.MustGetEventID(ConfigSetEventName, onRampABI) + filters := []logpoller.Filter{ + { + Name: logpoller.FilterName(ccipdata.COMMIT_CCIP_SENDS, onRampAddress), + EventSigs: []common.Hash{eventSig}, + Addresses: []common.Address{onRampAddress}, + Retention: ccipdata.CommitExecLogsRetention, + }, + { + Name: logpoller.FilterName(ccipdata.CONFIG_CHANGED, onRampAddress), + EventSigs: []common.Hash{configSetEventSig}, + Addresses: []common.Address{onRampAddress}, + Retention: ccipdata.CacheEvictionLogsRetention, + }, + } + cachedStaticConfig := cache.OnceCtxFunction[evm_2_evm_onramp_1_0_0.EVM2EVMOnRampStaticConfig](func(ctx context.Context) (evm_2_evm_onramp_1_0_0.EVM2EVMOnRampStaticConfig, error) { + return onRamp.GetStaticConfig(&bind.CallOpts{Context: ctx}) + }) + cachedRmnContract := cache.OnceCtxFunction[*arm_contract.ARMContract](func(ctx context.Context) (*arm_contract.ARMContract, error) { + staticConfig, err := cachedStaticConfig(ctx) + if err != nil { + return nil, err + } + + return arm_contract.NewARMContract(staticConfig.ArmProxy, source) + }) + return &OnRamp{ + lggr: lggr, + address: onRampAddress, + onRamp: onRamp, + client: source, + filters: filters, + lp: sourceLP, + leafHasher: NewLeafHasher(sourceSelector, destSelector, onRampAddress, hashutil.NewKeccak(), onRamp), + // offset || sourceChainID || seqNum || ... 
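+		// i.e. the sequence number is the third non-indexed data word, hence index 2.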
+ sendRequestedSeqNumberWord: 2, + sendRequestedEventSig: eventSig, + cachedSourcePriceRegistryAddress: cache.NewLogpollerEventsBased[cciptypes.Address]( + sourceLP, + []common.Hash{configSetEventSig}, + onRampAddress, + ), + cachedStaticConfig: cache.CallOnceOnNoError(cachedStaticConfig), + cachedRmnContract: cache.CallOnceOnNoError(cachedRmnContract), + }, nil +} + +func (o *OnRamp) Address(context.Context) (cciptypes.Address, error) { + return cciptypes.Address(o.onRamp.Address().String()), nil +} + +func (o *OnRamp) GetDynamicConfig(context.Context) (cciptypes.OnRampDynamicConfig, error) { + if o.onRamp == nil { + return cciptypes.OnRampDynamicConfig{}, fmt.Errorf("onramp not initialized") + } + legacyDynamicConfig, err := o.onRamp.GetDynamicConfig(nil) + if err != nil { + return cciptypes.OnRampDynamicConfig{}, err + } + return cciptypes.OnRampDynamicConfig{ + Router: cciptypes.Address(legacyDynamicConfig.Router.String()), + MaxNumberOfTokensPerMsg: legacyDynamicConfig.MaxTokensLength, + DestGasOverhead: 0, + DestGasPerPayloadByte: 0, + DestDataAvailabilityOverheadGas: 0, + DestGasPerDataAvailabilityByte: 0, + DestDataAvailabilityMultiplierBps: 0, + PriceRegistry: cciptypes.Address(legacyDynamicConfig.PriceRegistry.String()), + MaxDataBytes: legacyDynamicConfig.MaxDataSize, + MaxPerMsgGasLimit: uint32(legacyDynamicConfig.MaxGasLimit), + }, nil +} + +func (o *OnRamp) SourcePriceRegistryAddress(ctx context.Context) (cciptypes.Address, error) { + return o.cachedSourcePriceRegistryAddress.Get(ctx, func(ctx context.Context) (cciptypes.Address, error) { + c, err := o.GetDynamicConfig(ctx) + if err != nil { + return "", err + } + return c.PriceRegistry, nil + }) +} + +func (o *OnRamp) GetSendRequestsBetweenSeqNums(ctx context.Context, seqNumMin, seqNumMax uint64, finalized bool) ([]cciptypes.EVM2EVMMessageWithTxMeta, error) { + logs, err := o.lp.LogsDataWordRange( + ctx, + o.sendRequestedEventSig, + o.address, + o.sendRequestedSeqNumberWord, + logpoller.EvmWord(seqNumMin), + logpoller.EvmWord(seqNumMax), + ccipdata.LogsConfirmations(finalized), + ) + if err != nil { + return nil, err + } + + parsedLogs, err := ccipdata.ParseLogs[cciptypes.EVM2EVMMessage](logs, o.lggr, o.logToMessage) + if err != nil { + return nil, err + } + + res := make([]cciptypes.EVM2EVMMessageWithTxMeta, 0, len(parsedLogs)) + for _, log := range parsedLogs { + res = append(res, cciptypes.EVM2EVMMessageWithTxMeta{ + TxMeta: log.TxMeta, + EVM2EVMMessage: log.Data, + }) + } + return res, nil +} + +func (o *OnRamp) RouterAddress(context.Context) (cciptypes.Address, error) { + config, err := o.onRamp.GetDynamicConfig(nil) + if err != nil { + return "", err + } + return cciptypes.Address(config.Router.String()), nil +} + +func (o *OnRamp) IsSourceChainHealthy(context.Context) (bool, error) { + if err := o.lp.Healthy(); err != nil { + return false, nil + } + return true, nil +} + +func (o *OnRamp) IsSourceCursed(ctx context.Context) (bool, error) { + arm, err := o.cachedRmnContract(ctx) + if err != nil { + return false, fmt.Errorf("intializing Arm contract through the ArmProxy: %w", err) + } + + cursed, err := arm.IsCursed0(&bind.CallOpts{Context: ctx}) + if err != nil { + return false, fmt.Errorf("checking if source Arm is cursed: %w", err) + } + return cursed, nil +} + +func (o *OnRamp) GetUSDCMessagePriorToLogIndexInTx(ctx context.Context, logIndex, offsetFromFinal int64, txHash common.Hash) ([]byte, error) { + return nil, errors.New("USDC not supported in < 1.2.0") +} + +func (o *OnRamp) Close() error { + return 
logpollerutil.UnregisterLpFilters(o.lp, o.filters) +} + +func (o *OnRamp) RegisterFilters() error { + return logpollerutil.RegisterLpFilters(o.lp, o.filters) +} + +func (o *OnRamp) logToMessage(log types.Log) (*cciptypes.EVM2EVMMessage, error) { + msg, err := o.onRamp.ParseCCIPSendRequested(log) + if err != nil { + return nil, err + } + h, err := o.leafHasher.HashLeaf(log) + if err != nil { + return nil, err + } + tokensAndAmounts := make([]cciptypes.TokenAmount, len(msg.Message.TokenAmounts)) + for i, tokenAndAmount := range msg.Message.TokenAmounts { + tokensAndAmounts[i] = cciptypes.TokenAmount{ + Token: cciptypes.Address(tokenAndAmount.Token.String()), + Amount: tokenAndAmount.Amount, + } + } + return &cciptypes.EVM2EVMMessage{ + SequenceNumber: msg.Message.SequenceNumber, + GasLimit: msg.Message.GasLimit, + Nonce: msg.Message.Nonce, + MessageID: msg.Message.MessageId, + SourceChainSelector: msg.Message.SourceChainSelector, + Sender: cciptypes.Address(msg.Message.Sender.String()), + Receiver: cciptypes.Address(msg.Message.Receiver.String()), + Strict: msg.Message.Strict, + FeeToken: cciptypes.Address(msg.Message.FeeToken.String()), + FeeTokenAmount: msg.Message.FeeTokenAmount, + Data: msg.Message.Data, + TokenAmounts: tokensAndAmounts, + SourceTokenData: make([][]byte, len(msg.Message.TokenAmounts)), // Always empty in 1.0 + Hash: h, + }, nil +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/price_registry.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/price_registry.go new file mode 100644 index 00000000000..d2104f985b9 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/price_registry.go @@ -0,0 +1,310 @@ +package v1_0_0 + +import ( + "context" + "fmt" + "math/big" + "sync" + "time" + + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/rpclib" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" + evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/price_registry_1_0_0" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/shared/generated/erc20" + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/cache" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/logpollerutil" +) + +var ( + abiERC20 = abihelpers.MustParseABI(erc20.ERC20ABI) + _ ccipdata.PriceRegistryReader = &PriceRegistry{} + // Exposed only for backwards compatibility with tests. 
+ UsdPerUnitGasUpdated = abihelpers.MustGetEventID("UsdPerUnitGasUpdated", abihelpers.MustParseABI(price_registry_1_0_0.PriceRegistryABI)) +) + +type PriceRegistry struct { + priceRegistry price_registry_1_0_0.PriceRegistryInterface + address common.Address + lp logpoller.LogPoller + evmBatchCaller rpclib.EvmBatchCaller + lggr logger.Logger + filters []logpoller.Filter + tokenUpdated common.Hash + gasUpdated common.Hash + feeTokenAdded common.Hash + feeTokenRemoved common.Hash + + feeTokensCache cache.AutoSync[[]common.Address] + tokenDecimalsCache sync.Map +} + +func NewPriceRegistry(lggr logger.Logger, priceRegistryAddr common.Address, lp logpoller.LogPoller, ec client.Client, registerFilters bool) (*PriceRegistry, error) { + priceRegistry, err := price_registry_1_0_0.NewPriceRegistry(priceRegistryAddr, ec) + if err != nil { + return nil, err + } + priceRegABI := abihelpers.MustParseABI(price_registry_1_0_0.PriceRegistryABI) + usdPerTokenUpdated := abihelpers.MustGetEventID("UsdPerTokenUpdated", priceRegABI) + feeTokenRemoved := abihelpers.MustGetEventID("FeeTokenRemoved", priceRegABI) + feeTokenAdded := abihelpers.MustGetEventID("FeeTokenAdded", priceRegABI) + var filters = []logpoller.Filter{ + { + Name: logpoller.FilterName(ccipdata.COMMIT_PRICE_UPDATES, priceRegistryAddr.String()), + EventSigs: []common.Hash{UsdPerUnitGasUpdated, usdPerTokenUpdated}, + Addresses: []common.Address{priceRegistryAddr}, + Retention: ccipdata.PriceUpdatesLogsRetention, + }, + { + Name: logpoller.FilterName(ccipdata.FEE_TOKEN_ADDED, priceRegistryAddr.String()), + EventSigs: []common.Hash{feeTokenAdded}, + Addresses: []common.Address{priceRegistryAddr}, + Retention: ccipdata.CacheEvictionLogsRetention, + }, + { + Name: logpoller.FilterName(ccipdata.FEE_TOKEN_REMOVED, priceRegistryAddr.String()), + EventSigs: []common.Hash{feeTokenRemoved}, + Addresses: []common.Address{priceRegistryAddr}, + Retention: ccipdata.CacheEvictionLogsRetention, + }} + if registerFilters { + err = logpollerutil.RegisterLpFilters(lp, filters) + if err != nil { + return nil, err + } + } + return &PriceRegistry{ + priceRegistry: priceRegistry, + address: priceRegistryAddr, + lp: lp, + evmBatchCaller: rpclib.NewDynamicLimitedBatchCaller( + lggr, + ec, + rpclib.DefaultRpcBatchSizeLimit, + rpclib.DefaultRpcBatchBackOffMultiplier, + rpclib.DefaultMaxParallelRpcCalls, + ), + lggr: lggr, + gasUpdated: UsdPerUnitGasUpdated, + tokenUpdated: usdPerTokenUpdated, + feeTokenRemoved: feeTokenRemoved, + feeTokenAdded: feeTokenAdded, + filters: filters, + feeTokensCache: cache.NewLogpollerEventsBased[[]common.Address]( + lp, + []common.Hash{feeTokenAdded, feeTokenRemoved}, + priceRegistryAddr, + ), + }, nil +} + +func (p *PriceRegistry) GetTokenPrices(ctx context.Context, wantedTokens []cciptypes.Address) ([]cciptypes.TokenPriceUpdate, error) { + evmAddrs, err := ccipcalc.GenericAddrsToEvm(wantedTokens...) 
+ if err != nil { + return nil, err + } + + tps, err := p.priceRegistry.GetTokenPrices(&bind.CallOpts{Context: ctx}, evmAddrs) + if err != nil { + return nil, err + } + var tpu []cciptypes.TokenPriceUpdate + for i, tp := range tps { + tpu = append(tpu, cciptypes.TokenPriceUpdate{ + TokenPrice: cciptypes.TokenPrice{ + Token: cciptypes.Address(evmAddrs[i].String()), + Value: tp.Value, + }, + TimestampUnixSec: big.NewInt(int64(tp.Timestamp)), + }) + } + return tpu, nil +} + +func (p *PriceRegistry) Address(ctx context.Context) (cciptypes.Address, error) { + return cciptypes.Address(p.address.String()), nil +} + +func (p *PriceRegistry) GetFeeTokens(ctx context.Context) ([]cciptypes.Address, error) { + feeTokens, err := p.feeTokensCache.Get(ctx, func(ctx context.Context) ([]common.Address, error) { + return p.priceRegistry.GetFeeTokens(&bind.CallOpts{Context: ctx}) + }) + if err != nil { + return nil, fmt.Errorf("get fee tokens: %w", err) + } + + return ccipcalc.EvmAddrsToGeneric(feeTokens...), nil +} + +func (p *PriceRegistry) Close() error { + return logpollerutil.UnregisterLpFilters(p.lp, p.filters) +} + +func (p *PriceRegistry) GetTokenPriceUpdatesCreatedAfter(ctx context.Context, ts time.Time, confs int) ([]cciptypes.TokenPriceUpdateWithTxMeta, error) { + logs, err := p.lp.LogsCreatedAfter( + ctx, + p.tokenUpdated, + p.address, + ts, + evmtypes.Confirmations(confs), + ) + if err != nil { + return nil, err + } + + parsedLogs, err := ccipdata.ParseLogs[cciptypes.TokenPriceUpdate]( + logs, + p.lggr, + func(log types.Log) (*cciptypes.TokenPriceUpdate, error) { + tp, err1 := p.priceRegistry.ParseUsdPerTokenUpdated(log) + if err1 != nil { + return nil, err1 + } + return &cciptypes.TokenPriceUpdate{ + TokenPrice: cciptypes.TokenPrice{ + Token: cciptypes.Address(tp.Token.String()), + Value: tp.Value, + }, + TimestampUnixSec: tp.Timestamp, + }, nil + }, + ) + if err != nil { + return nil, err + } + + res := make([]cciptypes.TokenPriceUpdateWithTxMeta, 0, len(parsedLogs)) + for _, log := range parsedLogs { + res = append(res, cciptypes.TokenPriceUpdateWithTxMeta{ + TxMeta: log.TxMeta, + TokenPriceUpdate: log.Data, + }) + } + return res, nil +} + +func (p *PriceRegistry) GetGasPriceUpdatesCreatedAfter(ctx context.Context, chainSelector uint64, ts time.Time, confs int) ([]cciptypes.GasPriceUpdateWithTxMeta, error) { + logs, err := p.lp.IndexedLogsCreatedAfter( + ctx, + p.gasUpdated, + p.address, + 1, + []common.Hash{abihelpers.EvmWord(chainSelector)}, + ts, + evmtypes.Confirmations(confs), + ) + if err != nil { + return nil, err + } + return p.parseGasPriceUpdatesLogs(logs) +} + +func (p *PriceRegistry) GetAllGasPriceUpdatesCreatedAfter(ctx context.Context, ts time.Time, confs int) ([]cciptypes.GasPriceUpdateWithTxMeta, error) { + logs, err := p.lp.LogsCreatedAfter( + ctx, + p.gasUpdated, + p.address, + ts, + evmtypes.Confirmations(confs), + ) + if err != nil { + return nil, err + } + return p.parseGasPriceUpdatesLogs(logs) +} + +func (p *PriceRegistry) parseGasPriceUpdatesLogs(logs []logpoller.Log) ([]cciptypes.GasPriceUpdateWithTxMeta, error) { + parsedLogs, err := ccipdata.ParseLogs[cciptypes.GasPriceUpdate]( + logs, + p.lggr, + func(log types.Log) (*cciptypes.GasPriceUpdate, error) { + p, err1 := p.priceRegistry.ParseUsdPerUnitGasUpdated(log) + if err1 != nil { + return nil, err1 + } + return &cciptypes.GasPriceUpdate{ + GasPrice: cciptypes.GasPrice{ + DestChainSelector: p.DestChain, + Value: p.Value, + }, + TimestampUnixSec: p.Timestamp, + }, nil + }, + ) + if err != nil { + return nil, err + } 
+ + res := make([]cciptypes.GasPriceUpdateWithTxMeta, 0, len(parsedLogs)) + for _, log := range parsedLogs { + res = append(res, cciptypes.GasPriceUpdateWithTxMeta{ + TxMeta: log.TxMeta, + GasPriceUpdate: log.Data, + }) + } + return res, nil +} + +func (p *PriceRegistry) GetTokensDecimals(ctx context.Context, tokenAddresses []cciptypes.Address) ([]uint8, error) { + evmAddrs, err := ccipcalc.GenericAddrsToEvm(tokenAddresses...) + if err != nil { + return nil, err + } + + found := make(map[common.Address]bool) + tokenDecimals := make([]uint8, len(evmAddrs)) + for i, tokenAddress := range evmAddrs { + if v, ok := p.tokenDecimalsCache.Load(tokenAddress); ok { + if decimals, isUint8 := v.(uint8); isUint8 { + tokenDecimals[i] = decimals + found[tokenAddress] = true + } else { + p.lggr.Errorf("token decimals cache contains invalid type %T", v) + } + } + } + if len(found) == len(evmAddrs) { + return tokenDecimals, nil + } + + evmCalls := make([]rpclib.EvmCall, 0, len(evmAddrs)) + for _, tokenAddress := range evmAddrs { + if !found[tokenAddress] { + evmCalls = append(evmCalls, rpclib.NewEvmCall(abiERC20, "decimals", tokenAddress)) + } + } + + results, err := p.evmBatchCaller.BatchCall(ctx, 0, evmCalls) + if err != nil { + return nil, fmt.Errorf("batch call limit: %w", err) + } + + decimals, err := rpclib.ParseOutputs[uint8](results, func(d rpclib.DataAndErr) (uint8, error) { + return rpclib.ParseOutput[uint8](d, 0) + }) + if err != nil { + return nil, fmt.Errorf("parse outputs: %w", err) + } + + j := 0 + for i, tokenAddress := range evmAddrs { + if !found[tokenAddress] { + tokenDecimals[i] = decimals[j] + p.tokenDecimalsCache.Store(tokenAddress, tokenDecimals[i]) + j++ + } + } + return tokenDecimals, nil +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/test_helpers.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/test_helpers.go new file mode 100644 index 00000000000..34f832e17fc --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0/test_helpers.go @@ -0,0 +1,90 @@ +package v1_0_0 + +import ( + "encoding/binary" + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_offramp" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/price_registry_1_0_0" + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc" +) + +// ApplyPriceRegistryUpdate is a helper function used in tests only. 
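+// Token prices are applied together with the first gas price; any additional gas price is submitted in a follow-up UpdatePrices call (at most two gas prices are supported).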
+func ApplyPriceRegistryUpdate(t *testing.T, user *bind.TransactOpts, addr common.Address, ec client.Client, gasPrice []cciptypes.GasPrice, tokenPrices []cciptypes.TokenPrice) { + require.True(t, len(gasPrice) <= 2) + pr, err := price_registry_1_0_0.NewPriceRegistry(addr, ec) + require.NoError(t, err) + var tps []price_registry_1_0_0.InternalTokenPriceUpdate + for _, tp := range tokenPrices { + evmAddrs, err1 := ccipcalc.GenericAddrsToEvm(tp.Token) + assert.NoError(t, err1) + tps = append(tps, price_registry_1_0_0.InternalTokenPriceUpdate{ + SourceToken: evmAddrs[0], + UsdPerToken: tp.Value, + }) + } + dest := uint64(0) + gas := big.NewInt(0) + if len(gasPrice) >= 1 { + dest = gasPrice[0].DestChainSelector + gas = gasPrice[0].Value + } + _, err = pr.UpdatePrices(user, price_registry_1_0_0.InternalPriceUpdates{ + TokenPriceUpdates: tps, + DestChainSelector: dest, + UsdPerUnitGas: gas, + }) + require.NoError(t, err) + + for i := 1; i < len(gasPrice); i++ { + dest = gasPrice[i].DestChainSelector + gas = gasPrice[i].Value + _, err = pr.UpdatePrices(user, price_registry_1_0_0.InternalPriceUpdates{ + TokenPriceUpdates: []price_registry_1_0_0.InternalTokenPriceUpdate{}, + DestChainSelector: dest, + UsdPerUnitGas: gas, + }) + require.NoError(t, err) + } +} + +func CreateExecutionStateChangeEventLog(t *testing.T, seqNr uint64, blockNumber int64, messageID common.Hash) logpoller.Log { + tAbi, err := evm_2_evm_offramp.EVM2EVMOffRampMetaData.GetAbi() + require.NoError(t, err) + eseEvent, ok := tAbi.Events["ExecutionStateChanged"] + require.True(t, ok) + + logData, err := eseEvent.Inputs.NonIndexed().Pack(uint8(1), []byte("some return data")) + require.NoError(t, err) + seqNrBytes := make([]byte, 8) + binary.BigEndian.PutUint64(seqNrBytes, seqNr) + seqNrTopic := common.BytesToHash(seqNrBytes) + topic0 := evm_2_evm_offramp.EVM2EVMOffRampExecutionStateChanged{}.Topic() + + return logpoller.Log{ + Topics: [][]byte{ + topic0[:], + seqNrTopic[:], + messageID[:], + }, + Data: logData, + LogIndex: 1, + BlockHash: utils.RandomBytes32(), + BlockNumber: blockNumber, + EventSig: topic0, + Address: testutils.NewAddress(), + TxHash: utils.RandomBytes32(), + } +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_1_0/onramp.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_1_0/onramp.go new file mode 100644 index 00000000000..d4d73219fc0 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_1_0/onramp.go @@ -0,0 +1,70 @@ +package v1_1_0 + +import ( + "context" + "fmt" + + "github.com/ethereum/go-ethereum/common" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_onramp_1_1_0" + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0" +) + +var _ ccipdata.OnRampReader = &OnRamp{} + +// OnRamp The only difference that the plugins care about in 1.1 is that the dynamic config struct has changed. 
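+// The 1.1 dynamic config does not carry the data-availability fields, so GetDynamicConfig reports them as zero.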
+type OnRamp struct { + *v1_0_0.OnRamp + onRamp *evm_2_evm_onramp_1_1_0.EVM2EVMOnRamp +} + +func NewOnRamp(lggr logger.Logger, sourceSelector, destSelector uint64, onRampAddress common.Address, sourceLP logpoller.LogPoller, source client.Client) (*OnRamp, error) { + onRamp, err := evm_2_evm_onramp_1_1_0.NewEVM2EVMOnRamp(onRampAddress, source) + if err != nil { + return nil, err + } + onRamp100, err := v1_0_0.NewOnRamp(lggr, sourceSelector, destSelector, onRampAddress, sourceLP, source) + if err != nil { + return nil, err + } + return &OnRamp{ + OnRamp: onRamp100, + onRamp: onRamp, + }, nil +} + +func (o *OnRamp) RouterAddress(context.Context) (cciptypes.Address, error) { + config, err := o.onRamp.GetDynamicConfig(nil) + if err != nil { + return "", err + } + return cciptypes.Address(config.Router.String()), nil +} + +func (o *OnRamp) GetDynamicConfig(context.Context) (cciptypes.OnRampDynamicConfig, error) { + if o.onRamp == nil { + return cciptypes.OnRampDynamicConfig{}, fmt.Errorf("onramp not initialized") + } + legacyDynamicConfig, err := o.onRamp.GetDynamicConfig(nil) + if err != nil { + return cciptypes.OnRampDynamicConfig{}, err + } + return cciptypes.OnRampDynamicConfig{ + Router: cciptypes.Address(legacyDynamicConfig.Router.String()), + MaxNumberOfTokensPerMsg: legacyDynamicConfig.MaxTokensLength, + DestGasOverhead: legacyDynamicConfig.DestGasOverhead, + DestGasPerPayloadByte: legacyDynamicConfig.DestGasPerPayloadByte, + DestDataAvailabilityOverheadGas: 0, + DestGasPerDataAvailabilityByte: 0, + DestDataAvailabilityMultiplierBps: 0, + PriceRegistry: cciptypes.Address(legacyDynamicConfig.PriceRegistry.String()), + MaxDataBytes: legacyDynamicConfig.MaxDataSize, + MaxPerMsgGasLimit: uint32(legacyDynamicConfig.MaxGasLimit), + }, nil +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/commit_store.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/commit_store.go new file mode 100644 index 00000000000..7612e544195 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/commit_store.go @@ -0,0 +1,469 @@ +package v1_2_0 + +import ( + "context" + "fmt" + "math/big" + "sync" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/pkg/errors" + + "github.com/smartcontractkit/chainlink-common/pkg/config" + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + "github.com/smartcontractkit/chainlink-common/pkg/types/query" + "github.com/smartcontractkit/chainlink-common/pkg/types/query/primitives" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" + evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/commit_store_1_2_0" + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers" + ccipconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata" + 
"github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/logpollerutil" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/prices" +) + +var _ ccipdata.CommitStoreReader = &CommitStore{} + +type CommitStore struct { + // Static config + commitStore *commit_store_1_2_0.CommitStore + lggr logger.Logger + lp logpoller.LogPoller + address common.Address + estimator *gas.EvmFeeEstimator + sourceMaxGasPrice *big.Int + filters []logpoller.Filter + reportAcceptedSig common.Hash + reportAcceptedMaxSeqIndex int + commitReportArgs abi.Arguments + + // Dynamic config + configMu sync.RWMutex + gasPriceEstimator *prices.DAGasPriceEstimator + offchainConfig cciptypes.CommitOffchainConfig +} + +func (c *CommitStore) GetCommitStoreStaticConfig(ctx context.Context) (cciptypes.CommitStoreStaticConfig, error) { + staticConfig, err := c.commitStore.GetStaticConfig(&bind.CallOpts{Context: ctx}) + if err != nil { + return cciptypes.CommitStoreStaticConfig{}, err + } + return cciptypes.CommitStoreStaticConfig{ + ChainSelector: staticConfig.ChainSelector, + SourceChainSelector: staticConfig.SourceChainSelector, + OnRamp: cciptypes.Address(staticConfig.OnRamp.String()), + ArmProxy: cciptypes.Address(staticConfig.ArmProxy.String()), + }, nil +} + +func (c *CommitStore) EncodeCommitReport(_ context.Context, report cciptypes.CommitStoreReport) ([]byte, error) { + return EncodeCommitReport(c.commitReportArgs, report) +} + +func EncodeCommitReport(commitReportArgs abi.Arguments, report cciptypes.CommitStoreReport) ([]byte, error) { + var tokenPriceUpdates []commit_store_1_2_0.InternalTokenPriceUpdate + for _, tokenPriceUpdate := range report.TokenPrices { + tokenAddressEvm, err := ccipcalc.GenericAddrToEvm(tokenPriceUpdate.Token) + if err != nil { + return nil, fmt.Errorf("token price update address to evm: %w", err) + } + + tokenPriceUpdates = append(tokenPriceUpdates, commit_store_1_2_0.InternalTokenPriceUpdate{ + SourceToken: tokenAddressEvm, + UsdPerToken: tokenPriceUpdate.Value, + }) + } + + var gasPriceUpdates []commit_store_1_2_0.InternalGasPriceUpdate + for _, gasPriceUpdate := range report.GasPrices { + gasPriceUpdates = append(gasPriceUpdates, commit_store_1_2_0.InternalGasPriceUpdate{ + DestChainSelector: gasPriceUpdate.DestChainSelector, + UsdPerUnitGas: gasPriceUpdate.Value, + }) + } + + rep := commit_store_1_2_0.CommitStoreCommitReport{ + PriceUpdates: commit_store_1_2_0.InternalPriceUpdates{ + TokenPriceUpdates: tokenPriceUpdates, + GasPriceUpdates: gasPriceUpdates, + }, + Interval: commit_store_1_2_0.CommitStoreInterval{Min: report.Interval.Min, Max: report.Interval.Max}, + MerkleRoot: report.MerkleRoot, + } + return commitReportArgs.PackValues([]interface{}{rep}) +} + +func DecodeCommitReport(commitReportArgs abi.Arguments, report []byte) (cciptypes.CommitStoreReport, error) { + unpacked, err := commitReportArgs.Unpack(report) + if err != nil { + return cciptypes.CommitStoreReport{}, err + } + if len(unpacked) != 1 { + return cciptypes.CommitStoreReport{}, errors.New("expected single struct value") + } + + commitReport, ok := unpacked[0].(struct { + PriceUpdates struct { + TokenPriceUpdates []struct { + SourceToken common.Address `json:"sourceToken"` + UsdPerToken *big.Int `json:"usdPerToken"` + } `json:"tokenPriceUpdates"` + GasPriceUpdates []struct { + DestChainSelector uint64 `json:"destChainSelector"` + UsdPerUnitGas *big.Int `json:"usdPerUnitGas"` 
+ } `json:"gasPriceUpdates"` + } `json:"priceUpdates"` + Interval struct { + Min uint64 `json:"min"` + Max uint64 `json:"max"` + } `json:"interval"` + MerkleRoot [32]byte `json:"merkleRoot"` + }) + if !ok { + return cciptypes.CommitStoreReport{}, errors.Errorf("invalid commit report got %T", unpacked[0]) + } + + var tokenPriceUpdates []cciptypes.TokenPrice + for _, u := range commitReport.PriceUpdates.TokenPriceUpdates { + tokenPriceUpdates = append(tokenPriceUpdates, cciptypes.TokenPrice{ + Token: cciptypes.Address(u.SourceToken.String()), + Value: u.UsdPerToken, + }) + } + + var gasPrices []cciptypes.GasPrice + for _, u := range commitReport.PriceUpdates.GasPriceUpdates { + gasPrices = append(gasPrices, cciptypes.GasPrice{ + DestChainSelector: u.DestChainSelector, + Value: u.UsdPerUnitGas, + }) + } + + return cciptypes.CommitStoreReport{ + TokenPrices: tokenPriceUpdates, + GasPrices: gasPrices, + Interval: cciptypes.CommitStoreInterval{ + Min: commitReport.Interval.Min, + Max: commitReport.Interval.Max, + }, + MerkleRoot: commitReport.MerkleRoot, + }, nil +} + +func (c *CommitStore) DecodeCommitReport(_ context.Context, report []byte) (cciptypes.CommitStoreReport, error) { + return DecodeCommitReport(c.commitReportArgs, report) +} + +func (c *CommitStore) IsBlessed(ctx context.Context, root [32]byte) (bool, error) { + return c.commitStore.IsBlessed(&bind.CallOpts{Context: ctx}, root) +} + +func (c *CommitStore) OffchainConfig(context.Context) (cciptypes.CommitOffchainConfig, error) { + c.configMu.RLock() + defer c.configMu.RUnlock() + return c.offchainConfig, nil +} + +func (c *CommitStore) GasPriceEstimator(context.Context) (cciptypes.GasPriceEstimatorCommit, error) { + c.configMu.RLock() + defer c.configMu.RUnlock() + return c.gasPriceEstimator, nil +} + +func (c *CommitStore) SetGasEstimator(ctx context.Context, gpe gas.EvmFeeEstimator) error { + c.configMu.RLock() + defer c.configMu.RUnlock() + c.estimator = &gpe + return nil +} + +func (c *CommitStore) SetSourceMaxGasPrice(ctx context.Context, sourceMaxGasPrice *big.Int) error { + c.configMu.RLock() + defer c.configMu.RUnlock() + c.sourceMaxGasPrice = sourceMaxGasPrice + return nil +} + +// Do not change the JSON format of this struct without consulting with the RDD people first. 
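+// JSONCommitOffchainConfig is the offchain configuration for the commit plugin (v1.2); ChangeConfig decodes it from the raw offchainConfig bytes.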
+type JSONCommitOffchainConfig struct { + SourceFinalityDepth uint32 + DestFinalityDepth uint32 + GasPriceHeartBeat config.Duration + DAGasPriceDeviationPPB uint32 + ExecGasPriceDeviationPPB uint32 + TokenPriceHeartBeat config.Duration + TokenPriceDeviationPPB uint32 + InflightCacheExpiry config.Duration + PriceReportingDisabled bool +} + +func (c JSONCommitOffchainConfig) Validate() error { + if c.GasPriceHeartBeat.Duration() == 0 { + return errors.New("must set GasPriceHeartBeat") + } + if c.ExecGasPriceDeviationPPB == 0 { + return errors.New("must set ExecGasPriceDeviationPPB") + } + if c.TokenPriceHeartBeat.Duration() == 0 { + return errors.New("must set TokenPriceHeartBeat") + } + if c.TokenPriceDeviationPPB == 0 { + return errors.New("must set TokenPriceDeviationPPB") + } + if c.InflightCacheExpiry.Duration() == 0 { + return errors.New("must set InflightCacheExpiry") + } + // DAGasPriceDeviationPPB is not validated because it can be 0 on non-rollups + + return nil +} + +func (c *CommitStore) ChangeConfig(_ context.Context, onchainConfig []byte, offchainConfig []byte) (cciptypes.Address, error) { + onchainConfigParsed, err := abihelpers.DecodeAbiStruct[ccipdata.CommitOnchainConfig](onchainConfig) + if err != nil { + return "", err + } + + offchainConfigParsed, err := ccipconfig.DecodeOffchainConfig[JSONCommitOffchainConfig](offchainConfig) + if err != nil { + return "", err + } + c.configMu.Lock() + defer c.configMu.Unlock() + + if c.estimator == nil { + return "", fmt.Errorf("this CommitStore estimator is nil. SetGasEstimator should be called before ChangeConfig") + } + + if c.sourceMaxGasPrice == nil { + return "", fmt.Errorf("this CommitStore sourceMaxGasPrice is nil. SetSourceMaxGasPrice should be called before ChangeConfig") + } + + c.gasPriceEstimator = prices.NewDAGasPriceEstimator( + *c.estimator, + c.sourceMaxGasPrice, + int64(offchainConfigParsed.ExecGasPriceDeviationPPB), + int64(offchainConfigParsed.DAGasPriceDeviationPPB), + ) + c.offchainConfig = ccipdata.NewCommitOffchainConfig( + offchainConfigParsed.ExecGasPriceDeviationPPB, + offchainConfigParsed.GasPriceHeartBeat.Duration(), + offchainConfigParsed.TokenPriceDeviationPPB, + offchainConfigParsed.TokenPriceHeartBeat.Duration(), + offchainConfigParsed.InflightCacheExpiry.Duration(), + offchainConfigParsed.PriceReportingDisabled, + ) + + c.lggr.Infow("ChangeConfig", + "offchainConfig", offchainConfigParsed, + "onchainConfig", onchainConfigParsed, + ) + return cciptypes.Address(onchainConfigParsed.PriceRegistry.String()), nil +} + +func (c *CommitStore) Close() error { + return logpollerutil.UnregisterLpFilters(c.lp, c.filters) +} + +func (c *CommitStore) parseReport(log types.Log) (*cciptypes.CommitStoreReport, error) { + repAccepted, err := c.commitStore.ParseReportAccepted(log) + if err != nil { + return nil, err + } + // Translate to common struct. 
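+	// Token prices, gas prices, the interval and the merkle root map one-to-one onto cciptypes.CommitStoreReport.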
+ var tokenPrices []cciptypes.TokenPrice + for _, tpu := range repAccepted.Report.PriceUpdates.TokenPriceUpdates { + tokenPrices = append(tokenPrices, cciptypes.TokenPrice{ + Token: cciptypes.Address(tpu.SourceToken.String()), + Value: tpu.UsdPerToken, + }) + } + var gasPrices []cciptypes.GasPrice + for _, tpu := range repAccepted.Report.PriceUpdates.GasPriceUpdates { + gasPrices = append(gasPrices, cciptypes.GasPrice{ + DestChainSelector: tpu.DestChainSelector, + Value: tpu.UsdPerUnitGas, + }) + } + + return &cciptypes.CommitStoreReport{ + TokenPrices: tokenPrices, + GasPrices: gasPrices, + MerkleRoot: repAccepted.Report.MerkleRoot, + Interval: cciptypes.CommitStoreInterval{Min: repAccepted.Report.Interval.Min, Max: repAccepted.Report.Interval.Max}, + }, nil +} + +func (c *CommitStore) GetCommitReportMatchingSeqNum(ctx context.Context, seqNr uint64, confs int) ([]cciptypes.CommitStoreReportWithTxMeta, error) { + logs, err := c.lp.LogsDataWordBetween( + ctx, + c.reportAcceptedSig, + c.address, + c.reportAcceptedMaxSeqIndex-1, + c.reportAcceptedMaxSeqIndex, + logpoller.EvmWord(seqNr), + evmtypes.Confirmations(confs), + ) + if err != nil { + return nil, err + } + + parsedLogs, err := ccipdata.ParseLogs[cciptypes.CommitStoreReport]( + logs, + c.lggr, + c.parseReport, + ) + if err != nil { + return nil, err + } + + res := make([]cciptypes.CommitStoreReportWithTxMeta, 0, len(parsedLogs)) + for _, log := range parsedLogs { + res = append(res, cciptypes.CommitStoreReportWithTxMeta{ + TxMeta: log.TxMeta, + CommitStoreReport: log.Data, + }) + } + + if len(res) > 1 { + c.lggr.Errorw("More than one report found for seqNr", "seqNr", seqNr, "commitReports", parsedLogs) + return res[:1], nil + } + return res, nil +} + +func (c *CommitStore) GetAcceptedCommitReportsGteTimestamp(ctx context.Context, ts time.Time, confs int) ([]cciptypes.CommitStoreReportWithTxMeta, error) { + latestBlock, err := c.lp.LatestBlock(ctx) + if err != nil { + return nil, err + } + + reportsQuery, err := query.Where( + c.address.String(), + logpoller.NewAddressFilter(c.address), + logpoller.NewEventSigFilter(c.reportAcceptedSig), + query.Timestamp(uint64(ts.Unix()), primitives.Gte), + logpoller.NewConfirmationsFilter(evmtypes.Confirmations(confs)), + ) + if err != nil { + return nil, err + } + + logs, err := c.lp.FilteredLogs( + ctx, + reportsQuery, + query.NewLimitAndSort(query.Limit{}, query.NewSortBySequence(query.Asc)), + "GetAcceptedCommitReportsGteTimestamp", + ) + if err != nil { + return nil, err + } + + parsedLogs, err := ccipdata.ParseLogs[cciptypes.CommitStoreReport](logs, c.lggr, c.parseReport) + if err != nil { + return nil, fmt.Errorf("parse logs: %w", err) + } + + res := make([]cciptypes.CommitStoreReportWithTxMeta, 0, len(parsedLogs)) + for _, log := range parsedLogs { + res = append(res, cciptypes.CommitStoreReportWithTxMeta{ + TxMeta: log.TxMeta.WithFinalityStatus(uint64(latestBlock.FinalizedBlockNumber)), + CommitStoreReport: log.Data, + }) + } + return res, nil +} + +func (c *CommitStore) GetExpectedNextSequenceNumber(ctx context.Context) (uint64, error) { + return c.commitStore.GetExpectedNextSequenceNumber(&bind.CallOpts{Context: ctx}) +} + +func (c *CommitStore) GetLatestPriceEpochAndRound(ctx context.Context) (uint64, error) { + return c.commitStore.GetLatestPriceEpochAndRound(&bind.CallOpts{Context: ctx}) +} + +func (c *CommitStore) IsDestChainHealthy(context.Context) (bool, error) { + if err := c.lp.Healthy(); err != nil { + return false, nil + } + return true, nil +} + +func (c *CommitStore) IsDown(ctx 
context.Context) (bool, error) { + unPausedAndHealthy, err := c.commitStore.IsUnpausedAndARMHealthy(&bind.CallOpts{Context: ctx}) + if err != nil { + return true, err + } + return !unPausedAndHealthy, nil +} + +func (c *CommitStore) VerifyExecutionReport(ctx context.Context, report cciptypes.ExecReport) (bool, error) { + var hashes [][32]byte + for _, msg := range report.Messages { + hashes = append(hashes, msg.Hash) + } + res, err := c.commitStore.Verify(&bind.CallOpts{Context: ctx}, hashes, report.Proofs, report.ProofFlagBits) + if err != nil { + c.lggr.Errorw("Unable to call verify", "messages", report.Messages, "err", err) + return false, nil + } + // No timestamp, means failed to verify root. + if res.Cmp(big.NewInt(0)) == 0 { + c.lggr.Errorw("Root does not verify", "messages", report.Messages) + return false, nil + } + return true, nil +} + +func (c *CommitStore) RegisterFilters() error { + return logpollerutil.RegisterLpFilters(c.lp, c.filters) +} + +func NewCommitStore(lggr logger.Logger, addr common.Address, ec client.Client, lp logpoller.LogPoller) (*CommitStore, error) { + commitStore, err := commit_store_1_2_0.NewCommitStore(addr, ec) + if err != nil { + return nil, err + } + commitStoreABI := abihelpers.MustParseABI(commit_store_1_2_0.CommitStoreABI) + eventSig := abihelpers.MustGetEventID(v1_0_0.ReportAccepted, commitStoreABI) + commitReportArgs := abihelpers.MustGetEventInputs(v1_0_0.ReportAccepted, commitStoreABI) + filters := []logpoller.Filter{ + { + Name: logpoller.FilterName(v1_0_0.EXEC_REPORT_ACCEPTS, addr.String()), + EventSigs: []common.Hash{eventSig}, + Addresses: []common.Address{addr}, + Retention: ccipdata.CommitExecLogsRetention, + }, + } + + return &CommitStore{ + commitStore: commitStore, + address: addr, + lggr: lggr, + lp: lp, + + // Note that sourceMaxGasPrice and estimator now have explicit setters (CCIP-2493) + + filters: filters, + commitReportArgs: commitReportArgs, + reportAcceptedSig: eventSig, + // offset || priceUpdatesOffset || minSeqNum || maxSeqNum || merkleRoot + reportAcceptedMaxSeqIndex: 3, + configMu: sync.RWMutex{}, + + // The fields below are initially empty and set on ChangeConfig method + offchainConfig: cciptypes.CommitOffchainConfig{}, + gasPriceEstimator: nil, + }, nil +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/commit_store_test.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/commit_store_test.go new file mode 100644 index 00000000000..8b293096339 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/commit_store_test.go @@ -0,0 +1,224 @@ +package v1_2_0 + +import ( + "math/big" + "math/rand" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/chainlink-common/pkg/config" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller/mocks" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils" + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" + "github.com/smartcontractkit/chainlink/v2/core/logger" + ccipconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config" +) + +func TestCommitReportEncoding(t *testing.T) { + t.Parallel() + ctx := testutils.Context(t) + report := cciptypes.CommitStoreReport{ + TokenPrices: []cciptypes.TokenPrice{ + { + Token: cciptypes.Address(utils.RandomAddress().String()), + Value: big.NewInt(9e18), + }, + { + Token: 
cciptypes.Address(utils.RandomAddress().String()), + Value: big.NewInt(1e18), + }, + }, + GasPrices: []cciptypes.GasPrice{ + { + DestChainSelector: rand.Uint64(), + Value: big.NewInt(2000e9), + }, + { + DestChainSelector: rand.Uint64(), + Value: big.NewInt(3000e9), + }, + }, + MerkleRoot: [32]byte{123}, + Interval: cciptypes.CommitStoreInterval{Min: 1, Max: 10}, + } + + c, err := NewCommitStore(logger.TestLogger(t), utils.RandomAddress(), nil, mocks.NewLogPoller(t)) + assert.NoError(t, err) + + encodedReport, err := c.EncodeCommitReport(ctx, report) + require.NoError(t, err) + assert.Greater(t, len(encodedReport), 0) + + decodedReport, err := c.DecodeCommitReport(ctx, encodedReport) + require.NoError(t, err) + require.Equal(t, report, decodedReport) +} + +func TestCommitStoreV120ffchainConfigEncoding(t *testing.T) { + t.Parallel() + validConfig := JSONCommitOffchainConfig{ + SourceFinalityDepth: 3, + DestFinalityDepth: 4, + GasPriceHeartBeat: *config.MustNewDuration(1 * time.Minute), + DAGasPriceDeviationPPB: 10, + ExecGasPriceDeviationPPB: 11, + TokenPriceHeartBeat: *config.MustNewDuration(2 * time.Minute), + TokenPriceDeviationPPB: 12, + InflightCacheExpiry: *config.MustNewDuration(3 * time.Minute), + } + + require.NoError(t, validConfig.Validate()) + + tests := []struct { + name string + want JSONCommitOffchainConfig + errPattern string + }{ + { + name: "legacy offchain config format parses", + want: validConfig, + }, + { + name: "can omit finality depth", + want: modifyCopy(validConfig, func(c *JSONCommitOffchainConfig) { + c.SourceFinalityDepth = 0 + c.DestFinalityDepth = 0 + }), + }, + { + name: "can set PriceReportingDisabled", + want: modifyCopy(validConfig, func(c *JSONCommitOffchainConfig) { + c.PriceReportingDisabled = true + }), + }, + { + name: "must set GasPriceHeartBeat", + want: modifyCopy(validConfig, func(c *JSONCommitOffchainConfig) { + c.GasPriceHeartBeat = *config.MustNewDuration(0) + }), + errPattern: "GasPriceHeartBeat", + }, + { + name: "must set ExecGasPriceDeviationPPB", + want: modifyCopy(validConfig, func(c *JSONCommitOffchainConfig) { + c.ExecGasPriceDeviationPPB = 0 + }), + errPattern: "ExecGasPriceDeviationPPB", + }, + { + name: "must set TokenPriceHeartBeat", + want: modifyCopy(validConfig, func(c *JSONCommitOffchainConfig) { + c.TokenPriceHeartBeat = *config.MustNewDuration(0) + }), + errPattern: "TokenPriceHeartBeat", + }, + { + name: "must set TokenPriceDeviationPPB", + want: modifyCopy(validConfig, func(c *JSONCommitOffchainConfig) { + c.TokenPriceDeviationPPB = 0 + }), + errPattern: "TokenPriceDeviationPPB", + }, + { + name: "must set InflightCacheExpiry", + want: modifyCopy(validConfig, func(c *JSONCommitOffchainConfig) { + c.InflightCacheExpiry = *config.MustNewDuration(0) + }), + errPattern: "InflightCacheExpiry", + }, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + exp := tc.want + encode, err := ccipconfig.EncodeOffchainConfig(&exp) + require.NoError(t, err) + got, err := ccipconfig.DecodeOffchainConfig[JSONCommitOffchainConfig](encode) + + if tc.errPattern != "" { + require.ErrorContains(t, err, tc.errPattern) + } else { + require.NoError(t, err) + require.Equal(t, tc.want, got) + } + }) + } +} + +func TestCommitStoreV120ffchainConfigDecodingCompatibility(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + config []byte + priceReportingDisabled bool + }{ + { + name: "with MaxGasPrice", + config: []byte(`{ + "SourceFinalityDepth": 3, + "DestFinalityDepth": 4, + "GasPriceHeartBeat": "60s", + 
"DAGasPriceDeviationPPB": 10, + "ExecGasPriceDeviationPPB": 11, + "TokenPriceHeartBeat": "120s", + "TokenPriceDeviationPPB": 12, + "MaxGasPrice": 100000000, + "SourceMaxGasPrice": 100000000, + "InflightCacheExpiry": "180s" + }`), + priceReportingDisabled: false, + }, + { + name: "without MaxGasPrice", + config: []byte(`{ + "SourceFinalityDepth": 3, + "DestFinalityDepth": 4, + "GasPriceHeartBeat": "60s", + "DAGasPriceDeviationPPB": 10, + "ExecGasPriceDeviationPPB": 11, + "TokenPriceHeartBeat": "120s", + "TokenPriceDeviationPPB": 12, + "InflightCacheExpiry": "180s" + }`), + priceReportingDisabled: false, + }, + { + name: "with PriceReportingDisabled", + config: []byte(`{ + "SourceFinalityDepth": 3, + "DestFinalityDepth": 4, + "GasPriceHeartBeat": "60s", + "DAGasPriceDeviationPPB": 10, + "ExecGasPriceDeviationPPB": 11, + "TokenPriceHeartBeat": "120s", + "TokenPriceDeviationPPB": 12, + "InflightCacheExpiry": "180s", + "PriceReportingDisabled": true + }`), + priceReportingDisabled: true, + }, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + decoded, err := ccipconfig.DecodeOffchainConfig[JSONCommitOffchainConfig](tc.config) + require.NoError(t, err) + require.Equal(t, JSONCommitOffchainConfig{ + SourceFinalityDepth: 3, + DestFinalityDepth: 4, + GasPriceHeartBeat: *config.MustNewDuration(1 * time.Minute), + DAGasPriceDeviationPPB: 10, + ExecGasPriceDeviationPPB: 11, + TokenPriceHeartBeat: *config.MustNewDuration(2 * time.Minute), + TokenPriceDeviationPPB: 12, + InflightCacheExpiry: *config.MustNewDuration(3 * time.Minute), + PriceReportingDisabled: tc.priceReportingDisabled, + }, decoded) + }) + } +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/hasher.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/hasher.go new file mode 100644 index 00000000000..4739c946c36 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/hasher.go @@ -0,0 +1,101 @@ +package v1_2_0 + +import ( + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + + "github.com/smartcontractkit/chainlink-common/pkg/hashutil" + + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_onramp_1_2_0" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0" +) + +const ( + MetaDataHashPrefix = "EVM2EVMMessageHashV2" +) + +type LeafHasher struct { + metaDataHash [32]byte + ctx hashutil.Hasher[[32]byte] + onRamp *evm_2_evm_onramp_1_2_0.EVM2EVMOnRamp +} + +func NewLeafHasher(sourceChainSelector uint64, destChainSelector uint64, onRampId common.Address, ctx hashutil.Hasher[[32]byte], onRamp *evm_2_evm_onramp_1_2_0.EVM2EVMOnRamp) *LeafHasher { + return &LeafHasher{ + metaDataHash: v1_0_0.GetMetaDataHash(ctx, ctx.Hash([]byte(MetaDataHashPrefix)), sourceChainSelector, onRampId, destChainSelector), + ctx: ctx, + onRamp: onRamp, + } +} + +func (t *LeafHasher) HashLeaf(log types.Log) ([32]byte, error) { + msg, err := t.onRamp.ParseCCIPSendRequested(log) + if err != nil { + return [32]byte{}, err + } + message := msg.Message + encodedTokens, err := abihelpers.ABIEncode( + `[ +{"components": [{"name":"token","type":"address"},{"name":"amount","type":"uint256"}], "type":"tuple[]"}]`, message.TokenAmounts) + if err != nil { + return [32]byte{}, err + } + + bytesArray, err := abi.NewType("bytes[]", "bytes[]", nil) + if err != nil 
{ + return [32]byte{}, err + } + + encodedSourceTokenData, err := abi.Arguments{abi.Argument{Type: bytesArray}}.PackValues([]interface{}{message.SourceTokenData}) + if err != nil { + return [32]byte{}, err + } + + packedFixedSizeValues, err := abihelpers.ABIEncode( + `[ +{"name": "sender", "type":"address"}, +{"name": "receiver", "type":"address"}, +{"name": "sequenceNumber", "type":"uint64"}, +{"name": "gasLimit", "type":"uint256"}, +{"name": "strict", "type":"bool"}, +{"name": "nonce", "type":"uint64"}, +{"name": "feeToken","type": "address"}, +{"name": "feeTokenAmount","type": "uint256"} +]`, + message.Sender, + message.Receiver, + message.SequenceNumber, + message.GasLimit, + message.Strict, + message.Nonce, + message.FeeToken, + message.FeeTokenAmount, + ) + if err != nil { + return [32]byte{}, err + } + fixedSizeValuesHash := t.ctx.Hash(packedFixedSizeValues) + + packedValues, err := abihelpers.ABIEncode( + `[ +{"name": "leafDomainSeparator","type":"bytes1"}, +{"name": "metadataHash", "type":"bytes32"}, +{"name": "fixedSizeValuesHash", "type":"bytes32"}, +{"name": "dataHash", "type":"bytes32"}, +{"name": "tokenAmountsHash", "type":"bytes32"}, +{"name": "sourceTokenDataHash", "type":"bytes32"} +]`, + v1_0_0.LeafDomainSeparator, + t.metaDataHash, + fixedSizeValuesHash, + t.ctx.Hash(message.Data), + t.ctx.Hash(encodedTokens), + t.ctx.Hash(encodedSourceTokenData), + ) + if err != nil { + return [32]byte{}, err + } + return t.ctx.Hash(packedValues), nil +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/hasher_test.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/hasher_test.go new file mode 100644 index 00000000000..4bfbf7295e6 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/hasher_test.go @@ -0,0 +1,78 @@ +package v1_2_0 + +import ( + "encoding/hex" + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/chainlink-common/pkg/hashutil" + + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_onramp_1_2_0" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers" +) + +func TestHasherV1_2_0(t *testing.T) { + sourceChainSelector, destChainSelector := uint64(1), uint64(4) + onRampAddress := common.HexToAddress("0x5550000000000000000000000000000000000001") + onRampABI := abihelpers.MustParseABI(evm_2_evm_onramp_1_2_0.EVM2EVMOnRampABI) + + hashingCtx := hashutil.NewKeccak() + ramp, err := evm_2_evm_onramp_1_2_0.NewEVM2EVMOnRamp(onRampAddress, nil) + require.NoError(t, err) + hasher := NewLeafHasher(sourceChainSelector, destChainSelector, onRampAddress, hashingCtx, ramp) + + message := evm_2_evm_onramp_1_2_0.InternalEVM2EVMMessage{ + SourceChainSelector: sourceChainSelector, + Sender: common.HexToAddress("0x1110000000000000000000000000000000000001"), + Receiver: common.HexToAddress("0x2220000000000000000000000000000000000001"), + SequenceNumber: 1337, + GasLimit: big.NewInt(100), + Strict: false, + Nonce: 1337, + FeeToken: common.Address{}, + FeeTokenAmount: big.NewInt(1), + Data: []byte{}, + TokenAmounts: []evm_2_evm_onramp_1_2_0.ClientEVMTokenAmount{{Token: common.HexToAddress("0x4440000000000000000000000000000000000001"), Amount: big.NewInt(12345678900)}}, + SourceTokenData: [][]byte{}, + MessageId: [32]byte{}, + } + + data, err := onRampABI.Events[CCIPSendRequestedEventName].Inputs.Pack(message) + require.NoError(t, err) + hash, err 
:= hasher.HashLeaf(types.Log{Topics: []common.Hash{CCIPSendRequestEventSig}, Data: data}) + require.NoError(t, err) + + // NOTE: Must match spec + require.Equal(t, "46ad031bfb052db2e4a2514fed8dc480b98e5ce4acb55d5640d91407e0d8a3e9", hex.EncodeToString(hash[:])) + + message = evm_2_evm_onramp_1_2_0.InternalEVM2EVMMessage{ + SourceChainSelector: sourceChainSelector, + Sender: common.HexToAddress("0x1110000000000000000000000000000000000001"), + Receiver: common.HexToAddress("0x2220000000000000000000000000000000000001"), + SequenceNumber: 1337, + GasLimit: big.NewInt(100), + Strict: false, + Nonce: 1337, + FeeToken: common.Address{}, + FeeTokenAmount: big.NewInt(1e12), + Data: []byte("foo bar baz"), + TokenAmounts: []evm_2_evm_onramp_1_2_0.ClientEVMTokenAmount{ + {Token: common.HexToAddress("0x4440000000000000000000000000000000000001"), Amount: big.NewInt(12345678900)}, + {Token: common.HexToAddress("0x6660000000000000000000000000000000000001"), Amount: big.NewInt(4204242)}, + }, + SourceTokenData: [][]byte{{0x2, 0x1}}, + MessageId: [32]byte{}, + } + + data, err = onRampABI.Events[CCIPSendRequestedEventName].Inputs.Pack(message) + require.NoError(t, err) + hash, err = hasher.HashLeaf(types.Log{Topics: []common.Hash{CCIPSendRequestEventSig}, Data: data}) + require.NoError(t, err) + + // NOTE: Must match spec + require.Equal(t, "4362a13a42e52ff5ce4324e7184dc7aa41704c3146bc842d35d95b94b32a78b6", hex.EncodeToString(hash[:])) +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/offramp.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/offramp.go new file mode 100644 index 00000000000..fa00894b380 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/offramp.go @@ -0,0 +1,340 @@ +package v1_2_0 + +import ( + "context" + "fmt" + "math/big" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + + "github.com/smartcontractkit/chainlink-common/pkg/config" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_offramp_1_2_0" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/router" + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers" + ccipconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/prices" +) + +var ( + abiOffRamp = abihelpers.MustParseABI(evm_2_evm_offramp_1_2_0.EVM2EVMOffRampABI) + _ ccipdata.OffRampReader = &OffRamp{} +) + +type ExecOnchainConfig evm_2_evm_offramp_1_2_0.EVM2EVMOffRampDynamicConfig + +func (d ExecOnchainConfig) AbiString() string { + return ` + [ + { + "components": [ + {"name": "permissionLessExecutionThresholdSeconds", "type": "uint32"}, + {"name": "router", "type": 
"address"}, + {"name": "priceRegistry", "type": "address"}, + {"name": "maxNumberOfTokensPerMsg", "type": "uint16"}, + {"name": "maxDataBytes", "type": "uint32"}, + {"name": "maxPoolReleaseOrMintGas", "type": "uint32"} + ], + "type": "tuple" + } + ]` +} + +func (d ExecOnchainConfig) Validate() error { + if d.PermissionLessExecutionThresholdSeconds == 0 { + return errors.New("must set PermissionLessExecutionThresholdSeconds") + } + if d.Router == (common.Address{}) { + return errors.New("must set Router address") + } + if d.PriceRegistry == (common.Address{}) { + return errors.New("must set PriceRegistry address") + } + if d.MaxNumberOfTokensPerMsg == 0 { + return errors.New("must set MaxNumberOfTokensPerMsg") + } + if d.MaxPoolReleaseOrMintGas == 0 { + return errors.New("must set MaxPoolReleaseOrMintGas") + } + return nil +} + +// JSONExecOffchainConfig is the configuration for nodes executing committed CCIP messages (v1.2). +// It comes from the OffchainConfig field of the corresponding OCR2 plugin configuration. +// NOTE: do not change the JSON format of this struct without consulting with the RDD people first. +type JSONExecOffchainConfig struct { + // SourceFinalityDepth indicates how many confirmations a transaction should get on the source chain event before we consider it finalized. + // + // Deprecated: we now use the source chain finality instead. + SourceFinalityDepth uint32 + // See [ccipdata.ExecOffchainConfig.DestOptimisticConfirmations] + DestOptimisticConfirmations uint32 + // DestFinalityDepth indicates how many confirmations a transaction should get on the destination chain event before we consider it finalized. + // + // Deprecated: we now use the destination chain finality instead. + DestFinalityDepth uint32 + // See [ccipdata.ExecOffchainConfig.BatchGasLimit] + BatchGasLimit uint32 + // See [ccipdata.ExecOffchainConfig.RelativeBoostPerWaitHour] + RelativeBoostPerWaitHour float64 + // See [ccipdata.ExecOffchainConfig.InflightCacheExpiry] + InflightCacheExpiry config.Duration + // See [ccipdata.ExecOffchainConfig.RootSnoozeTime] + RootSnoozeTime config.Duration + // See [ccipdata.ExecOffchainConfig.BatchingStrategyID] + BatchingStrategyID uint32 + // See [ccipdata.ExecOffchainConfig.MessageVisibilityInterval] + MessageVisibilityInterval config.Duration +} + +func (c JSONExecOffchainConfig) Validate() error { + if c.DestOptimisticConfirmations == 0 { + return errors.New("must set DestOptimisticConfirmations") + } + if c.BatchGasLimit == 0 { + return errors.New("must set BatchGasLimit") + } + if c.RelativeBoostPerWaitHour == 0 { + return errors.New("must set RelativeBoostPerWaitHour") + } + if c.InflightCacheExpiry.Duration() == 0 { + return errors.New("must set InflightCacheExpiry") + } + if c.RootSnoozeTime.Duration() == 0 { + return errors.New("must set RootSnoozeTime") + } + + return nil +} + +// OffRamp In 1.2 we have a different estimator impl +type OffRamp struct { + *v1_0_0.OffRamp + offRampV120 evm_2_evm_offramp_1_2_0.EVM2EVMOffRampInterface +} + +func (o *OffRamp) CurrentRateLimiterState(ctx context.Context) (cciptypes.TokenBucketRateLimit, error) { + bucket, err := o.offRampV120.CurrentRateLimiterState(&bind.CallOpts{Context: ctx}) + if err != nil { + return cciptypes.TokenBucketRateLimit{}, err + } + return cciptypes.TokenBucketRateLimit{ + Tokens: bucket.Tokens, + LastUpdated: bucket.LastUpdated, + IsEnabled: bucket.IsEnabled, + Capacity: bucket.Capacity, + Rate: bucket.Rate, + }, nil +} + +func (o *OffRamp) GetRouter(ctx context.Context) (cciptypes.Address, 
error) { + dynamicConfig, err := o.offRampV120.GetDynamicConfig(&bind.CallOpts{Context: ctx}) + if err != nil { + return "", err + } + return ccipcalc.EvmAddrToGeneric(dynamicConfig.Router), nil +} + +func (o *OffRamp) ChangeConfig(ctx context.Context, onchainConfigBytes []byte, offchainConfigBytes []byte) (cciptypes.Address, cciptypes.Address, error) { + // Same as the v1.0.0 method, except for the ExecOnchainConfig type. + onchainConfigParsed, err := abihelpers.DecodeAbiStruct[ExecOnchainConfig](onchainConfigBytes) + if err != nil { + return "", "", err + } + + offchainConfigParsed, err := ccipconfig.DecodeOffchainConfig[JSONExecOffchainConfig](offchainConfigBytes) + if err != nil { + return "", "", err + } + destRouter, err := router.NewRouter(onchainConfigParsed.Router, o.Client) + if err != nil { + return "", "", err + } + destWrappedNative, err := destRouter.GetWrappedNative(nil) + if err != nil { + return "", "", err + } + offchainConfig := cciptypes.ExecOffchainConfig{ + DestOptimisticConfirmations: offchainConfigParsed.DestOptimisticConfirmations, + BatchGasLimit: offchainConfigParsed.BatchGasLimit, + RelativeBoostPerWaitHour: offchainConfigParsed.RelativeBoostPerWaitHour, + InflightCacheExpiry: offchainConfigParsed.InflightCacheExpiry, + RootSnoozeTime: offchainConfigParsed.RootSnoozeTime, + MessageVisibilityInterval: offchainConfigParsed.MessageVisibilityInterval, + BatchingStrategyID: offchainConfigParsed.BatchingStrategyID, + } + onchainConfig := cciptypes.ExecOnchainConfig{ + PermissionLessExecutionThresholdSeconds: time.Second * time.Duration(onchainConfigParsed.PermissionLessExecutionThresholdSeconds), + Router: cciptypes.Address(onchainConfigParsed.Router.String()), + } + priceEstimator := prices.NewDAGasPriceEstimator(o.Estimator, o.DestMaxGasPrice, 0, 0) + + o.UpdateDynamicConfig(onchainConfig, offchainConfig, priceEstimator) + + o.Logger.Infow("Starting exec plugin", + "offchainConfig", onchainConfigParsed, + "onchainConfig", offchainConfigParsed) + return cciptypes.Address(onchainConfigParsed.PriceRegistry.String()), + cciptypes.Address(destWrappedNative.String()), nil +} + +func EncodeExecutionReport(ctx context.Context, args abi.Arguments, report cciptypes.ExecReport) ([]byte, error) { + var msgs []evm_2_evm_offramp_1_2_0.InternalEVM2EVMMessage + for _, msg := range report.Messages { + var ta []evm_2_evm_offramp_1_2_0.ClientEVMTokenAmount + for _, tokenAndAmount := range msg.TokenAmounts { + evmAddrs, err := ccipcalc.GenericAddrsToEvm(tokenAndAmount.Token) + if err != nil { + return nil, err + } + ta = append(ta, evm_2_evm_offramp_1_2_0.ClientEVMTokenAmount{ + Token: evmAddrs[0], + Amount: tokenAndAmount.Amount, + }) + } + + evmAddrs, err := ccipcalc.GenericAddrsToEvm(msg.Sender, msg.Receiver, msg.FeeToken) + if err != nil { + return nil, err + } + + msgs = append(msgs, evm_2_evm_offramp_1_2_0.InternalEVM2EVMMessage{ + SourceChainSelector: msg.SourceChainSelector, + Sender: evmAddrs[0], + Receiver: evmAddrs[1], + SequenceNumber: msg.SequenceNumber, + GasLimit: msg.GasLimit, + Strict: msg.Strict, + Nonce: msg.Nonce, + FeeToken: evmAddrs[2], + FeeTokenAmount: msg.FeeTokenAmount, + Data: msg.Data, + TokenAmounts: ta, + MessageId: msg.MessageID, + // NOTE: this field is new in v1.2. 
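+			// Pre-1.2 messages do not populate it, so it is copied through unchanged here.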
+ SourceTokenData: msg.SourceTokenData, + }) + } + + rep := evm_2_evm_offramp_1_2_0.InternalExecutionReport{ + Messages: msgs, + OffchainTokenData: report.OffchainTokenData, + Proofs: report.Proofs, + ProofFlagBits: report.ProofFlagBits, + } + return args.PackValues([]interface{}{&rep}) +} + +func (o *OffRamp) EncodeExecutionReport(ctx context.Context, report cciptypes.ExecReport) ([]byte, error) { + return EncodeExecutionReport(ctx, o.ExecutionReportArgs, report) +} + +func DecodeExecReport(ctx context.Context, args abi.Arguments, report []byte) (cciptypes.ExecReport, error) { + unpacked, err := args.Unpack(report) + if err != nil { + return cciptypes.ExecReport{}, err + } + if len(unpacked) == 0 { + return cciptypes.ExecReport{}, errors.New("assumptionViolation: expected at least one element") + } + // Must be anonymous struct here + erStruct, ok := unpacked[0].(struct { + Messages []struct { + SourceChainSelector uint64 `json:"sourceChainSelector"` + Sender common.Address `json:"sender"` + Receiver common.Address `json:"receiver"` + SequenceNumber uint64 `json:"sequenceNumber"` + GasLimit *big.Int `json:"gasLimit"` + Strict bool `json:"strict"` + Nonce uint64 `json:"nonce"` + FeeToken common.Address `json:"feeToken"` + FeeTokenAmount *big.Int `json:"feeTokenAmount"` + Data []uint8 `json:"data"` + TokenAmounts []struct { + Token common.Address `json:"token"` + Amount *big.Int `json:"amount"` + } `json:"tokenAmounts"` + SourceTokenData [][]uint8 `json:"sourceTokenData"` + MessageId [32]uint8 `json:"messageId"` + } `json:"messages"` + OffchainTokenData [][][]uint8 `json:"offchainTokenData"` + Proofs [][32]uint8 `json:"proofs"` + ProofFlagBits *big.Int `json:"proofFlagBits"` + }) + if !ok { + return cciptypes.ExecReport{}, fmt.Errorf("got %T", unpacked[0]) + } + messages := make([]cciptypes.EVM2EVMMessage, 0, len(erStruct.Messages)) + for _, msg := range erStruct.Messages { + var tokensAndAmounts []cciptypes.TokenAmount + for _, tokenAndAmount := range msg.TokenAmounts { + tokensAndAmounts = append(tokensAndAmounts, cciptypes.TokenAmount{ + Token: cciptypes.Address(tokenAndAmount.Token.String()), + Amount: tokenAndAmount.Amount, + }) + } + messages = append(messages, cciptypes.EVM2EVMMessage{ + SequenceNumber: msg.SequenceNumber, + GasLimit: msg.GasLimit, + Nonce: msg.Nonce, + MessageID: msg.MessageId, + SourceChainSelector: msg.SourceChainSelector, + Sender: cciptypes.Address(msg.Sender.String()), + Receiver: cciptypes.Address(msg.Receiver.String()), + Strict: msg.Strict, + FeeToken: cciptypes.Address(msg.FeeToken.String()), + FeeTokenAmount: msg.FeeTokenAmount, + Data: msg.Data, + TokenAmounts: tokensAndAmounts, + SourceTokenData: msg.SourceTokenData, + // TODO: Not needed for plugins, but should be recomputed for consistency. + // Requires the offramp knowing about onramp version + Hash: [32]byte{}, + }) + } + + // Unpack will populate with big.Int{false, } for 0 values, + // which is different from the expected big.NewInt(0). Rebuild to the expected value for this case. 
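+	// Hence ProofFlagBits is rebuilt with new(big.Int).SetBytes instead of being returned as unpacked.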
+ return cciptypes.ExecReport{ + Messages: messages, + OffchainTokenData: erStruct.OffchainTokenData, + Proofs: erStruct.Proofs, + ProofFlagBits: new(big.Int).SetBytes(erStruct.ProofFlagBits.Bytes()), + }, nil +} + +func (o *OffRamp) DecodeExecutionReport(ctx context.Context, report []byte) (cciptypes.ExecReport, error) { + return DecodeExecReport(ctx, o.ExecutionReportArgs, report) +} + +func NewOffRamp(lggr logger.Logger, addr common.Address, ec client.Client, lp logpoller.LogPoller, estimator gas.EvmFeeEstimator, destMaxGasPrice *big.Int) (*OffRamp, error) { + v100, err := v1_0_0.NewOffRamp(lggr, addr, ec, lp, estimator, destMaxGasPrice) + if err != nil { + return nil, err + } + + offRamp, err := evm_2_evm_offramp_1_2_0.NewEVM2EVMOffRamp(addr, ec) + if err != nil { + return nil, err + } + + v100.ExecutionReportArgs = abihelpers.MustGetMethodInputs("manuallyExecute", abiOffRamp)[:1] + + return &OffRamp{ + OffRamp: v100, + offRampV120: offRamp, + }, nil +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/offramp_reader_test.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/offramp_reader_test.go new file mode 100644 index 00000000000..f87fc8842f6 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/offramp_reader_test.go @@ -0,0 +1,38 @@ +package v1_2_0_test + +import ( + "math/big" + "testing" + + "github.com/stretchr/testify/require" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + lpmocks "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller/mocks" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils" + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0" +) + +func TestExecutionReportEncodingV120(t *testing.T) { + // Note could consider some fancier testing here (fuzz/property) + // but I think that would essentially be testing geth's abi library + // as our encode/decode is a thin wrapper around that. 
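+	// A single round-trip through EncodeExecutionReport/DecodeExecutionReport is enough to catch packing regressions.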
+ report := cciptypes.ExecReport{ + Messages: []cciptypes.EVM2EVMMessage{}, + OffchainTokenData: [][][]byte{{}}, + Proofs: [][32]byte{testutils.Random32Byte()}, + ProofFlagBits: big.NewInt(133), + } + + offRamp, err := v1_2_0.NewOffRamp(logger.TestLogger(t), utils.RandomAddress(), nil, lpmocks.NewLogPoller(t), nil, nil) + require.NoError(t, err) + + ctx := testutils.Context(t) + encodeExecutionReport, err := offRamp.EncodeExecutionReport(ctx, report) + require.NoError(t, err) + decodeCommitReport, err := offRamp.DecodeExecutionReport(ctx, encodeExecutionReport) + require.NoError(t, err) + require.Equal(t, report.Proofs, decodeCommitReport.Proofs) + require.Equal(t, report, decodeCommitReport) +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/offramp_reader_unit_test.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/offramp_reader_unit_test.go new file mode 100644 index 00000000000..98454ce59b2 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/offramp_reader_unit_test.go @@ -0,0 +1,36 @@ +package v1_2_0 + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_offramp_1_2_0" + mock_contracts "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/mocks/v1_2_0" + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc" +) + +func TestGetRouter(t *testing.T) { + routerAddr := utils.RandomAddress() + + mockOffRamp := mock_contracts.NewEVM2EVMOffRampInterface(t) + mockOffRamp.On("GetDynamicConfig", mock.Anything).Return(evm_2_evm_offramp_1_2_0.EVM2EVMOffRampDynamicConfig{ + Router: routerAddr, + }, nil) + + offRamp := OffRamp{ + offRampV120: mockOffRamp, + } + + ctx := testutils.Context(t) + gotRouterAddr, err := offRamp.GetRouter(ctx) + require.NoError(t, err) + + gotRouterEvmAddr, err := ccipcalc.GenericAddrToEvm(gotRouterAddr) + require.NoError(t, err) + assert.Equal(t, routerAddr, gotRouterEvmAddr) +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/offramp_test.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/offramp_test.go new file mode 100644 index 00000000000..7d174d5db71 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/offramp_test.go @@ -0,0 +1,173 @@ +package v1_2_0 + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/chainlink-common/pkg/config" + + ccipconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config" +) + +func modifyCopy[T any](c T, f func(c *T)) T { + f(&c) + return c +} + +func TestExecOffchainConfig120_Encoding(t *testing.T) { + t.Parallel() + validConfig := JSONExecOffchainConfig{ + SourceFinalityDepth: 3, + DestOptimisticConfirmations: 6, + DestFinalityDepth: 3, + BatchGasLimit: 5_000_000, + RelativeBoostPerWaitHour: 0.07, + InflightCacheExpiry: *config.MustNewDuration(64 * time.Second), + RootSnoozeTime: *config.MustNewDuration(128 * time.Minute), + BatchingStrategyID: 0, + } + + tests := []struct { + name string + want JSONExecOffchainConfig + errPattern string + }{ + { + name: "legacy offchain config format parses", + want: validConfig, + }, + { + name: "can omit finality depth", + want: 
modifyCopy(validConfig, func(c *JSONExecOffchainConfig) { + c.SourceFinalityDepth = 0 + c.DestFinalityDepth = 0 + }), + }, + { + name: "must set BatchGasLimit", + want: modifyCopy(validConfig, func(c *JSONExecOffchainConfig) { + c.BatchGasLimit = 0 + }), + errPattern: "BatchGasLimit", + }, + { + name: "must set DestOptimisticConfirmations", + want: modifyCopy(validConfig, func(c *JSONExecOffchainConfig) { + c.DestOptimisticConfirmations = 0 + }), + errPattern: "DestOptimisticConfirmations", + }, + { + name: "must set RelativeBoostPerWaitHour", + want: modifyCopy(validConfig, func(c *JSONExecOffchainConfig) { + c.RelativeBoostPerWaitHour = 0 + }), + errPattern: "RelativeBoostPerWaitHour", + }, + { + name: "must set InflightCacheExpiry", + want: modifyCopy(validConfig, func(c *JSONExecOffchainConfig) { + c.InflightCacheExpiry = *config.MustNewDuration(0) + }), + errPattern: "InflightCacheExpiry", + }, + { + name: "must set RootSnoozeTime", + want: modifyCopy(validConfig, func(c *JSONExecOffchainConfig) { + c.RootSnoozeTime = *config.MustNewDuration(0) + }), + errPattern: "RootSnoozeTime", + }, + { + name: "must set BatchingStrategyId", + want: modifyCopy(validConfig, func(c *JSONExecOffchainConfig) { + c.BatchingStrategyID = 1 + }), + }, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + exp := tc.want + encode, err := ccipconfig.EncodeOffchainConfig(&exp) + require.NoError(t, err) + got, err := ccipconfig.DecodeOffchainConfig[JSONExecOffchainConfig](encode) + + if tc.errPattern != "" { + require.ErrorContains(t, err, tc.errPattern) + } else { + require.NoError(t, err) + require.Equal(t, tc.want, got) + } + }) + } +} + +func TestExecOffchainConfig120_ParseRawJson(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + config []byte + }{ + { + name: "with MaxGasPrice", + config: []byte(`{ + "DestOptimisticConfirmations": 6, + "BatchGasLimit": 5000000, + "RelativeBoostPerWaitHour": 0.07, + "MaxGasPrice": 200000000000, + "InflightCacheExpiry": "64s", + "RootSnoozeTime": "128m" + }`), + }, + { + name: "without MaxGasPrice", + config: []byte(`{ + "DestOptimisticConfirmations": 6, + "BatchGasLimit": 5000000, + "RelativeBoostPerWaitHour": 0.07, + "InflightCacheExpiry": "64s", + "RootSnoozeTime": "128m" + }`), + }, + { + name: "with BatchingStrategyId", + config: []byte(`{ + "DestOptimisticConfirmations": 6, + "BatchGasLimit": 5000000, + "RelativeBoostPerWaitHour": 0.07, + "InflightCacheExpiry": "64s", + "RootSnoozeTime": "128m", + "BatchingStrategyId": 1 + }`), + }, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + decoded, err := ccipconfig.DecodeOffchainConfig[JSONExecOffchainConfig](tc.config) + require.NoError(t, err) + + if tc.name == "with BatchingStrategyId" { + require.Equal(t, JSONExecOffchainConfig{ + DestOptimisticConfirmations: 6, + BatchGasLimit: 5_000_000, + RelativeBoostPerWaitHour: 0.07, + InflightCacheExpiry: *config.MustNewDuration(64 * time.Second), + RootSnoozeTime: *config.MustNewDuration(128 * time.Minute), + BatchingStrategyID: 1, // Actual value + }, decoded) + } else { + require.Equal(t, JSONExecOffchainConfig{ + DestOptimisticConfirmations: 6, + BatchGasLimit: 5_000_000, + RelativeBoostPerWaitHour: 0.07, + InflightCacheExpiry: *config.MustNewDuration(64 * time.Second), + RootSnoozeTime: *config.MustNewDuration(128 * time.Minute), + BatchingStrategyID: 0, // Default + }, decoded) + } + }) + } +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/onramp.go 
b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/onramp.go new file mode 100644 index 00000000000..f727d7fd5fa --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/onramp.go @@ -0,0 +1,255 @@ +package v1_2_0 + +import ( + "context" + "fmt" + "strings" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + + "github.com/smartcontractkit/chainlink-common/pkg/hashutil" + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/arm_contract" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_onramp_1_2_0" + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/cache" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/logpollerutil" +) + +var ( + // Backwards compat for integration tests + CCIPSendRequestEventSig common.Hash + ConfigSetEventSig common.Hash +) + +const ( + CCIPSendRequestSeqNumIndex = 4 + CCIPSendRequestedEventName = "CCIPSendRequested" + ConfigSetEventName = "ConfigSet" +) + +func init() { + onRampABI, err := abi.JSON(strings.NewReader(evm_2_evm_onramp_1_2_0.EVM2EVMOnRampABI)) + if err != nil { + panic(err) + } + CCIPSendRequestEventSig = abihelpers.MustGetEventID(CCIPSendRequestedEventName, onRampABI) + ConfigSetEventSig = abihelpers.MustGetEventID(ConfigSetEventName, onRampABI) +} + +var _ ccipdata.OnRampReader = &OnRamp{} + +// Significant change in 1.2: +// - CCIPSendRequested event signature has changed +type OnRamp struct { + onRamp *evm_2_evm_onramp_1_2_0.EVM2EVMOnRamp + address common.Address + lggr logger.Logger + lp logpoller.LogPoller + leafHasher ccipdata.LeafHasherInterface[[32]byte] + client client.Client + sendRequestedEventSig common.Hash + sendRequestedSeqNumberWord int + filters []logpoller.Filter + cachedSourcePriceRegistryAddress cache.AutoSync[cciptypes.Address] + // Static config can be cached, because it's never expected to change. 
+ // The only way to change that is through the contract's constructor (redeployment) + cachedStaticConfig cache.OnceCtxFunction[evm_2_evm_onramp_1_2_0.EVM2EVMOnRampStaticConfig] + cachedRmnContract cache.OnceCtxFunction[*arm_contract.ARMContract] +} + +func NewOnRamp(lggr logger.Logger, sourceSelector, destSelector uint64, onRampAddress common.Address, sourceLP logpoller.LogPoller, source client.Client) (*OnRamp, error) { + onRamp, err := evm_2_evm_onramp_1_2_0.NewEVM2EVMOnRamp(onRampAddress, source) + if err != nil { + return nil, err + } + // Subscribe to the relevant logs + // Note we can keep the same prefix across 1.0/1.1 and 1.2 because the onramp addresses will be different + filters := []logpoller.Filter{ + { + Name: logpoller.FilterName(ccipdata.COMMIT_CCIP_SENDS, onRampAddress), + EventSigs: []common.Hash{CCIPSendRequestEventSig}, + Addresses: []common.Address{onRampAddress}, + Retention: ccipdata.CommitExecLogsRetention, + }, + { + Name: logpoller.FilterName(ccipdata.CONFIG_CHANGED, onRampAddress), + EventSigs: []common.Hash{ConfigSetEventSig}, + Addresses: []common.Address{onRampAddress}, + Retention: ccipdata.CacheEvictionLogsRetention, + }, + } + cachedStaticConfig := cache.OnceCtxFunction[evm_2_evm_onramp_1_2_0.EVM2EVMOnRampStaticConfig](func(ctx context.Context) (evm_2_evm_onramp_1_2_0.EVM2EVMOnRampStaticConfig, error) { + return onRamp.GetStaticConfig(&bind.CallOpts{Context: ctx}) + }) + cachedRmnContract := cache.OnceCtxFunction[*arm_contract.ARMContract](func(ctx context.Context) (*arm_contract.ARMContract, error) { + staticConfig, err := cachedStaticConfig(ctx) + if err != nil { + return nil, err + } + + return arm_contract.NewARMContract(staticConfig.ArmProxy, source) + }) + return &OnRamp{ + lggr: lggr, + client: source, + lp: sourceLP, + leafHasher: NewLeafHasher(sourceSelector, destSelector, onRampAddress, hashutil.NewKeccak(), onRamp), + onRamp: onRamp, + filters: filters, + address: onRampAddress, + sendRequestedSeqNumberWord: CCIPSendRequestSeqNumIndex, + sendRequestedEventSig: CCIPSendRequestEventSig, + cachedSourcePriceRegistryAddress: cache.NewLogpollerEventsBased[cciptypes.Address]( + sourceLP, + []common.Hash{ConfigSetEventSig}, + onRampAddress, + ), + cachedStaticConfig: cache.CallOnceOnNoError(cachedStaticConfig), + cachedRmnContract: cache.CallOnceOnNoError(cachedRmnContract), + }, nil +} + +func (o *OnRamp) Address(context.Context) (cciptypes.Address, error) { + return cciptypes.Address(o.onRamp.Address().String()), nil +} + +func (o *OnRamp) GetDynamicConfig(context.Context) (cciptypes.OnRampDynamicConfig, error) { + if o.onRamp == nil { + return cciptypes.OnRampDynamicConfig{}, fmt.Errorf("onramp not initialized") + } + config, err := o.onRamp.GetDynamicConfig(&bind.CallOpts{}) + if err != nil { + return cciptypes.OnRampDynamicConfig{}, fmt.Errorf("get dynamic config v1.2: %w", err) + } + return cciptypes.OnRampDynamicConfig{ + Router: cciptypes.Address(config.Router.String()), + MaxNumberOfTokensPerMsg: config.MaxNumberOfTokensPerMsg, + DestGasOverhead: config.DestGasOverhead, + DestGasPerPayloadByte: config.DestGasPerPayloadByte, + DestDataAvailabilityOverheadGas: config.DestDataAvailabilityOverheadGas, + DestGasPerDataAvailabilityByte: config.DestGasPerDataAvailabilityByte, + DestDataAvailabilityMultiplierBps: config.DestDataAvailabilityMultiplierBps, + PriceRegistry: cciptypes.Address(config.PriceRegistry.String()), + MaxDataBytes: config.MaxDataBytes, + MaxPerMsgGasLimit: config.MaxPerMsgGasLimit, + }, nil +} + +func (o *OnRamp) 
SourcePriceRegistryAddress(ctx context.Context) (cciptypes.Address, error) { + return o.cachedSourcePriceRegistryAddress.Get(ctx, func(ctx context.Context) (cciptypes.Address, error) { + c, err := o.GetDynamicConfig(ctx) + if err != nil { + return "", err + } + return c.PriceRegistry, nil + }) +} + +func (o *OnRamp) GetSendRequestsBetweenSeqNums(ctx context.Context, seqNumMin, seqNumMax uint64, finalized bool) ([]cciptypes.EVM2EVMMessageWithTxMeta, error) { + logs, err := o.lp.LogsDataWordRange( + ctx, + o.sendRequestedEventSig, + o.address, + o.sendRequestedSeqNumberWord, + logpoller.EvmWord(seqNumMin), + logpoller.EvmWord(seqNumMax), + ccipdata.LogsConfirmations(finalized), + ) + if err != nil { + return nil, err + } + + parsedLogs, err := ccipdata.ParseLogs[cciptypes.EVM2EVMMessage](logs, o.lggr, o.logToMessage) + if err != nil { + return nil, err + } + + res := make([]cciptypes.EVM2EVMMessageWithTxMeta, 0, len(logs)) + for _, log := range parsedLogs { + res = append(res, cciptypes.EVM2EVMMessageWithTxMeta{ + TxMeta: log.TxMeta, + EVM2EVMMessage: log.Data, + }) + } + + return res, nil +} + +func (o *OnRamp) RouterAddress(context.Context) (cciptypes.Address, error) { + config, err := o.onRamp.GetDynamicConfig(nil) + if err != nil { + return "", err + } + return cciptypes.Address(config.Router.String()), nil +} + +func (o *OnRamp) IsSourceChainHealthy(context.Context) (bool, error) { + if err := o.lp.Healthy(); err != nil { + return false, nil + } + return true, nil +} + +func (o *OnRamp) IsSourceCursed(ctx context.Context) (bool, error) { + arm, err := o.cachedRmnContract(ctx) + if err != nil { + return false, fmt.Errorf("initializing Arm contract through the ArmProxy: %w", err) + } + + cursed, err := arm.IsCursed0(&bind.CallOpts{Context: ctx}) + if err != nil { + return false, fmt.Errorf("checking if source Arm is cursed: %w", err) + } + return cursed, nil +} + +func (o *OnRamp) Close() error { + return logpollerutil.UnregisterLpFilters(o.lp, o.filters) +} + +func (o *OnRamp) RegisterFilters() error { + return logpollerutil.RegisterLpFilters(o.lp, o.filters) +} + +func (o *OnRamp) logToMessage(log types.Log) (*cciptypes.EVM2EVMMessage, error) { + msg, err := o.onRamp.ParseCCIPSendRequested(log) + if err != nil { + return nil, err + } + h, err := o.leafHasher.HashLeaf(log) + if err != nil { + return nil, err + } + tokensAndAmounts := make([]cciptypes.TokenAmount, len(msg.Message.TokenAmounts)) + for i, tokenAndAmount := range msg.Message.TokenAmounts { + tokensAndAmounts[i] = cciptypes.TokenAmount{ + Token: cciptypes.Address(tokenAndAmount.Token.String()), + Amount: tokenAndAmount.Amount, + } + } + + return &cciptypes.EVM2EVMMessage{ + SequenceNumber: msg.Message.SequenceNumber, + GasLimit: msg.Message.GasLimit, + Nonce: msg.Message.Nonce, + MessageID: msg.Message.MessageId, + SourceChainSelector: msg.Message.SourceChainSelector, + Sender: cciptypes.Address(msg.Message.Sender.String()), + Receiver: cciptypes.Address(msg.Message.Receiver.String()), + Strict: msg.Message.Strict, + FeeToken: cciptypes.Address(msg.Message.FeeToken.String()), + FeeTokenAmount: msg.Message.FeeTokenAmount, + Data: msg.Message.Data, + TokenAmounts: tokensAndAmounts, + SourceTokenData: msg.Message.SourceTokenData, // Breaking change 1.2 + Hash: h, + }, nil +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/onramp_test.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/onramp_test.go new file mode 100644 index 00000000000..ec912667ac7 --- /dev/null +++ 
b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/onramp_test.go @@ -0,0 +1,57 @@ +package v1_2_0 + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller/mocks" + evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils" + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers" +) + +func TestLogPollerClient_GetSendRequestsBetweenSeqNumsV1_2_0(t *testing.T) { + onRampAddr := utils.RandomAddress() + seqNum := uint64(100) + limit := uint64(10) + lggr := logger.TestLogger(t) + + tests := []struct { + name string + finalized bool + confirmations evmtypes.Confirmations + }{ + {"finalized", true, evmtypes.Finalized}, + {"unfinalized", false, evmtypes.Confirmations(0)}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + lp := mocks.NewLogPoller(t) + onRampV2, err := NewOnRamp(lggr, 1, 1, onRampAddr, lp, nil) + require.NoError(t, err) + + lp.On("LogsDataWordRange", + mock.Anything, + onRampV2.sendRequestedEventSig, + onRampAddr, + onRampV2.sendRequestedSeqNumberWord, + abihelpers.EvmWord(seqNum), + abihelpers.EvmWord(seqNum+limit), + tt.confirmations, + ).Once().Return([]logpoller.Log{}, nil) + + events, err1 := onRampV2.GetSendRequestsBetweenSeqNums(context.Background(), seqNum, seqNum+limit, tt.finalized) + assert.NoError(t, err1) + assert.Empty(t, events) + + lp.AssertExpectations(t) + }) + } +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/price_registry.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/price_registry.go new file mode 100644 index 00000000000..9aac30e6123 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/price_registry.go @@ -0,0 +1,68 @@ +package v1_2_0 + +import ( + "context" + "math/big" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/price_registry_1_2_0" + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0" +) + +var ( + _ ccipdata.PriceRegistryReader = &PriceRegistry{} +) + +type PriceRegistry struct { + *v1_0_0.PriceRegistry + pr *price_registry_1_2_0.PriceRegistry +} + +func NewPriceRegistry(lggr logger.Logger, priceRegistryAddr common.Address, lp logpoller.LogPoller, ec client.Client, registerFilters bool) (*PriceRegistry, error) { + v100, err := v1_0_0.NewPriceRegistry(lggr, priceRegistryAddr, lp, ec, registerFilters) + if err != nil { + return nil, err + } + priceRegistry, err := price_registry_1_2_0.NewPriceRegistry(priceRegistryAddr, ec) + if err != nil { + return nil, err + } + return &PriceRegistry{ + PriceRegistry: v100, + pr: 
priceRegistry, + }, nil +} + +// GetTokenPrices must be overridden to use the 1.2 ABI (return parameter changed from uint192 to uint224) +// See https://github.com/smartcontractkit/ccip/blob/ccip-develop/contracts/src/v0.8/ccip/PriceRegistry.sol#L141 +func (p *PriceRegistry) GetTokenPrices(ctx context.Context, wantedTokens []cciptypes.Address) ([]cciptypes.TokenPriceUpdate, error) { + evmAddrs, err := ccipcalc.GenericAddrsToEvm(wantedTokens...) + if err != nil { + return nil, err + } + + // Make call using 224 ABI. + tps, err := p.pr.GetTokenPrices(&bind.CallOpts{Context: ctx}, evmAddrs) + if err != nil { + return nil, err + } + var tpu []cciptypes.TokenPriceUpdate + for i, tp := range tps { + tpu = append(tpu, cciptypes.TokenPriceUpdate{ + TokenPrice: cciptypes.TokenPrice{ + Token: wantedTokens[i], + Value: tp.Value, + }, + TimestampUnixSec: big.NewInt(int64(tp.Timestamp)), + }) + } + return tpu, nil +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/test_helpers.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/test_helpers.go new file mode 100644 index 00000000000..e7972d5f5fe --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/test_helpers.go @@ -0,0 +1,48 @@ +package v1_2_0 + +import ( + "testing" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/price_registry" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc" +) + +// ApplyPriceRegistryUpdate is a helper function used in tests only. 
+func ApplyPriceRegistryUpdate(t *testing.T, user *bind.TransactOpts, addr common.Address, ec client.Client, gasPrices []cciptypes.GasPrice, tokenPrices []cciptypes.TokenPrice) common.Hash { + require.True(t, len(gasPrices) <= 2) + pr, err := price_registry.NewPriceRegistry(addr, ec) + require.NoError(t, err) + o, err := pr.Owner(nil) + require.NoError(t, err) + require.Equal(t, user.From, o) + var tps []price_registry.InternalTokenPriceUpdate + for _, tp := range tokenPrices { + evmAddrs, err1 := ccipcalc.GenericAddrsToEvm(tp.Token) + assert.NoError(t, err1) + tps = append(tps, price_registry.InternalTokenPriceUpdate{ + SourceToken: evmAddrs[0], + UsdPerToken: tp.Value, + }) + } + var gps []price_registry.InternalGasPriceUpdate + for _, gp := range gasPrices { + gps = append(gps, price_registry.InternalGasPriceUpdate{ + DestChainSelector: gp.DestChainSelector, + UsdPerUnitGas: gp.Value, + }) + } + tx, err := pr.UpdatePrices(user, price_registry.InternalPriceUpdates{ + TokenPriceUpdates: tps, + GasPriceUpdates: gps, + }) + require.NoError(t, err) + return tx.Hash() +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/token_pool.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/token_pool.go new file mode 100644 index 00000000000..a0850ebb2e9 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/token_pool.go @@ -0,0 +1,48 @@ +package v1_2_0 + +import ( + "github.com/ethereum/go-ethereum/common" + + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/rpclib" + + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/burn_mint_token_pool_1_2_0" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata" +) + +var ( + poolABI = abihelpers.MustParseABI(burn_mint_token_pool_1_2_0.BurnMintTokenPoolABI) +) + +var _ ccipdata.TokenPoolReader = &TokenPool{} + +type TokenPool struct { + addr common.Address + OffRampAddress common.Address + poolType string +} + +func NewTokenPool(poolType string, addr common.Address, offRampAddress common.Address) *TokenPool { + return &TokenPool{ + addr: addr, + OffRampAddress: offRampAddress, + poolType: poolType, + } +} + +func (p *TokenPool) Address() common.Address { + return p.addr +} + +func (p *TokenPool) Type() string { + return p.poolType +} + +func GetInboundTokenPoolRateLimitCall(tokenPoolAddress common.Address, offRampAddress common.Address) rpclib.EvmCall { + return rpclib.NewEvmCall( + poolABI, + "currentOffRampRateLimiterState", + tokenPoolAddress, + offRampAddress, + ) +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/token_pool_test.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/token_pool_test.go new file mode 100644 index 00000000000..3308ab05cec --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0/token_pool_test.go @@ -0,0 +1,24 @@ +package v1_2_0 + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils" +) + +func TestTokenPool(t *testing.T) { + addr := utils.RandomAddress() + offRamp := utils.RandomAddress() + poolType := "BurnMint" + + tokenPool := NewTokenPool(poolType, addr, offRamp) + + assert.Equal(t, addr, tokenPool.Address()) + assert.Equal(t, poolType, tokenPool.Type()) + + inboundRateLimitCall := GetInboundTokenPoolRateLimitCall(addr, offRamp) + + assert.Equal(t, 
"currentOffRampRateLimiterState", inboundRateLimitCall.MethodName()) +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_4_0/token_pool.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_4_0/token_pool.go new file mode 100644 index 00000000000..caf652b9e4e --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_4_0/token_pool.go @@ -0,0 +1,48 @@ +package v1_4_0 + +import ( + "github.com/ethereum/go-ethereum/common" + + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/rpclib" + + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/burn_mint_token_pool_1_4_0" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata" +) + +var ( + poolABI = abihelpers.MustParseABI(burn_mint_token_pool_1_4_0.BurnMintTokenPoolABI) +) + +var _ ccipdata.TokenPoolReader = &TokenPool{} + +type TokenPool struct { + addr common.Address + RemoteChainSelector uint64 + poolType string +} + +func NewTokenPool(poolType string, addr common.Address, remoteChainSelector uint64) *TokenPool { + return &TokenPool{ + addr: addr, + RemoteChainSelector: remoteChainSelector, + poolType: poolType, + } +} + +func (p *TokenPool) Address() common.Address { + return p.addr +} + +func (p *TokenPool) Type() string { + return p.poolType +} + +func GetInboundTokenPoolRateLimitCall(tokenPoolAddress common.Address, remoteChainSelector uint64) rpclib.EvmCall { + return rpclib.NewEvmCall( + poolABI, + "getCurrentInboundRateLimiterState", + tokenPoolAddress, + remoteChainSelector, + ) +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_4_0/token_pool_test.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_4_0/token_pool_test.go new file mode 100644 index 00000000000..8aaddc3312e --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_4_0/token_pool_test.go @@ -0,0 +1,24 @@ +package v1_4_0 + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils" +) + +func TestTokenPool(t *testing.T) { + addr := utils.RandomAddress() + chainSelector := uint64(2000) + poolType := "BurnMint" + + tokenPool := NewTokenPool(poolType, addr, chainSelector) + + assert.Equal(t, addr, tokenPool.Address()) + assert.Equal(t, poolType, tokenPool.Type()) + + inboundRateLimitCall := GetInboundTokenPoolRateLimitCall(addr, chainSelector) + + assert.Equal(t, "getCurrentInboundRateLimiterState", inboundRateLimitCall.MethodName()) +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_5_0/commit_store.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_5_0/commit_store.go new file mode 100644 index 00000000000..3bb582f3a21 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_5_0/commit_store.go @@ -0,0 +1,59 @@ +package v1_5_0 + +import ( + "context" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/commit_store" + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0" +) 
+ +type CommitStore struct { + *v1_2_0.CommitStore + commitStore *commit_store.CommitStore +} + +func (c *CommitStore) GetCommitStoreStaticConfig(ctx context.Context) (cciptypes.CommitStoreStaticConfig, error) { + staticConfig, err := c.commitStore.GetStaticConfig(&bind.CallOpts{Context: ctx}) + if err != nil { + return cciptypes.CommitStoreStaticConfig{}, err + } + return cciptypes.CommitStoreStaticConfig{ + ChainSelector: staticConfig.ChainSelector, + SourceChainSelector: staticConfig.SourceChainSelector, + OnRamp: cciptypes.Address(staticConfig.OnRamp.String()), + ArmProxy: cciptypes.Address(staticConfig.RmnProxy.String()), + }, nil +} + +func (c *CommitStore) IsDown(ctx context.Context) (bool, error) { + unPausedAndNotCursed, err := c.commitStore.IsUnpausedAndNotCursed(&bind.CallOpts{Context: ctx}) + if err != nil { + return true, err + } + return !unPausedAndNotCursed, nil +} + +func NewCommitStore(lggr logger.Logger, addr common.Address, ec client.Client, lp logpoller.LogPoller) (*CommitStore, error) { + v120, err := v1_2_0.NewCommitStore(lggr, addr, ec, lp) + if err != nil { + return nil, err + } + + commitStore, err := commit_store.NewCommitStore(addr, ec) + if err != nil { + return nil, err + } + + return &CommitStore{ + commitStore: commitStore, + CommitStore: v120, + }, nil +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_5_0/hasher.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_5_0/hasher.go new file mode 100644 index 00000000000..a00ec376cdb --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_5_0/hasher.go @@ -0,0 +1,101 @@ +package v1_5_0 + +import ( + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + + "github.com/smartcontractkit/chainlink-common/pkg/hashutil" + + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_onramp" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0" +) + +const ( + MetaDataHashPrefix = "EVM2EVMMessageHashV2" +) + +type LeafHasher struct { + metaDataHash [32]byte + ctx hashutil.Hasher[[32]byte] + onRamp *evm_2_evm_onramp.EVM2EVMOnRamp +} + +func NewLeafHasher(sourceChainSelector uint64, destChainSelector uint64, onRampId common.Address, ctx hashutil.Hasher[[32]byte], onRamp *evm_2_evm_onramp.EVM2EVMOnRamp) *LeafHasher { + return &LeafHasher{ + metaDataHash: v1_0_0.GetMetaDataHash(ctx, ctx.Hash([]byte(MetaDataHashPrefix)), sourceChainSelector, onRampId, destChainSelector), + ctx: ctx, + onRamp: onRamp, + } +} + +func (t *LeafHasher) HashLeaf(log types.Log) ([32]byte, error) { + msg, err := t.onRamp.ParseCCIPSendRequested(log) + if err != nil { + return [32]byte{}, err + } + message := msg.Message + encodedTokens, err := abihelpers.ABIEncode( + `[ +{"components": [{"name":"token","type":"address"},{"name":"amount","type":"uint256"}], "type":"tuple[]"}]`, message.TokenAmounts) + if err != nil { + return [32]byte{}, err + } + + bytesArray, err := abi.NewType("bytes[]", "bytes[]", nil) + if err != nil { + return [32]byte{}, err + } + + encodedSourceTokenData, err := abi.Arguments{abi.Argument{Type: bytesArray}}.PackValues([]interface{}{message.SourceTokenData}) + if err != nil { + return [32]byte{}, err + } + + packedFixedSizeValues, err := abihelpers.ABIEncode( + `[ +{"name": "sender", "type":"address"}, +{"name": "receiver", "type":"address"}, 
+{"name": "sequenceNumber", "type":"uint64"}, +{"name": "gasLimit", "type":"uint256"}, +{"name": "strict", "type":"bool"}, +{"name": "nonce", "type":"uint64"}, +{"name": "feeToken","type": "address"}, +{"name": "feeTokenAmount","type": "uint256"} +]`, + message.Sender, + message.Receiver, + message.SequenceNumber, + message.GasLimit, + message.Strict, + message.Nonce, + message.FeeToken, + message.FeeTokenAmount, + ) + if err != nil { + return [32]byte{}, err + } + fixedSizeValuesHash := t.ctx.Hash(packedFixedSizeValues) + + packedValues, err := abihelpers.ABIEncode( + `[ +{"name": "leafDomainSeparator","type":"bytes1"}, +{"name": "metadataHash", "type":"bytes32"}, +{"name": "fixedSizeValuesHash", "type":"bytes32"}, +{"name": "dataHash", "type":"bytes32"}, +{"name": "tokenAmountsHash", "type":"bytes32"}, +{"name": "sourceTokenDataHash", "type":"bytes32"} +]`, + v1_0_0.LeafDomainSeparator, + t.metaDataHash, + fixedSizeValuesHash, + t.ctx.Hash(message.Data), + t.ctx.Hash(encodedTokens), + t.ctx.Hash(encodedSourceTokenData), + ) + if err != nil { + return [32]byte{}, err + } + return t.ctx.Hash(packedValues), nil +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_5_0/hasher_test.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_5_0/hasher_test.go new file mode 100644 index 00000000000..2a585f7bd1e --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_5_0/hasher_test.go @@ -0,0 +1,78 @@ +package v1_5_0 + +import ( + "encoding/hex" + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/chainlink-common/pkg/hashutil" + + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_onramp" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers" +) + +func TestHasherV1_4_0(t *testing.T) { + sourceChainSelector, destChainSelector := uint64(1), uint64(4) + onRampAddress := common.HexToAddress("0x5550000000000000000000000000000000000001") + onRampABI := abihelpers.MustParseABI(evm_2_evm_onramp.EVM2EVMOnRampABI) + + hashingCtx := hashutil.NewKeccak() + ramp, err := evm_2_evm_onramp.NewEVM2EVMOnRamp(onRampAddress, nil) + require.NoError(t, err) + hasher := NewLeafHasher(sourceChainSelector, destChainSelector, onRampAddress, hashingCtx, ramp) + + message := evm_2_evm_onramp.InternalEVM2EVMMessage{ + SourceChainSelector: sourceChainSelector, + Sender: common.HexToAddress("0x1110000000000000000000000000000000000001"), + Receiver: common.HexToAddress("0x2220000000000000000000000000000000000001"), + SequenceNumber: 1337, + GasLimit: big.NewInt(100), + Strict: false, + Nonce: 1337, + FeeToken: common.Address{}, + FeeTokenAmount: big.NewInt(1), + Data: []byte{}, + TokenAmounts: []evm_2_evm_onramp.ClientEVMTokenAmount{{Token: common.HexToAddress("0x4440000000000000000000000000000000000001"), Amount: big.NewInt(12345678900)}}, + SourceTokenData: [][]byte{}, + MessageId: [32]byte{}, + } + + data, err := onRampABI.Events[CCIPSendRequestedEventName].Inputs.Pack(message) + require.NoError(t, err) + hash, err := hasher.HashLeaf(types.Log{Topics: []common.Hash{CCIPSendRequestEventSig}, Data: data}) + require.NoError(t, err) + + // NOTE: Must match spec + require.Equal(t, "46ad031bfb052db2e4a2514fed8dc480b98e5ce4acb55d5640d91407e0d8a3e9", hex.EncodeToString(hash[:])) + + message = evm_2_evm_onramp.InternalEVM2EVMMessage{ + SourceChainSelector: sourceChainSelector, + Sender: 
common.HexToAddress("0x1110000000000000000000000000000000000001"), + Receiver: common.HexToAddress("0x2220000000000000000000000000000000000001"), + SequenceNumber: 1337, + GasLimit: big.NewInt(100), + Strict: false, + Nonce: 1337, + FeeToken: common.Address{}, + FeeTokenAmount: big.NewInt(1e12), + Data: []byte("foo bar baz"), + TokenAmounts: []evm_2_evm_onramp.ClientEVMTokenAmount{ + {Token: common.HexToAddress("0x4440000000000000000000000000000000000001"), Amount: big.NewInt(12345678900)}, + {Token: common.HexToAddress("0x6660000000000000000000000000000000000001"), Amount: big.NewInt(4204242)}, + }, + SourceTokenData: [][]byte{{0x2, 0x1}}, + MessageId: [32]byte{}, + } + + data, err = onRampABI.Events[CCIPSendRequestedEventName].Inputs.Pack(message) + require.NoError(t, err) + hash, err = hasher.HashLeaf(types.Log{Topics: []common.Hash{CCIPSendRequestEventSig}, Data: data}) + require.NoError(t, err) + + // NOTE: Must match spec + require.Equal(t, "4362a13a42e52ff5ce4324e7184dc7aa41704c3146bc842d35d95b94b32a78b6", hex.EncodeToString(hash[:])) +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_5_0/offramp.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_5_0/offramp.go new file mode 100644 index 00000000000..cac61c67878 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_5_0/offramp.go @@ -0,0 +1,199 @@ +package v1_5_0 + +import ( + "context" + "math/big" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_offramp" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/router" + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers" + ccipconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/cache" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/prices" +) + +var ( + abiOffRamp = abihelpers.MustParseABI(evm_2_evm_offramp.EVM2EVMOffRampABI) + _ ccipdata.OffRampReader = &OffRamp{} + RateLimitTokenAddedEvent = abihelpers.MustGetEventID("TokenAggregateRateLimitAdded", abiOffRamp) + RateLimitTokenRemovedEvent = abihelpers.MustGetEventID("TokenAggregateRateLimitRemoved", abiOffRamp) +) + +type ExecOnchainConfig evm_2_evm_offramp.EVM2EVMOffRampDynamicConfig + +func (d ExecOnchainConfig) AbiString() string { + return ` + [ + { + "components": [ + {"name": "permissionLessExecutionThresholdSeconds", "type": "uint32"}, + {"name": "maxDataBytes", "type": "uint32"}, + {"name": "maxNumberOfTokensPerMsg", "type": "uint16"}, + {"name": "router", "type": "address"}, + {"name": "priceRegistry", "type": "address"}, + {"name": "maxPoolReleaseOrMintGas", "type": "uint32"}, + {"name": 
"maxTokenTransferGas", "type": "uint32"} + ], + "type": "tuple" + } + ]` +} + +func (d ExecOnchainConfig) Validate() error { + if d.PermissionLessExecutionThresholdSeconds == 0 { + return errors.New("must set PermissionLessExecutionThresholdSeconds") + } + if d.Router == (common.Address{}) { + return errors.New("must set Router address") + } + if d.PriceRegistry == (common.Address{}) { + return errors.New("must set PriceRegistry address") + } + if d.MaxNumberOfTokensPerMsg == 0 { + return errors.New("must set MaxNumberOfTokensPerMsg") + } + if d.MaxPoolReleaseOrMintGas == 0 { + return errors.New("must set MaxPoolReleaseOrMintGas") + } + if d.MaxTokenTransferGas == 0 { + return errors.New("must set MaxTokenTransferGas") + } + return nil +} + +type OffRamp struct { + *v1_2_0.OffRamp + offRampV150 evm_2_evm_offramp.EVM2EVMOffRampInterface + cachedRateLimitTokens cache.AutoSync[cciptypes.OffRampTokens] +} + +// GetTokens Returns no data as the offRamps no longer have this information. +func (o *OffRamp) GetTokens(ctx context.Context) (cciptypes.OffRampTokens, error) { + sourceTokens, destTokens, err := o.GetSourceAndDestRateLimitTokens(ctx) + if err != nil { + return cciptypes.OffRampTokens{}, err + } + return cciptypes.OffRampTokens{ + SourceTokens: sourceTokens, + DestinationTokens: destTokens, + }, nil +} + +func (o *OffRamp) GetSourceAndDestRateLimitTokens(ctx context.Context) (sourceTokens []cciptypes.Address, destTokens []cciptypes.Address, err error) { + cachedTokens, err := o.cachedRateLimitTokens.Get(ctx, func(ctx context.Context) (cciptypes.OffRampTokens, error) { + tokens, err2 := o.offRampV150.GetAllRateLimitTokens(&bind.CallOpts{Context: ctx}) + if err2 != nil { + return cciptypes.OffRampTokens{}, err2 + } + + if len(tokens.SourceTokens) != len(tokens.DestTokens) { + return cciptypes.OffRampTokens{}, errors.New("source and destination tokens are not the same length") + } + + return cciptypes.OffRampTokens{ + DestinationTokens: ccipcalc.EvmAddrsToGeneric(tokens.DestTokens...), + SourceTokens: ccipcalc.EvmAddrsToGeneric(tokens.SourceTokens...), + }, nil + }) + if err != nil { + return nil, nil, errors.Wrap(err, "failed to get rate limit tokens, if token set is large (~400k) batching may be needed") + } + return cachedTokens.SourceTokens, cachedTokens.DestinationTokens, nil +} + +func (o *OffRamp) GetSourceToDestTokensMapping(ctx context.Context) (map[cciptypes.Address]cciptypes.Address, error) { + sourceTokens, destTokens, err := o.GetSourceAndDestRateLimitTokens(ctx) + if err != nil { + return nil, errors.Wrap(err, "failed to get rate limit tokens, if token set is large (~400k) batching may be needed") + } + + if sourceTokens == nil || destTokens == nil { + return nil, errors.New("source or destination tokens are nil") + } + + mapping := make(map[cciptypes.Address]cciptypes.Address) + for i, sourceToken := range sourceTokens { + mapping[sourceToken] = destTokens[i] + } + return mapping, nil +} + +func (o *OffRamp) ChangeConfig(ctx context.Context, onchainConfigBytes []byte, offchainConfigBytes []byte) (cciptypes.Address, cciptypes.Address, error) { + // Same as the v1.2.0 method, except for the ExecOnchainConfig type. 
+ onchainConfigParsed, err := abihelpers.DecodeAbiStruct[ExecOnchainConfig](onchainConfigBytes) + if err != nil { + return "", "", err + } + + offchainConfigParsed, err := ccipconfig.DecodeOffchainConfig[v1_2_0.JSONExecOffchainConfig](offchainConfigBytes) + if err != nil { + return "", "", err + } + destRouter, err := router.NewRouter(onchainConfigParsed.Router, o.Client) + if err != nil { + return "", "", err + } + destWrappedNative, err := destRouter.GetWrappedNative(nil) + if err != nil { + return "", "", err + } + offchainConfig := cciptypes.ExecOffchainConfig{ + DestOptimisticConfirmations: offchainConfigParsed.DestOptimisticConfirmations, + BatchGasLimit: offchainConfigParsed.BatchGasLimit, + RelativeBoostPerWaitHour: offchainConfigParsed.RelativeBoostPerWaitHour, + InflightCacheExpiry: offchainConfigParsed.InflightCacheExpiry, + RootSnoozeTime: offchainConfigParsed.RootSnoozeTime, + MessageVisibilityInterval: offchainConfigParsed.MessageVisibilityInterval, + BatchingStrategyID: offchainConfigParsed.BatchingStrategyID, + } + onchainConfig := cciptypes.ExecOnchainConfig{ + PermissionLessExecutionThresholdSeconds: time.Second * time.Duration(onchainConfigParsed.PermissionLessExecutionThresholdSeconds), + Router: cciptypes.Address(onchainConfigParsed.Router.String()), + } + priceEstimator := prices.NewDAGasPriceEstimator(o.Estimator, o.DestMaxGasPrice, 0, 0) + + o.UpdateDynamicConfig(onchainConfig, offchainConfig, priceEstimator) + + o.Logger.Infow("Starting exec plugin", + "offchainConfig", offchainConfigParsed, + "onchainConfig", onchainConfigParsed) + return cciptypes.Address(onchainConfigParsed.PriceRegistry.String()), + cciptypes.Address(destWrappedNative.String()), nil +} + +func NewOffRamp(lggr logger.Logger, addr common.Address, ec client.Client, lp logpoller.LogPoller, estimator gas.EvmFeeEstimator, destMaxGasPrice *big.Int) (*OffRamp, error) { + v120, err := v1_2_0.NewOffRamp(lggr, addr, ec, lp, estimator, destMaxGasPrice) + if err != nil { + return nil, err + } + + offRamp, err := evm_2_evm_offramp.NewEVM2EVMOffRamp(addr, ec) + if err != nil { + return nil, err + } + + v120.ExecutionReportArgs = abihelpers.MustGetMethodInputs("manuallyExecute", abiOffRamp)[:1] + + return &OffRamp{ + OffRamp: v120, + offRampV150: offRamp, + cachedRateLimitTokens: cache.NewLogpollerEventsBased[cciptypes.OffRampTokens]( + lp, + []common.Hash{RateLimitTokenAddedEvent, RateLimitTokenRemovedEvent}, + offRamp.Address(), + ), + }, nil +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_5_0/offramp_test.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_5_0/offramp_test.go new file mode 100644 index 00000000000..a95445ec028 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_5_0/offramp_test.go @@ -0,0 +1 @@ +package v1_5_0 diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_5_0/onramp.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_5_0/onramp.go new file mode 100644 index 00000000000..481933f89ad --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_5_0/onramp.go @@ -0,0 +1,259 @@ +package v1_5_0 + +import ( + "context" + "fmt" + "strings" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + + "github.com/smartcontractkit/chainlink-common/pkg/hashutil" + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + + 
"github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/arm_contract" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_onramp" + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/cache" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcommon" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/logpollerutil" +) + +var ( + // Backwards compat for integration tests + CCIPSendRequestEventSig common.Hash + ConfigSetEventSig common.Hash +) + +const ( + CCIPSendRequestSeqNumIndex = 4 + CCIPSendRequestedEventName = "CCIPSendRequested" + ConfigSetEventName = "ConfigSet" +) + +func init() { + onRampABI, err := abi.JSON(strings.NewReader(evm_2_evm_onramp.EVM2EVMOnRampABI)) + if err != nil { + panic(err) + } + CCIPSendRequestEventSig = abihelpers.MustGetEventID(CCIPSendRequestedEventName, onRampABI) + ConfigSetEventSig = abihelpers.MustGetEventID(ConfigSetEventName, onRampABI) +} + +var _ ccipdata.OnRampReader = &OnRamp{} + +type OnRamp struct { + onRamp *evm_2_evm_onramp.EVM2EVMOnRamp + address common.Address + destChainSelectorBytes [16]byte + lggr logger.Logger + lp logpoller.LogPoller + leafHasher ccipdata.LeafHasherInterface[[32]byte] + client client.Client + sendRequestedEventSig common.Hash + sendRequestedSeqNumberWord int + filters []logpoller.Filter + cachedSourcePriceRegistryAddress cache.AutoSync[cciptypes.Address] + // Static config can be cached, because it's never expected to change. 
+ // The only way to change that is through the contract's constructor (redeployment) + cachedStaticConfig cache.OnceCtxFunction[evm_2_evm_onramp.EVM2EVMOnRampStaticConfig] + cachedRmnContract cache.OnceCtxFunction[*arm_contract.ARMContract] +} + +func NewOnRamp(lggr logger.Logger, sourceSelector, destSelector uint64, onRampAddress common.Address, sourceLP logpoller.LogPoller, source client.Client) (*OnRamp, error) { + onRamp, err := evm_2_evm_onramp.NewEVM2EVMOnRamp(onRampAddress, source) + if err != nil { + return nil, err + } + + // Subscribe to the relevant logs + // Note we can keep the same prefix across 1.0/1.1 and 1.2 because the onramp addresses will be different + filters := []logpoller.Filter{ + { + Name: logpoller.FilterName(ccipdata.COMMIT_CCIP_SENDS, onRampAddress), + EventSigs: []common.Hash{CCIPSendRequestEventSig}, + Addresses: []common.Address{onRampAddress}, + Retention: ccipdata.CommitExecLogsRetention, + }, + { + Name: logpoller.FilterName(ccipdata.CONFIG_CHANGED, onRampAddress), + EventSigs: []common.Hash{ConfigSetEventSig}, + Addresses: []common.Address{onRampAddress}, + Retention: ccipdata.CacheEvictionLogsRetention, + }, + } + cachedStaticConfig := cache.OnceCtxFunction[evm_2_evm_onramp.EVM2EVMOnRampStaticConfig](func(ctx context.Context) (evm_2_evm_onramp.EVM2EVMOnRampStaticConfig, error) { + return onRamp.GetStaticConfig(&bind.CallOpts{Context: ctx}) + }) + cachedRmnContract := cache.OnceCtxFunction[*arm_contract.ARMContract](func(ctx context.Context) (*arm_contract.ARMContract, error) { + staticConfig, err := cachedStaticConfig(ctx) + if err != nil { + return nil, err + } + + return arm_contract.NewARMContract(staticConfig.RmnProxy, source) + }) + + return &OnRamp{ + lggr: lggr, + client: source, + destChainSelectorBytes: ccipcommon.SelectorToBytes(destSelector), + lp: sourceLP, + leafHasher: NewLeafHasher(sourceSelector, destSelector, onRampAddress, hashutil.NewKeccak(), onRamp), + onRamp: onRamp, + filters: filters, + address: onRampAddress, + sendRequestedSeqNumberWord: CCIPSendRequestSeqNumIndex, + sendRequestedEventSig: CCIPSendRequestEventSig, + cachedSourcePriceRegistryAddress: cache.NewLogpollerEventsBased[cciptypes.Address]( + sourceLP, + []common.Hash{ConfigSetEventSig}, + onRampAddress, + ), + cachedStaticConfig: cache.CallOnceOnNoError(cachedStaticConfig), + cachedRmnContract: cache.CallOnceOnNoError(cachedRmnContract), + }, nil +} + +func (o *OnRamp) Address(context.Context) (cciptypes.Address, error) { + return ccipcalc.EvmAddrToGeneric(o.onRamp.Address()), nil +} + +func (o *OnRamp) GetDynamicConfig(context.Context) (cciptypes.OnRampDynamicConfig, error) { + if o.onRamp == nil { + return cciptypes.OnRampDynamicConfig{}, fmt.Errorf("onramp not initialized") + } + config, err := o.onRamp.GetDynamicConfig(&bind.CallOpts{}) + if err != nil { + return cciptypes.OnRampDynamicConfig{}, fmt.Errorf("get dynamic config v1.5: %w", err) + } + return cciptypes.OnRampDynamicConfig{ + Router: ccipcalc.EvmAddrToGeneric(config.Router), + MaxNumberOfTokensPerMsg: config.MaxNumberOfTokensPerMsg, + DestGasOverhead: config.DestGasOverhead, + DestGasPerPayloadByte: config.DestGasPerPayloadByte, + DestDataAvailabilityOverheadGas: config.DestDataAvailabilityOverheadGas, + DestGasPerDataAvailabilityByte: config.DestGasPerDataAvailabilityByte, + DestDataAvailabilityMultiplierBps: config.DestDataAvailabilityMultiplierBps, + PriceRegistry: ccipcalc.EvmAddrToGeneric(config.PriceRegistry), + MaxDataBytes: config.MaxDataBytes, + MaxPerMsgGasLimit: config.MaxPerMsgGasLimit, + }, 
nil +} + +func (o *OnRamp) SourcePriceRegistryAddress(ctx context.Context) (cciptypes.Address, error) { + return o.cachedSourcePriceRegistryAddress.Get(ctx, func(ctx context.Context) (cciptypes.Address, error) { + c, err := o.GetDynamicConfig(ctx) + if err != nil { + return "", err + } + return c.PriceRegistry, nil + }) +} + +func (o *OnRamp) GetSendRequestsBetweenSeqNums(ctx context.Context, seqNumMin, seqNumMax uint64, finalized bool) ([]cciptypes.EVM2EVMMessageWithTxMeta, error) { + logs, err := o.lp.LogsDataWordRange( + ctx, + o.sendRequestedEventSig, + o.address, + o.sendRequestedSeqNumberWord, + logpoller.EvmWord(seqNumMin), + logpoller.EvmWord(seqNumMax), + ccipdata.LogsConfirmations(finalized), + ) + if err != nil { + return nil, err + } + + parsedLogs, err := ccipdata.ParseLogs[cciptypes.EVM2EVMMessage](logs, o.lggr, o.logToMessage) + if err != nil { + return nil, err + } + + res := make([]cciptypes.EVM2EVMMessageWithTxMeta, 0, len(logs)) + for _, log := range parsedLogs { + res = append(res, cciptypes.EVM2EVMMessageWithTxMeta{ + TxMeta: log.TxMeta, + EVM2EVMMessage: log.Data, + }) + } + return res, nil +} + +func (o *OnRamp) RouterAddress(context.Context) (cciptypes.Address, error) { + config, err := o.onRamp.GetDynamicConfig(nil) + if err != nil { + return "", err + } + return ccipcalc.EvmAddrToGeneric(config.Router), nil +} + +func (o *OnRamp) IsSourceChainHealthy(context.Context) (bool, error) { + if err := o.lp.Healthy(); err != nil { + return false, nil + } + return true, nil +} + +func (o *OnRamp) IsSourceCursed(ctx context.Context) (bool, error) { + arm, err := o.cachedRmnContract(ctx) + if err != nil { + return false, fmt.Errorf("initializing RMN contract through the RmnProxy: %w", err) + } + + cursed, err := arm.IsCursed(&bind.CallOpts{Context: ctx}, o.destChainSelectorBytes) + if err != nil { + return false, fmt.Errorf("checking if source is cursed by RMN: %w", err) + } + return cursed, nil +} + +func (o *OnRamp) Close() error { + return logpollerutil.UnregisterLpFilters(o.lp, o.filters) +} + +func (o *OnRamp) RegisterFilters() error { + return logpollerutil.RegisterLpFilters(o.lp, o.filters) +} + +func (o *OnRamp) logToMessage(log types.Log) (*cciptypes.EVM2EVMMessage, error) { + msg, err := o.onRamp.ParseCCIPSendRequested(log) + if err != nil { + return nil, err + } + h, err := o.leafHasher.HashLeaf(log) + if err != nil { + return nil, err + } + tokensAndAmounts := make([]cciptypes.TokenAmount, len(msg.Message.TokenAmounts)) + for i, tokenAndAmount := range msg.Message.TokenAmounts { + tokensAndAmounts[i] = cciptypes.TokenAmount{ + Token: ccipcalc.EvmAddrToGeneric(tokenAndAmount.Token), + Amount: tokenAndAmount.Amount, + } + } + + return &cciptypes.EVM2EVMMessage{ + SequenceNumber: msg.Message.SequenceNumber, + GasLimit: msg.Message.GasLimit, + Nonce: msg.Message.Nonce, + MessageID: msg.Message.MessageId, + SourceChainSelector: msg.Message.SourceChainSelector, + Sender: ccipcalc.EvmAddrToGeneric(msg.Message.Sender), + Receiver: ccipcalc.EvmAddrToGeneric(msg.Message.Receiver), + Strict: msg.Message.Strict, + FeeToken: ccipcalc.EvmAddrToGeneric(msg.Message.FeeToken), + FeeTokenAmount: msg.Message.FeeTokenAmount, + Data: msg.Message.Data, + TokenAmounts: tokensAndAmounts, + SourceTokenData: msg.Message.SourceTokenData, // Breaking change 1.2 + Hash: h, + }, nil +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_5_0/onramp_test.go b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_5_0/onramp_test.go new file mode 100644 index 00000000000..3ca360c8ff2 
--- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_5_0/onramp_test.go @@ -0,0 +1,210 @@ +package v1_5_0 + +import ( + "context" + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller/mocks" + evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_onramp" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/mock_arm_contract" + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcommon" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata" +) + +func TestLogPollerClient_GetSendRequestsBetweenSeqNums1_4_0(t *testing.T) { + onRampAddr := utils.RandomAddress() + seqNum := uint64(100) + limit := uint64(10) + lggr := logger.TestLogger(t) + + tests := []struct { + name string + finalized bool + confirmations evmtypes.Confirmations + }{ + {"finalized", true, evmtypes.Finalized}, + {"unfinalized", false, evmtypes.Confirmations(0)}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + lp := mocks.NewLogPoller(t) + onRampV2, err := NewOnRamp(lggr, 1, 1, onRampAddr, lp, nil) + require.NoError(t, err) + + lp.On("LogsDataWordRange", + mock.Anything, + onRampV2.sendRequestedEventSig, + onRampAddr, + onRampV2.sendRequestedSeqNumberWord, + abihelpers.EvmWord(seqNum), + abihelpers.EvmWord(seqNum+limit), + tt.confirmations, + ).Once().Return([]logpoller.Log{}, nil) + + events, err1 := onRampV2.GetSendRequestsBetweenSeqNums(context.Background(), seqNum, seqNum+limit, tt.finalized) + assert.NoError(t, err1) + assert.Empty(t, events) + + lp.AssertExpectations(t) + }) + } +} + +func Test_ProperlyRecognizesPerLaneCurses(t *testing.T) { + user, bc := ccipdata.NewSimulation(t) + ctx := testutils.Context(t) + destChainSelector := uint64(100) + sourceChainSelector := uint64(200) + onRampAddress, mockRMN, mockRMNAddress := setupOnRampV1_5_0(t, user, bc) + + onRamp, err := NewOnRamp(logger.TestLogger(t), 1, destChainSelector, onRampAddress, mocks.NewLogPoller(t), bc) + require.NoError(t, err) + + onRamp.cachedStaticConfig = func(ctx context.Context) (evm_2_evm_onramp.EVM2EVMOnRampStaticConfig, error) { + return evm_2_evm_onramp.EVM2EVMOnRampStaticConfig{ + RmnProxy: mockRMNAddress, + }, nil + } + + // Lane is not cursed right after deployment + isCursed, err := onRamp.IsSourceCursed(ctx) + require.NoError(t, err) + assert.False(t, isCursed) + + // Cursing different chain selector + _, err = mockRMN.VoteToCurse0(user, [32]byte{}, ccipcommon.SelectorToBytes(sourceChainSelector)) + require.NoError(t, err) + bc.Commit() + + isCursed, err = onRamp.IsSourceCursed(ctx) + require.NoError(t, err) + assert.False(t, isCursed) + + // Cursing the correct chain selector + _, err = mockRMN.VoteToCurse0(user, [32]byte{}, 
ccipcommon.SelectorToBytes(destChainSelector)) + require.NoError(t, err) + bc.Commit() + + isCursed, err = onRamp.IsSourceCursed(ctx) + require.NoError(t, err) + assert.True(t, isCursed) + + // Uncursing the chain selector + _, err = mockRMN.OwnerUnvoteToCurse(user, []mock_arm_contract.RMNUnvoteToCurseRecord{}, ccipcommon.SelectorToBytes(destChainSelector)) + require.NoError(t, err) + bc.Commit() + + isCursed, err = onRamp.IsSourceCursed(ctx) + require.NoError(t, err) + assert.False(t, isCursed) +} + +// This is written to benchmark before and after the caching of StaticConfig and RMNContract +func BenchmarkIsSourceCursedWithCache(b *testing.B) { + user, bc := ccipdata.NewSimulation(b) + ctx := testutils.Context(b) + destChainSelector := uint64(100) + onRampAddress, _, _ := setupOnRampV1_5_0(b, user, bc) + + onRamp, err := NewOnRamp(logger.TestLogger(b), 1, destChainSelector, onRampAddress, mocks.NewLogPoller(b), bc) + require.NoError(b, err) + + for i := 0; i < b.N; i++ { + _, _ = onRamp.IsSourceCursed(ctx) + } +} + +func setupOnRampV1_5_0(t testing.TB, user *bind.TransactOpts, bc *client.SimulatedBackendClient) (common.Address, *mock_arm_contract.MockARMContract, common.Address) { + rmnAddress, transaction, rmnContract, err := mock_arm_contract.DeployMockARMContract(user, bc) + bc.Commit() + require.NoError(t, err) + ccipdata.AssertNonRevert(t, transaction, bc, user) + + linkTokenAddress := common.HexToAddress("0x000011") + staticConfig := evm_2_evm_onramp.EVM2EVMOnRampStaticConfig{ + LinkToken: linkTokenAddress, + ChainSelector: testutils.SimulatedChainID.Uint64(), + DestChainSelector: testutils.SimulatedChainID.Uint64(), + DefaultTxGasLimit: 30000, + MaxNopFeesJuels: big.NewInt(1000000), + PrevOnRamp: common.Address{}, + RmnProxy: rmnAddress, + TokenAdminRegistry: utils.RandomAddress(), + } + dynamicConfig := evm_2_evm_onramp.EVM2EVMOnRampDynamicConfig{ + Router: common.HexToAddress("0x0000000000000000000000000000000000000150"), + MaxNumberOfTokensPerMsg: 0, + DestGasOverhead: 0, + DestGasPerPayloadByte: 0, + DestDataAvailabilityOverheadGas: 0, + DestGasPerDataAvailabilityByte: 0, + DestDataAvailabilityMultiplierBps: 0, + PriceRegistry: utils.RandomAddress(), + MaxDataBytes: 0, + MaxPerMsgGasLimit: 0, + DefaultTokenFeeUSDCents: 50, + DefaultTokenDestGasOverhead: 34_000, + DefaultTokenDestBytesOverhead: 500, + } + rateLimiterConfig := evm_2_evm_onramp.RateLimiterConfig{ + IsEnabled: false, + Capacity: big.NewInt(5), + Rate: big.NewInt(5), + } + feeTokenConfigs := []evm_2_evm_onramp.EVM2EVMOnRampFeeTokenConfigArgs{ + { + Token: linkTokenAddress, + NetworkFeeUSDCents: 0, + GasMultiplierWeiPerEth: 0, + PremiumMultiplierWeiPerEth: 0, + Enabled: false, + }, + } + tokenTransferConfigArgs := []evm_2_evm_onramp.EVM2EVMOnRampTokenTransferFeeConfigArgs{ + { + Token: linkTokenAddress, + MinFeeUSDCents: 0, + MaxFeeUSDCents: 0, + DeciBps: 0, + DestGasOverhead: 0, + DestBytesOverhead: 32, + AggregateRateLimitEnabled: true, + }, + } + nopsAndWeights := []evm_2_evm_onramp.EVM2EVMOnRampNopAndWeight{ + { + Nop: utils.RandomAddress(), + Weight: 1, + }, + } + onRampAddress, transaction, _, err := evm_2_evm_onramp.DeployEVM2EVMOnRamp( + user, + bc, + staticConfig, + dynamicConfig, + rateLimiterConfig, + feeTokenConfigs, + tokenTransferConfigArgs, + nopsAndWeights, + ) + bc.Commit() + require.NoError(t, err) + ccipdata.AssertNonRevert(t, transaction, bc, user) + + return onRampAddress, rmnContract, rmnAddress +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdb/mocks/price_service_mock.go 
b/core/services/ocr2/plugins/ccip/internal/ccipdb/mocks/price_service_mock.go new file mode 100644 index 00000000000..39ba632aff9 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipdb/mocks/price_service_mock.go @@ -0,0 +1,250 @@ +// Code generated by mockery v2.43.2. DO NOT EDIT. + +package mocks + +import ( + big "math/big" + + ccip "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + ccipdata "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata" + + context "context" + + mock "github.com/stretchr/testify/mock" + + prices "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/prices" +) + +// PriceService is an autogenerated mock type for the PriceService type +type PriceService struct { + mock.Mock +} + +type PriceService_Expecter struct { + mock *mock.Mock +} + +func (_m *PriceService) EXPECT() *PriceService_Expecter { + return &PriceService_Expecter{mock: &_m.Mock} +} + +// Close provides a mock function with given fields: +func (_m *PriceService) Close() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Close") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// PriceService_Close_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Close' +type PriceService_Close_Call struct { + *mock.Call +} + +// Close is a helper method to define mock.On call +func (_e *PriceService_Expecter) Close() *PriceService_Close_Call { + return &PriceService_Close_Call{Call: _e.mock.On("Close")} +} + +func (_c *PriceService_Close_Call) Run(run func()) *PriceService_Close_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *PriceService_Close_Call) Return(_a0 error) *PriceService_Close_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *PriceService_Close_Call) RunAndReturn(run func() error) *PriceService_Close_Call { + _c.Call.Return(run) + return _c +} + +// GetGasAndTokenPrices provides a mock function with given fields: ctx, destChainSelector +func (_m *PriceService) GetGasAndTokenPrices(ctx context.Context, destChainSelector uint64) (map[uint64]*big.Int, map[ccip.Address]*big.Int, error) { + ret := _m.Called(ctx, destChainSelector) + + if len(ret) == 0 { + panic("no return value specified for GetGasAndTokenPrices") + } + + var r0 map[uint64]*big.Int + var r1 map[ccip.Address]*big.Int + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, uint64) (map[uint64]*big.Int, map[ccip.Address]*big.Int, error)); ok { + return rf(ctx, destChainSelector) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64) map[uint64]*big.Int); ok { + r0 = rf(ctx, destChainSelector) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[uint64]*big.Int) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64) map[ccip.Address]*big.Int); ok { + r1 = rf(ctx, destChainSelector) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(map[ccip.Address]*big.Int) + } + } + + if rf, ok := ret.Get(2).(func(context.Context, uint64) error); ok { + r2 = rf(ctx, destChainSelector) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// PriceService_GetGasAndTokenPrices_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetGasAndTokenPrices' +type PriceService_GetGasAndTokenPrices_Call struct { + *mock.Call +} + +// GetGasAndTokenPrices is a helper method to define mock.On call 
+// - ctx context.Context +// - destChainSelector uint64 +func (_e *PriceService_Expecter) GetGasAndTokenPrices(ctx interface{}, destChainSelector interface{}) *PriceService_GetGasAndTokenPrices_Call { + return &PriceService_GetGasAndTokenPrices_Call{Call: _e.mock.On("GetGasAndTokenPrices", ctx, destChainSelector)} +} + +func (_c *PriceService_GetGasAndTokenPrices_Call) Run(run func(ctx context.Context, destChainSelector uint64)) *PriceService_GetGasAndTokenPrices_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint64)) + }) + return _c +} + +func (_c *PriceService_GetGasAndTokenPrices_Call) Return(_a0 map[uint64]*big.Int, _a1 map[ccip.Address]*big.Int, _a2 error) *PriceService_GetGasAndTokenPrices_Call { + _c.Call.Return(_a0, _a1, _a2) + return _c +} + +func (_c *PriceService_GetGasAndTokenPrices_Call) RunAndReturn(run func(context.Context, uint64) (map[uint64]*big.Int, map[ccip.Address]*big.Int, error)) *PriceService_GetGasAndTokenPrices_Call { + _c.Call.Return(run) + return _c +} + +// Start provides a mock function with given fields: _a0 +func (_m *PriceService) Start(_a0 context.Context) error { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for Start") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// PriceService_Start_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Start' +type PriceService_Start_Call struct { + *mock.Call +} + +// Start is a helper method to define mock.On call +// - _a0 context.Context +func (_e *PriceService_Expecter) Start(_a0 interface{}) *PriceService_Start_Call { + return &PriceService_Start_Call{Call: _e.mock.On("Start", _a0)} +} + +func (_c *PriceService_Start_Call) Run(run func(_a0 context.Context)) *PriceService_Start_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *PriceService_Start_Call) Return(_a0 error) *PriceService_Start_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *PriceService_Start_Call) RunAndReturn(run func(context.Context) error) *PriceService_Start_Call { + _c.Call.Return(run) + return _c +} + +// UpdateDynamicConfig provides a mock function with given fields: ctx, gasPriceEstimator, destPriceRegistryReader +func (_m *PriceService) UpdateDynamicConfig(ctx context.Context, gasPriceEstimator prices.GasPriceEstimatorCommit, destPriceRegistryReader ccipdata.PriceRegistryReader) error { + ret := _m.Called(ctx, gasPriceEstimator, destPriceRegistryReader) + + if len(ret) == 0 { + panic("no return value specified for UpdateDynamicConfig") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, prices.GasPriceEstimatorCommit, ccipdata.PriceRegistryReader) error); ok { + r0 = rf(ctx, gasPriceEstimator, destPriceRegistryReader) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// PriceService_UpdateDynamicConfig_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateDynamicConfig' +type PriceService_UpdateDynamicConfig_Call struct { + *mock.Call +} + +// UpdateDynamicConfig is a helper method to define mock.On call +// - ctx context.Context +// - gasPriceEstimator prices.GasPriceEstimatorCommit +// - destPriceRegistryReader ccipdata.PriceRegistryReader +func (_e *PriceService_Expecter) UpdateDynamicConfig(ctx interface{}, gasPriceEstimator interface{}, destPriceRegistryReader interface{}) 
*PriceService_UpdateDynamicConfig_Call { + return &PriceService_UpdateDynamicConfig_Call{Call: _e.mock.On("UpdateDynamicConfig", ctx, gasPriceEstimator, destPriceRegistryReader)} +} + +func (_c *PriceService_UpdateDynamicConfig_Call) Run(run func(ctx context.Context, gasPriceEstimator prices.GasPriceEstimatorCommit, destPriceRegistryReader ccipdata.PriceRegistryReader)) *PriceService_UpdateDynamicConfig_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(prices.GasPriceEstimatorCommit), args[2].(ccipdata.PriceRegistryReader)) + }) + return _c +} + +func (_c *PriceService_UpdateDynamicConfig_Call) Return(_a0 error) *PriceService_UpdateDynamicConfig_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *PriceService_UpdateDynamicConfig_Call) RunAndReturn(run func(context.Context, prices.GasPriceEstimatorCommit, ccipdata.PriceRegistryReader) error) *PriceService_UpdateDynamicConfig_Call { + _c.Call.Return(run) + return _c +} + +// NewPriceService creates a new instance of PriceService. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewPriceService(t interface { + mock.TestingT + Cleanup(func()) +}) *PriceService { + mock := &PriceService{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdb/price_service.go b/core/services/ocr2/plugins/ccip/internal/ccipdb/price_service.go new file mode 100644 index 00000000000..7d7d5bda3ad --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipdb/price_service.go @@ -0,0 +1,400 @@ +package db + +import ( + "context" + "fmt" + "math/big" + "sort" + "sync" + "time" + + "golang.org/x/sync/errgroup" + + "github.com/smartcontractkit/chainlink-common/pkg/logger" + "github.com/smartcontractkit/chainlink-common/pkg/services" + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + "github.com/smartcontractkit/chainlink-common/pkg/utils" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/assets" + cciporm "github.com/smartcontractkit/chainlink/v2/core/services/ccip" + "github.com/smartcontractkit/chainlink/v2/core/services/job" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcommon" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/pricegetter" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/prices" +) + +// PriceService manages DB access for gas and token price data. +// In the background, PriceService periodically inserts latest gas and token prices into the DB. +// During `Observation` phase, Commit plugin calls PriceService to fetch the latest prices from DB. +// This enables all lanes connected to a chain to feed price data to the leader lane's Commit plugin for that chain. +type PriceService interface { + job.ServiceCtx + + // UpdateDynamicConfig updates gasPriceEstimator and destPriceRegistryReader during Commit plugin dynamic config change. + UpdateDynamicConfig(ctx context.Context, gasPriceEstimator prices.GasPriceEstimatorCommit, destPriceRegistryReader ccipdata.PriceRegistryReader) error + + // GetGasAndTokenPrices fetches source chain gas prices and relevant token prices from all lanes that touch the given dest chain. 
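
As a rough orientation for how the interface above is used, the sketch below wires a PriceService for one lane using only the constructor and methods defined in this file (it would sit in the same package). The helper name, parameter plumbing, and error handling are illustrative, not the actual plugin wiring.

// Illustrative helper (hypothetical): manage one lane's PriceService lifecycle.
func startLanePriceService(
	ctx context.Context,
	lggr logger.Logger,
	orm cciporm.ORM,
	jobID int32,
	destChainSelector, sourceChainSelector uint64,
	sourceNative cciptypes.Address,
	priceGetter pricegetter.PriceGetter,
	offRampReader ccipdata.OffRampReader,
	gasPriceEstimator prices.GasPriceEstimatorCommit,
	destPriceRegistry ccipdata.PriceRegistryReader,
) (PriceService, error) {
	ps := NewPriceService(lggr, orm, jobID, destChainSelector, sourceChainSelector, sourceNative, priceGetter, offRampReader)
	if err := ps.Start(ctx); err != nil {
		return nil, err
	}
	// The estimator and price registry arrive with the Commit plugin's dynamic config;
	// setting them also triggers an immediate price refresh in addition to the periodic updates.
	if err := ps.UpdateDynamicConfig(ctx, gasPriceEstimator, destPriceRegistry); err != nil {
		_ = ps.Close()
		return nil, err
	}
	return ps, nil
}
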
+ // The prices have been written into the DB by each lane's PriceService in the background. The prices are denoted in USD. + GetGasAndTokenPrices(ctx context.Context, destChainSelector uint64) (map[uint64]*big.Int, map[cciptypes.Address]*big.Int, error) +} + +var _ PriceService = (*priceService)(nil) + +const ( + // Prices should expire after 10 minutes in DB. Prices should be fresh in the Commit plugin. + // 10 min provides sufficient buffer for the Commit plugin to withstand transient price update outages, while + // surfacing price update outages quickly enough. + priceExpireSec = 600 + // Cleanups are called every 10 minutes. For a given job, on average we may expect 3 token prices and 1 gas price. + // 10 minutes should result in 40 rows being cleaned up per job, it is not a heavy load on DB, so there is no need + // to run cleanup more frequently. We shouldn't clean up less frequently than `priceExpireSec`. + priceCleanupInterval = 600 * time.Second + + // Prices are refreshed every 1 minute, they are sufficiently accurate, and consistent with Commit OCR round time. + priceUpdateInterval = 60 * time.Second +) + +type priceService struct { + priceExpireSec int + cleanupInterval time.Duration + updateInterval time.Duration + + lggr logger.Logger + orm cciporm.ORM + jobId int32 + destChainSelector uint64 + + sourceChainSelector uint64 + sourceNative cciptypes.Address + priceGetter pricegetter.PriceGetter + offRampReader ccipdata.OffRampReader + gasPriceEstimator prices.GasPriceEstimatorCommit + destPriceRegistryReader ccipdata.PriceRegistryReader + + services.StateMachine + wg *sync.WaitGroup + backgroundCtx context.Context //nolint:containedctx + backgroundCancel context.CancelFunc + dynamicConfigMu *sync.RWMutex +} + +func NewPriceService( + lggr logger.Logger, + orm cciporm.ORM, + jobId int32, + destChainSelector uint64, + sourceChainSelector uint64, + + sourceNative cciptypes.Address, + priceGetter pricegetter.PriceGetter, + offRampReader ccipdata.OffRampReader, +) PriceService { + ctx, cancel := context.WithCancel(context.Background()) + + pw := &priceService{ + priceExpireSec: priceExpireSec, + cleanupInterval: utils.WithJitter(priceCleanupInterval), // use WithJitter to avoid multiple services impacting DB at same time + updateInterval: utils.WithJitter(priceUpdateInterval), + + lggr: lggr, + orm: orm, + jobId: jobId, + destChainSelector: destChainSelector, + + sourceChainSelector: sourceChainSelector, + sourceNative: sourceNative, + priceGetter: priceGetter, + offRampReader: offRampReader, + + wg: new(sync.WaitGroup), + backgroundCtx: ctx, + backgroundCancel: cancel, + dynamicConfigMu: &sync.RWMutex{}, + } + return pw +} + +func (p *priceService) Start(context.Context) error { + return p.StateMachine.StartOnce("PriceService", func() error { + p.lggr.Info("Starting PriceService") + p.wg.Add(1) + p.run() + return nil + }) +} + +func (p *priceService) Close() error { + return p.StateMachine.StopOnce("PriceService", func() error { + p.lggr.Info("Closing PriceService") + p.backgroundCancel() + p.wg.Wait() + return nil + }) +} + +func (p *priceService) run() { + cleanupTicker := time.NewTicker(p.cleanupInterval) + updateTicker := time.NewTicker(p.updateInterval) + + go func() { + defer p.wg.Done() + + for { + select { + case <-p.backgroundCtx.Done(): + return + case <-cleanupTicker.C: + err := p.runCleanup(p.backgroundCtx) + if err != nil { + p.lggr.Errorw("Error when cleaning up in-db prices in the background", "err", err) + } + case <-updateTicker.C: + err := 
p.runUpdate(p.backgroundCtx)
+				if err != nil {
+					p.lggr.Errorw("Error when updating prices in the background", "err", err)
+				}
+			}
+		}
+	}()
+}
+
+func (p *priceService) UpdateDynamicConfig(ctx context.Context, gasPriceEstimator prices.GasPriceEstimatorCommit, destPriceRegistryReader ccipdata.PriceRegistryReader) error {
+	p.dynamicConfigMu.Lock()
+	p.gasPriceEstimator = gasPriceEstimator
+	p.destPriceRegistryReader = destPriceRegistryReader
+	p.dynamicConfigMu.Unlock()
+
+	// A config update may substantially change the prices, so refresh them immediately. This also makes testing easier,
+	// since tests do not have to wait for the full update interval.
+	if err := p.runUpdate(ctx); err != nil {
+		p.lggr.Errorw("Error when updating prices after dynamic config update", "err", err)
+	}
+
+	return nil
+}
+
+func (p *priceService) GetGasAndTokenPrices(ctx context.Context, destChainSelector uint64) (map[uint64]*big.Int, map[cciptypes.Address]*big.Int, error) {
+	eg := new(errgroup.Group)
+
+	var gasPricesInDB []cciporm.GasPrice
+	var tokenPricesInDB []cciporm.TokenPrice
+
+	eg.Go(func() error {
+		gasPrices, err := p.orm.GetGasPricesByDestChain(ctx, destChainSelector)
+		if err != nil {
+			return fmt.Errorf("failed to get gas prices from db: %w", err)
+		}
+		gasPricesInDB = gasPrices
+		return nil
+	})
+
+	eg.Go(func() error {
+		tokenPrices, err := p.orm.GetTokenPricesByDestChain(ctx, destChainSelector)
+		if err != nil {
+			return fmt.Errorf("failed to get token prices from db: %w", err)
+		}
+		tokenPricesInDB = tokenPrices
+		return nil
+	})
+
+	if err := eg.Wait(); err != nil {
+		return nil, nil, err
+	}
+
+	gasPrices := make(map[uint64]*big.Int, len(gasPricesInDB))
+	tokenPrices := make(map[cciptypes.Address]*big.Int, len(tokenPricesInDB))
+
+	for _, gasPrice := range gasPricesInDB {
+		if gasPrice.GasPrice != nil {
+			gasPrices[gasPrice.SourceChainSelector] = gasPrice.GasPrice.ToInt()
+		}
+	}
+
+	for _, tokenPrice := range tokenPricesInDB {
+		if tokenPrice.TokenPrice != nil {
+			tokenPrices[cciptypes.Address(tokenPrice.TokenAddr)] = tokenPrice.TokenPrice.ToInt()
+		}
+	}
+
+	return gasPrices, tokenPrices, nil
+}
+
+func (p *priceService) runCleanup(ctx context.Context) error {
+	eg := new(errgroup.Group)
+
+	eg.Go(func() error {
+		err := p.orm.ClearGasPricesByDestChain(ctx, p.destChainSelector, p.priceExpireSec)
+		if err != nil {
+			return fmt.Errorf("error clearing gas prices: %w", err)
+		}
+		return nil
+	})
+
+	eg.Go(func() error {
+		err := p.orm.ClearTokenPricesByDestChain(ctx, p.destChainSelector, p.priceExpireSec)
+		if err != nil {
+			return fmt.Errorf("error clearing token prices: %w", err)
+		}
+		return nil
+	})
+
+	return eg.Wait()
+}
+
+func (p *priceService) runUpdate(ctx context.Context) error {
+	// Protect against concurrent updates of `gasPriceEstimator` and `destPriceRegistryReader`.
+	// Price updates happen infrequently - once per `priceUpdateInterval`.
+	// They do not happen on any performance-sensitive code path,
+	// so we can afford a simple, safe locking scheme here even if it is not the fastest.
+	p.dynamicConfigMu.RLock()
+	defer p.dynamicConfigMu.RUnlock()
+
+	// There may be a period of time between when the service is started and when the dynamic config is first updated.
+	if p.gasPriceEstimator == nil || p.destPriceRegistryReader == nil {
+		p.lggr.Info("Skipping price update due to gasPriceEstimator and/or destPriceRegistry not ready")
+		return nil
+	}
+
+	sourceGasPriceUSD, tokenPricesUSD, err := p.observePriceUpdates(ctx, p.lggr)
+	if err != nil {
+		return fmt.Errorf("failed to observe price updates: %w", err)
+	}
+
+	err = p.writePricesToDB(ctx, sourceGasPriceUSD, tokenPricesUSD)
+	if err != nil {
+		return fmt.Errorf("failed to write prices to db: %w", err)
+	}
+
+	return nil
+}
+
+func (p *priceService) observePriceUpdates(
+	ctx context.Context,
+	lggr logger.Logger,
+) (sourceGasPriceUSD *big.Int, tokenPricesUSD map[cciptypes.Address]*big.Int, err error) {
+	if p.gasPriceEstimator == nil || p.destPriceRegistryReader == nil {
+		return nil, nil, fmt.Errorf("gasPriceEstimator and/or destPriceRegistry is not set yet")
+	}
+
+	sortedLaneTokens, filteredLaneTokens, err := ccipcommon.GetFilteredSortedLaneTokens(ctx, p.offRampReader, p.destPriceRegistryReader, p.priceGetter)
+
+	lggr.Debugw("Filtered bridgeable tokens with no configured price getter", "filteredLaneTokens", filteredLaneTokens)
+
+	if err != nil {
+		return nil, nil, fmt.Errorf("get destination tokens: %w", err)
+	}
+
+	return p.generatePriceUpdates(ctx, lggr, sortedLaneTokens)
+}
+
+// All prices are USD ($1=1e18) denominated. All prices must not be nil.
+// The returned token prices should contain exactly the same tokens as in tokenDecimals.
+func (p *priceService) generatePriceUpdates(
+	ctx context.Context,
+	lggr logger.Logger,
+	sortedLaneTokens []cciptypes.Address,
+) (sourceGasPriceUSD *big.Int, tokenPricesUSD map[cciptypes.Address]*big.Int, err error) {
+	// Include wrapped native in our token query as a way to identify the source native USD price.
+	// Note that USD is in 1e18 scale, i.e.
$1 = 1e18 + queryTokens := ccipcommon.FlattenUniqueSlice([]cciptypes.Address{p.sourceNative}, sortedLaneTokens) + + rawTokenPricesUSD, err := p.priceGetter.TokenPricesUSD(ctx, queryTokens) + if err != nil { + return nil, nil, err + } + lggr.Infow("Raw token prices", "rawTokenPrices", rawTokenPricesUSD) + + // make sure that we got prices for all the tokens of our query + for _, token := range queryTokens { + if rawTokenPricesUSD[token] == nil { + return nil, nil, fmt.Errorf("missing token price: %+v", token) + } + } + + sourceNativePriceUSD, exists := rawTokenPricesUSD[p.sourceNative] + if !exists { + return nil, nil, fmt.Errorf("missing source native (%s) price", p.sourceNative) + } + + destTokensDecimals, err := p.destPriceRegistryReader.GetTokensDecimals(ctx, sortedLaneTokens) + if err != nil { + return nil, nil, fmt.Errorf("get tokens decimals: %w", err) + } + + tokenPricesUSD = make(map[cciptypes.Address]*big.Int, len(rawTokenPricesUSD)) + for i, token := range sortedLaneTokens { + tokenPricesUSD[token] = calculateUsdPer1e18TokenAmount(rawTokenPricesUSD[token], destTokensDecimals[i]) + } + + sourceGasPrice, err := p.gasPriceEstimator.GetGasPrice(ctx) + if err != nil { + return nil, nil, err + } + if sourceGasPrice == nil { + return nil, nil, fmt.Errorf("missing gas price") + } + sourceGasPriceUSD, err = p.gasPriceEstimator.DenoteInUSD(sourceGasPrice, sourceNativePriceUSD) + if err != nil { + return nil, nil, err + } + + lggr.Infow("PriceService observed latest price", + "sourceChainSelector", p.sourceChainSelector, + "destChainSelector", p.destChainSelector, + "gasPriceWei", sourceGasPrice, + "sourceNativePriceUSD", sourceNativePriceUSD, + "sourceGasPriceUSD", sourceGasPriceUSD, + "tokenPricesUSD", tokenPricesUSD, + ) + return sourceGasPriceUSD, tokenPricesUSD, nil +} + +func (p *priceService) writePricesToDB( + ctx context.Context, + sourceGasPriceUSD *big.Int, + tokenPricesUSD map[cciptypes.Address]*big.Int, +) (err error) { + eg := new(errgroup.Group) + + if sourceGasPriceUSD != nil { + eg.Go(func() error { + return p.orm.InsertGasPricesForDestChain(ctx, p.destChainSelector, p.jobId, []cciporm.GasPriceUpdate{ + { + SourceChainSelector: p.sourceChainSelector, + GasPrice: assets.NewWei(sourceGasPriceUSD), + }, + }) + }) + } + + if tokenPricesUSD != nil { + var tokenPrices []cciporm.TokenPriceUpdate + + for token, price := range tokenPricesUSD { + tokenPrices = append(tokenPrices, cciporm.TokenPriceUpdate{ + TokenAddr: string(token), + TokenPrice: assets.NewWei(price), + }) + } + + // Sort token by addr to make price updates ordering deterministic, easier to testing and debugging + sort.Slice(tokenPrices, func(i, j int) bool { + return tokenPrices[i].TokenAddr < tokenPrices[j].TokenAddr + }) + + eg.Go(func() error { + return p.orm.InsertTokenPricesForDestChain(ctx, p.destChainSelector, p.jobId, tokenPrices) + }) + } + + return eg.Wait() +} + +// Input price is USD per full token, with 18 decimal precision +// Result price is USD per 1e18 of smallest token denomination, with 18 decimal precision +// Example: 1 USDC = 1.00 USD per full token, each full token is 6 decimals -> 1 * 1e18 * 1e18 / 1e6 = 1e30 +func calculateUsdPer1e18TokenAmount(price *big.Int, decimals uint8) *big.Int { + tmp := big.NewInt(0).Mul(price, big.NewInt(1e18)) + return tmp.Div(tmp, big.NewInt(0).Exp(big.NewInt(10), big.NewInt(int64(decimals)), nil)) +} diff --git a/core/services/ocr2/plugins/ccip/internal/ccipdb/price_service_test.go b/core/services/ocr2/plugins/ccip/internal/ccipdb/price_service_test.go new file 
mode 100644 index 00000000000..0bea8af9a19 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/ccipdb/price_service_test.go @@ -0,0 +1,755 @@ +package db + +import ( + "context" + "fmt" + "math/big" + "reflect" + "sort" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + "github.com/smartcontractkit/chainlink-common/pkg/utils/tests" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/assets" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils" + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest" + "github.com/smartcontractkit/chainlink/v2/core/logger" + cciporm "github.com/smartcontractkit/chainlink/v2/core/services/ccip" + ccipmocks "github.com/smartcontractkit/chainlink/v2/core/services/ccip/mocks" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcommon" + ccipdatamocks "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/mocks" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/pricegetter" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/prices" +) + +func TestPriceService_priceCleanup(t *testing.T) { + lggr := logger.TestLogger(t) + jobId := int32(1) + destChainSelector := uint64(12345) + sourceChainSelector := uint64(67890) + + testCases := []struct { + name string + gasPriceError bool + tokenPriceError bool + expectedErr bool + }{ + { + name: "ORM called successfully", + gasPriceError: false, + tokenPriceError: false, + expectedErr: false, + }, + { + name: "gasPrice clear failed", + gasPriceError: true, + tokenPriceError: false, + expectedErr: true, + }, + { + name: "tokenPrice clear failed", + gasPriceError: false, + tokenPriceError: true, + expectedErr: true, + }, + { + name: "both ORM calls failed", + gasPriceError: true, + tokenPriceError: true, + expectedErr: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ctx := tests.Context(t) + + var gasPricesError error + var tokenPricesError error + if tc.gasPriceError { + gasPricesError = fmt.Errorf("gas prices error") + } + if tc.tokenPriceError { + tokenPricesError = fmt.Errorf("token prices error") + } + + mockOrm := ccipmocks.NewORM(t) + mockOrm.On("ClearGasPricesByDestChain", ctx, destChainSelector, priceExpireSec).Return(gasPricesError).Once() + mockOrm.On("ClearTokenPricesByDestChain", ctx, destChainSelector, priceExpireSec).Return(tokenPricesError).Once() + + priceService := NewPriceService( + lggr, + mockOrm, + jobId, + destChainSelector, + sourceChainSelector, + "", + nil, + nil, + ).(*priceService) + err := priceService.runCleanup(ctx) + if tc.expectedErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} + +func TestPriceService_priceWrite(t *testing.T) { + lggr := logger.TestLogger(t) + jobId := int32(1) + destChainSelector := uint64(12345) + sourceChainSelector := uint64(67890) + + gasPrice := big.NewInt(1e18) + tokenPrices := map[cciptypes.Address]*big.Int{ + "0x123": big.NewInt(2e18), + "0x234": big.NewInt(3e18), + } + + expectedGasPriceUpdate := []cciporm.GasPriceUpdate{ + { + SourceChainSelector: sourceChainSelector, + GasPrice: 
assets.NewWei(gasPrice), + }, + } + expectedTokenPriceUpdate := []cciporm.TokenPriceUpdate{ + { + TokenAddr: "0x123", + TokenPrice: assets.NewWei(big.NewInt(2e18)), + }, + { + TokenAddr: "0x234", + TokenPrice: assets.NewWei(big.NewInt(3e18)), + }, + } + + testCases := []struct { + name string + gasPriceError bool + tokenPriceError bool + expectedErr bool + }{ + { + name: "ORM called successfully", + gasPriceError: false, + tokenPriceError: false, + expectedErr: false, + }, + { + name: "gasPrice clear failed", + gasPriceError: true, + tokenPriceError: false, + expectedErr: true, + }, + { + name: "tokenPrice clear failed", + gasPriceError: false, + tokenPriceError: true, + expectedErr: true, + }, + { + name: "both ORM calls failed", + gasPriceError: true, + tokenPriceError: true, + expectedErr: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ctx := tests.Context(t) + + var gasPricesError error + var tokenPricesError error + if tc.gasPriceError { + gasPricesError = fmt.Errorf("gas prices error") + } + if tc.tokenPriceError { + tokenPricesError = fmt.Errorf("token prices error") + } + + mockOrm := ccipmocks.NewORM(t) + mockOrm.On("InsertGasPricesForDestChain", ctx, destChainSelector, jobId, expectedGasPriceUpdate).Return(gasPricesError).Once() + mockOrm.On("InsertTokenPricesForDestChain", ctx, destChainSelector, jobId, expectedTokenPriceUpdate).Return(tokenPricesError).Once() + + priceService := NewPriceService( + lggr, + mockOrm, + jobId, + destChainSelector, + sourceChainSelector, + "", + nil, + nil, + ).(*priceService) + err := priceService.writePricesToDB(ctx, gasPrice, tokenPrices) + if tc.expectedErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} + +func TestPriceService_generatePriceUpdates(t *testing.T) { + lggr := logger.TestLogger(t) + jobId := int32(1) + destChainSelector := uint64(12345) + sourceChainSelector := uint64(67890) + + const nTokens = 10 + tokens := make([]cciptypes.Address, nTokens) + for i := range tokens { + tokens[i] = cciptypes.Address(utils.RandomAddress().String()) + } + sort.Slice(tokens, func(i, j int) bool { return tokens[i] < tokens[j] }) + + testCases := []struct { + name string + tokenDecimals map[cciptypes.Address]uint8 + sourceNativeToken cciptypes.Address + priceGetterRespData map[cciptypes.Address]*big.Int + priceGetterRespErr error + feeEstimatorRespFee *big.Int + feeEstimatorRespErr error + maxGasPrice uint64 + expSourceGasPriceUSD *big.Int + expTokenPricesUSD map[cciptypes.Address]*big.Int + expErr bool + }{ + { + name: "base", + tokenDecimals: map[cciptypes.Address]uint8{ + tokens[0]: 18, + tokens[1]: 12, + }, + sourceNativeToken: tokens[0], + priceGetterRespData: map[cciptypes.Address]*big.Int{ + tokens[0]: val1e18(100), + tokens[1]: val1e18(200), + tokens[2]: val1e18(300), // price getter returned a price for this token even though we didn't request it (should be skipped) + }, + priceGetterRespErr: nil, + feeEstimatorRespFee: big.NewInt(10), + feeEstimatorRespErr: nil, + maxGasPrice: 1e18, + expSourceGasPriceUSD: big.NewInt(1000), + expTokenPricesUSD: map[cciptypes.Address]*big.Int{ + tokens[0]: val1e18(100), + tokens[1]: val1e18(200 * 1e6), + }, + expErr: false, + }, + { + name: "price getter returned an error", + tokenDecimals: map[cciptypes.Address]uint8{ + tokens[0]: 18, + tokens[1]: 18, + }, + sourceNativeToken: tokens[0], + priceGetterRespData: nil, + priceGetterRespErr: fmt.Errorf("some random network error"), + expErr: true, + }, + { + name: "price getter skipped a 
requested price", + tokenDecimals: map[cciptypes.Address]uint8{ + tokens[0]: 18, + tokens[1]: 18, + }, + sourceNativeToken: tokens[0], + priceGetterRespData: map[cciptypes.Address]*big.Int{ + tokens[0]: val1e18(100), + }, + priceGetterRespErr: nil, + expErr: true, + }, + { + name: "price getter skipped source native price", + tokenDecimals: map[cciptypes.Address]uint8{ + tokens[0]: 18, + tokens[1]: 18, + }, + sourceNativeToken: tokens[2], + priceGetterRespData: map[cciptypes.Address]*big.Int{ + tokens[0]: val1e18(100), + tokens[1]: val1e18(200), + }, + priceGetterRespErr: nil, + expErr: true, + }, + { + name: "dynamic fee cap overrides legacy", + tokenDecimals: map[cciptypes.Address]uint8{ + tokens[0]: 18, + tokens[1]: 18, + }, + sourceNativeToken: tokens[0], + priceGetterRespData: map[cciptypes.Address]*big.Int{ + tokens[0]: val1e18(100), + tokens[1]: val1e18(200), + tokens[2]: val1e18(300), // price getter returned a price for this token even though we didn't request it (should be skipped) + }, + priceGetterRespErr: nil, + feeEstimatorRespFee: big.NewInt(20), + feeEstimatorRespErr: nil, + maxGasPrice: 1e18, + expSourceGasPriceUSD: big.NewInt(2000), + expTokenPricesUSD: map[cciptypes.Address]*big.Int{ + tokens[0]: val1e18(100), + tokens[1]: val1e18(200), + }, + expErr: false, + }, + { + name: "nil gas price", + tokenDecimals: map[cciptypes.Address]uint8{ + tokens[0]: 18, + tokens[1]: 18, + }, + sourceNativeToken: tokens[0], + priceGetterRespData: map[cciptypes.Address]*big.Int{ + tokens[0]: val1e18(100), + tokens[1]: val1e18(200), + tokens[2]: val1e18(300), // price getter returned a price for this token even though we didn't request it (should be skipped) + }, + feeEstimatorRespFee: nil, + maxGasPrice: 1e18, + expErr: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + priceGetter := pricegetter.NewMockPriceGetter(t) + defer priceGetter.AssertExpectations(t) + + gasPriceEstimator := prices.NewMockGasPriceEstimatorCommit(t) + defer gasPriceEstimator.AssertExpectations(t) + + var destTokens []cciptypes.Address + for tk := range tc.tokenDecimals { + destTokens = append(destTokens, tk) + } + sort.Slice(destTokens, func(i, j int) bool { + return destTokens[i] < destTokens[j] + }) + var destDecimals []uint8 + for _, token := range destTokens { + destDecimals = append(destDecimals, tc.tokenDecimals[token]) + } + + queryTokens := ccipcommon.FlattenUniqueSlice([]cciptypes.Address{tc.sourceNativeToken}, destTokens) + + if len(queryTokens) > 0 { + priceGetter.On("TokenPricesUSD", mock.Anything, queryTokens).Return(tc.priceGetterRespData, tc.priceGetterRespErr) + } + + if tc.maxGasPrice > 0 { + gasPriceEstimator.On("GetGasPrice", mock.Anything).Return(tc.feeEstimatorRespFee, tc.feeEstimatorRespErr) + if tc.feeEstimatorRespFee != nil { + pUSD := ccipcalc.CalculateUsdPerUnitGas(tc.feeEstimatorRespFee, tc.expTokenPricesUSD[tc.sourceNativeToken]) + gasPriceEstimator.On("DenoteInUSD", mock.Anything, mock.Anything).Return(pUSD, nil) + } + } + + destPriceReg := ccipdatamocks.NewPriceRegistryReader(t) + destPriceReg.On("GetTokensDecimals", mock.Anything, destTokens).Return(destDecimals, nil).Maybe() + + priceService := NewPriceService( + lggr, + nil, + jobId, + destChainSelector, + sourceChainSelector, + tc.sourceNativeToken, + priceGetter, + nil, + ).(*priceService) + priceService.gasPriceEstimator = gasPriceEstimator + priceService.destPriceRegistryReader = destPriceReg + + sourceGasPriceUSD, tokenPricesUSD, err := 
priceService.generatePriceUpdates(context.Background(), lggr, destTokens) + if tc.expErr { + assert.Error(t, err) + return + } + assert.NoError(t, err) + assert.True(t, tc.expSourceGasPriceUSD.Cmp(sourceGasPriceUSD) == 0) + assert.True(t, reflect.DeepEqual(tc.expTokenPricesUSD, tokenPricesUSD)) + }) + } +} + +func TestPriceService_calculateUsdPer1e18TokenAmount(t *testing.T) { + testCases := []struct { + name string + price *big.Int + decimal uint8 + wantResult *big.Int + }{ + { + name: "18-decimal token, $6.5 per token", + price: big.NewInt(65e17), + decimal: 18, + wantResult: big.NewInt(65e17), + }, + { + name: "6-decimal token, $1 per token", + price: big.NewInt(1e18), + decimal: 6, + wantResult: new(big.Int).Mul(big.NewInt(1e18), big.NewInt(1e12)), // 1e30 + }, + { + name: "0-decimal token, $1 per token", + price: big.NewInt(1e18), + decimal: 0, + wantResult: new(big.Int).Mul(big.NewInt(1e18), big.NewInt(1e18)), // 1e36 + }, + { + name: "36-decimal token, $1 per token", + price: big.NewInt(1e18), + decimal: 36, + wantResult: big.NewInt(1), + }, + } + for _, tt := range testCases { + t.Run(tt.name, func(t *testing.T) { + got := calculateUsdPer1e18TokenAmount(tt.price, tt.decimal) + assert.Equal(t, tt.wantResult, got) + }) + } +} + +func TestPriceService_GetGasAndTokenPrices(t *testing.T) { + lggr := logger.TestLogger(t) + jobId := int32(1) + destChainSelector := uint64(12345) + sourceChainSelector := uint64(67890) + + token1 := ccipcalc.HexToAddress("0x123") + token2 := ccipcalc.HexToAddress("0x234") + + gasPrice := big.NewInt(1e18) + tokenPrices := map[cciptypes.Address]*big.Int{ + token1: big.NewInt(2e18), + token2: big.NewInt(3e18), + } + + testCases := []struct { + name string + ormGasPricesResult []cciporm.GasPrice + ormTokenPricesResult []cciporm.TokenPrice + + expectedGasPrices map[uint64]*big.Int + expectedTokenPrices map[cciptypes.Address]*big.Int + + gasPriceError bool + tokenPriceError bool + expectedErr bool + }{ + { + name: "ORM called successfully", + ormGasPricesResult: []cciporm.GasPrice{ + { + SourceChainSelector: sourceChainSelector, + GasPrice: assets.NewWei(gasPrice), + }, + }, + ormTokenPricesResult: []cciporm.TokenPrice{ + { + TokenAddr: string(token1), + TokenPrice: assets.NewWei(tokenPrices[token1]), + }, + { + TokenAddr: string(token2), + TokenPrice: assets.NewWei(tokenPrices[token2]), + }, + }, + expectedGasPrices: map[uint64]*big.Int{ + sourceChainSelector: gasPrice, + }, + expectedTokenPrices: tokenPrices, + gasPriceError: false, + tokenPriceError: false, + expectedErr: false, + }, + { + name: "multiple gas prices with nil token price", + ormGasPricesResult: []cciporm.GasPrice{ + { + SourceChainSelector: sourceChainSelector, + GasPrice: assets.NewWei(gasPrice), + }, + { + SourceChainSelector: sourceChainSelector + 1, + GasPrice: assets.NewWei(big.NewInt(200)), + }, + { + SourceChainSelector: sourceChainSelector + 2, + GasPrice: assets.NewWei(big.NewInt(300)), + }, + }, + ormTokenPricesResult: nil, + expectedGasPrices: map[uint64]*big.Int{ + sourceChainSelector: gasPrice, + sourceChainSelector + 1: big.NewInt(200), + sourceChainSelector + 2: big.NewInt(300), + }, + expectedTokenPrices: map[cciptypes.Address]*big.Int{}, + gasPriceError: false, + tokenPriceError: false, + expectedErr: false, + }, + { + name: "multiple token prices with nil gas price", + ormGasPricesResult: nil, + ormTokenPricesResult: []cciporm.TokenPrice{ + { + TokenAddr: string(token1), + TokenPrice: assets.NewWei(tokenPrices[token1]), + }, + { + TokenAddr: string(token2), + TokenPrice: 
assets.NewWei(tokenPrices[token2]), + }, + }, + expectedGasPrices: map[uint64]*big.Int{}, + expectedTokenPrices: tokenPrices, + gasPriceError: false, + tokenPriceError: false, + expectedErr: false, + }, + { + name: "nil prices filtered out", + ormGasPricesResult: []cciporm.GasPrice{ + { + SourceChainSelector: sourceChainSelector, + GasPrice: nil, + }, + { + SourceChainSelector: sourceChainSelector + 1, + GasPrice: assets.NewWei(gasPrice), + }, + }, + ormTokenPricesResult: []cciporm.TokenPrice{ + { + TokenAddr: string(token1), + TokenPrice: assets.NewWei(tokenPrices[token1]), + }, + { + TokenAddr: string(token2), + TokenPrice: nil, + }, + }, + expectedGasPrices: map[uint64]*big.Int{ + sourceChainSelector + 1: gasPrice, + }, + expectedTokenPrices: map[cciptypes.Address]*big.Int{ + token1: tokenPrices[token1], + }, + gasPriceError: false, + tokenPriceError: false, + expectedErr: false, + }, + { + name: "gasPrice clear failed", + gasPriceError: true, + tokenPriceError: false, + expectedErr: true, + }, + { + name: "tokenPrice clear failed", + gasPriceError: false, + tokenPriceError: true, + expectedErr: true, + }, + { + name: "both ORM calls failed", + gasPriceError: true, + tokenPriceError: true, + expectedErr: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ctx := tests.Context(t) + + mockOrm := ccipmocks.NewORM(t) + if tc.gasPriceError { + mockOrm.On("GetGasPricesByDestChain", ctx, destChainSelector).Return(nil, fmt.Errorf("gas prices error")).Once() + } else { + mockOrm.On("GetGasPricesByDestChain", ctx, destChainSelector).Return(tc.ormGasPricesResult, nil).Once() + } + if tc.tokenPriceError { + mockOrm.On("GetTokenPricesByDestChain", ctx, destChainSelector).Return(nil, fmt.Errorf("token prices error")).Once() + } else { + mockOrm.On("GetTokenPricesByDestChain", ctx, destChainSelector).Return(tc.ormTokenPricesResult, nil).Once() + } + + priceService := NewPriceService( + lggr, + mockOrm, + jobId, + destChainSelector, + sourceChainSelector, + "", + nil, + nil, + ).(*priceService) + gasPricesResult, tokenPricesResult, err := priceService.GetGasAndTokenPrices(ctx, destChainSelector) + if tc.expectedErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, tc.expectedGasPrices, gasPricesResult) + assert.Equal(t, tc.expectedTokenPrices, tokenPricesResult) + } + }) + } +} + +func val1e18(val int64) *big.Int { + return new(big.Int).Mul(big.NewInt(1e18), big.NewInt(val)) +} + +func setupORM(t *testing.T) cciporm.ORM { + t.Helper() + + db := pgtest.NewSqlxDB(t) + orm, err := cciporm.NewORM(db) + + require.NoError(t, err) + + return orm +} + +func checkResultLen(t *testing.T, priceService PriceService, destChainSelector uint64, gasCount int, tokenCount int) error { + ctx := tests.Context(t) + dbGasResult, dbTokenResult, err := priceService.GetGasAndTokenPrices(ctx, destChainSelector) + if err != nil { + return nil + } + if len(dbGasResult) != gasCount { + return fmt.Errorf("expected %d gas prices, got %d", gasCount, len(dbGasResult)) + } + if len(dbTokenResult) != tokenCount { + return fmt.Errorf("expected %d token prices, got %d", tokenCount, len(dbTokenResult)) + } + return nil +} + +func TestPriceService_priceWriteAndCleanupInBackground(t *testing.T) { + lggr := logger.TestLogger(t) + jobId := int32(1) + destChainSelector := uint64(12345) + sourceChainSelector := uint64(67890) + ctx := tests.Context(t) + + sourceNative := cciptypes.Address("0x123") + feeTokens := []cciptypes.Address{"0x234"} + rampTokens := 
[]cciptypes.Address{"0x345", "0x456"} + rampFilteredTokens := []cciptypes.Address{"0x345"} + rampFilterOutTokens := []cciptypes.Address{"0x456"} + + laneTokens := []cciptypes.Address{"0x234", "0x345"} + laneTokenDecimals := []uint8{18, 18} + + tokens := []cciptypes.Address{sourceNative, "0x234", "0x345"} + tokenPrices := []int64{2, 3, 4} + gasPrice := big.NewInt(10) + + orm := setupORM(t) + + priceGetter := pricegetter.NewMockPriceGetter(t) + defer priceGetter.AssertExpectations(t) + + gasPriceEstimator := prices.NewMockGasPriceEstimatorCommit(t) + defer gasPriceEstimator.AssertExpectations(t) + + priceGetter.On("TokenPricesUSD", mock.Anything, tokens).Return(map[cciptypes.Address]*big.Int{ + tokens[0]: val1e18(tokenPrices[0]), + tokens[1]: val1e18(tokenPrices[1]), + tokens[2]: val1e18(tokenPrices[2]), + }, nil) + priceGetter.On("FilterConfiguredTokens", mock.Anything, rampTokens).Return(rampFilteredTokens, rampFilterOutTokens, nil) + + offRampReader := ccipdatamocks.NewOffRampReader(t) + offRampReader.On("GetTokens", mock.Anything).Return(cciptypes.OffRampTokens{ + DestinationTokens: rampTokens, + }, nil).Maybe() + + gasPriceEstimator.On("GetGasPrice", mock.Anything).Return(gasPrice, nil) + pUSD := ccipcalc.CalculateUsdPerUnitGas(gasPrice, val1e18(tokenPrices[0])) + gasPriceEstimator.On("DenoteInUSD", mock.Anything, mock.Anything).Return(pUSD, nil) + + destPriceReg := ccipdatamocks.NewPriceRegistryReader(t) + destPriceReg.On("GetTokensDecimals", mock.Anything, laneTokens).Return(laneTokenDecimals, nil).Maybe() + destPriceReg.On("GetFeeTokens", mock.Anything).Return(feeTokens, nil).Maybe() + + priceService := NewPriceService( + lggr, + orm, + jobId, + destChainSelector, + sourceChainSelector, + tokens[0], + priceGetter, + offRampReader, + ).(*priceService) + + updateInterval := 2000 * time.Millisecond + cleanupInterval := 3000 * time.Millisecond + + // run write task every 2 second + priceService.updateInterval = updateInterval + // run cleanup every 3 seconds + priceService.cleanupInterval = cleanupInterval + // expire all prices during every cleanup + priceService.priceExpireSec = 0 + + // initially, db is empty + assert.NoError(t, checkResultLen(t, priceService, destChainSelector, 0, 0)) + + // starts PriceService in the background + assert.NoError(t, priceService.Start(ctx)) + + // setting dynamicConfig triggers initial price update + err := priceService.UpdateDynamicConfig(ctx, gasPriceEstimator, destPriceReg) + assert.NoError(t, err) + assert.NoError(t, checkResultLen(t, priceService, destChainSelector, 1, len(laneTokens))) + + // eventually prices will be cleaned + assert.Eventually(t, func() bool { + err := checkResultLen(t, priceService, destChainSelector, 0, 0) + return err == nil + }, testutils.WaitTimeout(t), testutils.TestInterval) + + // then prices will be updated again + assert.Eventually(t, func() bool { + err := checkResultLen(t, priceService, destChainSelector, 1, len(laneTokens)) + return err == nil + }, testutils.WaitTimeout(t), testutils.TestInterval) + + assert.NoError(t, priceService.Close()) + assert.NoError(t, priceService.runCleanup(ctx)) + + // after stopping PriceService and runCleanup, no more updates are inserted + for i := 0; i < 5; i++ { + time.Sleep(time.Second) + assert.NoError(t, checkResultLen(t, priceService, destChainSelector, 0, 0)) + } +} diff --git a/core/services/ocr2/plugins/ccip/internal/logpollerutil/filters.go b/core/services/ocr2/plugins/ccip/internal/logpollerutil/filters.go new file mode 100644 index 00000000000..e42dd8c154d --- /dev/null 
+++ b/core/services/ocr2/plugins/ccip/internal/logpollerutil/filters.go @@ -0,0 +1,73 @@ +package logpollerutil + +import ( + "context" + + "github.com/ethereum/go-ethereum/common" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils" +) + +func RegisterLpFilters(lp logpoller.LogPoller, filters []logpoller.Filter) error { + for _, lpFilter := range filters { + if filterContainsZeroAddress(lpFilter.Addresses) { + continue + } + // FIXME Dim pgOpts removed from LogPoller + if err := lp.RegisterFilter(context.Background(), lpFilter); err != nil { + return err + } + } + return nil +} + +func UnregisterLpFilters(lp logpoller.LogPoller, filters []logpoller.Filter) error { + for _, lpFilter := range filters { + if filterContainsZeroAddress(lpFilter.Addresses) { + continue + } + // FIXME Dim pgOpts removed from LogPoller + if err := lp.UnregisterFilter(context.Background(), lpFilter.Name); err != nil { + return err + } + } + return nil +} + +func FiltersDiff(filtersBefore, filtersNow []logpoller.Filter) (created, deleted []logpoller.Filter) { + created = make([]logpoller.Filter, 0, len(filtersNow)) + deleted = make([]logpoller.Filter, 0, len(filtersBefore)) + + for _, f := range filtersNow { + if !containsFilter(filtersBefore, f) { + created = append(created, f) + } + } + + for _, f := range filtersBefore { + if !containsFilter(filtersNow, f) { + deleted = append(deleted, f) + } + } + + return created, deleted +} + +func containsFilter(filters []logpoller.Filter, f logpoller.Filter) bool { + for _, existing := range filters { + if existing.Name == f.Name { + return true + } + } + return false +} + +func filterContainsZeroAddress(addrs []common.Address) bool { + for _, addr := range addrs { + if addr == utils.ZeroAddress { + return true + } + } + return false +} diff --git a/core/services/ocr2/plugins/ccip/internal/logpollerutil/filters_test.go b/core/services/ocr2/plugins/ccip/internal/logpollerutil/filters_test.go new file mode 100644 index 00000000000..9ea08ec1421 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/logpollerutil/filters_test.go @@ -0,0 +1,156 @@ +package logpollerutil + +import ( + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/assert" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" +) + +func Test_FiltersDiff(t *testing.T) { + type args struct { + filtersBefore []logpoller.Filter + filtersNow []logpoller.Filter + } + tests := []struct { + name string + args args + wantCreated []logpoller.Filter + wantDeleted []logpoller.Filter + }{ + { + name: "no diff, both empty", + args: args{ + filtersBefore: []logpoller.Filter{}, + filtersNow: []logpoller.Filter{}, + }, + wantCreated: []logpoller.Filter{}, + wantDeleted: []logpoller.Filter{}, + }, + { + name: "no diff, both non-empty", + args: args{ + filtersBefore: []logpoller.Filter{{Name: "a"}}, + filtersNow: []logpoller.Filter{{Name: "a"}}, + }, + wantCreated: []logpoller.Filter{}, + wantDeleted: []logpoller.Filter{}, + }, + { + name: "no diff, only name matters", + args: args{ + filtersBefore: []logpoller.Filter{{Name: "a", Retention: time.Minute}}, + filtersNow: []logpoller.Filter{{Name: "a", Retention: time.Second}}, + }, + wantCreated: []logpoller.Filter{}, + wantDeleted: []logpoller.Filter{}, + }, + { + name: "diff for both created and deleted", + args: args{ + filtersBefore: []logpoller.Filter{{Name: "e"}, {Name: "a"}, {Name: "b"}}, + filtersNow: 
[]logpoller.Filter{{Name: "a"}, {Name: "c"}, {Name: "d"}}, + }, + wantCreated: []logpoller.Filter{{Name: "c"}, {Name: "d"}}, + wantDeleted: []logpoller.Filter{{Name: "e"}, {Name: "b"}}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotCreated, gotDeleted := FiltersDiff(tt.args.filtersBefore, tt.args.filtersNow) + assert.Equalf(t, tt.wantCreated, gotCreated, "filtersDiff(%v, %v)", tt.args.filtersBefore, tt.args.filtersNow) + assert.Equalf(t, tt.wantDeleted, gotDeleted, "filtersDiff(%v, %v)", tt.args.filtersBefore, tt.args.filtersNow) + }) + } +} + +func Test_filterContainsZeroAddress(t *testing.T) { + type args struct { + addrs []common.Address + } + tests := []struct { + name string + args args + want bool + }{ + { + name: "non-zero addrs", + args: args{ + addrs: []common.Address{ + common.HexToAddress("1"), + common.HexToAddress("2"), + common.HexToAddress("3"), + }, + }, + want: false, + }, + { + name: "empty", + args: args{addrs: []common.Address{}}, + want: false, + }, + { + name: "zero addr", + args: args{ + addrs: []common.Address{ + common.HexToAddress("1"), + common.HexToAddress("0"), + common.HexToAddress("2"), + common.HexToAddress("3"), + }, + }, + want: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equalf(t, tt.want, filterContainsZeroAddress(tt.args.addrs), "filterContainsZeroAddress(%v)", tt.args.addrs) + }) + } +} + +func Test_containsFilter(t *testing.T) { + type args struct { + filters []logpoller.Filter + f logpoller.Filter + } + tests := []struct { + name string + args args + want bool + }{ + { + name: "empty", + args: args{ + filters: []logpoller.Filter{}, + f: logpoller.Filter{}, + }, + want: false, + }, + { + name: "contains", + args: args{ + filters: []logpoller.Filter{{Name: "a"}, {Name: "b"}}, + f: logpoller.Filter{Name: "b"}, + }, + want: true, + }, + { + name: "does not contain", + args: args{ + filters: []logpoller.Filter{{Name: "a"}, {Name: "b"}}, + f: logpoller.Filter{Name: "c"}, + }, + want: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equalf(t, tt.want, + containsFilter(tt.args.filters, tt.args.f), "containsFilter(%v, %v)", tt.args.filters, tt.args.f) + }) + } +} diff --git a/core/services/ocr2/plugins/ccip/internal/observability/commit_store.go b/core/services/ocr2/plugins/ccip/internal/observability/commit_store.go new file mode 100644 index 00000000000..6a1fb48f498 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/observability/commit_store.go @@ -0,0 +1,75 @@ +package observability + +import ( + "context" + "time" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata" +) + +type ObservedCommitStoreReader struct { + ccipdata.CommitStoreReader + metric metricDetails +} + +func NewObservedCommitStoreReader(origin ccipdata.CommitStoreReader, chainID int64, pluginName string) *ObservedCommitStoreReader { + return &ObservedCommitStoreReader{ + CommitStoreReader: origin, + metric: metricDetails{ + interactionDuration: readerHistogram, + resultSetSize: readerDatasetSize, + pluginName: pluginName, + readerName: "CommitStoreReader", + chainId: chainID, + }, + } +} + +func (o *ObservedCommitStoreReader) GetExpectedNextSequenceNumber(context context.Context) (uint64, error) { + return withObservedInteraction(o.metric, "GetExpectedNextSequenceNumber", func() (uint64, error) { + return 
o.CommitStoreReader.GetExpectedNextSequenceNumber(context) + }) +} + +func (o *ObservedCommitStoreReader) GetLatestPriceEpochAndRound(context context.Context) (uint64, error) { + return withObservedInteraction(o.metric, "GetLatestPriceEpochAndRound", func() (uint64, error) { + return o.CommitStoreReader.GetLatestPriceEpochAndRound(context) + }) +} + +func (o *ObservedCommitStoreReader) GetCommitReportMatchingSeqNum(ctx context.Context, seqNum uint64, confs int) ([]cciptypes.CommitStoreReportWithTxMeta, error) { + return withObservedInteractionAndResults(o.metric, "GetCommitReportMatchingSeqNum", func() ([]cciptypes.CommitStoreReportWithTxMeta, error) { + return o.CommitStoreReader.GetCommitReportMatchingSeqNum(ctx, seqNum, confs) + }) +} + +func (o *ObservedCommitStoreReader) GetAcceptedCommitReportsGteTimestamp(ctx context.Context, ts time.Time, confs int) ([]cciptypes.CommitStoreReportWithTxMeta, error) { + return withObservedInteractionAndResults(o.metric, "GetAcceptedCommitReportsGteTimestamp", func() ([]cciptypes.CommitStoreReportWithTxMeta, error) { + return o.CommitStoreReader.GetAcceptedCommitReportsGteTimestamp(ctx, ts, confs) + }) +} + +func (o *ObservedCommitStoreReader) IsDown(ctx context.Context) (bool, error) { + return withObservedInteraction(o.metric, "IsDown", func() (bool, error) { + return o.CommitStoreReader.IsDown(ctx) + }) +} + +func (o *ObservedCommitStoreReader) IsBlessed(ctx context.Context, root [32]byte) (bool, error) { + return withObservedInteraction(o.metric, "IsBlessed", func() (bool, error) { + return o.CommitStoreReader.IsBlessed(ctx, root) + }) +} + +func (o *ObservedCommitStoreReader) VerifyExecutionReport(ctx context.Context, report cciptypes.ExecReport) (bool, error) { + return withObservedInteraction(o.metric, "VerifyExecutionReport", func() (bool, error) { + return o.CommitStoreReader.VerifyExecutionReport(ctx, report) + }) +} + +func (o *ObservedCommitStoreReader) GetCommitStoreStaticConfig(ctx context.Context) (cciptypes.CommitStoreStaticConfig, error) { + return withObservedInteraction(o.metric, "GetCommitStoreStaticConfig", func() (cciptypes.CommitStoreStaticConfig, error) { + return o.CommitStoreReader.GetCommitStoreStaticConfig(ctx) + }) +} diff --git a/core/services/ocr2/plugins/ccip/internal/observability/metrics.go b/core/services/ocr2/plugins/ccip/internal/observability/metrics.go new file mode 100644 index 00000000000..9e161fdd9ae --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/observability/metrics.go @@ -0,0 +1,75 @@ +package observability + +import ( + "strconv" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" +) + +var ( + latencyBuckets = []float64{ + float64(10 * time.Millisecond), + float64(25 * time.Millisecond), + float64(50 * time.Millisecond), + float64(75 * time.Millisecond), + float64(100 * time.Millisecond), + float64(200 * time.Millisecond), + float64(300 * time.Millisecond), + float64(400 * time.Millisecond), + float64(500 * time.Millisecond), + float64(750 * time.Millisecond), + float64(1 * time.Second), + float64(2 * time.Second), + float64(3 * time.Second), + float64(4 * time.Second), + } + labels = []string{"evmChainID", "plugin", "reader", "function", "success"} + readerHistogram = promauto.NewHistogramVec(prometheus.HistogramOpts{ + Name: "ccip_reader_duration", + Help: "Duration of calls to Reader instance", + Buckets: latencyBuckets, + }, labels) + readerDatasetSize = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: 
"ccip_reader_dataset_size", + Help: "Size of the dataset returned from the Reader instance", + }, labels) +) + +type metricDetails struct { + interactionDuration *prometheus.HistogramVec + resultSetSize *prometheus.GaugeVec + pluginName string + readerName string + chainId int64 +} + +func withObservedInteraction[T any](metric metricDetails, function string, f func() (T, error)) (T, error) { + contractExecutionStarted := time.Now() + value, err := f() + metric.interactionDuration. + WithLabelValues( + strconv.FormatInt(metric.chainId, 10), + metric.pluginName, + metric.readerName, + function, + strconv.FormatBool(err == nil), + ). + Observe(float64(time.Since(contractExecutionStarted))) + return value, err +} + +func withObservedInteractionAndResults[T any](metric metricDetails, function string, f func() ([]T, error)) ([]T, error) { + results, err := withObservedInteraction(metric, function, f) + if err == nil { + metric.resultSetSize.WithLabelValues( + strconv.FormatInt(metric.chainId, 10), + metric.pluginName, + metric.readerName, + function, + strconv.FormatBool(err == nil), + ).Set(float64(len(results))) + } + return results, err +} diff --git a/core/services/ocr2/plugins/ccip/internal/observability/metrics_test.go b/core/services/ocr2/plugins/ccip/internal/observability/metrics_test.go new file mode 100644 index 00000000000..3d84acf9616 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/observability/metrics_test.go @@ -0,0 +1,87 @@ +package observability + +import ( + "fmt" + "testing" + + "github.com/prometheus/client_golang/prometheus" + io_prometheus_client "github.com/prometheus/client_model/go" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" + ccipdatamocks "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/mocks" +) + +func TestProperLabelsArePassed(t *testing.T) { + histogram := readerHistogram + successCounter := 10 + failedCounter := 5 + + details := metricDetails{ + interactionDuration: histogram, + pluginName: "plugin", + readerName: "reader", + chainId: 123, + } + + for i := 0; i < successCounter; i++ { + _, err := withObservedInteraction[string](details, "successFun", successfulContract) + require.NoError(t, err) + } + + for i := 0; i < failedCounter; i++ { + _, err := withObservedInteraction[string](details, "failedFun", failedContract) + require.Error(t, err) + } + + assert.Equal(t, successCounter, counterFromHistogramByLabels(t, histogram, "123", "plugin", "reader", "successFun", "true")) + assert.Equal(t, failedCounter, counterFromHistogramByLabels(t, histogram, "123", "plugin", "reader", "failedFun", "false")) + + assert.Equal(t, 0, counterFromHistogramByLabels(t, histogram, "123", "plugin", "reader", "failedFun", "true")) + assert.Equal(t, 0, counterFromHistogramByLabels(t, histogram, "123", "plugin", "reader", "successFun", "false")) +} + +func TestMetricsSendFromContractDirectly(t *testing.T) { + expectedCounter := 4 + ctx := testutils.Context(t) + chainId := int64(420) + + mockedOfframp := ccipdatamocks.NewOffRampReader(t) + mockedOfframp.On("GetTokens", ctx).Return(cciptypes.OffRampTokens{}, fmt.Errorf("execution error")) + + observedOfframp := NewObservedOffRampReader(mockedOfframp, chainId, "plugin") + + for i := 0; i < expectedCounter; i++ { + _, _ = observedOfframp.GetTokens(ctx) + } + + assert.Equal(t, expectedCounter, 
counterFromHistogramByLabels(t, observedOfframp.metric.interactionDuration, "420", "plugin", "OffRampReader", "GetTokens", "false")) + assert.Equal(t, 0, counterFromHistogramByLabels(t, observedOfframp.metric.interactionDuration, "420", "plugin", "OffRampReader", "GetPoolByDestToken", "false")) + assert.Equal(t, 0, counterFromHistogramByLabels(t, observedOfframp.metric.interactionDuration, "420", "plugin", "OffRampReader", "GetPoolByDestToken", "true")) +} + +func counterFromHistogramByLabels(t *testing.T, histogramVec *prometheus.HistogramVec, labels ...string) int { + observer, err := histogramVec.GetMetricWithLabelValues(labels...) + require.NoError(t, err) + + metricCh := make(chan prometheus.Metric, 1) + observer.(prometheus.Histogram).Collect(metricCh) + close(metricCh) + + metric := <-metricCh + pb := &io_prometheus_client.Metric{} + err = metric.Write(pb) + require.NoError(t, err) + + return int(pb.GetHistogram().GetSampleCount()) +} + +func successfulContract() (string, error) { + return "success", nil +} + +func failedContract() (string, error) { + return "", fmt.Errorf("just error") +} diff --git a/core/services/ocr2/plugins/ccip/internal/observability/offramp.go b/core/services/ocr2/plugins/ccip/internal/observability/offramp.go new file mode 100644 index 00000000000..b426bc8c91d --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/observability/offramp.go @@ -0,0 +1,69 @@ +package observability + +import ( + "context" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata" +) + +type ObservedOffRampReader struct { + ccipdata.OffRampReader + metric metricDetails +} + +func NewObservedOffRampReader(origin ccipdata.OffRampReader, chainID int64, pluginName string) *ObservedOffRampReader { + return &ObservedOffRampReader{ + OffRampReader: origin, + metric: metricDetails{ + interactionDuration: readerHistogram, + resultSetSize: readerDatasetSize, + pluginName: pluginName, + readerName: "OffRampReader", + chainId: chainID, + }, + } +} + +func (o *ObservedOffRampReader) GetExecutionStateChangesBetweenSeqNums(ctx context.Context, seqNumMin, seqNumMax uint64, confs int) ([]cciptypes.ExecutionStateChangedWithTxMeta, error) { + return withObservedInteraction(o.metric, "GetExecutionStateChangesBetweenSeqNums", func() ([]cciptypes.ExecutionStateChangedWithTxMeta, error) { + return o.OffRampReader.GetExecutionStateChangesBetweenSeqNums(ctx, seqNumMin, seqNumMax, confs) + }) +} + +func (o *ObservedOffRampReader) CurrentRateLimiterState(ctx context.Context) (cciptypes.TokenBucketRateLimit, error) { + return withObservedInteraction(o.metric, "CurrentRateLimiterState", func() (cciptypes.TokenBucketRateLimit, error) { + return o.OffRampReader.CurrentRateLimiterState(ctx) + }) +} + +func (o *ObservedOffRampReader) GetExecutionState(ctx context.Context, sequenceNumber uint64) (uint8, error) { + return withObservedInteraction(o.metric, "GetExecutionState", func() (uint8, error) { + return o.OffRampReader.GetExecutionState(ctx, sequenceNumber) + }) +} + +func (o *ObservedOffRampReader) GetStaticConfig(ctx context.Context) (cciptypes.OffRampStaticConfig, error) { + return withObservedInteraction(o.metric, "GetStaticConfig", func() (cciptypes.OffRampStaticConfig, error) { + return o.OffRampReader.GetStaticConfig(ctx) + }) +} + +func (o *ObservedOffRampReader) GetSourceToDestTokensMapping(ctx context.Context) (map[cciptypes.Address]cciptypes.Address, error) { + return 
withObservedInteraction(o.metric, "GetSourceToDestTokensMapping", func() (map[cciptypes.Address]cciptypes.Address, error) { + return o.OffRampReader.GetSourceToDestTokensMapping(ctx) + }) +} + +func (o *ObservedOffRampReader) GetTokens(ctx context.Context) (cciptypes.OffRampTokens, error) { + return withObservedInteraction(o.metric, "GetTokens", func() (cciptypes.OffRampTokens, error) { + return o.OffRampReader.GetTokens(ctx) + }) +} + +func (o *ObservedOffRampReader) GetSendersNonce(ctx context.Context, senders []cciptypes.Address) (map[cciptypes.Address]uint64, error) { + return withObservedInteraction(o.metric, "ListSenderNonces", func() (map[cciptypes.Address]uint64, error) { + return o.OffRampReader.ListSenderNonces(ctx, senders) + }) +} diff --git a/core/services/ocr2/plugins/ccip/internal/observability/onramp.go b/core/services/ocr2/plugins/ccip/internal/observability/onramp.go new file mode 100644 index 00000000000..b167bd57b06 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/observability/onramp.go @@ -0,0 +1,63 @@ +package observability + +import ( + "context" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata" +) + +type ObservedOnRampReader struct { + ccipdata.OnRampReader + metric metricDetails +} + +func NewObservedOnRampReader(origin ccipdata.OnRampReader, chainID int64, pluginName string) *ObservedOnRampReader { + return &ObservedOnRampReader{ + OnRampReader: origin, + metric: metricDetails{ + interactionDuration: readerHistogram, + resultSetSize: readerDatasetSize, + pluginName: pluginName, + readerName: "OnRampReader", + chainId: chainID, + }, + } +} + +func (o ObservedOnRampReader) GetSendRequestsBetweenSeqNums(ctx context.Context, seqNumMin, seqNumMax uint64, finalized bool) ([]cciptypes.EVM2EVMMessageWithTxMeta, error) { + return withObservedInteractionAndResults(o.metric, "GetSendRequestsBetweenSeqNums", func() ([]cciptypes.EVM2EVMMessageWithTxMeta, error) { + return o.OnRampReader.GetSendRequestsBetweenSeqNums(ctx, seqNumMin, seqNumMax, finalized) + }) +} + +func (o ObservedOnRampReader) RouterAddress(ctx context.Context) (cciptypes.Address, error) { + return withObservedInteraction(o.metric, "RouterAddress", func() (cciptypes.Address, error) { + return o.OnRampReader.RouterAddress(ctx) + }) +} + +func (o ObservedOnRampReader) GetDynamicConfig(ctx context.Context) (cciptypes.OnRampDynamicConfig, error) { + return withObservedInteraction(o.metric, "GetDynamicConfig", func() (cciptypes.OnRampDynamicConfig, error) { + return o.OnRampReader.GetDynamicConfig(ctx) + }) +} + +func (o ObservedOnRampReader) IsSourceCursed(ctx context.Context) (bool, error) { + return withObservedInteraction(o.metric, "IsSourceCursed", func() (bool, error) { + return o.OnRampReader.IsSourceCursed(ctx) + }) +} + +func (o ObservedOnRampReader) IsSourceChainHealthy(ctx context.Context) (bool, error) { + return withObservedInteraction(o.metric, "IsSourceChainHealthy", func() (bool, error) { + return o.OnRampReader.IsSourceChainHealthy(ctx) + }) +} + +func (o ObservedOnRampReader) SourcePriceRegistryAddress(ctx context.Context) (cciptypes.Address, error) { + return withObservedInteraction(o.metric, "SourcePriceRegistryAddress", func() (cciptypes.Address, error) { + return o.OnRampReader.SourcePriceRegistryAddress(ctx) + }) +} diff --git a/core/services/ocr2/plugins/ccip/internal/observability/onramp_observed_test.go 
b/core/services/ocr2/plugins/ccip/internal/observability/onramp_observed_test.go new file mode 100644 index 00000000000..1918f632b94 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/observability/onramp_observed_test.go @@ -0,0 +1,155 @@ +package observability + +import ( + "fmt" + "reflect" + "runtime" + "strings" + "testing" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/mocks" +) + +type MethodCall struct { + MethodName string + Arguments []interface{} + Returns []interface{} +} + +// The class expected to override the observed methods. +const expectedWrapper = "core/services/ocr2/plugins/ccip/internal/observability.ObservedOnRampReader" + +// TestOnRampObservedMethods tests that all methods of OnRampReader are observed by a wrapper. +// It uses the runtime to detect if the call stack contains the wrapper class. +func TestOnRampObservedMethods(t *testing.T) { + // Methods not expected to be observed. + // Add a method name here to exclude it from the test. + excludedMethods := []string{ + "Address", + "Close", + } + + // Defines the overridden method calls to test. + // Not defining a non-excluded method here will cause the test to fail with an explicit error. + methodCalls := make(map[string]MethodCall) + methodCalls["GetDynamicConfig"] = MethodCall{ + MethodName: "GetDynamicConfig", + Arguments: []interface{}{testutils.Context(t)}, + Returns: []interface{}{cciptypes.OnRampDynamicConfig{}, nil}, + } + methodCalls["GetSendRequestsBetweenSeqNums"] = MethodCall{ + MethodName: "GetSendRequestsBetweenSeqNums", + Arguments: []interface{}{testutils.Context(t), uint64(0), uint64(100), true}, + Returns: []interface{}{nil, nil}, + } + methodCalls["IsSourceChainHealthy"] = MethodCall{ + MethodName: "IsSourceChainHealthy", + Arguments: []interface{}{testutils.Context(t)}, + Returns: []interface{}{false, nil}, + } + methodCalls["IsSourceCursed"] = MethodCall{ + MethodName: "IsSourceCursed", + Arguments: []interface{}{testutils.Context(t)}, + Returns: []interface{}{false, nil}, + } + methodCalls["RouterAddress"] = MethodCall{ + MethodName: "RouterAddress", + Arguments: []interface{}{testutils.Context(t)}, + Returns: []interface{}{cciptypes.Address("0x0"), nil}, + } + methodCalls["SourcePriceRegistryAddress"] = MethodCall{ + MethodName: "SourcePriceRegistryAddress", + Arguments: []interface{}{testutils.Context(t)}, + Returns: []interface{}{cciptypes.Address("0x0"), nil}, + } + + // Test each method defined in the embedded type. + observed, reader := buildReader(t) + observedType := reflect.TypeOf(observed) + for i := 0; i < observedType.NumMethod(); i++ { + method := observedType.Method(i) + testMethod(t, method, methodCalls, excludedMethods, reader, observed) + } +} + +func testMethod(t *testing.T, method reflect.Method, methodCalls map[string]MethodCall, excludedMethods []string, reader *mocks.OnRampReader, observed ObservedOnRampReader) { + t.Run(fmt.Sprintf("observability_wrapper_%s", method.Name), func(t *testing.T) { + // Skip excluded methods. + for _, em := range excludedMethods { + if method.Name == em { + // Skipping ignore method (not an error). 
+ return + } + } + + // Retrieve method call from definition (fail if not present). + mc := methodCalls[method.Name] + if mc.MethodName == "" { + assert.Fail(t, fmt.Sprintf("method %s not defined in methodCalls, please define it or exclude it.", method.Name)) + return + } + + assertCallByWrapper(t, reader, mc) + + // Perform call on observed object. + callParams := buildCallParams(mc) + methodc := reflect.ValueOf(&observed).MethodByName(mc.MethodName) + methodc.Call(callParams) + }) +} + +// Set the mock to fail if not called by the wrapper. +func assertCallByWrapper(t *testing.T, reader *mocks.OnRampReader, mc MethodCall) { + reader.On(mc.MethodName, mc.Arguments...).Maybe().Return(mc.Returns...).Run(func(args mock.Arguments) { + var i = 0 + var pc uintptr + var ok = true + for ok { + pc, _, _, ok = runtime.Caller(i) + f := runtime.FuncForPC(pc) + if strings.Contains(f.Name(), expectedWrapper) { + // Found the expected wrapper in the call stack. + return + } + i++ + } + assert.Fail(t, fmt.Sprintf("method %s not observed by wrapper. Please implement the method or add it to the excluded list.", mc.MethodName)) + }) +} + +func buildCallParams(mc MethodCall) []reflect.Value { + callParams := make([]reflect.Value, len(mc.Arguments)) + for i, arg := range mc.Arguments { + callParams[i] = reflect.ValueOf(arg) + } + return callParams +} + +// Build a mock reader and an observed wrapper to be used in the tests. +func buildReader(t *testing.T) (ObservedOnRampReader, *mocks.OnRampReader) { + labels = []string{"evmChainID", "plugin", "reader", "function", "success"} + ph := promauto.NewHistogramVec(prometheus.HistogramOpts{ + Name: "test_histogram", + }, labels) + pg := promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "test_gauge", + }, labels) + metric := metricDetails{ + interactionDuration: ph, + resultSetSize: pg, + pluginName: "test plugin", + readerName: "test reader", + chainId: 1337, + } + reader := mocks.NewOnRampReader(t) + observed := ObservedOnRampReader{reader, metric} + return observed, reader +} diff --git a/core/services/ocr2/plugins/ccip/internal/observability/price_registry.go b/core/services/ocr2/plugins/ccip/internal/observability/price_registry.go new file mode 100644 index 00000000000..f5b87686d35 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/observability/price_registry.go @@ -0,0 +1,64 @@ +package observability + +import ( + "context" + "time" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata" +) + +type ObservedPriceRegistryReader struct { + ccipdata.PriceRegistryReader + metric metricDetails +} + +func NewPriceRegistryReader(origin ccipdata.PriceRegistryReader, chainID int64, pluginName string) *ObservedPriceRegistryReader { + return &ObservedPriceRegistryReader{ + PriceRegistryReader: origin, + metric: metricDetails{ + interactionDuration: readerHistogram, + resultSetSize: readerDatasetSize, + pluginName: pluginName, + readerName: "PriceRegistryReader", + chainId: chainID, + }, + } +} + +func (o *ObservedPriceRegistryReader) GetTokenPriceUpdatesCreatedAfter(ctx context.Context, ts time.Time, confs int) ([]cciptypes.TokenPriceUpdateWithTxMeta, error) { + return withObservedInteractionAndResults(o.metric, "GetTokenPriceUpdatesCreatedAfter", func() ([]cciptypes.TokenPriceUpdateWithTxMeta, error) { + return o.PriceRegistryReader.GetTokenPriceUpdatesCreatedAfter(ctx, ts, confs) + }) +} + +func (o *ObservedPriceRegistryReader) 
GetGasPriceUpdatesCreatedAfter(ctx context.Context, chainSelector uint64, ts time.Time, confs int) ([]cciptypes.GasPriceUpdateWithTxMeta, error) { + return withObservedInteractionAndResults(o.metric, "GetGasPriceUpdatesCreatedAfter", func() ([]cciptypes.GasPriceUpdateWithTxMeta, error) { + return o.PriceRegistryReader.GetGasPriceUpdatesCreatedAfter(ctx, chainSelector, ts, confs) + }) +} + +func (o *ObservedPriceRegistryReader) GetAllGasPriceUpdatesCreatedAfter(ctx context.Context, ts time.Time, confs int) ([]cciptypes.GasPriceUpdateWithTxMeta, error) { + return withObservedInteractionAndResults(o.metric, "GetAllGasPriceUpdatesCreatedAfter", func() ([]cciptypes.GasPriceUpdateWithTxMeta, error) { + return o.PriceRegistryReader.GetAllGasPriceUpdatesCreatedAfter(ctx, ts, confs) + }) +} + +func (o *ObservedPriceRegistryReader) GetFeeTokens(ctx context.Context) ([]cciptypes.Address, error) { + return withObservedInteraction(o.metric, "GetFeeTokens", func() ([]cciptypes.Address, error) { + return o.PriceRegistryReader.GetFeeTokens(ctx) + }) +} + +func (o *ObservedPriceRegistryReader) GetTokenPrices(ctx context.Context, wantedTokens []cciptypes.Address) ([]cciptypes.TokenPriceUpdate, error) { + return withObservedInteractionAndResults(o.metric, "GetTokenPrices", func() ([]cciptypes.TokenPriceUpdate, error) { + return o.PriceRegistryReader.GetTokenPrices(ctx, wantedTokens) + }) +} + +func (o *ObservedPriceRegistryReader) GetTokensDecimals(ctx context.Context, tokenAddresses []cciptypes.Address) ([]uint8, error) { + return withObservedInteractionAndResults(o.metric, "GetTokensDecimals", func() ([]uint8, error) { + return o.PriceRegistryReader.GetTokensDecimals(ctx, tokenAddresses) + }) +} diff --git a/core/services/ocr2/plugins/ccip/internal/oraclelib/backfilled_oracle.go b/core/services/ocr2/plugins/ccip/internal/oraclelib/backfilled_oracle.go new file mode 100644 index 00000000000..d2851e3a079 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/oraclelib/backfilled_oracle.go @@ -0,0 +1,218 @@ +package oraclelib + +import ( + "context" + "sync" + "sync/atomic" + "time" + + "github.com/smartcontractkit/chainlink/v2/core/services" + + "go.uber.org/multierr" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/job" +) + +type BackfilledOracle struct { + srcStartBlock, dstStartBlock uint64 + oracleStarted atomic.Bool + cancelFn context.CancelFunc + src, dst logpoller.LogPoller + oracle job.ServiceCtx + lggr logger.Logger +} + +func NewBackfilledOracle(lggr logger.Logger, src, dst logpoller.LogPoller, srcStartBlock, dstStartBlock uint64, oracle job.ServiceCtx) *BackfilledOracle { + return &BackfilledOracle{ + srcStartBlock: srcStartBlock, + dstStartBlock: dstStartBlock, + oracleStarted: atomic.Bool{}, + cancelFn: nil, + src: src, + dst: dst, + oracle: oracle, + lggr: lggr, + } +} + +func (r *BackfilledOracle) Start(_ context.Context) error { + go r.Run() + return nil +} + +func (r *BackfilledOracle) IsRunning() bool { + return r.oracleStarted.Load() +} + +func (r *BackfilledOracle) Run() { + ctx, cancelFn := context.WithCancel(context.Background()) + r.cancelFn = cancelFn + var err error + var errMu sync.Mutex + var wg sync.WaitGroup + // Replay in parallel if both requested. 
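+	// A start block of 0 means no backfill was requested for that chain, so each
+	// replay below is optional. The replays run concurrently; their errors are
+	// combined under errMu and only logged, since a failed backfill should not
+	// prevent the oracle from starting.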
+	if r.srcStartBlock != 0 {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			s := time.Now()
+			r.lggr.Infow("start replaying src chain", "fromBlock", r.srcStartBlock)
+			srcReplayErr := r.src.Replay(ctx, int64(r.srcStartBlock))
+			errMu.Lock()
+			err = multierr.Combine(err, srcReplayErr)
+			errMu.Unlock()
+			r.lggr.Infow("finished replaying src chain", "time", time.Since(s))
+		}()
+	}
+	if r.dstStartBlock != 0 {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			s := time.Now()
+			r.lggr.Infow("start replaying dst chain", "fromBlock", r.dstStartBlock)
+			dstReplayErr := r.dst.Replay(ctx, int64(r.dstStartBlock))
+			errMu.Lock()
+			err = multierr.Combine(err, dstReplayErr)
+			errMu.Unlock()
+			r.lggr.Infow("finished replaying dst chain", "time", time.Since(s))
+		}()
+	}
+	wg.Wait()
+	if err != nil {
+		r.lggr.Criticalw("unexpected error replaying, continuing plugin boot without all the logs backfilled", "err", err)
+	}
+	if err := ctx.Err(); err != nil {
+		r.lggr.Errorw("context already cancelled", "err", err)
+		return
+	}
+	// Start oracle with all logs present from dstStartBlock on dst and
+	// all logs from srcStartBlock on src.
+	if err := r.oracle.Start(ctx); err != nil {
+		// Should never happen.
+		r.lggr.Errorw("unexpected error starting oracle", "err", err)
+	} else {
+		r.oracleStarted.Store(true)
+	}
+}
+
+func (r *BackfilledOracle) Close() error {
+	if r.oracleStarted.Load() {
+		// If the oracle is running, it must be closed/stopped.
+		if err := r.oracle.Close(); err != nil {
+			r.lggr.Errorw("unexpected error stopping oracle", "err", err)
+			return err
+		}
+		// Flag the oracle as closed with the internal variable that tracks
+		// its state. This allows the process to be restarted.
+		r.oracleStarted.Store(false)
+	}
+	if r.cancelFn != nil {
+		// This is useful to stop the tasks that were spawned in parallel
+		// before starting the Oracle. It uses the context to signal them to
+		// exit immediately.
+		//
+		// This can be the only way to stop the Start() async flow, especially
+		// when the previous tasks (the replays) are still running and
+		// `oracleStarted` is false in that case. Calling `cancelFn()` stops
+		// the replays and prevents the oracle from starting.
+		r.cancelFn()
+	}
+	return nil
+}
+
+func NewChainAgnosticBackFilledOracle(lggr logger.Logger, srcProvider services.ServiceCtx, dstProvider services.ServiceCtx, oracle job.ServiceCtx) *ChainAgnosticBackFilledOracle {
+	return &ChainAgnosticBackFilledOracle{
+		srcProvider: srcProvider,
+		dstProvider: dstProvider,
+		oracle:      oracle,
+		lggr:        lggr,
+	}
+}
+
+type ChainAgnosticBackFilledOracle struct {
+	srcProvider   services.ServiceCtx
+	dstProvider   services.ServiceCtx
+	oracle        job.ServiceCtx
+	lggr          logger.Logger
+	oracleStarted atomic.Bool
+	cancelFn      context.CancelFunc
+}
+
+func (r *ChainAgnosticBackFilledOracle) Start(_ context.Context) error {
+	go r.run()
+	return nil
+}
+
+func (r *ChainAgnosticBackFilledOracle) run() {
+	ctx, cancelFn := context.WithCancel(context.Background())
+	r.cancelFn = cancelFn
+	var err error
+	var errMu sync.Mutex
+	var wg sync.WaitGroup
+	// Start both providers in parallel.
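+	// Unlike BackfilledOracle above, this chain-agnostic variant delegates the
+	// backfill to the providers' Start implementations instead of calling
+	// LogPoller.Replay directly.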
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		s := time.Now()
+		srcReplayErr := r.srcProvider.Start(ctx)
+		errMu.Lock()
+		err = multierr.Combine(err, srcReplayErr)
+		errMu.Unlock()
+		r.lggr.Infow("finished replaying src chain", "time", time.Since(s))
+	}()
+
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		s := time.Now()
+		dstReplayErr := r.dstProvider.Start(ctx)
+		errMu.Lock()
+		err = multierr.Combine(err, dstReplayErr)
+		errMu.Unlock()
+		r.lggr.Infow("finished replaying dst chain", "time", time.Since(s))
+	}()
+
+	wg.Wait()
+	if err != nil {
+		r.lggr.Criticalw("unexpected error replaying, continuing plugin boot without all the logs backfilled", "err", err)
+	}
+	if err := ctx.Err(); err != nil {
+		r.lggr.Errorw("context already cancelled", "err", err)
+	}
+	// Start the oracle once both providers have finished their backfill, so
+	// all required logs are present on src and dst.
+	if err := r.oracle.Start(ctx); err != nil {
+		// Should never happen.
+		r.lggr.Errorw("unexpected error starting oracle", "err", err)
+	} else {
+		r.oracleStarted.Store(true)
+	}
+}
+
+func (r *ChainAgnosticBackFilledOracle) Close() error {
+	if r.oracleStarted.Load() {
+		// If the oracle is running, it must be closed/stopped.
+		// TODO: Close should be safe to call in either case?
+		if err := r.oracle.Close(); err != nil {
+			r.lggr.Errorw("unexpected error stopping oracle", "err", err)
+			return err
+		}
+		// Flag the oracle as closed with the internal variable that tracks
+		// its state. This allows the process to be restarted.
+		r.oracleStarted.Store(false)
+	}
+	if r.cancelFn != nil {
+		// This is useful to stop the tasks that were spawned in parallel
+		// before starting the Oracle. It uses the context to signal them to
+		// exit immediately.
+		//
+		// This can be the only way to stop the Start() async flow, especially
+		// when the previous tasks (the replays) are still running and
+		// `oracleStarted` is false in that case. Calling `cancelFn()` stops
+		// the replays and prevents the oracle from starting.
+		r.cancelFn()
+	}
+	return nil
+}
diff --git a/core/services/ocr2/plugins/ccip/internal/oraclelib/backfilled_oracle_test.go b/core/services/ocr2/plugins/ccip/internal/oraclelib/backfilled_oracle_test.go
new file mode 100644
index 00000000000..6db1ebbadd9
--- /dev/null
+++ b/core/services/ocr2/plugins/ccip/internal/oraclelib/backfilled_oracle_test.go
@@ -0,0 +1,56 @@
+package oraclelib
+
+import (
+	"testing"
+
+	"github.com/pkg/errors"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/mock"
+
+	lpmocks "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller/mocks"
+	"github.com/smartcontractkit/chainlink/v2/core/logger"
+	jobmocks "github.com/smartcontractkit/chainlink/v2/core/services/job/mocks"
+)
+
+func TestBackfilledOracle(t *testing.T) {
+	// First scenario: Start() fails; check that all Replays are still called.
+ lp1 := lpmocks.NewLogPoller(t) + lp2 := lpmocks.NewLogPoller(t) + lp1.On("Replay", mock.Anything, int64(1)).Return(nil) + lp2.On("Replay", mock.Anything, int64(2)).Return(nil) + oracle1 := jobmocks.NewServiceCtx(t) + oracle1.On("Start", mock.Anything).Return(errors.New("Failed to start")).Twice() + job := NewBackfilledOracle(logger.TestLogger(t), lp1, lp2, 1, 2, oracle1) + + job.Run() + assert.False(t, job.IsRunning()) + job.Run() + assert.False(t, job.IsRunning()) + + /// Start -> Stop -> Start + oracle2 := jobmocks.NewServiceCtx(t) + oracle2.On("Start", mock.Anything).Return(nil).Twice() + oracle2.On("Close").Return(nil).Once() + + job2 := NewBackfilledOracle(logger.TestLogger(t), lp1, lp2, 1, 2, oracle2) + job2.Run() + assert.True(t, job2.IsRunning()) + assert.Nil(t, job2.Close()) + assert.False(t, job2.IsRunning()) + assert.Nil(t, job2.Close()) + assert.False(t, job2.IsRunning()) + job2.Run() + assert.True(t, job2.IsRunning()) + + /// Replay fails, but it starts anyway + lp11 := lpmocks.NewLogPoller(t) + lp12 := lpmocks.NewLogPoller(t) + lp11.On("Replay", mock.Anything, int64(1)).Return(errors.New("Replay failed")).Once() + lp12.On("Replay", mock.Anything, int64(2)).Return(errors.New("Replay failed")).Once() + + oracle := jobmocks.NewServiceCtx(t) + oracle.On("Start", mock.Anything).Return(nil).Once() + job3 := NewBackfilledOracle(logger.NullLogger, lp11, lp12, 1, 2, oracle) + job3.Run() + assert.True(t, job3.IsRunning()) +} diff --git a/core/services/ocr2/plugins/ccip/internal/parseutil/bigint.go b/core/services/ocr2/plugins/ccip/internal/parseutil/bigint.go new file mode 100644 index 00000000000..48d0d261653 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/parseutil/bigint.go @@ -0,0 +1,44 @@ +package parseutil + +import ( + "math/big" + + "github.com/pkg/errors" + "github.com/shopspring/decimal" +) + +func ParseBigIntFromAny(val any) (*big.Int, error) { + if val == nil { + return nil, errors.Errorf("nil value passed") + } + + switch v := val.(type) { + case decimal.Decimal: + return ParseBigIntFromString(v.String()) + case *decimal.Decimal: + return ParseBigIntFromString(v.String()) + case *big.Int: + return v, nil + case string: + return ParseBigIntFromString(v) + case int: + return big.NewInt(int64(v)), nil + case int64: + return big.NewInt(v), nil + case float64: + i := new(big.Int) + big.NewFloat(v).Int(i) + return i, nil + default: + return nil, errors.Errorf("unsupported big int type %T", val) + } +} + +func ParseBigIntFromString(v string) (*big.Int, error) { + valBigInt, success := new(big.Int).SetString(v, 10) + if !success { + return nil, errors.Errorf("unable to convert to integer %s", v) + } + + return valBigInt, nil +} diff --git a/core/services/ocr2/plugins/ccip/internal/parseutil/bigint_test.go b/core/services/ocr2/plugins/ccip/internal/parseutil/bigint_test.go new file mode 100644 index 00000000000..cea2f8cc19c --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/parseutil/bigint_test.go @@ -0,0 +1,42 @@ +package parseutil + +import ( + "math/big" + "testing" + + "github.com/shopspring/decimal" + "github.com/stretchr/testify/assert" +) + +func TestParseBigIntFromAny(t *testing.T) { + decimalVal := decimal.New(123, 0) + + testCases := []struct { + name string + val any + res *big.Int + expErr bool + }{ + {name: "nil", val: nil, expErr: true}, + {name: "string", val: "123", res: big.NewInt(123)}, + {name: "decimal", val: decimal.New(123, 0), res: big.NewInt(123)}, + {name: "decimal pointer", val: &decimalVal, res: big.NewInt(123)}, + {name: 
"int64", val: int64(123), res: big.NewInt(123)}, + {name: "int", val: 123, res: big.NewInt(123)}, + {name: "float", val: 123.12, res: big.NewInt(123)}, + {name: "uint8", val: uint8(12), expErr: true}, + {name: "struct", val: struct{ name string }{name: "asd"}, expErr: true}, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + res, err := ParseBigIntFromAny(tc.val) + if tc.expErr { + assert.Error(t, err) + return + } + assert.NoError(t, err) + assert.Equal(t, tc.res, res) + }) + } +} diff --git a/core/services/ocr2/plugins/ccip/internal/pricegetter/evm.go b/core/services/ocr2/plugins/ccip/internal/pricegetter/evm.go new file mode 100644 index 00000000000..ed54428bd9b --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/pricegetter/evm.go @@ -0,0 +1,239 @@ +package pricegetter + +import ( + "context" + "encoding/json" + "fmt" + "math/big" + "strings" + + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/rpclib" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + + "github.com/smartcontractkit/chainlink/v2/core/internal/gethwrappers2/generated/offchainaggregator" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc" +) + +const decimalsMethodName = "decimals" +const latestRoundDataMethodName = "latestRoundData" + +func init() { + // Ensure existence of latestRoundData method on the Aggregator contract. + aggregatorABI, err := abi.JSON(strings.NewReader(offchainaggregator.OffchainAggregatorABI)) + if err != nil { + panic(err) + } + ensureMethodOnContract(aggregatorABI, decimalsMethodName) + ensureMethodOnContract(aggregatorABI, latestRoundDataMethodName) +} + +func ensureMethodOnContract(abi abi.ABI, methodName string) { + if _, ok := abi.Methods[methodName]; !ok { + panic(fmt.Errorf("method %s not found on ABI: %+v", methodName, abi.Methods)) + } +} + +type DynamicPriceGetterClient struct { + BatchCaller rpclib.EvmBatchCaller +} + +func NewDynamicPriceGetterClient(batchCaller rpclib.EvmBatchCaller) DynamicPriceGetterClient { + return DynamicPriceGetterClient{ + BatchCaller: batchCaller, + } +} + +type DynamicPriceGetter struct { + cfg config.DynamicPriceGetterConfig + evmClients map[uint64]DynamicPriceGetterClient + aggregatorAbi abi.ABI +} + +func NewDynamicPriceGetterConfig(configJson string) (config.DynamicPriceGetterConfig, error) { + priceGetterConfig := config.DynamicPriceGetterConfig{} + err := json.Unmarshal([]byte(configJson), &priceGetterConfig) + if err != nil { + return config.DynamicPriceGetterConfig{}, fmt.Errorf("parsing dynamic price getter config: %w", err) + } + err = priceGetterConfig.Validate() + if err != nil { + return config.DynamicPriceGetterConfig{}, fmt.Errorf("validating price getter config: %w", err) + } + return priceGetterConfig, nil +} + +// NewDynamicPriceGetter build a DynamicPriceGetter from a configuration and a map of chain ID to batch callers. +// A batch caller should be provided for all retrieved prices. 
+func NewDynamicPriceGetter(cfg config.DynamicPriceGetterConfig, evmClients map[uint64]DynamicPriceGetterClient) (*DynamicPriceGetter, error) { + if err := cfg.Validate(); err != nil { + return nil, fmt.Errorf("validating dynamic price getter config: %w", err) + } + aggregatorAbi, err := abi.JSON(strings.NewReader(offchainaggregator.OffchainAggregatorABI)) + if err != nil { + return nil, fmt.Errorf("parsing offchainaggregator abi: %w", err) + } + priceGetter := DynamicPriceGetter{cfg, evmClients, aggregatorAbi} + return &priceGetter, nil +} + +// FilterConfiguredTokens implements the PriceGetter interface. +// It filters a list of token addresses for only those that have a price resolution rule configured on the PriceGetterConfig +func (d *DynamicPriceGetter) FilterConfiguredTokens(ctx context.Context, tokens []cciptypes.Address) (configured []cciptypes.Address, unconfigured []cciptypes.Address, err error) { + configured = []cciptypes.Address{} + unconfigured = []cciptypes.Address{} + for _, tk := range tokens { + evmAddr, err := ccipcalc.GenericAddrToEvm(tk) + if err != nil { + return nil, nil, err + } + + if _, isAgg := d.cfg.AggregatorPrices[evmAddr]; isAgg { + configured = append(configured, tk) + } else if _, isStatic := d.cfg.StaticPrices[evmAddr]; isStatic { + configured = append(configured, tk) + } else { + unconfigured = append(unconfigured, tk) + } + } + return configured, unconfigured, nil +} + +// TokenPricesUSD implements the PriceGetter interface. +// It returns static prices stored in the price getter, and batch calls aggregators (one per chain) to retrieve aggregator-based prices. +func (d *DynamicPriceGetter) TokenPricesUSD(ctx context.Context, tokens []cciptypes.Address) (map[cciptypes.Address]*big.Int, error) { + prices, batchCallsPerChain, err := d.preparePricesAndBatchCallsPerChain(tokens) + if err != nil { + return nil, err + } + if err = d.performBatchCalls(ctx, batchCallsPerChain, prices); err != nil { + return nil, err + } + return prices, nil +} + +// performBatchCalls performs batch calls on all chains to retrieve token prices. +func (d *DynamicPriceGetter) performBatchCalls(ctx context.Context, batchCallsPerChain map[uint64]*batchCallsForChain, prices map[cciptypes.Address]*big.Int) error { + for chainID, batchCalls := range batchCallsPerChain { + if err := d.performBatchCall(ctx, chainID, batchCalls, prices); err != nil { + return err + } + } + return nil +} + +// performBatchCall performs a batch call on a given chain to retrieve token prices. +func (d *DynamicPriceGetter) performBatchCall(ctx context.Context, chainID uint64, batchCalls *batchCallsForChain, prices map[cciptypes.Address]*big.Int) error { + // Retrieve the EVM caller for the chain. + client, exists := d.evmClients[chainID] + if !exists { + return fmt.Errorf("evm caller for chain %d not found", chainID) + } + evmCaller := client.BatchCaller + + nbDecimalCalls := len(batchCalls.decimalCalls) + nbLatestRoundDataCalls := len(batchCalls.decimalCalls) + + // Perform batched call (all decimals calls followed by latest round data calls). + calls := make([]rpclib.EvmCall, 0, nbDecimalCalls+nbLatestRoundDataCalls) + calls = append(calls, batchCalls.decimalCalls...) + calls = append(calls, batchCalls.latestRoundDataCalls...) + + results, err := evmCaller.BatchCall(ctx, 0, calls) + if err != nil { + return fmt.Errorf("batch call on chain %d failed: %w", chainID, err) + } + + // Extract results. 
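+	// Results are returned in the same order the calls were appended: the first
+	// nbDecimalCalls entries are decimals() outputs, the remaining entries are
+	// latestRoundData() outputs (the price answer is output index 1).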
+ decimals := make([]uint8, 0, nbDecimalCalls) + latestRounds := make([]*big.Int, 0, nbLatestRoundDataCalls) + + for i, res := range results[0:nbDecimalCalls] { + v, err1 := rpclib.ParseOutput[uint8](res, 0) + if err1 != nil { + callSignature := batchCalls.decimalCalls[i].String() + return fmt.Errorf("parse contract output while calling %v on chain %d: %w", callSignature, chainID, err1) + } + decimals = append(decimals, v) + } + + for i, res := range results[nbDecimalCalls : nbDecimalCalls+nbLatestRoundDataCalls] { + // latestRoundData function has multiple outputs (roundId,answer,startedAt,updatedAt,answeredInRound). + // we want the second one (answer, at idx=1). + v, err1 := rpclib.ParseOutput[*big.Int](res, 1) + if err1 != nil { + callSignature := batchCalls.latestRoundDataCalls[i].String() + return fmt.Errorf("parse contract output while calling %v on chain %d: %w", callSignature, chainID, err1) + } + latestRounds = append(latestRounds, v) + } + + // Normalize and store prices. + for i := range batchCalls.tokenOrder { + // Normalize to 1e18. + if decimals[i] < 18 { + latestRounds[i].Mul(latestRounds[i], big.NewInt(0).Exp(big.NewInt(10), big.NewInt(18-int64(decimals[i])), nil)) + } else if decimals[i] > 18 { + latestRounds[i].Div(latestRounds[i], big.NewInt(0).Exp(big.NewInt(10), big.NewInt(int64(decimals[i])-18), nil)) + } + prices[ccipcalc.EvmAddrToGeneric(batchCalls.tokenOrder[i])] = latestRounds[i] + } + return nil +} + +// preparePricesAndBatchCallsPerChain uses this price getter to prepare for a list of tokens: +// - the map of token address to their prices (static prices) +// - the map of and batch calls per chain for the given tokens (dynamic prices) +func (d *DynamicPriceGetter) preparePricesAndBatchCallsPerChain(tokens []cciptypes.Address) (map[cciptypes.Address]*big.Int, map[uint64]*batchCallsForChain, error) { + prices := make(map[cciptypes.Address]*big.Int, len(tokens)) + batchCallsPerChain := make(map[uint64]*batchCallsForChain) + evmAddrs, err := ccipcalc.GenericAddrsToEvm(tokens...) + if err != nil { + return nil, nil, err + } + for _, tk := range evmAddrs { + if aggCfg, isAgg := d.cfg.AggregatorPrices[tk]; isAgg { + // Batch calls for aggregator-based token prices (one per chain). + if _, exists := batchCallsPerChain[aggCfg.ChainID]; !exists { + batchCallsPerChain[aggCfg.ChainID] = &batchCallsForChain{ + decimalCalls: []rpclib.EvmCall{}, + latestRoundDataCalls: []rpclib.EvmCall{}, + tokenOrder: []common.Address{}, + } + } + chainCalls := batchCallsPerChain[aggCfg.ChainID] + chainCalls.decimalCalls = append(chainCalls.decimalCalls, rpclib.NewEvmCall( + d.aggregatorAbi, + decimalsMethodName, + aggCfg.AggregatorContractAddress, + )) + chainCalls.latestRoundDataCalls = append(chainCalls.latestRoundDataCalls, rpclib.NewEvmCall( + d.aggregatorAbi, + latestRoundDataMethodName, + aggCfg.AggregatorContractAddress, + )) + chainCalls.tokenOrder = append(chainCalls.tokenOrder, tk) + } else if staticCfg, isStatic := d.cfg.StaticPrices[tk]; isStatic { + // Fill static prices. + prices[ccipcalc.EvmAddrToGeneric(tk)] = staticCfg.Price + } else { + return nil, nil, fmt.Errorf("no price resolution rule for token %s", tk.Hex()) + } + } + return prices, batchCallsPerChain, nil +} + +// batchCallsForChain Defines the batch calls to perform on a given chain. +type batchCallsForChain struct { + decimalCalls []rpclib.EvmCall + latestRoundDataCalls []rpclib.EvmCall + tokenOrder []common.Address // required to maintain the order of the batched rpc calls for mapping the results. 
+} + +func (d *DynamicPriceGetter) Close() error { + return nil +} diff --git a/core/services/ocr2/plugins/ccip/internal/pricegetter/evm_test.go b/core/services/ocr2/plugins/ccip/internal/pricegetter/evm_test.go new file mode 100644 index 00000000000..673b9776c79 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/pricegetter/evm_test.go @@ -0,0 +1,546 @@ +package pricegetter + +import ( + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/aggregator_v3_interface" + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/rpclib" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/rpclib/rpclibmocks" +) + +type testParameters struct { + cfg config.DynamicPriceGetterConfig + evmClients map[uint64]DynamicPriceGetterClient + tokens []common.Address + expectedTokenPrices map[common.Address]big.Int + evmCallErr bool + invalidConfigErrorExpected bool + priceResolutionErrorExpected bool +} + +func TestDynamicPriceGetter(t *testing.T) { + tests := []struct { + name string + param testParameters + }{ + { + name: "aggregator_only_valid", + param: testParamAggregatorOnly(t), + }, + { + name: "aggregator_only_valid_multi", + param: testParamAggregatorOnlyMulti(t), + }, + { + name: "static_only_valid", + param: testParamStaticOnly(), + }, + { + name: "aggregator_and_static_valid", + param: testParamAggregatorAndStaticValid(t), + }, + { + name: "aggregator_and_static_token_collision", + param: testParamAggregatorAndStaticTokenCollision(t), + }, + { + name: "no_aggregator_for_token", + param: testParamNoAggregatorForToken(t), + }, + { + name: "batchCall_returns_err", + param: testParamBatchCallReturnsErr(t), + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + pg, err := NewDynamicPriceGetter(test.param.cfg, test.param.evmClients) + if test.param.invalidConfigErrorExpected { + require.Error(t, err) + return + } + require.NoError(t, err) + ctx := testutils.Context(t) + // Check configured token + unconfiguredTk := cciptypes.Address(utils.RandomAddress().String()) + cfgTokens, uncfgTokens, err := pg.FilterConfiguredTokens(ctx, []cciptypes.Address{unconfiguredTk}) + require.NoError(t, err) + assert.Equal(t, []cciptypes.Address{}, cfgTokens) + assert.Equal(t, []cciptypes.Address{unconfiguredTk}, uncfgTokens) + // Build list of tokens to query. + tokens := make([]cciptypes.Address, 0, len(test.param.tokens)) + for _, tk := range test.param.tokens { + tokenAddr := ccipcalc.EvmAddrToGeneric(tk) + tokens = append(tokens, tokenAddr) + } + prices, err := pg.TokenPricesUSD(ctx, tokens) + + if test.param.evmCallErr { + require.Error(t, err) + return + } + + if test.param.priceResolutionErrorExpected { + require.Error(t, err) + return + } + require.NoError(t, err) + // we expect prices for at least all queried tokens (it is possible that additional tokens are returned). 
+ assert.True(t, len(prices) >= len(test.param.expectedTokenPrices)) + // Check prices are matching expected result. + for tk, expectedPrice := range test.param.expectedTokenPrices { + if prices[cciptypes.Address(tk.String())] == nil { + assert.Fail(t, "Token price not found") + } + assert.Equal(t, 0, expectedPrice.Cmp(prices[cciptypes.Address(tk.String())]), + "Token price mismatch: expected price %v, got %v", expectedPrice, *prices[cciptypes.Address(tk.String())]) + } + }) + } +} + +func testParamAggregatorOnly(t *testing.T) testParameters { + tk1 := utils.RandomAddress() + tk2 := utils.RandomAddress() + tk3 := utils.RandomAddress() + tk4 := utils.RandomAddress() + cfg := config.DynamicPriceGetterConfig{ + AggregatorPrices: map[common.Address]config.AggregatorPriceConfig{ + tk1: { + ChainID: 101, + AggregatorContractAddress: utils.RandomAddress(), + }, + tk2: { + ChainID: 102, + AggregatorContractAddress: utils.RandomAddress(), + }, + tk3: { + ChainID: 103, + AggregatorContractAddress: utils.RandomAddress(), + }, + tk4: { + ChainID: 104, + AggregatorContractAddress: utils.RandomAddress(), + }, + }, + StaticPrices: map[common.Address]config.StaticPriceConfig{}, + } + // Real LINK/USD example from OP. + round1 := aggregator_v3_interface.LatestRoundData{ + RoundId: big.NewInt(1000), + Answer: big.NewInt(1396818990), + StartedAt: big.NewInt(1704896575), + UpdatedAt: big.NewInt(1704896575), + AnsweredInRound: big.NewInt(1000), + } + // Real ETH/USD example from OP. + round2 := aggregator_v3_interface.LatestRoundData{ + RoundId: big.NewInt(2000), + Answer: big.NewInt(238879815123), + StartedAt: big.NewInt(1704897197), + UpdatedAt: big.NewInt(1704897197), + AnsweredInRound: big.NewInt(2000), + } + // Real LINK/ETH example from OP. + round3 := aggregator_v3_interface.LatestRoundData{ + RoundId: big.NewInt(3000), + Answer: big.NewInt(4468862777874802), + StartedAt: big.NewInt(1715743907), + UpdatedAt: big.NewInt(1715743907), + AnsweredInRound: big.NewInt(3000), + } + // Fake data for a token with more than 18 decimals. + round4 := aggregator_v3_interface.LatestRoundData{ + RoundId: big.NewInt(4000), + Answer: multExp(big.NewInt(1234567890), 10), // 20 digits. + StartedAt: big.NewInt(1715753907), + UpdatedAt: big.NewInt(1715753907), + AnsweredInRound: big.NewInt(4000), + } + evmClients := map[uint64]DynamicPriceGetterClient{ + uint64(101): mockClient(t, []uint8{8}, []aggregator_v3_interface.LatestRoundData{round1}), + uint64(102): mockClient(t, []uint8{8}, []aggregator_v3_interface.LatestRoundData{round2}), + uint64(103): mockClient(t, []uint8{18}, []aggregator_v3_interface.LatestRoundData{round3}), + uint64(104): mockClient(t, []uint8{20}, []aggregator_v3_interface.LatestRoundData{round4}), + } + expectedTokenPrices := map[common.Address]big.Int{ + tk1: *multExp(round1.Answer, 10), // expected in 1e18 format. + tk2: *multExp(round2.Answer, 10), // expected in 1e18 format. + tk3: *round3.Answer, // already in 1e18 format (contract decimals==18). + tk4: *multExp(big.NewInt(1234567890), 8), // expected in 1e18 format. + } + return testParameters{ + cfg: cfg, + evmClients: evmClients, + tokens: []common.Address{tk1, tk2, tk3, tk4}, + expectedTokenPrices: expectedTokenPrices, + invalidConfigErrorExpected: false, + } +} + +// testParamAggregatorOnlyMulti test with several tokens on chain 102. 
+func testParamAggregatorOnlyMulti(t *testing.T) testParameters { + tk1 := utils.RandomAddress() + tk2 := utils.RandomAddress() + tk3 := utils.RandomAddress() + cfg := config.DynamicPriceGetterConfig{ + AggregatorPrices: map[common.Address]config.AggregatorPriceConfig{ + tk1: { + ChainID: 101, + AggregatorContractAddress: utils.RandomAddress(), + }, + tk2: { + ChainID: 102, + AggregatorContractAddress: utils.RandomAddress(), + }, + tk3: { + ChainID: 102, + AggregatorContractAddress: utils.RandomAddress(), + }, + }, + StaticPrices: map[common.Address]config.StaticPriceConfig{}, + } + // Real LINK/USD example from OP. + round1 := aggregator_v3_interface.LatestRoundData{ + RoundId: big.NewInt(1000), + Answer: big.NewInt(1396818990), + StartedAt: big.NewInt(1704896575), + UpdatedAt: big.NewInt(1704896575), + AnsweredInRound: big.NewInt(1000), + } + // Real ETH/USD example from OP. + round2 := aggregator_v3_interface.LatestRoundData{ + RoundId: big.NewInt(2000), + Answer: big.NewInt(238879815123), + StartedAt: big.NewInt(1704897197), + UpdatedAt: big.NewInt(1704897197), + AnsweredInRound: big.NewInt(2000), + } + round3 := aggregator_v3_interface.LatestRoundData{ + RoundId: big.NewInt(3000), + Answer: big.NewInt(238879815125), + StartedAt: big.NewInt(1704897198), + UpdatedAt: big.NewInt(1704897198), + AnsweredInRound: big.NewInt(3000), + } + evmClients := map[uint64]DynamicPriceGetterClient{ + uint64(101): mockClient(t, []uint8{8}, []aggregator_v3_interface.LatestRoundData{round1}), + uint64(102): mockClient(t, []uint8{8, 8}, []aggregator_v3_interface.LatestRoundData{round2, round3}), + } + expectedTokenPrices := map[common.Address]big.Int{ + tk1: *multExp(round1.Answer, 10), + tk2: *multExp(round2.Answer, 10), + tk3: *multExp(round3.Answer, 10), + } + return testParameters{ + cfg: cfg, + evmClients: evmClients, + invalidConfigErrorExpected: false, + tokens: []common.Address{tk1, tk2, tk3}, + expectedTokenPrices: expectedTokenPrices, + } +} + +func testParamStaticOnly() testParameters { + tk1 := utils.RandomAddress() + tk2 := utils.RandomAddress() + tk3 := utils.RandomAddress() + cfg := config.DynamicPriceGetterConfig{ + AggregatorPrices: map[common.Address]config.AggregatorPriceConfig{}, + StaticPrices: map[common.Address]config.StaticPriceConfig{ + tk1: { + ChainID: 101, + Price: big.NewInt(1_234_000), + }, + tk2: { + ChainID: 102, + Price: big.NewInt(2_234_000), + }, + tk3: { + ChainID: 103, + Price: big.NewInt(3_234_000), + }, + }, + } + // Real LINK/USD example from OP. + evmClients := map[uint64]DynamicPriceGetterClient{} + expectedTokenPrices := map[common.Address]big.Int{ + tk1: *cfg.StaticPrices[tk1].Price, + tk2: *cfg.StaticPrices[tk2].Price, + tk3: *cfg.StaticPrices[tk3].Price, + } + return testParameters{ + cfg: cfg, + evmClients: evmClients, + tokens: []common.Address{tk1, tk2, tk3}, + expectedTokenPrices: expectedTokenPrices, + } +} + +func testParamAggregatorAndStaticValid(t *testing.T) testParameters { + tk1 := utils.RandomAddress() + tk2 := utils.RandomAddress() + tk3 := utils.RandomAddress() + cfg := config.DynamicPriceGetterConfig{ + AggregatorPrices: map[common.Address]config.AggregatorPriceConfig{ + tk1: { + ChainID: 101, + AggregatorContractAddress: utils.RandomAddress(), + }, + tk2: { + ChainID: 102, + AggregatorContractAddress: utils.RandomAddress(), + }, + }, + StaticPrices: map[common.Address]config.StaticPriceConfig{ + tk3: { + ChainID: 103, + Price: big.NewInt(1_234_000), + }, + }, + } + // Real LINK/USD example from OP. 
+ round1 := aggregator_v3_interface.LatestRoundData{ + RoundId: big.NewInt(1000), + Answer: big.NewInt(1396818990), + StartedAt: big.NewInt(1704896575), + UpdatedAt: big.NewInt(1704896575), + AnsweredInRound: big.NewInt(1000), + } + // Real ETH/USD example from OP. + round2 := aggregator_v3_interface.LatestRoundData{ + RoundId: big.NewInt(2000), + Answer: big.NewInt(238879815123), + StartedAt: big.NewInt(1704897197), + UpdatedAt: big.NewInt(1704897197), + AnsweredInRound: big.NewInt(2000), + } + evmClients := map[uint64]DynamicPriceGetterClient{ + uint64(101): mockClient(t, []uint8{8}, []aggregator_v3_interface.LatestRoundData{round1}), + uint64(102): mockClient(t, []uint8{8}, []aggregator_v3_interface.LatestRoundData{round2}), + } + expectedTokenPrices := map[common.Address]big.Int{ + tk1: *multExp(round1.Answer, 10), + tk2: *multExp(round2.Answer, 10), + tk3: *cfg.StaticPrices[tk3].Price, + } + return testParameters{ + cfg: cfg, + evmClients: evmClients, + tokens: []common.Address{tk1, tk2, tk3}, + expectedTokenPrices: expectedTokenPrices, + } +} + +func testParamAggregatorAndStaticTokenCollision(t *testing.T) testParameters { + tk1 := utils.RandomAddress() + tk2 := utils.RandomAddress() + tk3 := utils.RandomAddress() + cfg := config.DynamicPriceGetterConfig{ + AggregatorPrices: map[common.Address]config.AggregatorPriceConfig{ + tk1: { + ChainID: 101, + AggregatorContractAddress: utils.RandomAddress(), + }, + tk2: { + ChainID: 102, + AggregatorContractAddress: utils.RandomAddress(), + }, + tk3: { + ChainID: 103, + AggregatorContractAddress: utils.RandomAddress(), + }, + }, + StaticPrices: map[common.Address]config.StaticPriceConfig{ + tk3: { + ChainID: 103, + Price: big.NewInt(1_234_000), + }, + }, + } + // Real LINK/USD example from OP. + round1 := aggregator_v3_interface.LatestRoundData{ + RoundId: big.NewInt(1000), + Answer: big.NewInt(1396818990), + StartedAt: big.NewInt(1704896575), + UpdatedAt: big.NewInt(1704896575), + AnsweredInRound: big.NewInt(1000), + } + // Real ETH/USD example from OP. 
+ round2 := aggregator_v3_interface.LatestRoundData{ + RoundId: big.NewInt(2000), + Answer: big.NewInt(238879815123), + StartedAt: big.NewInt(1704897197), + UpdatedAt: big.NewInt(1704897197), + AnsweredInRound: big.NewInt(2000), + } + round3 := aggregator_v3_interface.LatestRoundData{ + RoundId: big.NewInt(3000), + Answer: big.NewInt(238879815124), + StartedAt: big.NewInt(1704897198), + UpdatedAt: big.NewInt(1704897198), + AnsweredInRound: big.NewInt(3000), + } + evmClients := map[uint64]DynamicPriceGetterClient{ + uint64(101): mockClient(t, []uint8{8}, []aggregator_v3_interface.LatestRoundData{round1}), + uint64(102): mockClient(t, []uint8{8}, []aggregator_v3_interface.LatestRoundData{round2}), + uint64(103): mockClient(t, []uint8{8}, []aggregator_v3_interface.LatestRoundData{round3}), + } + return testParameters{ + cfg: cfg, + evmClients: evmClients, + tokens: []common.Address{tk1, tk2, tk3}, + invalidConfigErrorExpected: true, + } +} + +func testParamNoAggregatorForToken(t *testing.T) testParameters { + tk1 := utils.RandomAddress() + tk2 := utils.RandomAddress() + tk3 := utils.RandomAddress() + tk4 := utils.RandomAddress() + cfg := config.DynamicPriceGetterConfig{ + AggregatorPrices: map[common.Address]config.AggregatorPriceConfig{ + tk1: { + ChainID: 101, + AggregatorContractAddress: utils.RandomAddress(), + }, + tk2: { + ChainID: 102, + AggregatorContractAddress: utils.RandomAddress(), + }, + }, + StaticPrices: map[common.Address]config.StaticPriceConfig{ + tk3: { + ChainID: 103, + Price: big.NewInt(1_234_000), + }, + }, + } + // Real LINK/USD example from OP. + round1 := aggregator_v3_interface.LatestRoundData{ + RoundId: big.NewInt(1000), + Answer: big.NewInt(1396818990), + StartedAt: big.NewInt(1704896575), + UpdatedAt: big.NewInt(1704896575), + AnsweredInRound: big.NewInt(1000), + } + // Real ETH/USD example from OP. + round2 := aggregator_v3_interface.LatestRoundData{ + RoundId: big.NewInt(2000), + Answer: big.NewInt(238879815123), + StartedAt: big.NewInt(1704897197), + UpdatedAt: big.NewInt(1704897197), + AnsweredInRound: big.NewInt(2000), + } + evmClients := map[uint64]DynamicPriceGetterClient{ + uint64(101): mockClient(t, []uint8{8}, []aggregator_v3_interface.LatestRoundData{round1}), + uint64(102): mockClient(t, []uint8{8}, []aggregator_v3_interface.LatestRoundData{round2}), + } + expectedTokenPrices := map[common.Address]big.Int{ + tk1: *round1.Answer, + tk2: *round2.Answer, + tk3: *cfg.StaticPrices[tk3].Price, + tk4: *big.NewInt(0), + } + return testParameters{ + cfg: cfg, + evmClients: evmClients, + tokens: []common.Address{tk1, tk2, tk3, tk4}, + expectedTokenPrices: expectedTokenPrices, + priceResolutionErrorExpected: true, + } +} + +func testParamBatchCallReturnsErr(t *testing.T) testParameters { + tk1 := utils.RandomAddress() + tk2 := utils.RandomAddress() + tk3 := utils.RandomAddress() + cfg := config.DynamicPriceGetterConfig{ + AggregatorPrices: map[common.Address]config.AggregatorPriceConfig{ + tk1: { + ChainID: 101, + AggregatorContractAddress: utils.RandomAddress(), + }, + tk2: { + ChainID: 102, + AggregatorContractAddress: utils.RandomAddress(), + }, + }, + StaticPrices: map[common.Address]config.StaticPriceConfig{ + tk3: { + ChainID: 103, + Price: big.NewInt(1_234_000), + }, + }, + } + // Real LINK/USD example from OP. 
+ round1 := aggregator_v3_interface.LatestRoundData{ + RoundId: big.NewInt(1000), + Answer: big.NewInt(1396818990), + StartedAt: big.NewInt(1704896575), + UpdatedAt: big.NewInt(1704896575), + AnsweredInRound: big.NewInt(1000), + } + evmClients := map[uint64]DynamicPriceGetterClient{ + uint64(101): mockClient(t, []uint8{8}, []aggregator_v3_interface.LatestRoundData{round1}), + uint64(102): { + BatchCaller: mockErrCaller(t), + }, + } + return testParameters{ + cfg: cfg, + evmClients: evmClients, + tokens: []common.Address{tk1, tk2, tk3}, + evmCallErr: true, + } +} + +func mockClient(t *testing.T, decimals []uint8, rounds []aggregator_v3_interface.LatestRoundData) DynamicPriceGetterClient { + return DynamicPriceGetterClient{ + BatchCaller: mockCaller(t, decimals, rounds), + } +} + +func mockCaller(t *testing.T, decimals []uint8, rounds []aggregator_v3_interface.LatestRoundData) *rpclibmocks.EvmBatchCaller { + caller := rpclibmocks.NewEvmBatchCaller(t) + + // Mock batch calls per chain: all decimals calls then all latestRoundData calls. + dataAndErrs := make([]rpclib.DataAndErr, 0, len(decimals)+len(rounds)) + for _, d := range decimals { + dataAndErrs = append(dataAndErrs, rpclib.DataAndErr{ + Outputs: []any{d}, + }) + } + for _, round := range rounds { + dataAndErrs = append(dataAndErrs, rpclib.DataAndErr{ + Outputs: []any{round.RoundId, round.Answer, round.StartedAt, round.UpdatedAt, round.AnsweredInRound}, + }) + } + caller.On("BatchCall", mock.Anything, uint64(0), mock.Anything).Return(dataAndErrs, nil).Maybe() + return caller +} + +func mockErrCaller(t *testing.T) *rpclibmocks.EvmBatchCaller { + caller := rpclibmocks.NewEvmBatchCaller(t) + caller.On("BatchCall", mock.Anything, uint64(0), mock.Anything).Return(nil, assert.AnError).Maybe() + return caller +} + +// multExp returns the result of multiplying x by 10^e. +func multExp(x *big.Int, e int64) *big.Int { + return big.NewInt(0).Mul(x, big.NewInt(0).Exp(big.NewInt(10), big.NewInt(e), nil)) +} diff --git a/core/services/ocr2/plugins/ccip/internal/pricegetter/mock.go b/core/services/ocr2/plugins/ccip/internal/pricegetter/mock.go new file mode 100644 index 00000000000..195649685b2 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/pricegetter/mock.go @@ -0,0 +1,211 @@ +// Code generated by mockery v2.43.2. DO NOT EDIT. 
+ +package pricegetter + +import ( + context "context" + big "math/big" + + ccip "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + + mock "github.com/stretchr/testify/mock" +) + +// MockPriceGetter is an autogenerated mock type for the PriceGetter type +type MockPriceGetter struct { + mock.Mock +} + +type MockPriceGetter_Expecter struct { + mock *mock.Mock +} + +func (_m *MockPriceGetter) EXPECT() *MockPriceGetter_Expecter { + return &MockPriceGetter_Expecter{mock: &_m.Mock} +} + +// Close provides a mock function with given fields: +func (_m *MockPriceGetter) Close() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Close") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockPriceGetter_Close_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Close' +type MockPriceGetter_Close_Call struct { + *mock.Call +} + +// Close is a helper method to define mock.On call +func (_e *MockPriceGetter_Expecter) Close() *MockPriceGetter_Close_Call { + return &MockPriceGetter_Close_Call{Call: _e.mock.On("Close")} +} + +func (_c *MockPriceGetter_Close_Call) Run(run func()) *MockPriceGetter_Close_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockPriceGetter_Close_Call) Return(_a0 error) *MockPriceGetter_Close_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockPriceGetter_Close_Call) RunAndReturn(run func() error) *MockPriceGetter_Close_Call { + _c.Call.Return(run) + return _c +} + +// FilterConfiguredTokens provides a mock function with given fields: ctx, tokens +func (_m *MockPriceGetter) FilterConfiguredTokens(ctx context.Context, tokens []ccip.Address) ([]ccip.Address, []ccip.Address, error) { + ret := _m.Called(ctx, tokens) + + if len(ret) == 0 { + panic("no return value specified for FilterConfiguredTokens") + } + + var r0 []ccip.Address + var r1 []ccip.Address + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, []ccip.Address) ([]ccip.Address, []ccip.Address, error)); ok { + return rf(ctx, tokens) + } + if rf, ok := ret.Get(0).(func(context.Context, []ccip.Address) []ccip.Address); ok { + r0 = rf(ctx, tokens) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]ccip.Address) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, []ccip.Address) []ccip.Address); ok { + r1 = rf(ctx, tokens) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).([]ccip.Address) + } + } + + if rf, ok := ret.Get(2).(func(context.Context, []ccip.Address) error); ok { + r2 = rf(ctx, tokens) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// MockPriceGetter_FilterConfiguredTokens_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'FilterConfiguredTokens' +type MockPriceGetter_FilterConfiguredTokens_Call struct { + *mock.Call +} + +// FilterConfiguredTokens is a helper method to define mock.On call +// - ctx context.Context +// - tokens []ccip.Address +func (_e *MockPriceGetter_Expecter) FilterConfiguredTokens(ctx interface{}, tokens interface{}) *MockPriceGetter_FilterConfiguredTokens_Call { + return &MockPriceGetter_FilterConfiguredTokens_Call{Call: _e.mock.On("FilterConfiguredTokens", ctx, tokens)} +} + +func (_c *MockPriceGetter_FilterConfiguredTokens_Call) Run(run func(ctx context.Context, tokens []ccip.Address)) *MockPriceGetter_FilterConfiguredTokens_Call { + _c.Call.Run(func(args mock.Arguments) { + 
run(args[0].(context.Context), args[1].([]ccip.Address)) + }) + return _c +} + +func (_c *MockPriceGetter_FilterConfiguredTokens_Call) Return(configured []ccip.Address, unconfigured []ccip.Address, err error) *MockPriceGetter_FilterConfiguredTokens_Call { + _c.Call.Return(configured, unconfigured, err) + return _c +} + +func (_c *MockPriceGetter_FilterConfiguredTokens_Call) RunAndReturn(run func(context.Context, []ccip.Address) ([]ccip.Address, []ccip.Address, error)) *MockPriceGetter_FilterConfiguredTokens_Call { + _c.Call.Return(run) + return _c +} + +// TokenPricesUSD provides a mock function with given fields: ctx, tokens +func (_m *MockPriceGetter) TokenPricesUSD(ctx context.Context, tokens []ccip.Address) (map[ccip.Address]*big.Int, error) { + ret := _m.Called(ctx, tokens) + + if len(ret) == 0 { + panic("no return value specified for TokenPricesUSD") + } + + var r0 map[ccip.Address]*big.Int + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, []ccip.Address) (map[ccip.Address]*big.Int, error)); ok { + return rf(ctx, tokens) + } + if rf, ok := ret.Get(0).(func(context.Context, []ccip.Address) map[ccip.Address]*big.Int); ok { + r0 = rf(ctx, tokens) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[ccip.Address]*big.Int) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, []ccip.Address) error); ok { + r1 = rf(ctx, tokens) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockPriceGetter_TokenPricesUSD_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'TokenPricesUSD' +type MockPriceGetter_TokenPricesUSD_Call struct { + *mock.Call +} + +// TokenPricesUSD is a helper method to define mock.On call +// - ctx context.Context +// - tokens []ccip.Address +func (_e *MockPriceGetter_Expecter) TokenPricesUSD(ctx interface{}, tokens interface{}) *MockPriceGetter_TokenPricesUSD_Call { + return &MockPriceGetter_TokenPricesUSD_Call{Call: _e.mock.On("TokenPricesUSD", ctx, tokens)} +} + +func (_c *MockPriceGetter_TokenPricesUSD_Call) Run(run func(ctx context.Context, tokens []ccip.Address)) *MockPriceGetter_TokenPricesUSD_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].([]ccip.Address)) + }) + return _c +} + +func (_c *MockPriceGetter_TokenPricesUSD_Call) Return(_a0 map[ccip.Address]*big.Int, _a1 error) *MockPriceGetter_TokenPricesUSD_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockPriceGetter_TokenPricesUSD_Call) RunAndReturn(run func(context.Context, []ccip.Address) (map[ccip.Address]*big.Int, error)) *MockPriceGetter_TokenPricesUSD_Call { + _c.Call.Return(run) + return _c +} + +// NewMockPriceGetter creates a new instance of MockPriceGetter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewMockPriceGetter(t interface { + mock.TestingT + Cleanup(func()) +}) *MockPriceGetter { + mock := &MockPriceGetter{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/ocr2/plugins/ccip/internal/pricegetter/pipeline.go b/core/services/ocr2/plugins/ccip/internal/pricegetter/pipeline.go new file mode 100644 index 00000000000..ae9a10deb65 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/pricegetter/pipeline.go @@ -0,0 +1,114 @@ +package pricegetter + +import ( + "context" + "math/big" + "strings" + "time" + + mapset "github.com/deckarep/golang-set/v2" + "github.com/google/uuid" + "github.com/pkg/errors" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/parseutil" + "github.com/smartcontractkit/chainlink/v2/core/services/pipeline" +) + +var _ PriceGetter = &PipelineGetter{} + +type PipelineGetter struct { + source string + runner pipeline.Runner + jobID int32 + externalJobID uuid.UUID + name string + lggr logger.Logger +} + +func NewPipelineGetter(source string, runner pipeline.Runner, jobID int32, externalJobID uuid.UUID, name string, lggr logger.Logger) (*PipelineGetter, error) { + _, err := pipeline.Parse(source) + if err != nil { + return nil, err + } + + return &PipelineGetter{ + source: source, + runner: runner, + jobID: jobID, + externalJobID: externalJobID, + name: name, + lggr: lggr, + }, nil +} + +// FilterForConfiguredTokens implements the PriceGetter interface. +// It filters a list of token addresses for only those that have a pipeline job configured on the TokenPricesUSDPipeline +func (d *PipelineGetter) FilterConfiguredTokens(ctx context.Context, tokens []cciptypes.Address) (configured []cciptypes.Address, unconfigured []cciptypes.Address, err error) { + lcSource := strings.ToLower(d.source) + for _, tk := range tokens { + lcToken := strings.ToLower(string(tk)) + if strings.Contains(lcSource, lcToken) { + configured = append(configured, tk) + } else { + unconfigured = append(unconfigured, tk) + } + } + return configured, unconfigured, nil +} + +func (d *PipelineGetter) TokenPricesUSD(ctx context.Context, tokens []cciptypes.Address) (map[cciptypes.Address]*big.Int, error) { + _, trrs, err := d.runner.ExecuteRun(ctx, pipeline.Spec{ + ID: d.jobID, + DotDagSource: d.source, + CreatedAt: time.Now(), + JobID: d.jobID, + JobName: d.name, + JobType: "", + }, pipeline.NewVarsFrom(map[string]interface{}{})) + if err != nil { + return nil, err + } + finalResult := trrs.FinalResult() + if finalResult.HasErrors() { + return nil, errors.Errorf("error getting prices %v", finalResult.AllErrors) + } + if len(finalResult.Values) != 1 { + return nil, errors.Errorf("invalid number of price results, expected 1 got %v", len(finalResult.Values)) + } + prices, ok := finalResult.Values[0].(map[string]interface{}) + if !ok { + return nil, errors.Errorf("expected map output of price pipeline, got %T", finalResult.Values[0]) + } + + providedTokensSet := mapset.NewSet(tokens...) 
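+	// Keep only prices for the tokens that were requested; the pipeline may
+	// report additional tokens, which are simply ignored here.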
+ tokenPrices := make(map[cciptypes.Address]*big.Int) + for tokenAddressStr, rawPrice := range prices { + tokenAddressStr := ccipcalc.HexToAddress(tokenAddressStr) + castedPrice, err := parseutil.ParseBigIntFromAny(rawPrice) + if err != nil { + return nil, err + } + + if providedTokensSet.Contains(tokenAddressStr) { + tokenPrices[tokenAddressStr] = castedPrice + } + } + + // The mapping of token address to source of token price has to live offchain. + // Best we can do is sanity check that the token price spec covers all our desired execution token prices. + for _, token := range tokens { + if _, ok = tokenPrices[token]; !ok { + return nil, errors.Errorf("missing token %s from tokensForFeeCoin spec, got %v", token, prices) + } + } + + return tokenPrices, nil +} + +func (d *PipelineGetter) Close() error { + return d.runner.Close() +} diff --git a/core/services/ocr2/plugins/ccip/internal/pricegetter/pipeline_test.go b/core/services/ocr2/plugins/ccip/internal/pricegetter/pipeline_test.go new file mode 100644 index 00000000000..37970750732 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/pricegetter/pipeline_test.go @@ -0,0 +1,178 @@ +package pricegetter_test + +import ( + "context" + "fmt" + "math/big" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/google/uuid" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + config2 "github.com/smartcontractkit/chainlink-common/pkg/config" + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + + "github.com/smartcontractkit/chainlink/v2/core/bridges" + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/pricegetter" + "github.com/smartcontractkit/chainlink/v2/core/services/pipeline" + + pipelinemocks "github.com/smartcontractkit/chainlink/v2/core/services/pipeline/mocks" + + config "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/configtest" + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest" +) + +func TestDataSource(t *testing.T) { + linkEth := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + _, err := w.Write([]byte(`{"JuelsPerETH": "200000000000000000000"}`)) + require.NoError(t, err) + })) + defer linkEth.Close() + usdcEth := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + _, err := w.Write([]byte(`{"USDCWeiPerETH": "1000000000000000000000"}`)) // 1000 USDC / ETH + require.NoError(t, err) + })) + defer usdcEth.Close() + linkTokenAddress := ccipcalc.HexToAddress("0x1591690b8638f5fb2dbec82ac741805ac5da8b45dc5263f4875b0496fdce4e05") + usdcTokenAddress := ccipcalc.HexToAddress("0x1591690b8638f5fb2dbec82ac741805ac5da8b45dc5263f4875b0496fdce4e10") + source := fmt.Sprintf(` + // Price 1 + link [type=http method=GET url="%s"]; + link_parse [type=jsonparse path="JuelsPerETH"]; + link->link_parse; + // Price 2 + usdc [type=http method=GET url="%s"]; + usdc_parse [type=jsonparse path="USDCWeiPerETH"]; + usdc->usdc_parse; + merge [type=merge left="{}" right="{\"%s\":$(link_parse), \"%s\":$(usdc_parse)}"]; +`, linkEth.URL, usdcEth.URL, linkTokenAddress, usdcTokenAddress) + + priceGetter := newTestPipelineGetter(t, source) + + // USDC & LINK are configured + confTokens, _, err := 
priceGetter.FilterConfiguredTokens(context.Background(), []cciptypes.Address{linkTokenAddress, usdcTokenAddress}) + require.NoError(t, err) + assert.Equal(t, linkTokenAddress, confTokens[0]) + assert.Equal(t, usdcTokenAddress, confTokens[1]) + + // Ask for all prices present in spec. + prices, err := priceGetter.TokenPricesUSD(context.Background(), []cciptypes.Address{ + linkTokenAddress, + usdcTokenAddress, + }) + require.NoError(t, err) + assert.Equal(t, prices, map[cciptypes.Address]*big.Int{ + linkTokenAddress: big.NewInt(0).Mul(big.NewInt(200), big.NewInt(1000000000000000000)), + usdcTokenAddress: big.NewInt(0).Mul(big.NewInt(1000), big.NewInt(1000000000000000000)), + }) + + // Ask a non-existent price. + _, err = priceGetter.TokenPricesUSD(context.Background(), []cciptypes.Address{ + ccipcalc.HexToAddress("0x1591690b8638f5fb2dbec82ac741805ac5da8b45dc5263f4875b0496fdce4e11"), + }) + require.Error(t, err) + + // Ask only one price + prices, err = priceGetter.TokenPricesUSD(context.Background(), []cciptypes.Address{linkTokenAddress}) + require.NoError(t, err) + assert.Equal(t, prices, map[cciptypes.Address]*big.Int{ + linkTokenAddress: big.NewInt(0).Mul(big.NewInt(200), big.NewInt(1000000000000000000)), + }) +} + +func TestParsingDifferentFormats(t *testing.T) { + tests := []struct { + name string + inputValue string + expectedValue *big.Int + expectedError bool + }{ + { + name: "number as string", + inputValue: "\"200000000000000000000\"", + expectedValue: new(big.Int).Mul(big.NewInt(200), big.NewInt(1e18)), + }, + { + name: "number as big number", + inputValue: "500000000000000000000", + expectedValue: new(big.Int).Mul(big.NewInt(500), big.NewInt(1e18)), + }, + { + name: "number as int64", + inputValue: "150", + expectedValue: big.NewInt(150), + }, + { + name: "number in scientific notation", + inputValue: "3e22", + expectedValue: new(big.Int).Mul(big.NewInt(30000), big.NewInt(1e18)), + }, + { + name: "number as string in scientific notation returns error", + inputValue: "\"3e22\"", + expectedError: true, + }, + { + name: "invalid value should return error", + inputValue: "\"NaN\"", + expectedError: true, + }, + { + name: "null should return error", + inputValue: "null", + expectedError: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + token := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + _, err := fmt.Fprintf(w, `{"MyCoin": %s}`, tt.inputValue) + require.NoError(t, err) + })) + defer token.Close() + + address := common.HexToAddress("0x94025780a1aB58868D9B2dBBB775f44b32e8E6e5") + source := fmt.Sprintf(` + // Price 1 + coin [type=http method=GET url="%s"]; + coin_parse [type=jsonparse path="MyCoin"]; + coin->coin_parse; + merge [type=merge left="{}" right="{\"%s\":$(coin_parse)}"]; + `, token.URL, strings.ToLower(address.String())) + + prices, err := newTestPipelineGetter(t, source). 
+ TokenPricesUSD(context.Background(), []cciptypes.Address{ccipcalc.EvmAddrToGeneric(address)}) + + if tt.expectedError { + require.Error(t, err) + } else { + require.NoError(t, err) + require.Equal(t, prices[ccipcalc.EvmAddrToGeneric(address)], tt.expectedValue) + } + }) + } +} + +func newTestPipelineGetter(t *testing.T, source string) *pricegetter.PipelineGetter { + lggr, _ := logger.NewLogger() + cfg := pipelinemocks.NewConfig(t) + cfg.On("MaxRunDuration").Return(time.Second) + cfg.On("DefaultHTTPTimeout").Return(*config2.MustNewDuration(time.Second)) + cfg.On("DefaultHTTPLimit").Return(int64(1024 * 10)) + cfg.On("VerboseLogging").Return(true) + db := pgtest.NewSqlxDB(t) + bridgeORM := bridges.NewORM(db) + runner := pipeline.NewRunner(pipeline.NewORM(db, lggr, config.NewTestGeneralConfig(t).JobPipeline().MaxSuccessfulRuns()), + bridgeORM, cfg, nil, nil, nil, nil, lggr, &http.Client{}, &http.Client{}) + ds, err := pricegetter.NewPipelineGetter(source, runner, 1, uuid.New(), "test", lggr) + require.NoError(t, err) + return ds +} diff --git a/core/services/ocr2/plugins/ccip/internal/pricegetter/pricegetter.go b/core/services/ocr2/plugins/ccip/internal/pricegetter/pricegetter.go new file mode 100644 index 00000000000..9ee0e8f3d0a --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/pricegetter/pricegetter.go @@ -0,0 +1,7 @@ +package pricegetter + +import cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + +type PriceGetter interface { + cciptypes.PriceGetter +} diff --git a/core/services/ocr2/plugins/ccip/internal/rpclib/evm.go b/core/services/ocr2/plugins/ccip/internal/rpclib/evm.go new file mode 100644 index 00000000000..71357029dd2 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/rpclib/evm.go @@ -0,0 +1,337 @@ +package rpclib + +import ( + "context" + "encoding/json" + "fmt" + "math/big" + "reflect" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/rpc" + "github.com/pkg/errors" + "golang.org/x/sync/errgroup" + + "github.com/smartcontractkit/chainlink/v2/core/logger" +) + +var ErrEmptyOutput = errors.New("rpc call output is empty (make sure that the contract method exists and rpc is healthy)") + +type EvmBatchCaller interface { + // BatchCall executes all the provided EvmCall and returns the results in the same order + // of the calls. Pass blockNumber=0 to use the latest block. + BatchCall(ctx context.Context, blockNumber uint64, calls []EvmCall) ([]DataAndErr, error) +} + +type BatchSender interface { + BatchCallContext(ctx context.Context, calls []rpc.BatchElem) error +} + +const ( + // DefaultRpcBatchSizeLimit defines the maximum number of rpc requests to be included in a batch. + DefaultRpcBatchSizeLimit = 100 + + // DefaultRpcBatchBackOffMultiplier defines the rate of reducing the batch size limit for retried calls. + // For example if limit is 20 and multiplier is 4: + // 1. 20 + // 2. 20/4 = 5 + // 3. 5/4 = 1 + DefaultRpcBatchBackOffMultiplier = 5 + + // DefaultMaxParallelRpcCalls defines the default maximum number of individual in-parallel rpc calls. + DefaultMaxParallelRpcCalls = 10 +) + +// DynamicLimitedBatchCaller makes batched rpc calls and perform retries by reducing the batch size on each retry. 
+type DynamicLimitedBatchCaller struct { + bc *defaultEvmBatchCaller +} + +func NewDynamicLimitedBatchCaller( + lggr logger.Logger, batchSender BatchSender, batchSizeLimit, backOffMultiplier, parallelRpcCallsLimit uint, +) *DynamicLimitedBatchCaller { + return &DynamicLimitedBatchCaller{ + bc: newDefaultEvmBatchCaller(lggr, batchSender, batchSizeLimit, backOffMultiplier, parallelRpcCallsLimit), + } +} + +func (c *DynamicLimitedBatchCaller) BatchCall(ctx context.Context, blockNumber uint64, calls []EvmCall) ([]DataAndErr, error) { + return c.bc.batchCallDynamicLimitRetries(ctx, blockNumber, calls) +} + +type defaultEvmBatchCaller struct { + lggr logger.Logger + batchSender BatchSender + batchSizeLimit uint + parallelRpcCallsLimit uint + backOffMultiplier uint +} + +// NewDefaultEvmBatchCaller returns a new batch caller instance. +// batchCallLimit defines the maximum number of calls for BatchCallLimit method, pass 0 to keep the default. +// backOffMultiplier defines the back-off strategy for retries on BatchCallDynamicLimitRetries method, pass 0 to keep the default. +func newDefaultEvmBatchCaller( + lggr logger.Logger, batchSender BatchSender, batchSizeLimit, backOffMultiplier, parallelRpcCallsLimit uint, +) *defaultEvmBatchCaller { + batchSize := uint(DefaultRpcBatchSizeLimit) + if batchSizeLimit > 0 { + batchSize = batchSizeLimit + } + + multiplier := uint(DefaultRpcBatchBackOffMultiplier) + if backOffMultiplier > 0 { + multiplier = backOffMultiplier + } + + parallelRpcCalls := uint(DefaultMaxParallelRpcCalls) + if parallelRpcCallsLimit > 0 { + parallelRpcCalls = parallelRpcCallsLimit + } + + return &defaultEvmBatchCaller{ + lggr: lggr, + batchSender: batchSender, + batchSizeLimit: batchSize, + parallelRpcCallsLimit: parallelRpcCalls, + backOffMultiplier: multiplier, + } +} + +func (c *defaultEvmBatchCaller) batchCall(ctx context.Context, blockNumber uint64, calls []EvmCall) ([]DataAndErr, error) { + if len(calls) == 0 { + return nil, nil + } + + packedOutputs := make([]string, len(calls)) + rpcBatchCalls := make([]rpc.BatchElem, len(calls)) + + for i, call := range calls { + packedInputs, err := call.abi.Pack(call.methodName, call.args...) + if err != nil { + return nil, fmt.Errorf("pack %s(%+v): %w", call.methodName, call.args, err) + } + + blockNumStr := "latest" + if blockNumber > 0 { + blockNumStr = hexutil.EncodeBig(big.NewInt(0).SetUint64(blockNumber)) + } + + rpcBatchCalls[i] = rpc.BatchElem{ + Method: "eth_call", + Args: []any{ + map[string]interface{}{ + "from": common.Address{}, + "to": call.contractAddress, + "data": hexutil.Bytes(packedInputs), + }, + blockNumStr, + }, + Result: &packedOutputs[i], + } + } + + err := c.batchSender.BatchCallContext(ctx, rpcBatchCalls) + if err != nil { + return nil, fmt.Errorf("batch call context: %w", err) + } + + results := make([]DataAndErr, len(calls)) + for i, call := range calls { + if rpcBatchCalls[i].Error != nil { + results[i].Err = rpcBatchCalls[i].Error + continue + } + + if packedOutputs[i] == "" { + // Some RPCs instead of returning "0x" are returning an empty string. + // We are overriding this behaviour for consistent handling of this scenario. 
+ packedOutputs[i] = "0x" + } + + b, err := hexutil.Decode(packedOutputs[i]) + if err != nil { + return nil, fmt.Errorf("decode result %s: packedOutputs %s: %w", call, packedOutputs[i], err) + } + + unpackedOutputs, err := call.abi.Unpack(call.methodName, b) + if err != nil { + if len(b) == 0 { + results[i].Err = fmt.Errorf("unpack result %s: %s: %w", call, err.Error(), ErrEmptyOutput) + } else { + results[i].Err = fmt.Errorf("unpack result %s: %w", call, err) + } + continue + } + + results[i].Outputs = unpackedOutputs + } + + return results, nil +} + +func (c *defaultEvmBatchCaller) batchCallDynamicLimitRetries(ctx context.Context, blockNumber uint64, calls []EvmCall) ([]DataAndErr, error) { + lim := c.batchSizeLimit + // Limit the batch size to the number of calls + if uint(len(calls)) < lim { + lim = uint(len(calls)) + } + for { + results, err := c.batchCallLimit(ctx, blockNumber, calls, lim) + if err == nil { + return results, nil + } + + if lim <= 1 { + return nil, errors.Wrapf(err, "calls %+v", EVMCallsToString(calls)) + } + + newLim := lim / c.backOffMultiplier + if newLim == 0 || newLim == lim { + newLim = 1 + } + lim = newLim + c.lggr.Errorf("retrying batch call with %d calls and %d limit that failed with error=%s", + len(calls), lim, err) + } +} + +func (c *defaultEvmBatchCaller) batchCallLimit(ctx context.Context, blockNumber uint64, calls []EvmCall, batchSizeLimit uint) ([]DataAndErr, error) { + if batchSizeLimit <= 0 { + return c.batchCall(ctx, blockNumber, calls) + } + + type job struct { + blockNumber uint64 + calls []EvmCall + results []DataAndErr + } + + jobs := make([]job, 0) + for i := 0; i < len(calls); i += int(batchSizeLimit) { + idxFrom := i + idxTo := idxFrom + int(batchSizeLimit) + if idxTo > len(calls) { + idxTo = len(calls) + } + jobs = append(jobs, job{blockNumber: blockNumber, calls: calls[idxFrom:idxTo], results: nil}) + } + + if c.parallelRpcCallsLimit > 1 { + eg := new(errgroup.Group) + eg.SetLimit(int(c.parallelRpcCallsLimit)) + for jobIdx := range jobs { + jobIdx := jobIdx + eg.Go(func() error { + res, err := c.batchCall(ctx, jobs[jobIdx].blockNumber, jobs[jobIdx].calls) + if err != nil { + return err + } + jobs[jobIdx].results = res + return nil + }) + } + if err := eg.Wait(); err != nil { + return nil, err + } + } else { + var err error + for jobIdx := range jobs { + jobs[jobIdx].results, err = c.batchCall(ctx, jobs[jobIdx].blockNumber, jobs[jobIdx].calls) + if err != nil { + return nil, err + } + } + } + + results := make([]DataAndErr, 0) + for _, jb := range jobs { + results = append(results, jb.results...) 
+ } + return results, nil +} + +type AbiPackerUnpacker interface { + Pack(name string, args ...interface{}) ([]byte, error) + Unpack(name string, data []byte) ([]interface{}, error) +} + +type EvmCall struct { + abi AbiPackerUnpacker + methodName string + contractAddress common.Address + args []any +} + +func NewEvmCall(abi AbiPackerUnpacker, methodName string, contractAddress common.Address, args ...any) EvmCall { + return EvmCall{ + abi: abi, + methodName: methodName, + contractAddress: contractAddress, + args: args, + } +} + +func (c EvmCall) MethodName() string { + return c.methodName +} + +func (c EvmCall) String() string { + return fmt.Sprintf("%s: %s(%+v)", c.contractAddress.String(), c.methodName, c.args) +} + +func EVMCallsToString(calls []EvmCall) string { + callString := "" + for _, call := range calls { + callString += fmt.Sprintf("%s\n", call.String()) + } + return callString +} + +type DataAndErr struct { + Outputs []any + Err error +} + +func ParseOutputs[T any](results []DataAndErr, parseFunc func(d DataAndErr) (T, error)) ([]T, error) { + parsed := make([]T, 0, len(results)) + + for _, res := range results { + v, err := parseFunc(res) + if err != nil { + return nil, fmt.Errorf("parse contract output: %w", err) + } + parsed = append(parsed, v) + } + + return parsed, nil +} + +func ParseOutput[T any](dataAndErr DataAndErr, idx int) (T, error) { + var parsed T + + if dataAndErr.Err != nil { + return parsed, fmt.Errorf("rpc call error: %w", dataAndErr.Err) + } + + if idx < 0 || idx >= len(dataAndErr.Outputs) { + return parsed, fmt.Errorf("idx %d is out of bounds for %d outputs", idx, len(dataAndErr.Outputs)) + } + + res, is := dataAndErr.Outputs[idx].(T) + if !is { + // some rpc types are not strictly defined + // for that reason we try to manually map the fields using json encoding + b, err := json.Marshal(dataAndErr.Outputs[idx]) + if err == nil { + var empty T + if err := json.Unmarshal(b, &parsed); err == nil && !reflect.DeepEqual(parsed, empty) { + return parsed, nil + } + } + + return parsed, fmt.Errorf("the result type is: %T, expected: %T", dataAndErr.Outputs[idx], parsed) + } + + return res, nil +} diff --git a/core/services/ocr2/plugins/ccip/internal/rpclib/evm_test.go b/core/services/ocr2/plugins/ccip/internal/rpclib/evm_test.go new file mode 100644 index 00000000000..1a3d7baf0fc --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/rpclib/evm_test.go @@ -0,0 +1,223 @@ +package rpclib_test + +import ( + "fmt" + "strconv" + "testing" + + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/rpclib" + + "github.com/cometbft/cometbft/libs/rand" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/rpc" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client/mocks" + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers" +) + +func TestDefaultEvmBatchCaller_BatchCallDynamicLimit(t *testing.T) { + testCases := []struct { + name string + maxBatchSize uint + backOffMultiplier uint + numCalls int + expectedBatchSizesOnEachRetry []int + }{ + { + name: "defaults", + maxBatchSize: rpclib.DefaultRpcBatchSizeLimit, + backOffMultiplier: 
rpclib.DefaultRpcBatchBackOffMultiplier, + numCalls: 200, + expectedBatchSizesOnEachRetry: []int{100, 20, 4, 1}, + }, + { + name: "base simple scenario", + maxBatchSize: 20, + backOffMultiplier: 2, + numCalls: 100, + expectedBatchSizesOnEachRetry: []int{20, 10, 5, 2, 1}, + }, + { + name: "remainder", + maxBatchSize: 99, + backOffMultiplier: 5, + numCalls: 100, + expectedBatchSizesOnEachRetry: []int{99, 19, 3, 1}, + }, + { + name: "large back off multiplier", + maxBatchSize: 20, + backOffMultiplier: 18, + numCalls: 100, + expectedBatchSizesOnEachRetry: []int{20, 1}, + }, + { + name: "back off equal to batch size", + maxBatchSize: 20, + backOffMultiplier: 20, + numCalls: 100, + expectedBatchSizesOnEachRetry: []int{20, 1}, + }, + { + name: "back off larger than batch size", + maxBatchSize: 20, + backOffMultiplier: 220, + numCalls: 100, + expectedBatchSizesOnEachRetry: []int{20, 1}, + }, + { + name: "back off 1", + maxBatchSize: 20, + backOffMultiplier: 1, + numCalls: 100, + expectedBatchSizesOnEachRetry: []int{20, 1}, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + batchSizes := make([]int, 0) + + ec := mocks.NewClient(t) + bc := rpclib.NewDynamicLimitedBatchCaller(logger.TestLogger(t), ec, tc.maxBatchSize, tc.backOffMultiplier, 1) + ctx := testutils.Context(t) + calls := make([]rpclib.EvmCall, tc.numCalls) + emptyAbi := abihelpers.MustParseABI("[]") + for i := range calls { + calls[i] = rpclib.NewEvmCall(emptyAbi, "", common.Address{}) + } + ec.On("BatchCallContext", mock.Anything, mock.Anything).Run(func(args mock.Arguments) { + evmCalls := args.Get(1).([]rpc.BatchElem) + batchSizes = append(batchSizes, len(evmCalls)) + }).Return(errors.New("some error")) + _, _ = bc.BatchCall(ctx, 123, calls) + + assert.Equal(t, tc.expectedBatchSizesOnEachRetry, batchSizes) + }) + } +} + +func TestDefaultEvmBatchCaller_batchCallLimit(t *testing.T) { + ctx := testutils.Context(t) + + testCases := []struct { + numCalls uint + batchSize uint + parallelRpcCallsLimit uint + }{ + {numCalls: 100, batchSize: 10, parallelRpcCallsLimit: 5}, + {numCalls: 10, batchSize: 100, parallelRpcCallsLimit: 10}, + {numCalls: 1, batchSize: 100, parallelRpcCallsLimit: 10}, + {numCalls: 1000, batchSize: 10, parallelRpcCallsLimit: 2}, + {numCalls: rand.Uint() % 1000, batchSize: rand.Uint() % 500, parallelRpcCallsLimit: rand.Uint() % 500}, + } + + for _, tc := range testCases { + t.Run(fmt.Sprintf("%v", tc), func(t *testing.T) { + ec := mocks.NewClient(t) + bc := rpclib.NewDynamicLimitedBatchCaller(logger.TestLogger(t), ec, tc.batchSize, 99999, tc.parallelRpcCallsLimit) + + // generate the abi and the rpc calls + intTyp, err := abi.NewType("uint64", "uint64", nil) + assert.NoError(t, err) + calls := make([]rpclib.EvmCall, tc.numCalls) + mockAbi := abihelpers.MustParseABI("[]") + for i := range calls { + name := fmt.Sprintf("method_%d", i) + meth := abi.NewMethod(name, name, abi.Function, "nonpayable", true, false, abi.Arguments{abi.Argument{Name: "a", Type: intTyp}}, abi.Arguments{abi.Argument{Name: "b", Type: intTyp}}) + mockAbi.Methods[name] = meth + calls[i] = rpclib.NewEvmCall(mockAbi, name, common.Address{}, uint64(i)) + } + + // mock the rpc call to batch call context + // for simplicity we just set an error + ec.On("BatchCallContext", mock.Anything, mock.Anything). 
+ Run(func(args mock.Arguments) { + evmCalls := args.Get(1).([]rpc.BatchElem) + for i := range evmCalls { + arg := evmCalls[i].Args[0].(map[string]interface{})["data"].(hexutil.Bytes) + arg = arg[len(arg)-10:] + evmCalls[i].Error = fmt.Errorf("%s", arg) + } + }).Return(nil) + + // make the call and make sure the results are received in order + results, _ := bc.BatchCall(ctx, 0, calls) + assert.Len(t, results, len(calls)) + for i, res := range results { + resNum, err := strconv.ParseInt(res.Err.Error()[2:], 16, 64) + assert.NoError(t, err) + assert.Equal(t, int64(i), resNum) + } + }) + } +} + +func TestParseOutput(t *testing.T) { + type testCase[T any] struct { + name string + dataAndErr rpclib.DataAndErr + outputIdx int + expRes T + expErr bool + } + + testCases := []testCase[string]{ + { + name: "success", + dataAndErr: rpclib.DataAndErr{Outputs: []any{"abc"}, Err: nil}, + outputIdx: 0, + expRes: "abc", + expErr: false, + }, + { + name: "index error on empty list", + dataAndErr: rpclib.DataAndErr{Outputs: []any{}, Err: nil}, + outputIdx: 0, + expErr: true, + }, + { + name: "index error on non-empty list", + dataAndErr: rpclib.DataAndErr{Outputs: []any{"a", "b"}, Err: nil}, + outputIdx: 2, + expErr: true, + }, + { + name: "negative index", + dataAndErr: rpclib.DataAndErr{Outputs: []any{"a", "b"}, Err: nil}, + outputIdx: -1, + expErr: true, + }, + { + name: "wrong type", + dataAndErr: rpclib.DataAndErr{Outputs: []any{1234}, Err: nil}, + outputIdx: 0, + expErr: true, + }, + { + name: "has err", + dataAndErr: rpclib.DataAndErr{Outputs: []any{"abc"}, Err: fmt.Errorf("some err")}, + outputIdx: 0, + expErr: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + res, err := rpclib.ParseOutput[string](tc.dataAndErr, tc.outputIdx) + if tc.expErr { + assert.Error(t, err) + return + } + assert.NoError(t, err) + assert.Equal(t, tc.expRes, res) + }) + } +} diff --git a/core/services/ocr2/plugins/ccip/internal/rpclib/rpclibmocks/evm_mock.go b/core/services/ocr2/plugins/ccip/internal/rpclib/rpclibmocks/evm_mock.go new file mode 100644 index 00000000000..aa42814186e --- /dev/null +++ b/core/services/ocr2/plugins/ccip/internal/rpclib/rpclibmocks/evm_mock.go @@ -0,0 +1,97 @@ +// Code generated by mockery v2.43.2. DO NOT EDIT. 
+ +package rpclibmocks + +import ( + context "context" + + rpclib "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/rpclib" + mock "github.com/stretchr/testify/mock" +) + +// EvmBatchCaller is an autogenerated mock type for the EvmBatchCaller type +type EvmBatchCaller struct { + mock.Mock +} + +type EvmBatchCaller_Expecter struct { + mock *mock.Mock +} + +func (_m *EvmBatchCaller) EXPECT() *EvmBatchCaller_Expecter { + return &EvmBatchCaller_Expecter{mock: &_m.Mock} +} + +// BatchCall provides a mock function with given fields: ctx, blockNumber, calls +func (_m *EvmBatchCaller) BatchCall(ctx context.Context, blockNumber uint64, calls []rpclib.EvmCall) ([]rpclib.DataAndErr, error) { + ret := _m.Called(ctx, blockNumber, calls) + + if len(ret) == 0 { + panic("no return value specified for BatchCall") + } + + var r0 []rpclib.DataAndErr + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, []rpclib.EvmCall) ([]rpclib.DataAndErr, error)); ok { + return rf(ctx, blockNumber, calls) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64, []rpclib.EvmCall) []rpclib.DataAndErr); ok { + r0 = rf(ctx, blockNumber, calls) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]rpclib.DataAndErr) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64, []rpclib.EvmCall) error); ok { + r1 = rf(ctx, blockNumber, calls) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EvmBatchCaller_BatchCall_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BatchCall' +type EvmBatchCaller_BatchCall_Call struct { + *mock.Call +} + +// BatchCall is a helper method to define mock.On call +// - ctx context.Context +// - blockNumber uint64 +// - calls []rpclib.EvmCall +func (_e *EvmBatchCaller_Expecter) BatchCall(ctx interface{}, blockNumber interface{}, calls interface{}) *EvmBatchCaller_BatchCall_Call { + return &EvmBatchCaller_BatchCall_Call{Call: _e.mock.On("BatchCall", ctx, blockNumber, calls)} +} + +func (_c *EvmBatchCaller_BatchCall_Call) Run(run func(ctx context.Context, blockNumber uint64, calls []rpclib.EvmCall)) *EvmBatchCaller_BatchCall_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint64), args[2].([]rpclib.EvmCall)) + }) + return _c +} + +func (_c *EvmBatchCaller_BatchCall_Call) Return(_a0 []rpclib.DataAndErr, _a1 error) *EvmBatchCaller_BatchCall_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EvmBatchCaller_BatchCall_Call) RunAndReturn(run func(context.Context, uint64, []rpclib.EvmCall) ([]rpclib.DataAndErr, error)) *EvmBatchCaller_BatchCall_Call { + _c.Call.Return(run) + return _c +} + +// NewEvmBatchCaller creates a new instance of EvmBatchCaller. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewEvmBatchCaller(t interface { + mock.TestingT + Cleanup(func()) +}) *EvmBatchCaller { + mock := &EvmBatchCaller{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/ocr2/plugins/ccip/metrics.go b/core/services/ocr2/plugins/ccip/metrics.go new file mode 100644 index 00000000000..f481b5d447d --- /dev/null +++ b/core/services/ocr2/plugins/ccip/metrics.go @@ -0,0 +1,99 @@ +package ccip + +import ( + "strconv" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" +) + +var ( + unexpiredCommitRoots = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ccip_unexpired_commit_roots", + Help: "Number of unexpired commit roots processed by the plugin", + }, []string{"plugin", "source", "dest"}) + messagesProcessed = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ccip_number_of_messages_processed", + Help: "Number of messages processed by the plugin during different OCR phases", + }, []string{"plugin", "source", "dest", "ocrPhase"}) + sequenceNumberCounter = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ccip_sequence_number_counter", + Help: "Sequence number of the last message processed by the plugin", + }, []string{"plugin", "source", "dest", "ocrPhase"}) +) + +type ocrPhase string + +const ( + Observation ocrPhase = "observation" + Report ocrPhase = "report" + ShouldAccept ocrPhase = "shouldAccept" +) + +type PluginMetricsCollector interface { + NumberOfMessagesProcessed(phase ocrPhase, count int) + NumberOfMessagesBasedOnInterval(phase ocrPhase, seqNrMin, seqNrMax uint64) + UnexpiredCommitRoots(count int) + SequenceNumber(phase ocrPhase, seqNr uint64) +} + +type pluginMetricsCollector struct { + pluginName string + source, dest string +} + +func NewPluginMetricsCollector(pluginLabel string, sourceChainId, destChainId int64) *pluginMetricsCollector { + return &pluginMetricsCollector{ + pluginName: pluginLabel, + source: strconv.FormatInt(sourceChainId, 10), + dest: strconv.FormatInt(destChainId, 10), + } +} + +func (p *pluginMetricsCollector) NumberOfMessagesProcessed(phase ocrPhase, count int) { + messagesProcessed. + WithLabelValues(p.pluginName, p.source, p.dest, string(phase)). + Set(float64(count)) +} + +func (p *pluginMetricsCollector) NumberOfMessagesBasedOnInterval(phase ocrPhase, seqNrMin, seqNrMax uint64) { + messagesProcessed. + WithLabelValues(p.pluginName, p.source, p.dest, string(phase)). + Set(float64(seqNrMax - seqNrMin + 1)) +} + +func (p *pluginMetricsCollector) UnexpiredCommitRoots(count int) { + unexpiredCommitRoots. + WithLabelValues(p.pluginName, p.source, p.dest). + Set(float64(count)) +} + +func (p *pluginMetricsCollector) SequenceNumber(phase ocrPhase, seqNr uint64) { + // Don't publish price reports + if seqNr == 0 { + return + } + + sequenceNumberCounter. + WithLabelValues(p.pluginName, p.source, p.dest, string(phase)). 
+ Set(float64(seqNr)) +} + +var ( + // NoopMetricsCollector is a no-op implementation of PluginMetricsCollector + NoopMetricsCollector PluginMetricsCollector = noop{} +) + +type noop struct{} + +func (d noop) NumberOfMessagesProcessed(ocrPhase, int) { +} + +func (d noop) NumberOfMessagesBasedOnInterval(ocrPhase, uint64, uint64) { +} + +func (d noop) UnexpiredCommitRoots(int) { +} + +func (d noop) SequenceNumber(ocrPhase, uint64) { +} diff --git a/core/services/ocr2/plugins/ccip/metrics_test.go b/core/services/ocr2/plugins/ccip/metrics_test.go new file mode 100644 index 00000000000..eec67db7dd0 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/metrics_test.go @@ -0,0 +1,47 @@ +package ccip + +import ( + "testing" + + "github.com/prometheus/client_golang/prometheus/testutil" + "github.com/stretchr/testify/assert" +) + +const ( + sourceChainId = 1337 + destChainId = 2337 +) + +func Test_SequenceNumbers(t *testing.T) { + collector := NewPluginMetricsCollector("test", sourceChainId, destChainId) + + collector.SequenceNumber(Report, 10) + assert.Equal(t, float64(10), testutil.ToFloat64(sequenceNumberCounter.WithLabelValues("test", "1337", "2337", "report"))) + + collector.SequenceNumber(Report, 0) + assert.Equal(t, float64(10), testutil.ToFloat64(sequenceNumberCounter.WithLabelValues("test", "1337", "2337", "report"))) +} + +func Test_NumberOfMessages(t *testing.T) { + collector := NewPluginMetricsCollector("test", sourceChainId, destChainId) + collector2 := NewPluginMetricsCollector("test2", destChainId, sourceChainId) + + collector.NumberOfMessagesBasedOnInterval(Observation, 1, 10) + assert.Equal(t, float64(10), testutil.ToFloat64(messagesProcessed.WithLabelValues("test", "1337", "2337", "observation"))) + + collector.NumberOfMessagesBasedOnInterval(Report, 5, 30) + assert.Equal(t, float64(26), testutil.ToFloat64(messagesProcessed.WithLabelValues("test", "1337", "2337", "report"))) + + collector2.NumberOfMessagesProcessed(Report, 15) + assert.Equal(t, float64(15), testutil.ToFloat64(messagesProcessed.WithLabelValues("test2", "2337", "1337", "report"))) +} + +func Test_UnexpiredCommitRoots(t *testing.T) { + collector := NewPluginMetricsCollector("test", sourceChainId, destChainId) + + collector.UnexpiredCommitRoots(10) + assert.Equal(t, float64(10), testutil.ToFloat64(unexpiredCommitRoots.WithLabelValues("test", "1337", "2337"))) + + collector.UnexpiredCommitRoots(5) + assert.Equal(t, float64(5), testutil.ToFloat64(unexpiredCommitRoots.WithLabelValues("test", "1337", "2337"))) +} diff --git a/core/services/ocr2/plugins/ccip/observations.go b/core/services/ocr2/plugins/ccip/observations.go new file mode 100644 index 00000000000..f79d667a550 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/observations.go @@ -0,0 +1,149 @@ +package ccip + +import ( + "encoding/json" + "fmt" + "math/big" + "strings" + + "github.com/smartcontractkit/libocr/commontypes" + + "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc" +) + +// Note if a breaking change is introduced to this struct nodes running different versions +// will not be able to unmarshal each other's observations. Do not modify unless you +// know what you are doing. 
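+// For illustration only (this mirrors the marshaling tests below), a serialized
+// CommitObservation looks roughly like:
+//
+//	{"interval":{"Min":1,"Max":12},"tokensPerFeeCoin":{"0xaaaaaa":1},"sourceGasPrice":3,"sourceGasPriceUSDPerChain":{"123":3}}
+//
+// Note that Marshal lower-cases the token price keys for backwards compatibility.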
+type CommitObservation struct { + Interval cciptypes.CommitStoreInterval `json:"interval"` + TokenPricesUSD map[cciptypes.Address]*big.Int `json:"tokensPerFeeCoin"` + SourceGasPriceUSD *big.Int `json:"sourceGasPrice"` // Deprecated + SourceGasPriceUSDPerChain map[uint64]*big.Int `json:"sourceGasPriceUSDPerChain"` +} + +// Marshal MUST be used instead of raw json.Marshal(o) since it contains backwards compatibility related changes. +func (o CommitObservation) Marshal() ([]byte, error) { + obsCopy := o + + // Similar to: commitObservationJSONBackComp but for commit observation marshaling. + tokenPricesUSD := make(map[cciptypes.Address]*big.Int, len(obsCopy.TokenPricesUSD)) + for k, v := range obsCopy.TokenPricesUSD { + tokenPricesUSD[cciptypes.Address(strings.ToLower(string(k)))] = v + } + obsCopy.TokenPricesUSD = tokenPricesUSD + + return json.Marshal(&obsCopy) +} + +// ExecutionObservation stores messages as a map pointing from a sequence number (uint) to the message payload (MsgData) +// Having it structured this way is critical because: +// * it prevents having duplicated sequence numbers within a single ExecutionObservation (compared to the list representation) +// * prevents malicious actors from passing multiple messages with the same sequence number +// Note if a breaking change is introduced to this struct nodes running different versions +// will not be able to unmarshal each other's observations. Do not modify unless you +// know what you are doing. +type ExecutionObservation struct { + Messages map[uint64]MsgData `json:"messages"` +} + +type MsgData struct { + TokenData [][]byte `json:"tokenData"` +} + +// ObservedMessage is a transient struct used for processing convenience within the plugin. It's easier to process observed messages +// when all properties are flattened into a single structure. +// It should not be serialized and returned from types.ReportingPlugin functions, please serialize/deserialize to/from ExecutionObservation instead using NewObservedMessage +type ObservedMessage struct { + SeqNr uint64 + MsgData +} + +func NewExecutionObservation(observations []ObservedMessage) ExecutionObservation { + denormalized := make(map[uint64]MsgData, len(observations)) + for _, o := range observations { + denormalized[o.SeqNr] = MsgData{TokenData: o.TokenData} + } + return ExecutionObservation{Messages: denormalized} +} + +func NewObservedMessage(seqNr uint64, tokenData [][]byte) ObservedMessage { + return ObservedMessage{ + SeqNr: seqNr, + MsgData: MsgData{TokenData: tokenData}, + } +} + +func (o ExecutionObservation) Marshal() ([]byte, error) { + return json.Marshal(&o) +} + +// GetParsableObservations checks the given observations for formatting and value errors. +// It returns all valid observations, potentially being an empty list. It will log +// malformed observations but never error. +// +// GetParsableObservations MUST be used instead of raw json.Unmarshal(o) since it contains backwards compatibility changes. 
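+//
+// Illustrative usage (variable names here are examples; see the tests below for concrete calls):
+//
+//	commitObs := GetParsableObservations[CommitObservation](lggr, attributedObservations)
+//	execObs := GetParsableObservations[ExecutionObservation](lggr, attributedObservations)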
+func GetParsableObservations[O CommitObservation | ExecutionObservation](l logger.Logger, observations []types.AttributedObservation) []O { + var parseableObservations []O + var observers []commontypes.OracleID + for _, ao := range observations { + if len(ao.Observation) == 0 { + // Empty observation + l.Infow("Discarded empty observation", "observer", ao.Observer) + continue + } + var ob O + var err error + obsJSON := ao.Observation + + switch any(ob).(type) { + case CommitObservation: + commitObservation, err1 := commitObservationJSONBackComp(ao.Observation) + if err1 != nil { + l.Errorw("commit observation json backwards compatibility format failed", "err", err, + "observation", string(ao.Observation), "observer", ao.Observer) + continue + } + ob = any(commitObservation).(O) + default: + err = json.Unmarshal(obsJSON, &ob) + if err != nil { + l.Errorw("Received unmarshallable observation", "err", err, "observation", string(ao.Observation), "observer", ao.Observer) + continue + } + } + + parseableObservations = append(parseableObservations, ob) + observers = append(observers, ao.Observer) + } + l.Infow( + "Parsed observations", + "observers", observers, + "observersLength", len(observers), + "observationsLength", len(parseableObservations), + "rawObservationLength", len(observations), + ) + return parseableObservations +} + +// For backwards compatibility, converts token prices to eip55. +// Prior to cciptypes.Address we were using go-ethereum common.Address type which is +// marshalled to lower-case while the string representation we used was eip55. +// Nodes that run different ccip version should generate the same observations. +func commitObservationJSONBackComp(obsJson []byte) (CommitObservation, error) { + var obs CommitObservation + err := json.Unmarshal(obsJson, &obs) + if err != nil { + return CommitObservation{}, fmt.Errorf("unmarshal observation: %w", err) + } + tokenPricesUSD := make(map[cciptypes.Address]*big.Int, len(obs.TokenPricesUSD)) + for k, v := range obs.TokenPricesUSD { + tokenPricesUSD[ccipcalc.HexToAddress(string(k))] = v + } + obs.TokenPricesUSD = tokenPricesUSD + return obs, nil +} diff --git a/core/services/ocr2/plugins/ccip/observations_test.go b/core/services/ocr2/plugins/ccip/observations_test.go new file mode 100644 index 00000000000..a3143f157d7 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/observations_test.go @@ -0,0 +1,305 @@ +package ccip + +import ( + "encoding/json" + "math/big" + "strings" + "testing" + + "github.com/leanovate/gopter" + "github.com/leanovate/gopter/gen" + "github.com/leanovate/gopter/prop" + "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/testhelpers" +) + +func TestObservationFilter(t *testing.T) { + lggr := logger.TestLogger(t) + obs1 := CommitObservation{Interval: cciptypes.CommitStoreInterval{Min: 1, Max: 10}} + b1, err := obs1.Marshal() + require.NoError(t, err) + nonEmpty := GetParsableObservations[CommitObservation](lggr, []types.AttributedObservation{{Observation: b1}, {Observation: []byte{}}}) + require.Equal(t, 1, len(nonEmpty)) + 
assert.Equal(t, nonEmpty[0].Interval, obs1.Interval) +} + +// This is the observation format up to 1.4.16 release +type CommitObservationLegacy struct { + Interval cciptypes.CommitStoreInterval `json:"interval"` + TokenPricesUSD map[cciptypes.Address]*big.Int `json:"tokensPerFeeCoin"` + SourceGasPriceUSD *big.Int `json:"sourceGasPrice"` +} + +func TestObservationCompat_MultiChainGas(t *testing.T) { + obsLegacy := CommitObservationLegacy{ + Interval: cciptypes.CommitStoreInterval{ + Min: 1, + Max: 12, + }, + TokenPricesUSD: map[cciptypes.Address]*big.Int{ccipcalc.HexToAddress("0x1"): big.NewInt(1)}, + SourceGasPriceUSD: big.NewInt(3)} + bL, err := json.Marshal(obsLegacy) + require.NoError(t, err) + obsNew := CommitObservation{ + Interval: cciptypes.CommitStoreInterval{ + Min: 1, + Max: 12, + }, + TokenPricesUSD: map[cciptypes.Address]*big.Int{ccipcalc.HexToAddress("0x1"): big.NewInt(1)}, + SourceGasPriceUSD: big.NewInt(3), + } + bN, err := json.Marshal(obsNew) + require.NoError(t, err) + + observations := GetParsableObservations[CommitObservation](logger.TestLogger(t), []types.AttributedObservation{{Observation: bL}, {Observation: bN}}) + + assert.Equal(t, 2, len(observations)) + assert.Equal(t, observations[0], observations[1]) +} + +func TestCommitObservationJsonDeserialization(t *testing.T) { + expectedObservation := CommitObservation{ + Interval: cciptypes.CommitStoreInterval{ + Min: 1, + Max: 12, + }, + TokenPricesUSD: map[cciptypes.Address]*big.Int{ + ccipcalc.HexToAddress("0x1"): big.NewInt(1)}, + SourceGasPriceUSD: big.NewInt(3), + } + + json := `{ + "interval": { + "Min":1, + "Max":12 + }, + "tokensPerFeeCoin": { + "0x0000000000000000000000000000000000000001": 1 + }, + "sourceGasPrice": 3 + }` + + observations := GetParsableObservations[CommitObservation](logger.TestLogger(t), []types.AttributedObservation{{Observation: []byte(json)}}) + assert.Equal(t, 1, len(observations)) + assert.Equal(t, expectedObservation, observations[0]) +} + +func TestCommitObservationMarshal(t *testing.T) { + obs := CommitObservation{ + Interval: cciptypes.CommitStoreInterval{ + Min: 1, + Max: 12, + }, + TokenPricesUSD: map[cciptypes.Address]*big.Int{"0xAaAaAa": big.NewInt(1)}, + SourceGasPriceUSD: big.NewInt(3), + SourceGasPriceUSDPerChain: map[uint64]*big.Int{123: big.NewInt(3)}, + } + + b, err := obs.Marshal() + require.NoError(t, err) + assert.Equal(t, `{"interval":{"Min":1,"Max":12},"tokensPerFeeCoin":{"0xaaaaaa":1},"sourceGasPrice":3,"sourceGasPriceUSDPerChain":{"123":3}}`, string(b)) + + // Make sure that the call to Marshal did not alter the original observation object. 
+ assert.Len(t, obs.TokenPricesUSD, 1) + _, exists := obs.TokenPricesUSD["0xAaAaAa"] + assert.True(t, exists) + _, exists = obs.TokenPricesUSD["0xaaaaaa"] + assert.False(t, exists) + + assert.Len(t, obs.SourceGasPriceUSDPerChain, 1) + _, exists = obs.SourceGasPriceUSDPerChain[123] + assert.True(t, exists) +} + +func TestExecutionObservationJsonDeserialization(t *testing.T) { + expectedObservation := ExecutionObservation{Messages: map[uint64]MsgData{ + 2: {TokenData: tokenData("c")}, + 1: {TokenData: tokenData("c")}, + }} + + // ["YQ=="] is "a" + // ["Yw=="] is "c" + json := `{ + "messages": { + "2":{"tokenData":["YQ=="]}, + "1":{"tokenData":["Yw=="]}, + "2":{"tokenData":["Yw=="]} + } + }` + + observations := GetParsableObservations[ExecutionObservation](logger.TestLogger(t), []types.AttributedObservation{{Observation: []byte(json)}}) + assert.Equal(t, 1, len(observations)) + assert.Equal(t, 2, len(observations[0].Messages)) + assert.Equal(t, expectedObservation, observations[0]) +} + +func TestObservationSize(t *testing.T) { + testParams := gopter.DefaultTestParameters() + testParams.MinSuccessfulTests = 100 + p := gopter.NewProperties(testParams) + p.Property("bounded observation size", prop.ForAll(func(min, max uint64) bool { + o := NewExecutionObservation( + []ObservedMessage{ + { + SeqNr: min, + MsgData: MsgData{}, + }, + { + SeqNr: max, + MsgData: MsgData{}, + }, + }, + ) + b, err := o.Marshal() + require.NoError(t, err) + return len(b) <= MaxObservationLength + }, gen.UInt64(), gen.UInt64())) + p.TestingRun(t) +} + +func TestNewExecutionObservation(t *testing.T) { + tests := []struct { + name string + observations []ObservedMessage + want ExecutionObservation + }{ + { + name: "nil observations", + observations: nil, + want: ExecutionObservation{Messages: map[uint64]MsgData{}}, + }, + { + name: "empty observations", + observations: []ObservedMessage{}, + want: ExecutionObservation{Messages: map[uint64]MsgData{}}, + }, + { + name: "observations with different sequence numbers", + observations: []ObservedMessage{ + NewObservedMessage(1, tokenData("a")), + NewObservedMessage(2, tokenData("b")), + NewObservedMessage(3, tokenData("c")), + }, + want: ExecutionObservation{ + Messages: map[uint64]MsgData{ + 1: {TokenData: tokenData("a")}, + 2: {TokenData: tokenData("b")}, + 3: {TokenData: tokenData("c")}, + }, + }, + }, + { + name: "last one wins in case of duplicates", + observations: []ObservedMessage{ + NewObservedMessage(1, tokenData("a")), + NewObservedMessage(1, tokenData("b")), + NewObservedMessage(1, tokenData("c")), + }, + want: ExecutionObservation{ + Messages: map[uint64]MsgData{ + 1: {TokenData: tokenData("c")}, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equalf(t, tt.want, NewExecutionObservation(tt.observations), "NewExecutionObservation(%v)", tt.observations) + }) + } +} + +func tokenData(value string) [][]byte { + return [][]byte{[]byte(value)} +} + +func TestCommitObservationJsonSerializationDeserialization(t *testing.T) { + jsonEncoded := `{ + "interval": { + "Min":1, + "Max":12 + }, + "tokensPerFeeCoin": { + "0x0000000000000000000000000000000000000001": 1, + "0x507877C2E26f1387432D067D2DaAfa7d0420d90a": 2, + "0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa": 3 + }, + "sourceGasPrice": 3, + "sourceGasPriceUSDPerChain": { + "123":3 + } + }` + + expectedObservation := CommitObservation{ + Interval: cciptypes.CommitStoreInterval{ + Min: 1, + Max: 12, + }, + TokenPricesUSD: map[cciptypes.Address]*big.Int{ + 
cciptypes.Address("0x0000000000000000000000000000000000000001"): big.NewInt(1), + cciptypes.Address("0x507877C2E26f1387432D067D2DaAfa7d0420d90a"): big.NewInt(2), // json eip55->eip55 parsed + cciptypes.Address("0xaAaAaAaaAaAaAaaAaAAAAAAAAaaaAaAaAaaAaaAa"): big.NewInt(3), // json lower->eip55 parsed + }, + SourceGasPriceUSD: big.NewInt(3), + SourceGasPriceUSDPerChain: map[uint64]*big.Int{ + 123: big.NewInt(3), + }, + } + + observations := GetParsableObservations[CommitObservation](logger.TestLogger(t), []types.AttributedObservation{ + {Observation: []byte(jsonEncoded)}, + }) + assert.Equal(t, 1, len(observations)) + assert.Equal(t, expectedObservation, observations[0]) + + backToJson, err := expectedObservation.Marshal() + // we expect the json encoded addresses to be lower-case + exp := strings.ReplaceAll( + jsonEncoded, "0x507877C2E26f1387432D067D2DaAfa7d0420d90a", strings.ToLower("0x507877C2E26f1387432D067D2DaAfa7d0420d90a")) + assert.NoError(t, err) + assert.JSONEq(t, exp, string(backToJson)) + + // and we expect to get the same results after we parse the lower-case addresses + observations = GetParsableObservations[CommitObservation](logger.TestLogger(t), []types.AttributedObservation{ + {Observation: []byte(jsonEncoded)}, + }) + assert.Equal(t, 1, len(observations)) + assert.Equal(t, expectedObservation, observations[0]) +} + +func TestAddressEncodingBackwardsCompatibility(t *testing.T) { + // The intention of this test is to remind including proper formatting of addresses after config is updated. + // + // The following tests will fail when a new cciptypes.Address field is added or removed. + // If you notice that the test is failing, make sure to apply proper address formatting + // after the struct is marshalled/unmarshalled and then include your new field in the expected fields slice to + // make this test pass or if you removed a field, remove it from the expected fields slice. 
+ + t.Run("job spec config", func(t *testing.T) { + exp := []string{"ccip.Address OffRamp"} + + fields := testhelpers.FindStructFieldsOfCertainType( + "ccip.Address", + config.CommitPluginJobSpecConfig{PriceGetterConfig: &config.DynamicPriceGetterConfig{}}, + ) + assert.Equal(t, exp, fields) + }) + + t.Run("commit observation", func(t *testing.T) { + exp := []string{"map[ccip.Address]*big.Int TokenPricesUSD"} + + fields := testhelpers.FindStructFieldsOfCertainType( + "ccip.Address", + CommitObservation{SourceGasPriceUSD: big.NewInt(0)}, + ) + assert.Equal(t, exp, fields) + }) +} diff --git a/core/services/ocr2/plugins/ccip/pkg/leafer/leafer.go b/core/services/ocr2/plugins/ccip/pkg/leafer/leafer.go new file mode 100644 index 00000000000..c334f159fd2 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/pkg/leafer/leafer.go @@ -0,0 +1,61 @@ +package leafer + +import ( + "fmt" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + + "github.com/smartcontractkit/chainlink-common/pkg/hashutil" + + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_onramp" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_onramp_1_0_0" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_onramp_1_2_0" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_5_0" +) + +// LeafHasher converts a CCIPSendRequested event into something that can be hashed and hashes it. +type LeafHasher interface { + HashLeaf(log types.Log) ([32]byte, error) +} + +// Version is the contract to use. +type Version string + +const ( + V1_0_0 Version = "v1_0_0" + V1_2_0 Version = "v1_2_0" + V1_5_0 Version = "v1_5_0" +) + +// MakeLeafHasher is a factory function to construct the onramp implementing the HashLeaf function for a given version. 
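+// The ctx parameter is the hashing context (a hashutil.Hasher[[32]byte], e.g. a Keccak-based
+// hasher for EVM chains), not a context.Context. Unknown versions return an error.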
+func MakeLeafHasher(ver Version, cl bind.ContractBackend, sourceChainSelector uint64, destChainSelector uint64, onRampId common.Address, ctx hashutil.Hasher[[32]byte]) (LeafHasher, error) { + switch ver { + case V1_0_0: + or, err := evm_2_evm_onramp_1_0_0.NewEVM2EVMOnRamp(onRampId, cl) + if err != nil { + return nil, err + } + h := v1_0_0.NewLeafHasher(sourceChainSelector, destChainSelector, onRampId, ctx, or) + return h, nil + case V1_2_0: + or, err := evm_2_evm_onramp_1_2_0.NewEVM2EVMOnRamp(onRampId, cl) + if err != nil { + return nil, err + } + h := v1_2_0.NewLeafHasher(sourceChainSelector, destChainSelector, onRampId, ctx, or) + return h, nil + case V1_5_0: + or, err := evm_2_evm_onramp.NewEVM2EVMOnRamp(onRampId, cl) + if err != nil { + return nil, err + } + h := v1_5_0.NewLeafHasher(sourceChainSelector, destChainSelector, onRampId, ctx, or) + return h, nil + default: + return nil, fmt.Errorf("unknown version %q", ver) + } +} diff --git a/core/services/ocr2/plugins/ccip/prices/da_price_estimator.go b/core/services/ocr2/plugins/ccip/prices/da_price_estimator.go new file mode 100644 index 00000000000..7c75b9bdd99 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/prices/da_price_estimator.go @@ -0,0 +1,176 @@ +package prices + +import ( + "context" + "fmt" + "math/big" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas/rollups" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc" +) + +type DAGasPriceEstimator struct { + execEstimator GasPriceEstimator + l1Oracle rollups.L1Oracle + priceEncodingLength uint + daDeviationPPB int64 + daOverheadGas int64 + gasPerDAByte int64 + daMultiplier int64 +} + +func NewDAGasPriceEstimator( + estimator gas.EvmFeeEstimator, + maxGasPrice *big.Int, + deviationPPB int64, + daDeviationPPB int64, +) *DAGasPriceEstimator { + return &DAGasPriceEstimator{ + execEstimator: NewExecGasPriceEstimator(estimator, maxGasPrice, deviationPPB), + l1Oracle: estimator.L1Oracle(), + priceEncodingLength: daGasPriceEncodingLength, + daDeviationPPB: daDeviationPPB, + } +} + +func (g DAGasPriceEstimator) GetGasPrice(ctx context.Context) (*big.Int, error) { + execGasPrice, err := g.execEstimator.GetGasPrice(ctx) + if err != nil { + return nil, err + } + var gasPrice *big.Int = execGasPrice + if gasPrice.BitLen() > int(g.priceEncodingLength) { + return nil, fmt.Errorf("native gas price exceeded max range %+v", gasPrice) + } + + if g.l1Oracle == nil { + return gasPrice, nil + } + + daGasPriceWei, err := g.l1Oracle.GasPrice(ctx) + if err != nil { + return nil, err + } + + if daGasPrice := daGasPriceWei.ToInt(); daGasPrice.Cmp(big.NewInt(0)) > 0 { + if daGasPrice.BitLen() > int(g.priceEncodingLength) { + return nil, fmt.Errorf("data availability gas price exceeded max range %+v", daGasPrice) + } + + daGasPrice = new(big.Int).Lsh(daGasPrice, g.priceEncodingLength) + gasPrice = new(big.Int).Add(gasPrice, daGasPrice) + } + + return gasPrice, nil +} + +func (g DAGasPriceEstimator) DenoteInUSD(p *big.Int, wrappedNativePrice *big.Int) (*big.Int, error) { + daGasPrice, execGasPrice, err := g.parseEncodedGasPrice(p) + if err != nil { + return nil, err + } + + // This assumes l1GasPrice is priced using the same native token as l2 native + daUSD := ccipcalc.CalculateUsdPerUnitGas(daGasPrice, wrappedNativePrice) + if daUSD.BitLen() > int(g.priceEncodingLength) { + return nil, fmt.Errorf("data 
availability gas price USD exceeded max range %+v", daUSD) + } + execUSD := ccipcalc.CalculateUsdPerUnitGas(execGasPrice, wrappedNativePrice) + if execUSD.BitLen() > int(g.priceEncodingLength) { + return nil, fmt.Errorf("exec gas price USD exceeded max range %+v", execUSD) + } + + daUSD = new(big.Int).Lsh(daUSD, g.priceEncodingLength) + return new(big.Int).Add(daUSD, execUSD), nil +} + +func (g DAGasPriceEstimator) Median(gasPrices []*big.Int) (*big.Int, error) { + daPrices := make([]*big.Int, len(gasPrices)) + execPrices := make([]*big.Int, len(gasPrices)) + + for i := range gasPrices { + daGasPrice, execGasPrice, err := g.parseEncodedGasPrice(gasPrices[i]) + if err != nil { + return nil, err + } + + daPrices[i] = daGasPrice + execPrices[i] = execGasPrice + } + + daMedian := ccipcalc.BigIntSortedMiddle(daPrices) + execMedian := ccipcalc.BigIntSortedMiddle(execPrices) + + daMedian = new(big.Int).Lsh(daMedian, g.priceEncodingLength) + return new(big.Int).Add(daMedian, execMedian), nil +} + +func (g DAGasPriceEstimator) Deviates(p1, p2 *big.Int) (bool, error) { + p1DAGasPrice, p1ExecGasPrice, err := g.parseEncodedGasPrice(p1) + if err != nil { + return false, err + } + p2DAGasPrice, p2ExecGasPrice, err := g.parseEncodedGasPrice(p2) + if err != nil { + return false, err + } + + execDeviates, err := g.execEstimator.Deviates(p1ExecGasPrice, p2ExecGasPrice) + if err != nil { + return false, err + } + if execDeviates { + return execDeviates, nil + } + + return ccipcalc.Deviates(p1DAGasPrice, p2DAGasPrice, g.daDeviationPPB), nil +} + +func (g DAGasPriceEstimator) EstimateMsgCostUSD(p *big.Int, wrappedNativePrice *big.Int, msg cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta) (*big.Int, error) { + daGasPrice, execGasPrice, err := g.parseEncodedGasPrice(p) + if err != nil { + return nil, err + } + + execCostUSD, err := g.execEstimator.EstimateMsgCostUSD(execGasPrice, wrappedNativePrice, msg) + if err != nil { + return nil, err + } + + // If there is data availability price component, then include data availability cost in fee estimation + if daGasPrice.Cmp(big.NewInt(0)) > 0 { + daGasCostUSD := g.estimateDACostUSD(daGasPrice, wrappedNativePrice, msg) + execCostUSD = new(big.Int).Add(daGasCostUSD, execCostUSD) + } + return execCostUSD, nil +} + +func (g DAGasPriceEstimator) parseEncodedGasPrice(p *big.Int) (*big.Int, *big.Int, error) { + if p.BitLen() > int(g.priceEncodingLength*2) { + return nil, nil, fmt.Errorf("encoded gas price exceeded max range %+v", p) + } + + daGasPrice := new(big.Int).Rsh(p, g.priceEncodingLength) + + daStart := new(big.Int).Lsh(big.NewInt(1), g.priceEncodingLength) + execGasPrice := new(big.Int).Mod(p, daStart) + + return daGasPrice, execGasPrice, nil +} + +func (g DAGasPriceEstimator) estimateDACostUSD(daGasPrice *big.Int, wrappedNativePrice *big.Int, msg cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta) *big.Int { + var sourceTokenDataLen int + for _, tokenData := range msg.SourceTokenData { + sourceTokenDataLen += len(tokenData) + } + + dataLen := evmMessageFixedBytes + len(msg.Data) + len(msg.TokenAmounts)*evmMessageBytesPerToken + sourceTokenDataLen + dataGas := big.NewInt(int64(dataLen)*g.gasPerDAByte + g.daOverheadGas) + + dataGasEstimate := new(big.Int).Mul(dataGas, daGasPrice) + dataGasEstimate = new(big.Int).Div(new(big.Int).Mul(dataGasEstimate, big.NewInt(g.daMultiplier)), big.NewInt(daMultiplierBase)) + + return ccipcalc.CalculateUsdPerUnitGas(dataGasEstimate, wrappedNativePrice) +} diff --git a/core/services/ocr2/plugins/ccip/prices/da_price_estimator_test.go 
b/core/services/ocr2/plugins/ccip/prices/da_price_estimator_test.go new file mode 100644 index 00000000000..2f8616a8669 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/prices/da_price_estimator_test.go @@ -0,0 +1,440 @@ +package prices + +import ( + "context" + "math/big" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/assets" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas/rollups/mocks" +) + +func encodeGasPrice(daPrice, execPrice *big.Int) *big.Int { + return new(big.Int).Add(new(big.Int).Lsh(daPrice, daGasPriceEncodingLength), execPrice) +} + +func TestDAPriceEstimator_GetGasPrice(t *testing.T) { + ctx := context.Background() + + testCases := []struct { + name string + daGasPrice *big.Int + execGasPrice *big.Int + expPrice *big.Int + expErr bool + }{ + { + name: "base", + daGasPrice: big.NewInt(1), + execGasPrice: big.NewInt(0), + expPrice: encodeGasPrice(big.NewInt(1), big.NewInt(0)), + expErr: false, + }, + { + name: "large values", + daGasPrice: big.NewInt(1e9), // 1 gwei + execGasPrice: big.NewInt(200e9), // 200 gwei + expPrice: encodeGasPrice(big.NewInt(1e9), big.NewInt(200e9)), + expErr: false, + }, + { + name: "zero DA price", + daGasPrice: big.NewInt(0), + execGasPrice: big.NewInt(200e9), + expPrice: encodeGasPrice(big.NewInt(0), big.NewInt(200e9)), + expErr: false, + }, + { + name: "zero exec price", + daGasPrice: big.NewInt(1e9), + execGasPrice: big.NewInt(0), + expPrice: encodeGasPrice(big.NewInt(1e9), big.NewInt(0)), + expErr: false, + }, + { + name: "price out of bounds", + daGasPrice: new(big.Int).Lsh(big.NewInt(1), daGasPriceEncodingLength), + execGasPrice: big.NewInt(1), + expPrice: nil, + expErr: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + execEstimator := NewMockGasPriceEstimator(t) + execEstimator.On("GetGasPrice", ctx).Return(tc.execGasPrice, nil) + + l1Oracle := mocks.NewL1Oracle(t) + l1Oracle.On("GasPrice", ctx).Return(assets.NewWei(tc.daGasPrice), nil) + + g := DAGasPriceEstimator{ + execEstimator: execEstimator, + l1Oracle: l1Oracle, + priceEncodingLength: daGasPriceEncodingLength, + } + + gasPrice, err := g.GetGasPrice(ctx) + if tc.expErr { + assert.Error(t, err) + return + } + assert.NoError(t, err) + assert.Equal(t, tc.expPrice, gasPrice) + }) + } + + t.Run("nil L1 oracle", func(t *testing.T) { + expPrice := big.NewInt(1) + + execEstimator := NewMockGasPriceEstimator(t) + execEstimator.On("GetGasPrice", ctx).Return(expPrice, nil) + + g := DAGasPriceEstimator{ + execEstimator: execEstimator, + l1Oracle: nil, + priceEncodingLength: daGasPriceEncodingLength, + } + + gasPrice, err := g.GetGasPrice(ctx) + assert.NoError(t, err) + assert.Equal(t, expPrice, gasPrice) + }) +} + +func TestDAPriceEstimator_DenoteInUSD(t *testing.T) { + val1e18 := func(val int64) *big.Int { return new(big.Int).Mul(big.NewInt(1e18), big.NewInt(val)) } + + testCases := []struct { + name string + gasPrice *big.Int + nativePrice *big.Int + expPrice *big.Int + }{ + { + name: "base", + gasPrice: encodeGasPrice(big.NewInt(1e9), big.NewInt(10e9)), + nativePrice: val1e18(2_000), + expPrice: encodeGasPrice(big.NewInt(2000e9), big.NewInt(20000e9)), + }, + { + name: "low price truncates to 0", + gasPrice: encodeGasPrice(big.NewInt(1e9), big.NewInt(10e9)), + nativePrice: big.NewInt(1), + expPrice: big.NewInt(0), + }, + { + name: "high price", + gasPrice: 
encodeGasPrice(val1e18(1), val1e18(10)), + nativePrice: val1e18(2000), + expPrice: encodeGasPrice(val1e18(2_000), val1e18(20_000)), + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + g := DAGasPriceEstimator{ + priceEncodingLength: daGasPriceEncodingLength, + } + + gasPrice, err := g.DenoteInUSD(tc.gasPrice, tc.nativePrice) + assert.NoError(t, err) + assert.True(t, tc.expPrice.Cmp(gasPrice) == 0) + }) + } +} + +func TestDAPriceEstimator_Median(t *testing.T) { + val1e18 := func(val int64) *big.Int { return new(big.Int).Mul(big.NewInt(1e18), big.NewInt(val)) } + + testCases := []struct { + name string + gasPrices []*big.Int + expMedian *big.Int + }{ + { + name: "base", + gasPrices: []*big.Int{ + encodeGasPrice(big.NewInt(1), big.NewInt(1)), + encodeGasPrice(big.NewInt(2), big.NewInt(2)), + encodeGasPrice(big.NewInt(3), big.NewInt(3)), + }, + expMedian: encodeGasPrice(big.NewInt(2), big.NewInt(2)), + }, + { + name: "median 2", + gasPrices: []*big.Int{ + encodeGasPrice(big.NewInt(1), big.NewInt(1)), + encodeGasPrice(big.NewInt(2), big.NewInt(2)), + }, + expMedian: encodeGasPrice(big.NewInt(2), big.NewInt(2)), + }, + { + name: "large values", + gasPrices: []*big.Int{ + encodeGasPrice(val1e18(5), val1e18(5)), + encodeGasPrice(val1e18(4), val1e18(4)), + encodeGasPrice(val1e18(3), val1e18(3)), + encodeGasPrice(val1e18(2), val1e18(2)), + encodeGasPrice(val1e18(1), val1e18(1)), + }, + expMedian: encodeGasPrice(val1e18(3), val1e18(3)), + }, + { + name: "zeros", + gasPrices: []*big.Int{big.NewInt(0), big.NewInt(0), big.NewInt(0)}, + expMedian: big.NewInt(0), + }, + { + name: "picks median of each price component individually", + gasPrices: []*big.Int{ + encodeGasPrice(val1e18(1), val1e18(3)), + encodeGasPrice(val1e18(2), val1e18(2)), + encodeGasPrice(val1e18(3), val1e18(1)), + }, + expMedian: encodeGasPrice(val1e18(2), val1e18(2)), + }, + { + name: "unsorted even number of price components", + gasPrices: []*big.Int{ + encodeGasPrice(val1e18(1), val1e18(22)), + encodeGasPrice(val1e18(4), val1e18(33)), + encodeGasPrice(val1e18(2), val1e18(44)), + encodeGasPrice(val1e18(3), val1e18(11)), + }, + expMedian: encodeGasPrice(val1e18(3), val1e18(33)), + }, + { + name: "equal DA price components", + gasPrices: []*big.Int{ + encodeGasPrice(val1e18(2), val1e18(22)), + encodeGasPrice(val1e18(2), val1e18(33)), + encodeGasPrice(val1e18(2), val1e18(44)), + encodeGasPrice(val1e18(2), val1e18(11)), + }, + expMedian: encodeGasPrice(val1e18(2), val1e18(33)), + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + g := DAGasPriceEstimator{ + priceEncodingLength: daGasPriceEncodingLength, + } + + gasPrice, err := g.Median(tc.gasPrices) + assert.NoError(t, err) + assert.True(t, tc.expMedian.Cmp(gasPrice) == 0) + }) + } +} + +func TestDAPriceEstimator_Deviates(t *testing.T) { + testCases := []struct { + name string + gasPrice1 *big.Int + gasPrice2 *big.Int + daDeviationPPB int64 + execDeviationPPB int64 + expDeviates bool + }{ + { + name: "base", + gasPrice1: encodeGasPrice(big.NewInt(100e8), big.NewInt(100e8)), + gasPrice2: encodeGasPrice(big.NewInt(79e8), big.NewInt(79e8)), + daDeviationPPB: 2e8, + execDeviationPPB: 2e8, + expDeviates: true, + }, + { + name: "negative difference also deviates", + gasPrice1: encodeGasPrice(big.NewInt(100e8), big.NewInt(100e8)), + gasPrice2: encodeGasPrice(big.NewInt(121e8), big.NewInt(121e8)), + daDeviationPPB: 2e8, + execDeviationPPB: 2e8, + expDeviates: true, + }, + { + name: "only DA component deviates", + gasPrice1: 
encodeGasPrice(big.NewInt(100e8), big.NewInt(100e8)), + gasPrice2: encodeGasPrice(big.NewInt(150e8), big.NewInt(110e8)), + daDeviationPPB: 2e8, + execDeviationPPB: 2e8, + expDeviates: true, + }, + { + name: "only exec component deviates", + gasPrice1: encodeGasPrice(big.NewInt(100e8), big.NewInt(100e8)), + gasPrice2: encodeGasPrice(big.NewInt(110e8), big.NewInt(150e8)), + daDeviationPPB: 2e8, + execDeviationPPB: 2e8, + expDeviates: true, + }, + { + name: "both do not deviate", + gasPrice1: encodeGasPrice(big.NewInt(100e8), big.NewInt(100e8)), + gasPrice2: encodeGasPrice(big.NewInt(110e8), big.NewInt(110e8)), + daDeviationPPB: 2e8, + execDeviationPPB: 2e8, + expDeviates: false, + }, + { + name: "zero DA price and exec deviates", + gasPrice1: encodeGasPrice(big.NewInt(0), big.NewInt(100e8)), + gasPrice2: encodeGasPrice(big.NewInt(0), big.NewInt(121e8)), + daDeviationPPB: 2e8, + execDeviationPPB: 2e8, + expDeviates: true, + }, + { + name: "zero DA price and exec does not deviate", + gasPrice1: encodeGasPrice(big.NewInt(0), big.NewInt(100e8)), + gasPrice2: encodeGasPrice(big.NewInt(0), big.NewInt(110e8)), + daDeviationPPB: 2e8, + execDeviationPPB: 2e8, + expDeviates: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + g := DAGasPriceEstimator{ + execEstimator: ExecGasPriceEstimator{ + deviationPPB: tc.execDeviationPPB, + }, + daDeviationPPB: tc.daDeviationPPB, + priceEncodingLength: daGasPriceEncodingLength, + } + + deviated, err := g.Deviates(tc.gasPrice1, tc.gasPrice2) + assert.NoError(t, err) + if tc.expDeviates { + assert.True(t, deviated) + } else { + assert.False(t, deviated) + } + }) + } +} + +func TestDAPriceEstimator_EstimateMsgCostUSD(t *testing.T) { + execCostUSD := big.NewInt(100_000) + + testCases := []struct { + name string + gasPrice *big.Int + wrappedNativePrice *big.Int + msg cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta + daOverheadGas int64 + gasPerDAByte int64 + daMultiplier int64 + expUSD *big.Int + }{ + { + name: "only DA overhead", + gasPrice: encodeGasPrice(big.NewInt(1e9), big.NewInt(0)), // 1 gwei DA price, 0 exec price + wrappedNativePrice: big.NewInt(1e18), // $1 + msg: cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{ + EVM2EVMMessage: cciptypes.EVM2EVMMessage{ + Data: []byte{}, + TokenAmounts: []cciptypes.TokenAmount{}, + SourceTokenData: [][]byte{}, + }, + }, + daOverheadGas: 100_000, + gasPerDAByte: 0, + daMultiplier: 10_000, // 1x multiplier + expUSD: new(big.Int).Add(execCostUSD, big.NewInt(100_000e9)), + }, + { + name: "include message data gas", + gasPrice: encodeGasPrice(big.NewInt(1e9), big.NewInt(0)), // 1 gwei DA price, 0 exec price + wrappedNativePrice: big.NewInt(1e18), // $1 + msg: cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{ + EVM2EVMMessage: cciptypes.EVM2EVMMessage{ + Data: make([]byte, 1_000), + TokenAmounts: make([]cciptypes.TokenAmount, 5), + SourceTokenData: [][]byte{ + make([]byte, 10), make([]byte, 10), make([]byte, 10), make([]byte, 10), make([]byte, 10), + }, + }, + }, + daOverheadGas: 100_000, + gasPerDAByte: 16, + daMultiplier: 10_000, // 1x multiplier + expUSD: new(big.Int).Add(execCostUSD, big.NewInt(134_208e9)), + }, + { + name: "zero DA price", + gasPrice: big.NewInt(0), // 1 gwei DA price, 0 exec price + wrappedNativePrice: big.NewInt(1e18), // $1 + msg: cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{ + EVM2EVMMessage: cciptypes.EVM2EVMMessage{ + Data: []byte{}, + TokenAmounts: []cciptypes.TokenAmount{}, + SourceTokenData: [][]byte{}, + }, + }, + daOverheadGas: 100_000, + gasPerDAByte: 
16, + daMultiplier: 10_000, // 1x multiplier + expUSD: execCostUSD, + }, + { + name: "double native price", + gasPrice: encodeGasPrice(big.NewInt(1e9), big.NewInt(0)), // 1 gwei DA price, 0 exec price + wrappedNativePrice: big.NewInt(2e18), // $1 + msg: cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{ + EVM2EVMMessage: cciptypes.EVM2EVMMessage{ + Data: []byte{}, + TokenAmounts: []cciptypes.TokenAmount{}, + SourceTokenData: [][]byte{}, + }, + }, + daOverheadGas: 100_000, + gasPerDAByte: 0, + daMultiplier: 10_000, // 1x multiplier + expUSD: new(big.Int).Add(execCostUSD, big.NewInt(200_000e9)), + }, + { + name: "half multiplier", + gasPrice: encodeGasPrice(big.NewInt(1e9), big.NewInt(0)), // 1 gwei DA price, 0 exec price + wrappedNativePrice: big.NewInt(1e18), // $1 + msg: cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{ + EVM2EVMMessage: cciptypes.EVM2EVMMessage{ + Data: []byte{}, + TokenAmounts: []cciptypes.TokenAmount{}, + SourceTokenData: [][]byte{}, + }, + }, + daOverheadGas: 100_000, + gasPerDAByte: 0, + daMultiplier: 5_000, // 0.5x multiplier + expUSD: new(big.Int).Add(execCostUSD, big.NewInt(50_000e9)), + }, + } + + for _, tc := range testCases { + execEstimator := NewMockGasPriceEstimator(t) + execEstimator.On("EstimateMsgCostUSD", mock.Anything, tc.wrappedNativePrice, tc.msg).Return(execCostUSD, nil) + + t.Run(tc.name, func(t *testing.T) { + g := DAGasPriceEstimator{ + execEstimator: execEstimator, + l1Oracle: nil, + priceEncodingLength: daGasPriceEncodingLength, + daOverheadGas: tc.daOverheadGas, + gasPerDAByte: tc.gasPerDAByte, + daMultiplier: tc.daMultiplier, + } + + costUSD, err := g.EstimateMsgCostUSD(tc.gasPrice, tc.wrappedNativePrice, tc.msg) + assert.NoError(t, err) + assert.Equal(t, tc.expUSD, costUSD) + }) + } +} diff --git a/core/services/ocr2/plugins/ccip/prices/exec_price_estimator.go b/core/services/ocr2/plugins/ccip/prices/exec_price_estimator.go new file mode 100644 index 00000000000..56e1ddb583e --- /dev/null +++ b/core/services/ocr2/plugins/ccip/prices/exec_price_estimator.go @@ -0,0 +1,65 @@ +package prices + +import ( + "context" + "fmt" + "math/big" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/assets" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc" +) + +type ExecGasPriceEstimator struct { + estimator gas.EvmFeeEstimator + maxGasPrice *big.Int + deviationPPB int64 +} + +func NewExecGasPriceEstimator(estimator gas.EvmFeeEstimator, maxGasPrice *big.Int, deviationPPB int64) ExecGasPriceEstimator { + return ExecGasPriceEstimator{ + estimator: estimator, + maxGasPrice: maxGasPrice, + deviationPPB: deviationPPB, + } +} + +func (g ExecGasPriceEstimator) GetGasPrice(ctx context.Context) (*big.Int, error) { + gasPriceWei, _, err := g.estimator.GetFee(ctx, nil, 0, assets.NewWei(g.maxGasPrice)) + if err != nil { + return nil, err + } + // Use legacy if no dynamic is available. 
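
A note on the packed gas price used throughout DAGasPriceEstimator and its tests above: the data-availability component occupies the bits above daGasPriceEncodingLength (112) and the execution component the bits below it, so both prices travel in a single *big.Int. The standalone sketch below mirrors encodeGasPrice from the test file and parseEncodedGasPrice from the estimator; the encode/decode helper names and the main function are illustrative only, not part of this change.

package main

import (
	"fmt"
	"math/big"
)

// daGasPriceEncodingLength mirrors the constant declared in gas_price_estimator.go.
const daGasPriceEncodingLength = 112

// encode packs the DA price into the high bits and the exec price into the low bits,
// as encodeGasPrice does in da_price_estimator_test.go.
func encode(daPrice, execPrice *big.Int) *big.Int {
	return new(big.Int).Add(new(big.Int).Lsh(daPrice, daGasPriceEncodingLength), execPrice)
}

// decode reverses the packing, as parseEncodedGasPrice does in da_price_estimator.go.
func decode(p *big.Int) (daPrice, execPrice *big.Int) {
	daPrice = new(big.Int).Rsh(p, daGasPriceEncodingLength)
	daStart := new(big.Int).Lsh(big.NewInt(1), daGasPriceEncodingLength)
	execPrice = new(big.Int).Mod(p, daStart)
	return daPrice, execPrice
}

func main() {
	packed := encode(big.NewInt(1e9), big.NewInt(200e9)) // 1 gwei DA, 200 gwei exec
	da, exec := decode(packed)
	fmt.Println(da, exec) // 1000000000 200000000000
}
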
+ gasPrice := gasPriceWei.Legacy.ToInt() + if gasPriceWei.DynamicFeeCap != nil { + gasPrice = gasPriceWei.DynamicFeeCap.ToInt() + } + if gasPrice == nil { + return nil, fmt.Errorf("missing gas price %+v", gasPriceWei) + } + + return gasPrice, nil +} + +func (g ExecGasPriceEstimator) DenoteInUSD(p *big.Int, wrappedNativePrice *big.Int) (*big.Int, error) { + return ccipcalc.CalculateUsdPerUnitGas(p, wrappedNativePrice), nil +} + +func (g ExecGasPriceEstimator) Median(gasPrices []*big.Int) (*big.Int, error) { + return ccipcalc.BigIntSortedMiddle(gasPrices), nil +} + +func (g ExecGasPriceEstimator) Deviates(p1 *big.Int, p2 *big.Int) (bool, error) { + return ccipcalc.Deviates(p1, p2, g.deviationPPB), nil +} + +func (g ExecGasPriceEstimator) EstimateMsgCostUSD(p *big.Int, wrappedNativePrice *big.Int, msg cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta) (*big.Int, error) { + execGasAmount := new(big.Int).Add(big.NewInt(feeBoostingOverheadGas), msg.GasLimit) + execGasAmount = new(big.Int).Add(execGasAmount, new(big.Int).Mul(big.NewInt(int64(len(msg.Data))), big.NewInt(execGasPerPayloadByte))) + execGasAmount = new(big.Int).Add(execGasAmount, new(big.Int).Mul(big.NewInt(int64(len(msg.TokenAmounts))), big.NewInt(execGasPerToken))) + + execGasCost := new(big.Int).Mul(execGasAmount, p) + + return ccipcalc.CalculateUsdPerUnitGas(execGasCost, wrappedNativePrice), nil +} diff --git a/core/services/ocr2/plugins/ccip/prices/exec_price_estimator_test.go b/core/services/ocr2/plugins/ccip/prices/exec_price_estimator_test.go new file mode 100644 index 00000000000..e1c2fa03981 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/prices/exec_price_estimator_test.go @@ -0,0 +1,351 @@ +package prices + +import ( + "context" + "math/big" + "testing" + + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/assets" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas/mocks" +) + +func TestExecPriceEstimator_GetGasPrice(t *testing.T) { + ctx := context.Background() + + testCases := []struct { + name string + sourceFeeEstimatorRespFee gas.EvmFee + sourceFeeEstimatorRespErr error + maxGasPrice *big.Int + expPrice *big.Int + expErr bool + }{ + { + name: "gets legacy gas price", + sourceFeeEstimatorRespFee: gas.EvmFee{ + Legacy: assets.NewWei(big.NewInt(10)), + DynamicFeeCap: nil, + }, + sourceFeeEstimatorRespErr: nil, + maxGasPrice: big.NewInt(1), + expPrice: big.NewInt(10), + expErr: false, + }, + { + name: "gets dynamic gas price", + sourceFeeEstimatorRespFee: gas.EvmFee{ + Legacy: nil, + DynamicFeeCap: assets.NewWei(big.NewInt(20)), + }, + sourceFeeEstimatorRespErr: nil, + maxGasPrice: big.NewInt(1), + expPrice: big.NewInt(20), + expErr: false, + }, + { + name: "gets dynamic gas price over legacy gas price", + sourceFeeEstimatorRespFee: gas.EvmFee{ + Legacy: assets.NewWei(big.NewInt(10)), + DynamicFeeCap: assets.NewWei(big.NewInt(20)), + }, + sourceFeeEstimatorRespErr: nil, + maxGasPrice: big.NewInt(1), + expPrice: big.NewInt(20), + expErr: false, + }, + { + name: "fee estimator error", + sourceFeeEstimatorRespFee: gas.EvmFee{ + Legacy: assets.NewWei(big.NewInt(10)), + DynamicFeeCap: nil, + }, + sourceFeeEstimatorRespErr: errors.New("fee estimator error"), + maxGasPrice: big.NewInt(1), + expPrice: nil, + expErr: true, + }, + { + name: "nil gas price error", + sourceFeeEstimatorRespFee: gas.EvmFee{ + 
Legacy: nil, + DynamicFeeCap: nil, + }, + sourceFeeEstimatorRespErr: nil, + maxGasPrice: big.NewInt(1), + expPrice: nil, + expErr: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + sourceFeeEstimator := mocks.NewEvmFeeEstimator(t) + sourceFeeEstimator.On("GetFee", ctx, []byte(nil), uint64(0), assets.NewWei(tc.maxGasPrice)).Return( + tc.sourceFeeEstimatorRespFee, uint64(0), tc.sourceFeeEstimatorRespErr) + + g := ExecGasPriceEstimator{ + estimator: sourceFeeEstimator, + maxGasPrice: tc.maxGasPrice, + } + + gasPrice, err := g.GetGasPrice(ctx) + if tc.expErr { + assert.Nil(t, gasPrice) + assert.Error(t, err) + return + } + assert.NoError(t, err) + assert.Equal(t, tc.expPrice, gasPrice) + }) + } +} + +func TestExecPriceEstimator_DenoteInUSD(t *testing.T) { + val1e18 := func(val int64) *big.Int { return new(big.Int).Mul(big.NewInt(1e18), big.NewInt(val)) } + + testCases := []struct { + name string + gasPrice *big.Int + nativePrice *big.Int + expPrice *big.Int + }{ + { + name: "base", + gasPrice: big.NewInt(1e9), + nativePrice: val1e18(2_000), + expPrice: big.NewInt(2_000e9), + }, + { + name: "low price truncates to 0", + gasPrice: big.NewInt(1e9), + nativePrice: big.NewInt(1), + expPrice: big.NewInt(0), + }, + { + name: "high price", + gasPrice: val1e18(1), + nativePrice: val1e18(2_000), + expPrice: val1e18(2_000), + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + g := ExecGasPriceEstimator{} + + gasPrice, err := g.DenoteInUSD(tc.gasPrice, tc.nativePrice) + assert.NoError(t, err) + assert.True(t, tc.expPrice.Cmp(gasPrice) == 0) + }) + } +} + +func TestExecPriceEstimator_Median(t *testing.T) { + val1e18 := func(val int64) *big.Int { return new(big.Int).Mul(big.NewInt(1e18), big.NewInt(val)) } + + testCases := []struct { + name string + gasPrices []*big.Int + expMedian *big.Int + }{ + { + name: "base", + gasPrices: []*big.Int{big.NewInt(1), big.NewInt(2), big.NewInt(3)}, + expMedian: big.NewInt(2), + }, + { + name: "median 1", + gasPrices: []*big.Int{big.NewInt(1)}, + expMedian: big.NewInt(1), + }, + { + name: "median 2", + gasPrices: []*big.Int{big.NewInt(1), big.NewInt(2)}, + expMedian: big.NewInt(2), + }, + { + name: "large values", + gasPrices: []*big.Int{val1e18(5), val1e18(4), val1e18(3), val1e18(2), val1e18(1)}, + expMedian: val1e18(3), + }, + { + name: "zeros", + gasPrices: []*big.Int{big.NewInt(0), big.NewInt(0), big.NewInt(0)}, + expMedian: big.NewInt(0), + }, + { + name: "unsorted even number of prices", + gasPrices: []*big.Int{big.NewInt(4), big.NewInt(2), big.NewInt(3), big.NewInt(1)}, + expMedian: big.NewInt(3), + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + g := ExecGasPriceEstimator{} + + gasPrice, err := g.Median(tc.gasPrices) + assert.NoError(t, err) + assert.True(t, tc.expMedian.Cmp(gasPrice) == 0) + }) + } +} + +func TestExecPriceEstimator_Deviates(t *testing.T) { + testCases := []struct { + name string + gasPrice1 *big.Int + gasPrice2 *big.Int + deviationPPB int64 + expDeviates bool + }{ + { + name: "base", + gasPrice1: big.NewInt(100e8), + gasPrice2: big.NewInt(79e8), + deviationPPB: 2e8, + expDeviates: true, + }, + { + name: "negative difference also deviates", + gasPrice1: big.NewInt(100e8), + gasPrice2: big.NewInt(121e8), + deviationPPB: 2e8, + expDeviates: true, + }, + { + name: "larger difference deviates", + gasPrice1: big.NewInt(100e8), + gasPrice2: big.NewInt(70e8), + deviationPPB: 2e8, + expDeviates: true, + }, + { + name: "smaller difference does not 
deviate", + gasPrice1: big.NewInt(100e8), + gasPrice2: big.NewInt(90e8), + deviationPPB: 2e8, + expDeviates: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + g := ExecGasPriceEstimator{ + deviationPPB: tc.deviationPPB, + } + + deviated, err := g.Deviates(tc.gasPrice1, tc.gasPrice2) + assert.NoError(t, err) + if tc.expDeviates { + assert.True(t, deviated) + } else { + assert.False(t, deviated) + } + }) + } +} + +func TestExecPriceEstimator_EstimateMsgCostUSD(t *testing.T) { + testCases := []struct { + name string + gasPrice *big.Int + wrappedNativePrice *big.Int + msg cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta + expUSD *big.Int + }{ + { + name: "base", + gasPrice: big.NewInt(1e9), // 1 gwei + wrappedNativePrice: big.NewInt(1e18), // $1 + msg: cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{ + EVM2EVMMessage: cciptypes.EVM2EVMMessage{ + GasLimit: big.NewInt(100_000), + Data: []byte{}, + TokenAmounts: []cciptypes.TokenAmount{}, + }, + }, + expUSD: big.NewInt(300_000e9), + }, + { + name: "base with data", + gasPrice: big.NewInt(1e9), // 1 gwei + wrappedNativePrice: big.NewInt(1e18), // $1 + msg: cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{ + EVM2EVMMessage: cciptypes.EVM2EVMMessage{ + GasLimit: big.NewInt(100_000), + Data: make([]byte, 1_000), + TokenAmounts: []cciptypes.TokenAmount{}, + }, + }, + expUSD: big.NewInt(316_000e9), + }, + { + name: "base with data and tokens", + gasPrice: big.NewInt(1e9), // 1 gwei + wrappedNativePrice: big.NewInt(1e18), // $1 + msg: cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{ + EVM2EVMMessage: cciptypes.EVM2EVMMessage{ + GasLimit: big.NewInt(100_000), + Data: make([]byte, 1_000), + TokenAmounts: make([]cciptypes.TokenAmount, 5), + }, + }, + expUSD: big.NewInt(366_000e9), + }, + { + name: "empty msg", + gasPrice: big.NewInt(1e9), // 1 gwei + wrappedNativePrice: big.NewInt(1e18), // $1 + msg: cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{ + EVM2EVMMessage: cciptypes.EVM2EVMMessage{ + GasLimit: big.NewInt(0), + Data: []byte{}, + TokenAmounts: []cciptypes.TokenAmount{}, + }, + }, + expUSD: big.NewInt(200_000e9), + }, + { + name: "double native price", + gasPrice: big.NewInt(1e9), // 1 gwei + wrappedNativePrice: big.NewInt(2e18), // $1 + msg: cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{ + EVM2EVMMessage: cciptypes.EVM2EVMMessage{ + GasLimit: big.NewInt(0), + Data: []byte{}, + TokenAmounts: []cciptypes.TokenAmount{}, + }, + }, + expUSD: big.NewInt(400_000e9), + }, + { + name: "zero gas price", + gasPrice: big.NewInt(0), // 1 gwei + wrappedNativePrice: big.NewInt(1e18), // $1 + msg: cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{ + EVM2EVMMessage: cciptypes.EVM2EVMMessage{ + GasLimit: big.NewInt(0), + Data: []byte{}, + TokenAmounts: []cciptypes.TokenAmount{}, + }, + }, + expUSD: big.NewInt(0), + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + g := ExecGasPriceEstimator{} + + costUSD, err := g.EstimateMsgCostUSD(tc.gasPrice, tc.wrappedNativePrice, tc.msg) + assert.NoError(t, err) + assert.Equal(t, tc.expUSD, costUSD) + }) + } +} diff --git a/core/services/ocr2/plugins/ccip/prices/gas_price_estimator.go b/core/services/ocr2/plugins/ccip/prices/gas_price_estimator.go new file mode 100644 index 00000000000..49a6fbcc4ad --- /dev/null +++ b/core/services/ocr2/plugins/ccip/prices/gas_price_estimator.go @@ -0,0 +1,59 @@ +package prices + +import ( + "math/big" + + "github.com/Masterminds/semver/v3" + "github.com/pkg/errors" + + cciptypes 
"github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas" +) + +const ( + feeBoostingOverheadGas = 200_000 + // execGasPerToken is lower-bound estimation of ERC20 releaseOrMint gas cost (Mint with static minter). + // Use this in per-token gas cost calc as heuristic to simplify estimation logic. + execGasPerToken = 10_000 + // execGasPerPayloadByte is gas charged for passing each byte of `data` payload to CCIP receiver, ignores 4 gas per 0-byte rule. + // This can be a constant as it is part of EVM spec. Changes should be rare. + execGasPerPayloadByte = 16 + // evmMessageFixedBytes is byte size of fixed-size fields in EVM2EVMMessage + // Updating EVM2EVMMessage involves an offchain upgrade, safe to keep this as constant in code. + evmMessageFixedBytes = 448 + evmMessageBytesPerToken = 128 // Byte size of each token transfer, consisting of 1 EVMTokenAmount and 1 bytes, excl length of bytes + daMultiplierBase = int64(10000) // DA multiplier is in multiples of 0.0001, i.e. 1/daMultiplierBase + daGasPriceEncodingLength = 112 // Each gas price takes up at most GasPriceEncodingLength number of bits +) + +// GasPriceEstimatorCommit provides gasPriceEstimatorCommon + features needed in commit plugin, e.g. price deviation check. +type GasPriceEstimatorCommit interface { + cciptypes.GasPriceEstimatorCommit +} + +// GasPriceEstimatorExec provides gasPriceEstimatorCommon + features needed in exec plugin, e.g. message cost estimation. +type GasPriceEstimatorExec interface { + cciptypes.GasPriceEstimatorExec +} + +// GasPriceEstimator provides complete gas price estimator functions. +type GasPriceEstimator interface { + cciptypes.GasPriceEstimator +} + +func NewGasPriceEstimatorForCommitPlugin( + commitStoreVersion semver.Version, + estimator gas.EvmFeeEstimator, + maxExecGasPrice *big.Int, + daDeviationPPB int64, + execDeviationPPB int64, +) (GasPriceEstimatorCommit, error) { + switch commitStoreVersion.String() { + case "1.0.0", "1.1.0": + return NewExecGasPriceEstimator(estimator, maxExecGasPrice, execDeviationPPB), nil + case "1.2.0": + return NewDAGasPriceEstimator(estimator, maxExecGasPrice, execDeviationPPB, daDeviationPPB), nil + default: + return nil, errors.Errorf("Invalid commitStore version: %s", commitStoreVersion) + } +} diff --git a/core/services/ocr2/plugins/ccip/prices/gas_price_estimator_commit_mock.go b/core/services/ocr2/plugins/ccip/prices/gas_price_estimator_commit_mock.go new file mode 100644 index 00000000000..0a366a66ac2 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/prices/gas_price_estimator_commit_mock.go @@ -0,0 +1,269 @@ +// Code generated by mockery v2.43.2. DO NOT EDIT. 
+ +package prices + +import ( + context "context" + big "math/big" + + mock "github.com/stretchr/testify/mock" +) + +// MockGasPriceEstimatorCommit is an autogenerated mock type for the GasPriceEstimatorCommit type +type MockGasPriceEstimatorCommit struct { + mock.Mock +} + +type MockGasPriceEstimatorCommit_Expecter struct { + mock *mock.Mock +} + +func (_m *MockGasPriceEstimatorCommit) EXPECT() *MockGasPriceEstimatorCommit_Expecter { + return &MockGasPriceEstimatorCommit_Expecter{mock: &_m.Mock} +} + +// DenoteInUSD provides a mock function with given fields: p, wrappedNativePrice +func (_m *MockGasPriceEstimatorCommit) DenoteInUSD(p *big.Int, wrappedNativePrice *big.Int) (*big.Int, error) { + ret := _m.Called(p, wrappedNativePrice) + + if len(ret) == 0 { + panic("no return value specified for DenoteInUSD") + } + + var r0 *big.Int + var r1 error + if rf, ok := ret.Get(0).(func(*big.Int, *big.Int) (*big.Int, error)); ok { + return rf(p, wrappedNativePrice) + } + if rf, ok := ret.Get(0).(func(*big.Int, *big.Int) *big.Int); ok { + r0 = rf(p, wrappedNativePrice) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*big.Int) + } + } + + if rf, ok := ret.Get(1).(func(*big.Int, *big.Int) error); ok { + r1 = rf(p, wrappedNativePrice) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockGasPriceEstimatorCommit_DenoteInUSD_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DenoteInUSD' +type MockGasPriceEstimatorCommit_DenoteInUSD_Call struct { + *mock.Call +} + +// DenoteInUSD is a helper method to define mock.On call +// - p *big.Int +// - wrappedNativePrice *big.Int +func (_e *MockGasPriceEstimatorCommit_Expecter) DenoteInUSD(p interface{}, wrappedNativePrice interface{}) *MockGasPriceEstimatorCommit_DenoteInUSD_Call { + return &MockGasPriceEstimatorCommit_DenoteInUSD_Call{Call: _e.mock.On("DenoteInUSD", p, wrappedNativePrice)} +} + +func (_c *MockGasPriceEstimatorCommit_DenoteInUSD_Call) Run(run func(p *big.Int, wrappedNativePrice *big.Int)) *MockGasPriceEstimatorCommit_DenoteInUSD_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(*big.Int), args[1].(*big.Int)) + }) + return _c +} + +func (_c *MockGasPriceEstimatorCommit_DenoteInUSD_Call) Return(_a0 *big.Int, _a1 error) *MockGasPriceEstimatorCommit_DenoteInUSD_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockGasPriceEstimatorCommit_DenoteInUSD_Call) RunAndReturn(run func(*big.Int, *big.Int) (*big.Int, error)) *MockGasPriceEstimatorCommit_DenoteInUSD_Call { + _c.Call.Return(run) + return _c +} + +// Deviates provides a mock function with given fields: p1, p2 +func (_m *MockGasPriceEstimatorCommit) Deviates(p1 *big.Int, p2 *big.Int) (bool, error) { + ret := _m.Called(p1, p2) + + if len(ret) == 0 { + panic("no return value specified for Deviates") + } + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(*big.Int, *big.Int) (bool, error)); ok { + return rf(p1, p2) + } + if rf, ok := ret.Get(0).(func(*big.Int, *big.Int) bool); ok { + r0 = rf(p1, p2) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(*big.Int, *big.Int) error); ok { + r1 = rf(p1, p2) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockGasPriceEstimatorCommit_Deviates_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Deviates' +type MockGasPriceEstimatorCommit_Deviates_Call struct { + *mock.Call +} + +// Deviates is a helper method to define mock.On call +// - p1 *big.Int +// - p2 *big.Int +func (_e 
*MockGasPriceEstimatorCommit_Expecter) Deviates(p1 interface{}, p2 interface{}) *MockGasPriceEstimatorCommit_Deviates_Call { + return &MockGasPriceEstimatorCommit_Deviates_Call{Call: _e.mock.On("Deviates", p1, p2)} +} + +func (_c *MockGasPriceEstimatorCommit_Deviates_Call) Run(run func(p1 *big.Int, p2 *big.Int)) *MockGasPriceEstimatorCommit_Deviates_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(*big.Int), args[1].(*big.Int)) + }) + return _c +} + +func (_c *MockGasPriceEstimatorCommit_Deviates_Call) Return(_a0 bool, _a1 error) *MockGasPriceEstimatorCommit_Deviates_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockGasPriceEstimatorCommit_Deviates_Call) RunAndReturn(run func(*big.Int, *big.Int) (bool, error)) *MockGasPriceEstimatorCommit_Deviates_Call { + _c.Call.Return(run) + return _c +} + +// GetGasPrice provides a mock function with given fields: ctx +func (_m *MockGasPriceEstimatorCommit) GetGasPrice(ctx context.Context) (*big.Int, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for GetGasPrice") + } + + var r0 *big.Int + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (*big.Int, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) *big.Int); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*big.Int) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockGasPriceEstimatorCommit_GetGasPrice_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetGasPrice' +type MockGasPriceEstimatorCommit_GetGasPrice_Call struct { + *mock.Call +} + +// GetGasPrice is a helper method to define mock.On call +// - ctx context.Context +func (_e *MockGasPriceEstimatorCommit_Expecter) GetGasPrice(ctx interface{}) *MockGasPriceEstimatorCommit_GetGasPrice_Call { + return &MockGasPriceEstimatorCommit_GetGasPrice_Call{Call: _e.mock.On("GetGasPrice", ctx)} +} + +func (_c *MockGasPriceEstimatorCommit_GetGasPrice_Call) Run(run func(ctx context.Context)) *MockGasPriceEstimatorCommit_GetGasPrice_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *MockGasPriceEstimatorCommit_GetGasPrice_Call) Return(_a0 *big.Int, _a1 error) *MockGasPriceEstimatorCommit_GetGasPrice_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockGasPriceEstimatorCommit_GetGasPrice_Call) RunAndReturn(run func(context.Context) (*big.Int, error)) *MockGasPriceEstimatorCommit_GetGasPrice_Call { + _c.Call.Return(run) + return _c +} + +// Median provides a mock function with given fields: gasPrices +func (_m *MockGasPriceEstimatorCommit) Median(gasPrices []*big.Int) (*big.Int, error) { + ret := _m.Called(gasPrices) + + if len(ret) == 0 { + panic("no return value specified for Median") + } + + var r0 *big.Int + var r1 error + if rf, ok := ret.Get(0).(func([]*big.Int) (*big.Int, error)); ok { + return rf(gasPrices) + } + if rf, ok := ret.Get(0).(func([]*big.Int) *big.Int); ok { + r0 = rf(gasPrices) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*big.Int) + } + } + + if rf, ok := ret.Get(1).(func([]*big.Int) error); ok { + r1 = rf(gasPrices) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockGasPriceEstimatorCommit_Median_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Median' +type MockGasPriceEstimatorCommit_Median_Call struct { + 
*mock.Call +} + +// Median is a helper method to define mock.On call +// - gasPrices []*big.Int +func (_e *MockGasPriceEstimatorCommit_Expecter) Median(gasPrices interface{}) *MockGasPriceEstimatorCommit_Median_Call { + return &MockGasPriceEstimatorCommit_Median_Call{Call: _e.mock.On("Median", gasPrices)} +} + +func (_c *MockGasPriceEstimatorCommit_Median_Call) Run(run func(gasPrices []*big.Int)) *MockGasPriceEstimatorCommit_Median_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].([]*big.Int)) + }) + return _c +} + +func (_c *MockGasPriceEstimatorCommit_Median_Call) Return(_a0 *big.Int, _a1 error) *MockGasPriceEstimatorCommit_Median_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockGasPriceEstimatorCommit_Median_Call) RunAndReturn(run func([]*big.Int) (*big.Int, error)) *MockGasPriceEstimatorCommit_Median_Call { + _c.Call.Return(run) + return _c +} + +// NewMockGasPriceEstimatorCommit creates a new instance of MockGasPriceEstimatorCommit. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewMockGasPriceEstimatorCommit(t interface { + mock.TestingT + Cleanup(func()) +}) *MockGasPriceEstimatorCommit { + mock := &MockGasPriceEstimatorCommit{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/ocr2/plugins/ccip/prices/gas_price_estimator_exec_mock.go b/core/services/ocr2/plugins/ccip/prices/gas_price_estimator_exec_mock.go new file mode 100644 index 00000000000..8f778555b17 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/prices/gas_price_estimator_exec_mock.go @@ -0,0 +1,274 @@ +// Code generated by mockery v2.43.2. DO NOT EDIT. + +package prices + +import ( + context "context" + big "math/big" + + ccip "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + + mock "github.com/stretchr/testify/mock" +) + +// MockGasPriceEstimatorExec is an autogenerated mock type for the GasPriceEstimatorExec type +type MockGasPriceEstimatorExec struct { + mock.Mock +} + +type MockGasPriceEstimatorExec_Expecter struct { + mock *mock.Mock +} + +func (_m *MockGasPriceEstimatorExec) EXPECT() *MockGasPriceEstimatorExec_Expecter { + return &MockGasPriceEstimatorExec_Expecter{mock: &_m.Mock} +} + +// DenoteInUSD provides a mock function with given fields: p, wrappedNativePrice +func (_m *MockGasPriceEstimatorExec) DenoteInUSD(p *big.Int, wrappedNativePrice *big.Int) (*big.Int, error) { + ret := _m.Called(p, wrappedNativePrice) + + if len(ret) == 0 { + panic("no return value specified for DenoteInUSD") + } + + var r0 *big.Int + var r1 error + if rf, ok := ret.Get(0).(func(*big.Int, *big.Int) (*big.Int, error)); ok { + return rf(p, wrappedNativePrice) + } + if rf, ok := ret.Get(0).(func(*big.Int, *big.Int) *big.Int); ok { + r0 = rf(p, wrappedNativePrice) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*big.Int) + } + } + + if rf, ok := ret.Get(1).(func(*big.Int, *big.Int) error); ok { + r1 = rf(p, wrappedNativePrice) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockGasPriceEstimatorExec_DenoteInUSD_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DenoteInUSD' +type MockGasPriceEstimatorExec_DenoteInUSD_Call struct { + *mock.Call +} + +// DenoteInUSD is a helper method to define mock.On call +// - p *big.Int +// - wrappedNativePrice *big.Int +func (_e *MockGasPriceEstimatorExec_Expecter) DenoteInUSD(p interface{}, 
wrappedNativePrice interface{}) *MockGasPriceEstimatorExec_DenoteInUSD_Call { + return &MockGasPriceEstimatorExec_DenoteInUSD_Call{Call: _e.mock.On("DenoteInUSD", p, wrappedNativePrice)} +} + +func (_c *MockGasPriceEstimatorExec_DenoteInUSD_Call) Run(run func(p *big.Int, wrappedNativePrice *big.Int)) *MockGasPriceEstimatorExec_DenoteInUSD_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(*big.Int), args[1].(*big.Int)) + }) + return _c +} + +func (_c *MockGasPriceEstimatorExec_DenoteInUSD_Call) Return(_a0 *big.Int, _a1 error) *MockGasPriceEstimatorExec_DenoteInUSD_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockGasPriceEstimatorExec_DenoteInUSD_Call) RunAndReturn(run func(*big.Int, *big.Int) (*big.Int, error)) *MockGasPriceEstimatorExec_DenoteInUSD_Call { + _c.Call.Return(run) + return _c +} + +// EstimateMsgCostUSD provides a mock function with given fields: p, wrappedNativePrice, msg +func (_m *MockGasPriceEstimatorExec) EstimateMsgCostUSD(p *big.Int, wrappedNativePrice *big.Int, msg ccip.EVM2EVMOnRampCCIPSendRequestedWithMeta) (*big.Int, error) { + ret := _m.Called(p, wrappedNativePrice, msg) + + if len(ret) == 0 { + panic("no return value specified for EstimateMsgCostUSD") + } + + var r0 *big.Int + var r1 error + if rf, ok := ret.Get(0).(func(*big.Int, *big.Int, ccip.EVM2EVMOnRampCCIPSendRequestedWithMeta) (*big.Int, error)); ok { + return rf(p, wrappedNativePrice, msg) + } + if rf, ok := ret.Get(0).(func(*big.Int, *big.Int, ccip.EVM2EVMOnRampCCIPSendRequestedWithMeta) *big.Int); ok { + r0 = rf(p, wrappedNativePrice, msg) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*big.Int) + } + } + + if rf, ok := ret.Get(1).(func(*big.Int, *big.Int, ccip.EVM2EVMOnRampCCIPSendRequestedWithMeta) error); ok { + r1 = rf(p, wrappedNativePrice, msg) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockGasPriceEstimatorExec_EstimateMsgCostUSD_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'EstimateMsgCostUSD' +type MockGasPriceEstimatorExec_EstimateMsgCostUSD_Call struct { + *mock.Call +} + +// EstimateMsgCostUSD is a helper method to define mock.On call +// - p *big.Int +// - wrappedNativePrice *big.Int +// - msg ccip.EVM2EVMOnRampCCIPSendRequestedWithMeta +func (_e *MockGasPriceEstimatorExec_Expecter) EstimateMsgCostUSD(p interface{}, wrappedNativePrice interface{}, msg interface{}) *MockGasPriceEstimatorExec_EstimateMsgCostUSD_Call { + return &MockGasPriceEstimatorExec_EstimateMsgCostUSD_Call{Call: _e.mock.On("EstimateMsgCostUSD", p, wrappedNativePrice, msg)} +} + +func (_c *MockGasPriceEstimatorExec_EstimateMsgCostUSD_Call) Run(run func(p *big.Int, wrappedNativePrice *big.Int, msg ccip.EVM2EVMOnRampCCIPSendRequestedWithMeta)) *MockGasPriceEstimatorExec_EstimateMsgCostUSD_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(*big.Int), args[1].(*big.Int), args[2].(ccip.EVM2EVMOnRampCCIPSendRequestedWithMeta)) + }) + return _c +} + +func (_c *MockGasPriceEstimatorExec_EstimateMsgCostUSD_Call) Return(_a0 *big.Int, _a1 error) *MockGasPriceEstimatorExec_EstimateMsgCostUSD_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockGasPriceEstimatorExec_EstimateMsgCostUSD_Call) RunAndReturn(run func(*big.Int, *big.Int, ccip.EVM2EVMOnRampCCIPSendRequestedWithMeta) (*big.Int, error)) *MockGasPriceEstimatorExec_EstimateMsgCostUSD_Call { + _c.Call.Return(run) + return _c +} + +// GetGasPrice provides a mock function with given fields: ctx +func (_m *MockGasPriceEstimatorExec) GetGasPrice(ctx 
context.Context) (*big.Int, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for GetGasPrice") + } + + var r0 *big.Int + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (*big.Int, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) *big.Int); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*big.Int) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockGasPriceEstimatorExec_GetGasPrice_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetGasPrice' +type MockGasPriceEstimatorExec_GetGasPrice_Call struct { + *mock.Call +} + +// GetGasPrice is a helper method to define mock.On call +// - ctx context.Context +func (_e *MockGasPriceEstimatorExec_Expecter) GetGasPrice(ctx interface{}) *MockGasPriceEstimatorExec_GetGasPrice_Call { + return &MockGasPriceEstimatorExec_GetGasPrice_Call{Call: _e.mock.On("GetGasPrice", ctx)} +} + +func (_c *MockGasPriceEstimatorExec_GetGasPrice_Call) Run(run func(ctx context.Context)) *MockGasPriceEstimatorExec_GetGasPrice_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *MockGasPriceEstimatorExec_GetGasPrice_Call) Return(_a0 *big.Int, _a1 error) *MockGasPriceEstimatorExec_GetGasPrice_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockGasPriceEstimatorExec_GetGasPrice_Call) RunAndReturn(run func(context.Context) (*big.Int, error)) *MockGasPriceEstimatorExec_GetGasPrice_Call { + _c.Call.Return(run) + return _c +} + +// Median provides a mock function with given fields: gasPrices +func (_m *MockGasPriceEstimatorExec) Median(gasPrices []*big.Int) (*big.Int, error) { + ret := _m.Called(gasPrices) + + if len(ret) == 0 { + panic("no return value specified for Median") + } + + var r0 *big.Int + var r1 error + if rf, ok := ret.Get(0).(func([]*big.Int) (*big.Int, error)); ok { + return rf(gasPrices) + } + if rf, ok := ret.Get(0).(func([]*big.Int) *big.Int); ok { + r0 = rf(gasPrices) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*big.Int) + } + } + + if rf, ok := ret.Get(1).(func([]*big.Int) error); ok { + r1 = rf(gasPrices) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockGasPriceEstimatorExec_Median_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Median' +type MockGasPriceEstimatorExec_Median_Call struct { + *mock.Call +} + +// Median is a helper method to define mock.On call +// - gasPrices []*big.Int +func (_e *MockGasPriceEstimatorExec_Expecter) Median(gasPrices interface{}) *MockGasPriceEstimatorExec_Median_Call { + return &MockGasPriceEstimatorExec_Median_Call{Call: _e.mock.On("Median", gasPrices)} +} + +func (_c *MockGasPriceEstimatorExec_Median_Call) Run(run func(gasPrices []*big.Int)) *MockGasPriceEstimatorExec_Median_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].([]*big.Int)) + }) + return _c +} + +func (_c *MockGasPriceEstimatorExec_Median_Call) Return(_a0 *big.Int, _a1 error) *MockGasPriceEstimatorExec_Median_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockGasPriceEstimatorExec_Median_Call) RunAndReturn(run func([]*big.Int) (*big.Int, error)) *MockGasPriceEstimatorExec_Median_Call { + _c.Call.Return(run) + return _c +} + +// NewMockGasPriceEstimatorExec creates a new instance of MockGasPriceEstimatorExec. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewMockGasPriceEstimatorExec(t interface { + mock.TestingT + Cleanup(func()) +}) *MockGasPriceEstimatorExec { + mock := &MockGasPriceEstimatorExec{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/ocr2/plugins/ccip/prices/gas_price_estimator_mock.go b/core/services/ocr2/plugins/ccip/prices/gas_price_estimator_mock.go new file mode 100644 index 00000000000..a513083319d --- /dev/null +++ b/core/services/ocr2/plugins/ccip/prices/gas_price_estimator_mock.go @@ -0,0 +1,331 @@ +// Code generated by mockery v2.43.2. DO NOT EDIT. + +package prices + +import ( + context "context" + big "math/big" + + ccip "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + + mock "github.com/stretchr/testify/mock" +) + +// MockGasPriceEstimator is an autogenerated mock type for the GasPriceEstimator type +type MockGasPriceEstimator struct { + mock.Mock +} + +type MockGasPriceEstimator_Expecter struct { + mock *mock.Mock +} + +func (_m *MockGasPriceEstimator) EXPECT() *MockGasPriceEstimator_Expecter { + return &MockGasPriceEstimator_Expecter{mock: &_m.Mock} +} + +// DenoteInUSD provides a mock function with given fields: p, wrappedNativePrice +func (_m *MockGasPriceEstimator) DenoteInUSD(p *big.Int, wrappedNativePrice *big.Int) (*big.Int, error) { + ret := _m.Called(p, wrappedNativePrice) + + if len(ret) == 0 { + panic("no return value specified for DenoteInUSD") + } + + var r0 *big.Int + var r1 error + if rf, ok := ret.Get(0).(func(*big.Int, *big.Int) (*big.Int, error)); ok { + return rf(p, wrappedNativePrice) + } + if rf, ok := ret.Get(0).(func(*big.Int, *big.Int) *big.Int); ok { + r0 = rf(p, wrappedNativePrice) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*big.Int) + } + } + + if rf, ok := ret.Get(1).(func(*big.Int, *big.Int) error); ok { + r1 = rf(p, wrappedNativePrice) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockGasPriceEstimator_DenoteInUSD_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DenoteInUSD' +type MockGasPriceEstimator_DenoteInUSD_Call struct { + *mock.Call +} + +// DenoteInUSD is a helper method to define mock.On call +// - p *big.Int +// - wrappedNativePrice *big.Int +func (_e *MockGasPriceEstimator_Expecter) DenoteInUSD(p interface{}, wrappedNativePrice interface{}) *MockGasPriceEstimator_DenoteInUSD_Call { + return &MockGasPriceEstimator_DenoteInUSD_Call{Call: _e.mock.On("DenoteInUSD", p, wrappedNativePrice)} +} + +func (_c *MockGasPriceEstimator_DenoteInUSD_Call) Run(run func(p *big.Int, wrappedNativePrice *big.Int)) *MockGasPriceEstimator_DenoteInUSD_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(*big.Int), args[1].(*big.Int)) + }) + return _c +} + +func (_c *MockGasPriceEstimator_DenoteInUSD_Call) Return(_a0 *big.Int, _a1 error) *MockGasPriceEstimator_DenoteInUSD_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockGasPriceEstimator_DenoteInUSD_Call) RunAndReturn(run func(*big.Int, *big.Int) (*big.Int, error)) *MockGasPriceEstimator_DenoteInUSD_Call { + _c.Call.Return(run) + return _c +} + +// Deviates provides a mock function with given fields: p1, p2 +func (_m *MockGasPriceEstimator) Deviates(p1 *big.Int, p2 *big.Int) (bool, error) { + ret := _m.Called(p1, p2) + + if len(ret) == 0 { + panic("no return value specified for Deviates") + } + + 
var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(*big.Int, *big.Int) (bool, error)); ok { + return rf(p1, p2) + } + if rf, ok := ret.Get(0).(func(*big.Int, *big.Int) bool); ok { + r0 = rf(p1, p2) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(*big.Int, *big.Int) error); ok { + r1 = rf(p1, p2) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockGasPriceEstimator_Deviates_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Deviates' +type MockGasPriceEstimator_Deviates_Call struct { + *mock.Call +} + +// Deviates is a helper method to define mock.On call +// - p1 *big.Int +// - p2 *big.Int +func (_e *MockGasPriceEstimator_Expecter) Deviates(p1 interface{}, p2 interface{}) *MockGasPriceEstimator_Deviates_Call { + return &MockGasPriceEstimator_Deviates_Call{Call: _e.mock.On("Deviates", p1, p2)} +} + +func (_c *MockGasPriceEstimator_Deviates_Call) Run(run func(p1 *big.Int, p2 *big.Int)) *MockGasPriceEstimator_Deviates_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(*big.Int), args[1].(*big.Int)) + }) + return _c +} + +func (_c *MockGasPriceEstimator_Deviates_Call) Return(_a0 bool, _a1 error) *MockGasPriceEstimator_Deviates_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockGasPriceEstimator_Deviates_Call) RunAndReturn(run func(*big.Int, *big.Int) (bool, error)) *MockGasPriceEstimator_Deviates_Call { + _c.Call.Return(run) + return _c +} + +// EstimateMsgCostUSD provides a mock function with given fields: p, wrappedNativePrice, msg +func (_m *MockGasPriceEstimator) EstimateMsgCostUSD(p *big.Int, wrappedNativePrice *big.Int, msg ccip.EVM2EVMOnRampCCIPSendRequestedWithMeta) (*big.Int, error) { + ret := _m.Called(p, wrappedNativePrice, msg) + + if len(ret) == 0 { + panic("no return value specified for EstimateMsgCostUSD") + } + + var r0 *big.Int + var r1 error + if rf, ok := ret.Get(0).(func(*big.Int, *big.Int, ccip.EVM2EVMOnRampCCIPSendRequestedWithMeta) (*big.Int, error)); ok { + return rf(p, wrappedNativePrice, msg) + } + if rf, ok := ret.Get(0).(func(*big.Int, *big.Int, ccip.EVM2EVMOnRampCCIPSendRequestedWithMeta) *big.Int); ok { + r0 = rf(p, wrappedNativePrice, msg) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*big.Int) + } + } + + if rf, ok := ret.Get(1).(func(*big.Int, *big.Int, ccip.EVM2EVMOnRampCCIPSendRequestedWithMeta) error); ok { + r1 = rf(p, wrappedNativePrice, msg) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockGasPriceEstimator_EstimateMsgCostUSD_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'EstimateMsgCostUSD' +type MockGasPriceEstimator_EstimateMsgCostUSD_Call struct { + *mock.Call +} + +// EstimateMsgCostUSD is a helper method to define mock.On call +// - p *big.Int +// - wrappedNativePrice *big.Int +// - msg ccip.EVM2EVMOnRampCCIPSendRequestedWithMeta +func (_e *MockGasPriceEstimator_Expecter) EstimateMsgCostUSD(p interface{}, wrappedNativePrice interface{}, msg interface{}) *MockGasPriceEstimator_EstimateMsgCostUSD_Call { + return &MockGasPriceEstimator_EstimateMsgCostUSD_Call{Call: _e.mock.On("EstimateMsgCostUSD", p, wrappedNativePrice, msg)} +} + +func (_c *MockGasPriceEstimator_EstimateMsgCostUSD_Call) Run(run func(p *big.Int, wrappedNativePrice *big.Int, msg ccip.EVM2EVMOnRampCCIPSendRequestedWithMeta)) *MockGasPriceEstimator_EstimateMsgCostUSD_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(*big.Int), args[1].(*big.Int), 
args[2].(ccip.EVM2EVMOnRampCCIPSendRequestedWithMeta)) + }) + return _c +} + +func (_c *MockGasPriceEstimator_EstimateMsgCostUSD_Call) Return(_a0 *big.Int, _a1 error) *MockGasPriceEstimator_EstimateMsgCostUSD_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockGasPriceEstimator_EstimateMsgCostUSD_Call) RunAndReturn(run func(*big.Int, *big.Int, ccip.EVM2EVMOnRampCCIPSendRequestedWithMeta) (*big.Int, error)) *MockGasPriceEstimator_EstimateMsgCostUSD_Call { + _c.Call.Return(run) + return _c +} + +// GetGasPrice provides a mock function with given fields: ctx +func (_m *MockGasPriceEstimator) GetGasPrice(ctx context.Context) (*big.Int, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for GetGasPrice") + } + + var r0 *big.Int + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (*big.Int, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) *big.Int); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*big.Int) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockGasPriceEstimator_GetGasPrice_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetGasPrice' +type MockGasPriceEstimator_GetGasPrice_Call struct { + *mock.Call +} + +// GetGasPrice is a helper method to define mock.On call +// - ctx context.Context +func (_e *MockGasPriceEstimator_Expecter) GetGasPrice(ctx interface{}) *MockGasPriceEstimator_GetGasPrice_Call { + return &MockGasPriceEstimator_GetGasPrice_Call{Call: _e.mock.On("GetGasPrice", ctx)} +} + +func (_c *MockGasPriceEstimator_GetGasPrice_Call) Run(run func(ctx context.Context)) *MockGasPriceEstimator_GetGasPrice_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *MockGasPriceEstimator_GetGasPrice_Call) Return(_a0 *big.Int, _a1 error) *MockGasPriceEstimator_GetGasPrice_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockGasPriceEstimator_GetGasPrice_Call) RunAndReturn(run func(context.Context) (*big.Int, error)) *MockGasPriceEstimator_GetGasPrice_Call { + _c.Call.Return(run) + return _c +} + +// Median provides a mock function with given fields: gasPrices +func (_m *MockGasPriceEstimator) Median(gasPrices []*big.Int) (*big.Int, error) { + ret := _m.Called(gasPrices) + + if len(ret) == 0 { + panic("no return value specified for Median") + } + + var r0 *big.Int + var r1 error + if rf, ok := ret.Get(0).(func([]*big.Int) (*big.Int, error)); ok { + return rf(gasPrices) + } + if rf, ok := ret.Get(0).(func([]*big.Int) *big.Int); ok { + r0 = rf(gasPrices) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*big.Int) + } + } + + if rf, ok := ret.Get(1).(func([]*big.Int) error); ok { + r1 = rf(gasPrices) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockGasPriceEstimator_Median_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Median' +type MockGasPriceEstimator_Median_Call struct { + *mock.Call +} + +// Median is a helper method to define mock.On call +// - gasPrices []*big.Int +func (_e *MockGasPriceEstimator_Expecter) Median(gasPrices interface{}) *MockGasPriceEstimator_Median_Call { + return &MockGasPriceEstimator_Median_Call{Call: _e.mock.On("Median", gasPrices)} +} + +func (_c *MockGasPriceEstimator_Median_Call) Run(run func(gasPrices []*big.Int)) *MockGasPriceEstimator_Median_Call { + 
_c.Call.Run(func(args mock.Arguments) { + run(args[0].([]*big.Int)) + }) + return _c +} + +func (_c *MockGasPriceEstimator_Median_Call) Return(_a0 *big.Int, _a1 error) *MockGasPriceEstimator_Median_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockGasPriceEstimator_Median_Call) RunAndReturn(run func([]*big.Int) (*big.Int, error)) *MockGasPriceEstimator_Median_Call { + _c.Call.Return(run) + return _c +} + +// NewMockGasPriceEstimator creates a new instance of MockGasPriceEstimator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewMockGasPriceEstimator(t interface { + mock.TestingT + Cleanup(func()) +}) *MockGasPriceEstimator { + mock := &MockGasPriceEstimator{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/ocr2/plugins/ccip/proxycommitstore.go b/core/services/ocr2/plugins/ccip/proxycommitstore.go new file mode 100644 index 00000000000..b06f957bd58 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/proxycommitstore.go @@ -0,0 +1,135 @@ +package ccip + +import ( + "context" + "fmt" + "io" + "math/big" + "time" + + "go.uber.org/multierr" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" +) + +// The disjunct methods in IncompleteSourceCommitStoreReader and IncompleteDestCommitStoreReader satisfy the full +// CommitStoreReader iface in Union +var _ cciptypes.CommitStoreReader = (*ProviderProxyCommitStoreReader)(nil) + +// ProviderProxyCommitStoreReader is a CommitStoreReader that proxies to two custom provider grpc backed implementations +// of a CommitStoreReader. +// [ProviderProxyCommitStoreReader] lives in the memory space of the reporting plugin factory and reporting plugin, and should have no chain-specific details. +// Why? Historical implementations of a commit store consumed in reporting plugins mixed usage of a gas estimator from +// the source relayer and contract read and write abilities to a dest relayer. This is not valid in LOOP world. +type ProviderProxyCommitStoreReader struct { + srcCommitStoreReader IncompleteSourceCommitStoreReader + dstCommitStoreReader IncompleteDestCommitStoreReader +} + +// IncompleteSourceCommitStoreReader contains only the methods of CommitStoreReader that are serviced by the source chain/relayer. +type IncompleteSourceCommitStoreReader interface { + ChangeConfig(ctx context.Context, onchainConfig []byte, offchainConfig []byte) (cciptypes.Address, error) + GasPriceEstimator(ctx context.Context) (cciptypes.GasPriceEstimatorCommit, error) + OffchainConfig(ctx context.Context) (cciptypes.CommitOffchainConfig, error) + io.Closer +} + +// IncompleteDestCommitStoreReader contains only the methods of CommitStoreReader that are serviced by the dest chain/relayer. 
+type IncompleteDestCommitStoreReader interface { + DecodeCommitReport(ctx context.Context, report []byte) (cciptypes.CommitStoreReport, error) + EncodeCommitReport(ctx context.Context, report cciptypes.CommitStoreReport) ([]byte, error) + GetAcceptedCommitReportsGteTimestamp(ctx context.Context, ts time.Time, confirmations int) ([]cciptypes.CommitStoreReportWithTxMeta, error) + GetCommitReportMatchingSeqNum(ctx context.Context, seqNum uint64, confirmations int) ([]cciptypes.CommitStoreReportWithTxMeta, error) + GetCommitStoreStaticConfig(ctx context.Context) (cciptypes.CommitStoreStaticConfig, error) + GetExpectedNextSequenceNumber(ctx context.Context) (uint64, error) + GetLatestPriceEpochAndRound(ctx context.Context) (uint64, error) + IsBlessed(ctx context.Context, root [32]byte) (bool, error) + IsDestChainHealthy(ctx context.Context) (bool, error) + IsDown(ctx context.Context) (bool, error) + VerifyExecutionReport(ctx context.Context, report cciptypes.ExecReport) (bool, error) + io.Closer +} + +func NewProviderProxyCommitStoreReader(srcReader cciptypes.CommitStoreReader, dstReader cciptypes.CommitStoreReader) *ProviderProxyCommitStoreReader { + return &ProviderProxyCommitStoreReader{ + srcCommitStoreReader: srcReader, + dstCommitStoreReader: dstReader, + } +} + +// ChangeConfig updates the offchainConfig values for the source relayer gas estimator by calling ChangeConfig +// on the source relayer. Once this is called, GasPriceEstimator and OffchainConfig can be called. +func (p *ProviderProxyCommitStoreReader) ChangeConfig(ctx context.Context, onchainConfig []byte, offchainConfig []byte) (cciptypes.Address, error) { + return p.srcCommitStoreReader.ChangeConfig(ctx, onchainConfig, offchainConfig) +} + +func (p *ProviderProxyCommitStoreReader) DecodeCommitReport(ctx context.Context, report []byte) (cciptypes.CommitStoreReport, error) { + return p.dstCommitStoreReader.DecodeCommitReport(ctx, report) +} + +func (p *ProviderProxyCommitStoreReader) EncodeCommitReport(ctx context.Context, report cciptypes.CommitStoreReport) ([]byte, error) { + return p.dstCommitStoreReader.EncodeCommitReport(ctx, report) +} + +// GasPriceEstimator constructs a gas price estimator on the source relayer +func (p *ProviderProxyCommitStoreReader) GasPriceEstimator(ctx context.Context) (cciptypes.GasPriceEstimatorCommit, error) { + return p.srcCommitStoreReader.GasPriceEstimator(ctx) +} + +func (p *ProviderProxyCommitStoreReader) GetAcceptedCommitReportsGteTimestamp(ctx context.Context, ts time.Time, confirmations int) ([]cciptypes.CommitStoreReportWithTxMeta, error) { + return p.dstCommitStoreReader.GetAcceptedCommitReportsGteTimestamp(ctx, ts, confirmations) +} + +func (p *ProviderProxyCommitStoreReader) GetCommitReportMatchingSeqNum(ctx context.Context, seqNum uint64, confirmations int) ([]cciptypes.CommitStoreReportWithTxMeta, error) { + return p.dstCommitStoreReader.GetCommitReportMatchingSeqNum(ctx, seqNum, confirmations) +} + +func (p *ProviderProxyCommitStoreReader) GetCommitStoreStaticConfig(ctx context.Context) (cciptypes.CommitStoreStaticConfig, error) { + return p.dstCommitStoreReader.GetCommitStoreStaticConfig(ctx) +} + +func (p *ProviderProxyCommitStoreReader) GetExpectedNextSequenceNumber(ctx context.Context) (uint64, error) { + return p.dstCommitStoreReader.GetExpectedNextSequenceNumber(ctx) +} + +func (p *ProviderProxyCommitStoreReader) GetLatestPriceEpochAndRound(ctx context.Context) (uint64, error) { + return p.dstCommitStoreReader.GetLatestPriceEpochAndRound(ctx) +} + +func (p 
*ProviderProxyCommitStoreReader) IsBlessed(ctx context.Context, root [32]byte) (bool, error) { + return p.dstCommitStoreReader.IsBlessed(ctx, root) +} + +func (p *ProviderProxyCommitStoreReader) IsDestChainHealthy(ctx context.Context) (bool, error) { + return p.dstCommitStoreReader.IsDestChainHealthy(ctx) +} + +func (p *ProviderProxyCommitStoreReader) IsDown(ctx context.Context) (bool, error) { + return p.dstCommitStoreReader.IsDown(ctx) +} + +func (p *ProviderProxyCommitStoreReader) OffchainConfig(ctx context.Context) (cciptypes.CommitOffchainConfig, error) { + return p.srcCommitStoreReader.OffchainConfig(ctx) +} + +func (p *ProviderProxyCommitStoreReader) VerifyExecutionReport(ctx context.Context, report cciptypes.ExecReport) (bool, error) { + return p.dstCommitStoreReader.VerifyExecutionReport(ctx, report) +} + +// SetGasEstimator is invalid on ProviderProxyCommitStoreReader. The provider based impl's do not have SetGasEstimator +// defined, so this serves no purpose other than satisfying an interface. +func (p *ProviderProxyCommitStoreReader) SetGasEstimator(ctx context.Context, gpe gas.EvmFeeEstimator) error { + return fmt.Errorf("invalid usage of ProviderProxyCommitStoreReader") +} + +// SetSourceMaxGasPrice is invalid on ProviderProxyCommitStoreReader. The provider based impl's do not have SetSourceMaxGasPrice +// defined, so this serves no purpose other than satisfying an interface. +func (p *ProviderProxyCommitStoreReader) SetSourceMaxGasPrice(ctx context.Context, sourceMaxGasPrice *big.Int) error { + return fmt.Errorf("invalid usage of ProviderProxyCommitStoreReader") +} + +func (p *ProviderProxyCommitStoreReader) Close() error { + return multierr.Append(p.srcCommitStoreReader.Close(), p.dstCommitStoreReader.Close()) +} diff --git a/core/services/ocr2/plugins/ccip/testhelpers/ccip_contracts.go b/core/services/ocr2/plugins/ccip/testhelpers/ccip_contracts.go new file mode 100644 index 00000000000..805c49d91aa --- /dev/null +++ b/core/services/ocr2/plugins/ccip/testhelpers/ccip_contracts.go @@ -0,0 +1,1580 @@ +package testhelpers + +import ( + "context" + "fmt" + "math" + "math/big" + "testing" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core/types" + "github.com/pkg/errors" + "github.com/rs/zerolog/log" + "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/libocr/offchainreporting2/confighelper" + ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2/types" + ocr2types "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + + "github.com/smartcontractkit/chainlink-common/pkg/config" + + "github.com/smartcontractkit/chainlink-common/pkg/hashutil" + "github.com/smartcontractkit/chainlink-common/pkg/merklemulti" + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/arm_proxy_contract" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/commit_store" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/commit_store_helper" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/commit_store_helper_1_2_0" + 
"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_offramp" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_onramp" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_onramp_1_2_0" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/lock_release_token_pool" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/maybe_revert_message_receiver" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/mock_arm_contract" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/price_registry_1_2_0" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/router" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/token_admin_registry" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/weth9" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/link_token_interface" + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers" + ccipconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_5_0" +) + +var ( + // Source + SourcePool = "source Link pool" + SourcePriceRegistry = "source PriceRegistry" + OnRamp = "onramp" + OnRampNative = "onramp-native" + SourceRouter = "source router" + + // Dest + OffRamp = "offramp" + DestPool = "dest Link pool" + + Receiver = "receiver" + Sender = "sender" + Link = func(amount int64) *big.Int { return new(big.Int).Mul(big.NewInt(1e18), big.NewInt(amount)) } + HundredLink = Link(100) + LinkUSDValue = func(amount int64) *big.Int { return new(big.Int).Mul(big.NewInt(1e18), big.NewInt(amount)) } + SourceChainID = uint64(1000) + SourceChainSelector = uint64(11787463284727550157) + DestChainID = uint64(1337) + DestChainSelector = uint64(3379446385462418246) +) + +// Backwards compat, in principle these statuses are version dependent +// TODO: Adjust integration tests to be version agnostic using readers +var ( + ExecutionStateSuccess = MessageExecutionState(cciptypes.ExecutionStateSuccess) + ExecutionStateFailure = MessageExecutionState(cciptypes.ExecutionStateFailure) +) + +type MessageExecutionState cciptypes.MessageExecutionState +type CommitOffchainConfig struct { + v1_2_0.JSONCommitOffchainConfig +} + +func (c CommitOffchainConfig) Encode() ([]byte, error) { + return ccipconfig.EncodeOffchainConfig(c.JSONCommitOffchainConfig) +} + +func NewCommitOffchainConfig( + GasPriceHeartBeat config.Duration, + DAGasPriceDeviationPPB uint32, + ExecGasPriceDeviationPPB uint32, + TokenPriceHeartBeat config.Duration, + TokenPriceDeviationPPB uint32, + InflightCacheExpiry config.Duration) CommitOffchainConfig { + return CommitOffchainConfig{v1_2_0.JSONCommitOffchainConfig{ + GasPriceHeartBeat: GasPriceHeartBeat, + DAGasPriceDeviationPPB: DAGasPriceDeviationPPB, + ExecGasPriceDeviationPPB: ExecGasPriceDeviationPPB, + TokenPriceHeartBeat: TokenPriceHeartBeat, + TokenPriceDeviationPPB: TokenPriceDeviationPPB, + InflightCacheExpiry: InflightCacheExpiry, + }} +} + +type CommitOnchainConfig struct { + 
ccipdata.CommitOnchainConfig +} + +func NewCommitOnchainConfig( + PriceRegistry common.Address, +) CommitOnchainConfig { + return CommitOnchainConfig{ccipdata.CommitOnchainConfig{ + PriceRegistry: PriceRegistry, + }} +} + +type ExecOnchainConfig struct { + v1_5_0.ExecOnchainConfig +} + +func NewExecOnchainConfig( + PermissionLessExecutionThresholdSeconds uint32, + Router common.Address, + PriceRegistry common.Address, + MaxNumberOfTokensPerMsg uint16, + MaxDataBytes uint32, + MaxPoolReleaseOrMintGas uint32, + MaxTokenTransferGas uint32, +) ExecOnchainConfig { + return ExecOnchainConfig{v1_5_0.ExecOnchainConfig{ + PermissionLessExecutionThresholdSeconds: PermissionLessExecutionThresholdSeconds, + Router: Router, + PriceRegistry: PriceRegistry, + MaxNumberOfTokensPerMsg: MaxNumberOfTokensPerMsg, + MaxDataBytes: MaxDataBytes, + MaxPoolReleaseOrMintGas: MaxPoolReleaseOrMintGas, + MaxTokenTransferGas: MaxTokenTransferGas, + }} +} + +type ExecOffchainConfig struct { + v1_2_0.JSONExecOffchainConfig +} + +func (c ExecOffchainConfig) Encode() ([]byte, error) { + return ccipconfig.EncodeOffchainConfig(c.JSONExecOffchainConfig) +} + +func NewExecOffchainConfig( + DestOptimisticConfirmations uint32, + BatchGasLimit uint32, + RelativeBoostPerWaitHour float64, + InflightCacheExpiry config.Duration, + RootSnoozeTime config.Duration, +) ExecOffchainConfig { + return ExecOffchainConfig{v1_2_0.JSONExecOffchainConfig{ + DestOptimisticConfirmations: DestOptimisticConfirmations, + BatchGasLimit: BatchGasLimit, + RelativeBoostPerWaitHour: RelativeBoostPerWaitHour, + InflightCacheExpiry: InflightCacheExpiry, + RootSnoozeTime: RootSnoozeTime, + }} +} + +type MaybeRevertReceiver struct { + Receiver *maybe_revert_message_receiver.MaybeRevertMessageReceiver + Strict bool +} + +type Common struct { + ChainID uint64 + ChainSelector uint64 + User *bind.TransactOpts + Chain *backends.SimulatedBackend + LinkToken *link_token_interface.LinkToken + LinkTokenPool *lock_release_token_pool.LockReleaseTokenPool + CustomToken *link_token_interface.LinkToken + WrappedNative *weth9.WETH9 + WrappedNativePool *lock_release_token_pool.LockReleaseTokenPool + ARM *mock_arm_contract.MockARMContract + ARMProxy *arm_proxy_contract.ARMProxyContract + PriceRegistry *price_registry_1_2_0.PriceRegistry + TokenAdminRegistry *token_admin_registry.TokenAdminRegistry +} + +type SourceChain struct { + Common + Router *router.Router + OnRamp *evm_2_evm_onramp.EVM2EVMOnRamp +} + +type DestinationChain struct { + Common + + CommitStoreHelper *commit_store_helper.CommitStoreHelper + CommitStore *commit_store.CommitStore + Router *router.Router + OffRamp *evm_2_evm_offramp.EVM2EVMOffRamp + Receivers []MaybeRevertReceiver +} + +type OCR2Config struct { + Signers []common.Address + Transmitters []common.Address + F uint8 + OnchainConfig []byte + OffchainConfigVersion uint64 + OffchainConfig []byte +} + +type BalanceAssertion struct { + Name string + Address common.Address + Expected string + Getter func(t *testing.T, addr common.Address) *big.Int + Within string +} + +type BalanceReq struct { + Name string + Addr common.Address + Getter func(t *testing.T, addr common.Address) *big.Int +} + +type CCIPContracts struct { + Source SourceChain + Dest DestinationChain + Oracles []confighelper.OracleIdentityExtra + + commitOCRConfig, execOCRConfig *OCR2Config +} + +func (c *CCIPContracts) DeployNewOffRamp(t *testing.T) { + prevOffRamp := common.HexToAddress("") + if c.Dest.OffRamp != nil { + prevOffRamp = c.Dest.OffRamp.Address() + } + offRampAddress, _, _, 
err := evm_2_evm_offramp.DeployEVM2EVMOffRamp( + c.Dest.User, + c.Dest.Chain, + evm_2_evm_offramp.EVM2EVMOffRampStaticConfig{ + CommitStore: c.Dest.CommitStore.Address(), + ChainSelector: c.Dest.ChainSelector, + SourceChainSelector: c.Source.ChainSelector, + OnRamp: c.Source.OnRamp.Address(), + PrevOffRamp: prevOffRamp, + RmnProxy: c.Dest.ARMProxy.Address(), // RMN formerly ARM + TokenAdminRegistry: c.Dest.TokenAdminRegistry.Address(), + }, + evm_2_evm_offramp.RateLimiterConfig{ + IsEnabled: true, + Capacity: LinkUSDValue(100), + Rate: LinkUSDValue(1), + }, + ) + require.NoError(t, err) + c.Dest.Chain.Commit() + + c.Dest.OffRamp, err = evm_2_evm_offramp.NewEVM2EVMOffRamp(offRampAddress, c.Dest.Chain) + require.NoError(t, err) + + c.Dest.Chain.Commit() + c.Source.Chain.Commit() +} + +func (c *CCIPContracts) EnableOffRamp(t *testing.T) { + _, err := c.Dest.Router.ApplyRampUpdates(c.Dest.User, nil, nil, []router.RouterOffRamp{{SourceChainSelector: SourceChainSelector, OffRamp: c.Dest.OffRamp.Address()}}) + require.NoError(t, err) + c.Dest.Chain.Commit() + + onChainConfig := c.CreateDefaultExecOnchainConfig(t) + offChainConfig := c.CreateDefaultExecOffchainConfig(t) + + c.SetupExecOCR2Config(t, onChainConfig, offChainConfig) +} + +func (c *CCIPContracts) EnableCommitStore(t *testing.T) { + onChainConfig := c.CreateDefaultCommitOnchainConfig(t) + offChainConfig := c.CreateDefaultCommitOffchainConfig(t) + + c.SetupCommitOCR2Config(t, onChainConfig, offChainConfig) + + _, err := c.Dest.PriceRegistry.ApplyPriceUpdatersUpdates(c.Dest.User, []common.Address{c.Dest.CommitStore.Address()}, []common.Address{}) + require.NoError(t, err) + c.Dest.Chain.Commit() +} + +func (c *CCIPContracts) DeployNewOnRamp(t *testing.T) { + t.Log("Deploying new onRamp") + // find the last onRamp + prevOnRamp := common.HexToAddress("") + if c.Source.OnRamp != nil { + prevOnRamp = c.Source.OnRamp.Address() + } + onRampAddress, _, _, err := evm_2_evm_onramp.DeployEVM2EVMOnRamp( + c.Source.User, // user + c.Source.Chain, // client + evm_2_evm_onramp.EVM2EVMOnRampStaticConfig{ + LinkToken: c.Source.LinkToken.Address(), + ChainSelector: c.Source.ChainSelector, + DestChainSelector: c.Dest.ChainSelector, + DefaultTxGasLimit: 200_000, + MaxNopFeesJuels: big.NewInt(0).Mul(big.NewInt(100_000_000), big.NewInt(1e18)), + PrevOnRamp: prevOnRamp, + RmnProxy: c.Source.ARM.Address(), // RMN, formerly ARM + TokenAdminRegistry: c.Source.TokenAdminRegistry.Address(), + }, + evm_2_evm_onramp.EVM2EVMOnRampDynamicConfig{ + Router: c.Source.Router.Address(), + MaxNumberOfTokensPerMsg: 5, + DestGasOverhead: 350_000, + DestGasPerPayloadByte: 16, + DestDataAvailabilityOverheadGas: 33_596, + DestGasPerDataAvailabilityByte: 16, + DestDataAvailabilityMultiplierBps: 6840, // 0.684 + PriceRegistry: c.Source.PriceRegistry.Address(), + MaxDataBytes: 1e5, + MaxPerMsgGasLimit: 4_000_000, + DefaultTokenFeeUSDCents: 50, + DefaultTokenDestGasOverhead: 34_000, + DefaultTokenDestBytesOverhead: 500, + }, + evm_2_evm_onramp.RateLimiterConfig{ + IsEnabled: true, + Capacity: LinkUSDValue(100), + Rate: LinkUSDValue(1), + }, + []evm_2_evm_onramp.EVM2EVMOnRampFeeTokenConfigArgs{ + { + Token: c.Source.LinkToken.Address(), + NetworkFeeUSDCents: 1_00, + GasMultiplierWeiPerEth: 1e18, + PremiumMultiplierWeiPerEth: 9e17, + Enabled: true, + }, + { + Token: c.Source.WrappedNative.Address(), + NetworkFeeUSDCents: 1_00, + GasMultiplierWeiPerEth: 1e18, + PremiumMultiplierWeiPerEth: 1e18, + Enabled: true, + }, + }, + []evm_2_evm_onramp.EVM2EVMOnRampTokenTransferFeeConfigArgs{ + { 
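+			// Transfer-fee parameters for LINK; the values mirror those used by SetupCCIPContracts later in this file.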
+ Token: c.Source.LinkToken.Address(), + MinFeeUSDCents: 50, // $0.5 + MaxFeeUSDCents: 1_000_000_00, // $ 1 million + DeciBps: 5_0, // 5 bps + DestGasOverhead: 34_000, + DestBytesOverhead: 32, + AggregateRateLimitEnabled: true, + }, + }, + []evm_2_evm_onramp.EVM2EVMOnRampNopAndWeight{}, + ) + + require.NoError(t, err) + c.Source.Chain.Commit() + c.Dest.Chain.Commit() + c.Source.OnRamp, err = evm_2_evm_onramp.NewEVM2EVMOnRamp(onRampAddress, c.Source.Chain) + require.NoError(t, err) + c.Source.Chain.Commit() + c.Dest.Chain.Commit() +} + +func (c *CCIPContracts) EnableOnRamp(t *testing.T) { + t.Log("Setting onRamp on source router") + _, err := c.Source.Router.ApplyRampUpdates(c.Source.User, []router.RouterOnRamp{{DestChainSelector: c.Dest.ChainSelector, OnRamp: c.Source.OnRamp.Address()}}, nil, nil) + require.NoError(t, err) + c.Source.Chain.Commit() + c.Dest.Chain.Commit() +} + +func (c *CCIPContracts) DeployNewCommitStore(t *testing.T) { + commitStoreAddress, _, _, err := commit_store_helper_1_2_0.DeployCommitStoreHelper( + c.Dest.User, // user + c.Dest.Chain, // client + commit_store_helper_1_2_0.CommitStoreStaticConfig{ + ChainSelector: c.Dest.ChainSelector, + SourceChainSelector: c.Source.ChainSelector, + OnRamp: c.Source.OnRamp.Address(), + ArmProxy: c.Dest.ARMProxy.Address(), + }, + ) + require.NoError(t, err) + c.Dest.Chain.Commit() + c.Dest.CommitStoreHelper, err = commit_store_helper.NewCommitStoreHelper(commitStoreAddress, c.Dest.Chain) + require.NoError(t, err) + // since CommitStoreHelper derives from CommitStore, it's safe to instantiate both on same address + c.Dest.CommitStore, err = commit_store.NewCommitStore(commitStoreAddress, c.Dest.Chain) + require.NoError(t, err) +} + +func (c *CCIPContracts) DeployNewPriceRegistry(t *testing.T) { + t.Log("Deploying new Price Registry") + destPricesAddress, _, _, err := price_registry_1_2_0.DeployPriceRegistry( + c.Dest.User, + c.Dest.Chain, + []common.Address{c.Dest.CommitStore.Address()}, + []common.Address{c.Dest.LinkToken.Address()}, + 60*60*24*14, // two weeks + ) + require.NoError(t, err) + c.Source.Chain.Commit() + c.Dest.Chain.Commit() + c.Dest.PriceRegistry, err = price_registry_1_2_0.NewPriceRegistry(destPricesAddress, c.Dest.Chain) + require.NoError(t, err) + + priceUpdates := price_registry_1_2_0.InternalPriceUpdates{ + TokenPriceUpdates: []price_registry_1_2_0.InternalTokenPriceUpdate{ + { + SourceToken: c.Dest.LinkToken.Address(), + UsdPerToken: big.NewInt(8e18), // 8usd + }, + { + SourceToken: c.Dest.WrappedNative.Address(), + UsdPerToken: big.NewInt(1e18), // 1usd + }, + }, + GasPriceUpdates: []price_registry_1_2_0.InternalGasPriceUpdate{ + { + DestChainSelector: c.Source.ChainSelector, + UsdPerUnitGas: big.NewInt(2000e9), // $2000 per eth * 1gwei = 2000e9 + }, + }, + } + _, err = c.Dest.PriceRegistry.UpdatePrices(c.Dest.User, priceUpdates) + require.NoError(t, err) + + c.Source.Chain.Commit() + c.Dest.Chain.Commit() + + t.Logf("New Price Registry deployed at %s", destPricesAddress.String()) +} + +func (c *CCIPContracts) SetNopsOnRamp(t *testing.T, nopsAndWeights []evm_2_evm_onramp.EVM2EVMOnRampNopAndWeight) { + tx, err := c.Source.OnRamp.SetNops(c.Source.User, nopsAndWeights) + require.NoError(t, err) + c.Source.Chain.Commit() + _, err = bind.WaitMined(context.Background(), c.Source.Chain, tx) + require.NoError(t, err) +} + +func (c *CCIPContracts) GetSourceLinkBalance(t *testing.T, addr common.Address) *big.Int { + return GetBalance(t, c.Source.Chain, c.Source.LinkToken.Address(), addr) +} + +func (c *CCIPContracts) 
GetDestLinkBalance(t *testing.T, addr common.Address) *big.Int { + return GetBalance(t, c.Dest.Chain, c.Dest.LinkToken.Address(), addr) +} + +func (c *CCIPContracts) GetSourceWrappedTokenBalance(t *testing.T, addr common.Address) *big.Int { + return GetBalance(t, c.Source.Chain, c.Source.WrappedNative.Address(), addr) +} + +func (c *CCIPContracts) GetDestWrappedTokenBalance(t *testing.T, addr common.Address) *big.Int { + return GetBalance(t, c.Dest.Chain, c.Dest.WrappedNative.Address(), addr) +} + +func (c *CCIPContracts) AssertBalances(t *testing.T, bas []BalanceAssertion) { + for _, b := range bas { + actual := b.Getter(t, b.Address) + t.Log("Checking balance for", b.Name, "at", b.Address.Hex(), "got", actual) + require.NotNil(t, actual, "%v getter return nil", b.Name) + if b.Within == "" { + require.Equal(t, b.Expected, actual.String(), "wrong balance for %s got %s want %s", b.Name, actual, b.Expected) + } else { + bi, _ := big.NewInt(0).SetString(b.Expected, 10) + withinI, _ := big.NewInt(0).SetString(b.Within, 10) + high := big.NewInt(0).Add(bi, withinI) + low := big.NewInt(0).Sub(bi, withinI) + require.Equal(t, -1, actual.Cmp(high), "wrong balance for %s got %s outside expected range [%s, %s]", b.Name, actual, low, high) + require.Equal(t, 1, actual.Cmp(low), "wrong balance for %s got %s outside expected range [%s, %s]", b.Name, actual, low, high) + } + } +} + +func AccountToAddress(accounts []ocr2types.Account) (addresses []common.Address, err error) { + for _, signer := range accounts { + bytes, err := hexutil.Decode(string(signer)) + if err != nil { + return []common.Address{}, errors.Wrap(err, fmt.Sprintf("given address is not valid %s", signer)) + } + if len(bytes) != 20 { + return []common.Address{}, errors.Errorf("address is not 20 bytes %s", signer) + } + addresses = append(addresses, common.BytesToAddress(bytes)) + } + return addresses, nil +} + +func OnchainPublicKeyToAddress(publicKeys []ocrtypes.OnchainPublicKey) (addresses []common.Address, err error) { + for _, signer := range publicKeys { + if len(signer) != 20 { + return []common.Address{}, errors.Errorf("address is not 20 bytes %s", signer) + } + addresses = append(addresses, common.BytesToAddress(signer)) + } + return addresses, nil +} + +func (c *CCIPContracts) DeriveOCR2Config(t *testing.T, oracles []confighelper.OracleIdentityExtra, rawOnchainConfig []byte, rawOffchainConfig []byte) *OCR2Config { + signers, transmitters, threshold, onchainConfig, offchainConfigVersion, offchainConfig, err := confighelper.ContractSetConfigArgsForTests( + 2*time.Second, // deltaProgress + 1*time.Second, // deltaResend + 1*time.Second, // deltaRound + 500*time.Millisecond, // deltaGrace + 2*time.Second, // deltaStage + 3, + []int{1, 1, 1, 1}, + oracles, + rawOffchainConfig, + 50*time.Millisecond, // Max duration query + 1*time.Second, // Max duration observation + 100*time.Millisecond, + 100*time.Millisecond, + 100*time.Millisecond, + 1, // faults + rawOnchainConfig, + ) + require.NoError(t, err) + lggr := logger.TestLogger(t) + lggr.Infow("Setting Config on Oracle Contract", + "signers", signers, + "transmitters", transmitters, + "threshold", threshold, + "onchainConfig", onchainConfig, + "encodedConfigVersion", offchainConfigVersion, + ) + signerAddresses, err := OnchainPublicKeyToAddress(signers) + require.NoError(t, err) + transmitterAddresses, err := AccountToAddress(transmitters) + require.NoError(t, err) + + return &OCR2Config{ + Signers: signerAddresses, + Transmitters: transmitterAddresses, + F: threshold, + OnchainConfig: 
onchainConfig, + OffchainConfigVersion: offchainConfigVersion, + OffchainConfig: offchainConfig, + } +} + +func (c *CCIPContracts) SetupCommitOCR2Config(t *testing.T, commitOnchainConfig, commitOffchainConfig []byte) { + c.commitOCRConfig = c.DeriveOCR2Config(t, c.Oracles, commitOnchainConfig, commitOffchainConfig) + // Set the DON on the commit store + _, err := c.Dest.CommitStore.SetOCR2Config( + c.Dest.User, + c.commitOCRConfig.Signers, + c.commitOCRConfig.Transmitters, + c.commitOCRConfig.F, + c.commitOCRConfig.OnchainConfig, + c.commitOCRConfig.OffchainConfigVersion, + c.commitOCRConfig.OffchainConfig, + ) + require.NoError(t, err) + c.Dest.Chain.Commit() +} + +func (c *CCIPContracts) SetupExecOCR2Config(t *testing.T, execOnchainConfig, execOffchainConfig []byte) { + c.execOCRConfig = c.DeriveOCR2Config(t, c.Oracles, execOnchainConfig, execOffchainConfig) + // Same DON on the offramp + _, err := c.Dest.OffRamp.SetOCR2Config( + c.Dest.User, + c.execOCRConfig.Signers, + c.execOCRConfig.Transmitters, + c.execOCRConfig.F, + c.execOCRConfig.OnchainConfig, + c.execOCRConfig.OffchainConfigVersion, + c.execOCRConfig.OffchainConfig, + ) + require.NoError(t, err) + c.Dest.Chain.Commit() +} + +func (c *CCIPContracts) SetupOnchainConfig(t *testing.T, commitOnchainConfig, commitOffchainConfig, execOnchainConfig, execOffchainConfig []byte) int64 { + // Note We do NOT set the payees, payment is done in the OCR2Base implementation + blockBeforeConfig, err := c.Dest.Chain.BlockByNumber(context.Background(), nil) + require.NoError(t, err) + + c.SetupCommitOCR2Config(t, commitOnchainConfig, commitOffchainConfig) + c.SetupExecOCR2Config(t, execOnchainConfig, execOffchainConfig) + + return blockBeforeConfig.Number().Int64() +} + +func (c *CCIPContracts) SendMessage(t *testing.T, gasLimit, tokenAmount *big.Int, receiverAddr common.Address) { + extraArgs, err := GetEVMExtraArgsV1(gasLimit, false) + require.NoError(t, err) + msg := router.ClientEVM2AnyMessage{ + Receiver: MustEncodeAddress(t, receiverAddr), + Data: []byte("hello"), + TokenAmounts: []router.ClientEVMTokenAmount{ + { + Token: c.Source.LinkToken.Address(), + Amount: tokenAmount, + }, + }, + FeeToken: c.Source.LinkToken.Address(), + ExtraArgs: extraArgs, + } + fee, err := c.Source.Router.GetFee(nil, c.Dest.ChainSelector, msg) + require.NoError(t, err) + // Currently no overhead and 1gwei dest gas price. So fee is simply gasLimit * gasPrice. 
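+	// The assertion below is left commented out; enable it only if this helper's fee configuration keeps zero overhead so the fee stays exactly gasLimit * gasPrice.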
+ // require.Equal(t, new(big.Int).Mul(gasLimit, gasPrice).String(), fee.String()) + // Approve the fee amount + the token amount + _, err = c.Source.LinkToken.Approve(c.Source.User, c.Source.Router.Address(), new(big.Int).Add(fee, tokenAmount)) + require.NoError(t, err) + c.Source.Chain.Commit() + c.SendRequest(t, msg) +} + +func GetBalances(t *testing.T, brs []BalanceReq) (map[string]*big.Int, error) { + m := make(map[string]*big.Int) + for _, br := range brs { + m[br.Name] = br.Getter(t, br.Addr) + if m[br.Name] == nil { + return nil, fmt.Errorf("%v getter return nil", br.Name) + } + } + return m, nil +} + +func MustAddBigInt(a *big.Int, b string) *big.Int { + bi, _ := big.NewInt(0).SetString(b, 10) + return big.NewInt(0).Add(a, bi) +} + +func MustSubBigInt(a *big.Int, b string) *big.Int { + bi, _ := big.NewInt(0).SetString(b, 10) + return big.NewInt(0).Sub(a, bi) +} + +func MustEncodeAddress(t *testing.T, address common.Address) []byte { + bts, err := utils.ABIEncode(`[{"type":"address"}]`, address) + require.NoError(t, err) + return bts +} + +func SetAdminAndRegisterPool(t *testing.T, + chain *backends.SimulatedBackend, + user *bind.TransactOpts, + tokenAdminRegistry *token_admin_registry.TokenAdminRegistry, + tokenAddress common.Address, + poolAddress common.Address) { + _, err := tokenAdminRegistry.ProposeAdministrator(user, tokenAddress, user.From) + require.NoError(t, err) + _, err = tokenAdminRegistry.AcceptAdminRole(user, tokenAddress) + require.NoError(t, err) + _, err = tokenAdminRegistry.SetPool(user, tokenAddress, poolAddress) + require.NoError(t, err) + + chain.Commit() +} + +func SetupCCIPContracts(t *testing.T, sourceChainID, sourceChainSelector, destChainID, destChainSelector uint64) CCIPContracts { + sourceChain, sourceUser := SetupChain(t) + destChain, destUser := SetupChain(t) + + // ================================================================ + // │ Deploy RMN │ + // ================================================================ + + armSourceAddress, _, _, err := mock_arm_contract.DeployMockARMContract( + sourceUser, + sourceChain, + ) + require.NoError(t, err) + sourceARM, err := mock_arm_contract.NewMockARMContract(armSourceAddress, sourceChain) + require.NoError(t, err) + armProxySourceAddress, _, _, err := arm_proxy_contract.DeployARMProxyContract( + sourceUser, + sourceChain, + armSourceAddress, + ) + require.NoError(t, err) + sourceARMProxy, err := arm_proxy_contract.NewARMProxyContract(armProxySourceAddress, sourceChain) + require.NoError(t, err) + sourceChain.Commit() + + armDestAddress, _, _, err := mock_arm_contract.DeployMockARMContract( + destUser, + destChain, + ) + require.NoError(t, err) + armProxyDestAddress, _, _, err := arm_proxy_contract.DeployARMProxyContract( + destUser, + destChain, + armDestAddress, + ) + require.NoError(t, err) + destChain.Commit() + destARM, err := mock_arm_contract.NewMockARMContract(armDestAddress, destChain) + require.NoError(t, err) + destARMProxy, err := arm_proxy_contract.NewARMProxyContract(armProxyDestAddress, destChain) + require.NoError(t, err) + + // ================================================================ + // │ Deploy TokenAdminRegistry │ + // ================================================================ + + sourceTokenAdminRegistryAddress, _, _, err := token_admin_registry.DeployTokenAdminRegistry(sourceUser, sourceChain) + require.NoError(t, err) + sourceTokenAdminRegistry, err := token_admin_registry.NewTokenAdminRegistry(sourceTokenAdminRegistryAddress, sourceChain) + require.NoError(t, err) 
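+	// Mine the source-side TokenAdminRegistry deployment before deploying the dest-side registry and the tokens below.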
+	sourceChain.Commit()
+
+	destTokenAdminRegistryAddress, _, _, err := token_admin_registry.DeployTokenAdminRegistry(destUser, destChain)
+	require.NoError(t, err)
+	destTokenAdminRegistry, err := token_admin_registry.NewTokenAdminRegistry(destTokenAdminRegistryAddress, destChain)
+	require.NoError(t, err)
+	destChain.Commit()
+
+	// ================================================================
+	// │ Deploy Tokens │
+	// ================================================================
+
+	// Deploy link token and pool on source chain
+	sourceLinkTokenAddress, _, _, err := link_token_interface.DeployLinkToken(sourceUser, sourceChain)
+	require.NoError(t, err)
+	sourceChain.Commit()
+	sourceLinkToken, err := link_token_interface.NewLinkToken(sourceLinkTokenAddress, sourceChain)
+	require.NoError(t, err)
+	t.Logf("Deployed LINK token on source chain at %s", sourceLinkTokenAddress.String())
+
+	sourceWeth9addr, _, _, err := weth9.DeployWETH9(sourceUser, sourceChain)
+	require.NoError(t, err)
+	sourceWrapped, err := weth9.NewWETH9(sourceWeth9addr, sourceChain)
+	require.NoError(t, err)
+	t.Logf("Deployed WETH9 token on source chain at %s", sourceWeth9addr.String())
+
+	sourceCustomTokenAddress, _, _, err := link_token_interface.DeployLinkToken(sourceUser, sourceChain)
+	require.NoError(t, err)
+	sourceCustomToken, err := link_token_interface.NewLinkToken(sourceCustomTokenAddress, sourceChain)
+	require.NoError(t, err)
+	destChain.Commit()
+	t.Logf("Deployed custom token on source chain at %s", sourceCustomTokenAddress.String())
+
+	// Dest chain
+
+	destLinkTokenAddress, _, _, err := link_token_interface.DeployLinkToken(destUser, destChain)
+	require.NoError(t, err)
+	destChain.Commit()
+	destLinkToken, err := link_token_interface.NewLinkToken(destLinkTokenAddress, destChain)
+	require.NoError(t, err)
+	t.Logf("Deployed LINK token on dest chain at %s", destLinkTokenAddress.String())
+
+	destWeth9addr, _, _, err := weth9.DeployWETH9(destUser, destChain)
+	require.NoError(t, err)
+	destWrapped, err := weth9.NewWETH9(destWeth9addr, destChain)
+	require.NoError(t, err)
+	t.Logf("Deployed WETH9 token on dest chain at %s", destWeth9addr.String())
+
+	destCustomTokenAddress, _, _, err := link_token_interface.DeployLinkToken(destUser, destChain)
+	require.NoError(t, err)
+	destCustomToken, err := link_token_interface.NewLinkToken(destCustomTokenAddress, destChain)
+	require.NoError(t, err)
+	destChain.Commit()
+	t.Logf("Deployed custom token on dest chain at %s", destCustomTokenAddress.String())
+
+	// ================================================================
+	// │ Deploy Routers │
+	// ================================================================
+
+	sourceRouterAddress, _, _, err := router.DeployRouter(sourceUser, sourceChain, sourceWeth9addr, armProxySourceAddress)
+	require.NoError(t, err)
+	sourceRouter, err := router.NewRouter(sourceRouterAddress, sourceChain)
+	require.NoError(t, err)
+	sourceChain.Commit()
+
+	destRouterAddress, _, _, err := router.DeployRouter(destUser, destChain, destWeth9addr, armProxyDestAddress)
+	require.NoError(t, err)
+	destRouter, err := router.NewRouter(destRouterAddress, destChain)
+	require.NoError(t, err)
+	destChain.Commit()
+
+	// ================================================================
+	// │ Deploy Pools │
+	// ================================================================
+
+ sourcePoolLinkAddress, _, _, err := lock_release_token_pool.DeployLockReleaseTokenPool( + sourceUser, + sourceChain, + sourceLinkTokenAddress, + []common.Address{}, + 
armProxySourceAddress, + true, + sourceRouterAddress, + ) + require.NoError(t, err) + sourceChain.Commit() + SetAdminAndRegisterPool(t, sourceChain, sourceUser, sourceTokenAdminRegistry, sourceLinkTokenAddress, sourcePoolLinkAddress) + + sourceLinkPool, err := lock_release_token_pool.NewLockReleaseTokenPool(sourcePoolLinkAddress, sourceChain) + require.NoError(t, err) + + sourceWeth9PoolAddress, _, _, err := lock_release_token_pool.DeployLockReleaseTokenPool( + sourceUser, + sourceChain, + sourceWeth9addr, + []common.Address{}, + armProxySourceAddress, + true, + sourceRouterAddress, + ) + require.NoError(t, err) + sourceChain.Commit() + SetAdminAndRegisterPool(t, sourceChain, sourceUser, sourceTokenAdminRegistry, sourceWeth9addr, sourceWeth9PoolAddress) + + sourceWeth9Pool, err := lock_release_token_pool.NewLockReleaseTokenPool(sourceWeth9PoolAddress, sourceChain) + require.NoError(t, err) + + // dest + + destPoolLinkAddress, _, _, err := lock_release_token_pool.DeployLockReleaseTokenPool( + destUser, + destChain, + destLinkTokenAddress, + []common.Address{}, + armProxyDestAddress, + true, + destRouterAddress, + ) + require.NoError(t, err) + destChain.Commit() + SetAdminAndRegisterPool(t, destChain, destUser, destTokenAdminRegistry, destLinkTokenAddress, destPoolLinkAddress) + + destLinkPool, err := lock_release_token_pool.NewLockReleaseTokenPool(destPoolLinkAddress, destChain) + require.NoError(t, err) + destChain.Commit() + + // Float the offramp pool + o, err := destLinkPool.Owner(nil) + require.NoError(t, err) + require.Equal(t, destUser.From.String(), o.String()) + _, err = destLinkPool.SetRebalancer(destUser, destUser.From) + require.NoError(t, err) + _, err = destLinkToken.Approve(destUser, destPoolLinkAddress, Link(200)) + require.NoError(t, err) + _, err = destLinkPool.ProvideLiquidity(destUser, Link(200)) + require.NoError(t, err) + destChain.Commit() + + destWrappedPoolAddress, _, _, err := lock_release_token_pool.DeployLockReleaseTokenPool( + destUser, + destChain, + destWeth9addr, + []common.Address{}, + armProxyDestAddress, + true, + destRouterAddress, + ) + require.NoError(t, err) + destChain.Commit() + SetAdminAndRegisterPool(t, destChain, destUser, destTokenAdminRegistry, destWeth9addr, destWrappedPoolAddress) + + destWrappedPool, err := lock_release_token_pool.NewLockReleaseTokenPool(destWrappedPoolAddress, destChain) + require.NoError(t, err) + + poolFloatValue := big.NewInt(1e18) + + destUser.Value = poolFloatValue + _, err = destWrapped.Deposit(destUser) + require.NoError(t, err) + destChain.Commit() + destUser.Value = nil + + _, err = destWrapped.Transfer(destUser, destWrappedPool.Address(), poolFloatValue) + require.NoError(t, err) + destChain.Commit() + + // ================================================================ + // │ Configure token pools │ + // ================================================================ + + abiEncodedDestLinkPool, err := abihelpers.EncodeAddress(destLinkPool.Address()) + require.NoError(t, err) + abiEncodedDestLinkTokenAddress, err := abihelpers.EncodeAddress(destLinkToken.Address()) + require.NoError(t, err) + _, err = sourceLinkPool.ApplyChainUpdates( + sourceUser, + []lock_release_token_pool.TokenPoolChainUpdate{{ + RemoteChainSelector: DestChainSelector, + RemotePoolAddress: abiEncodedDestLinkPool, + RemoteTokenAddress: abiEncodedDestLinkTokenAddress, + Allowed: true, + OutboundRateLimiterConfig: lock_release_token_pool.RateLimiterConfig{ + IsEnabled: true, + Capacity: HundredLink, + Rate: big.NewInt(1e18), + }, + 
InboundRateLimiterConfig: lock_release_token_pool.RateLimiterConfig{ + IsEnabled: true, + Capacity: HundredLink, + Rate: big.NewInt(1e18), + }, + }}, + ) + require.NoError(t, err) + + abiEncodedDestWrappedPool, err := abihelpers.EncodeAddress(destWrappedPool.Address()) + require.NoError(t, err) + abiEncodedDestWrappedTokenAddr, err := abihelpers.EncodeAddress(destWeth9addr) + require.NoError(t, err) + _, err = sourceWeth9Pool.ApplyChainUpdates( + sourceUser, + []lock_release_token_pool.TokenPoolChainUpdate{{ + RemoteChainSelector: DestChainSelector, + RemotePoolAddress: abiEncodedDestWrappedPool, + RemoteTokenAddress: abiEncodedDestWrappedTokenAddr, + Allowed: true, + OutboundRateLimiterConfig: lock_release_token_pool.RateLimiterConfig{ + IsEnabled: true, + Capacity: HundredLink, + Rate: big.NewInt(1e18), + }, + InboundRateLimiterConfig: lock_release_token_pool.RateLimiterConfig{ + IsEnabled: true, + Capacity: HundredLink, + Rate: big.NewInt(1e18), + }, + }}, + ) + require.NoError(t, err) + sourceChain.Commit() + + abiEncodedSourceLinkPool, err := abihelpers.EncodeAddress(sourceLinkPool.Address()) + require.NoError(t, err) + abiEncodedSourceLinkTokenAddr, err := abihelpers.EncodeAddress(sourceLinkTokenAddress) + require.NoError(t, err) + _, err = destLinkPool.ApplyChainUpdates( + destUser, + []lock_release_token_pool.TokenPoolChainUpdate{{ + RemoteChainSelector: SourceChainSelector, + RemotePoolAddress: abiEncodedSourceLinkPool, + RemoteTokenAddress: abiEncodedSourceLinkTokenAddr, + Allowed: true, + OutboundRateLimiterConfig: lock_release_token_pool.RateLimiterConfig{ + IsEnabled: true, + Capacity: HundredLink, + Rate: big.NewInt(1e18), + }, + InboundRateLimiterConfig: lock_release_token_pool.RateLimiterConfig{ + IsEnabled: true, + Capacity: HundredLink, + Rate: big.NewInt(1e18), + }, + }}, + ) + require.NoError(t, err) + + abiEncodedSourceWrappedPool, err := abihelpers.EncodeAddress(sourceWeth9Pool.Address()) + require.NoError(t, err) + abiEncodedSourceWrappedTokenAddr, err := abihelpers.EncodeAddress(sourceWrapped.Address()) + require.NoError(t, err) + _, err = destWrappedPool.ApplyChainUpdates( + destUser, + []lock_release_token_pool.TokenPoolChainUpdate{{ + RemoteChainSelector: SourceChainSelector, + RemotePoolAddress: abiEncodedSourceWrappedPool, + RemoteTokenAddress: abiEncodedSourceWrappedTokenAddr, + Allowed: true, + OutboundRateLimiterConfig: lock_release_token_pool.RateLimiterConfig{ + IsEnabled: true, + Capacity: HundredLink, + Rate: big.NewInt(1e18), + }, + InboundRateLimiterConfig: lock_release_token_pool.RateLimiterConfig{ + IsEnabled: true, + Capacity: HundredLink, + Rate: big.NewInt(1e18), + }, + }}, + ) + require.NoError(t, err) + destChain.Commit() + + // ================================================================ + // │ Deploy Price Registry │ + // ================================================================ + + sourcePricesAddress, _, _, err := price_registry_1_2_0.DeployPriceRegistry( + sourceUser, + sourceChain, + nil, + []common.Address{sourceLinkTokenAddress, sourceWeth9addr}, + 60*60*24*14, // two weeks + ) + require.NoError(t, err) + + srcPriceRegistry, err := price_registry_1_2_0.NewPriceRegistry(sourcePricesAddress, sourceChain) + require.NoError(t, err) + + _, err = srcPriceRegistry.UpdatePrices(sourceUser, price_registry_1_2_0.InternalPriceUpdates{ + TokenPriceUpdates: []price_registry_1_2_0.InternalTokenPriceUpdate{ + { + SourceToken: sourceLinkTokenAddress, + UsdPerToken: new(big.Int).Mul(big.NewInt(1e18), big.NewInt(20)), + }, + { + SourceToken: 
sourceWeth9addr, + UsdPerToken: new(big.Int).Mul(big.NewInt(1e18), big.NewInt(2000)), + }, + }, + GasPriceUpdates: []price_registry_1_2_0.InternalGasPriceUpdate{ + { + DestChainSelector: destChainSelector, + UsdPerUnitGas: big.NewInt(20000e9), + }, + }, + }) + require.NoError(t, err) + + // ================================================================ + // │ Deploy Lane │ + // ================================================================ + + onRampAddress, _, _, err := evm_2_evm_onramp.DeployEVM2EVMOnRamp( + sourceUser, // user + sourceChain, // client + evm_2_evm_onramp.EVM2EVMOnRampStaticConfig{ + LinkToken: sourceLinkTokenAddress, + ChainSelector: sourceChainSelector, + DestChainSelector: destChainSelector, + DefaultTxGasLimit: 200_000, + MaxNopFeesJuels: big.NewInt(0).Mul(big.NewInt(100_000_000), big.NewInt(1e18)), + PrevOnRamp: common.HexToAddress(""), + RmnProxy: armProxySourceAddress, // RMN, formerly ARM + TokenAdminRegistry: sourceTokenAdminRegistry.Address(), + }, + evm_2_evm_onramp.EVM2EVMOnRampDynamicConfig{ + Router: sourceRouterAddress, + MaxNumberOfTokensPerMsg: 5, + DestGasOverhead: 350_000, + DestGasPerPayloadByte: 16, + DestDataAvailabilityOverheadGas: 33_596, + DestGasPerDataAvailabilityByte: 16, + DestDataAvailabilityMultiplierBps: 6840, // 0.684 + PriceRegistry: sourcePricesAddress, + MaxDataBytes: 1e5, + MaxPerMsgGasLimit: 4_000_000, + DefaultTokenFeeUSDCents: 50, + DefaultTokenDestGasOverhead: 34_000, + DefaultTokenDestBytesOverhead: 500, + }, + evm_2_evm_onramp.RateLimiterConfig{ + IsEnabled: true, + Capacity: LinkUSDValue(100), + Rate: LinkUSDValue(1), + }, + []evm_2_evm_onramp.EVM2EVMOnRampFeeTokenConfigArgs{ + { + Token: sourceLinkTokenAddress, + NetworkFeeUSDCents: 1_00, + GasMultiplierWeiPerEth: 1e18, + PremiumMultiplierWeiPerEth: 9e17, + Enabled: true, + }, + { + Token: sourceWeth9addr, + NetworkFeeUSDCents: 1_00, + GasMultiplierWeiPerEth: 1e18, + PremiumMultiplierWeiPerEth: 1e18, + Enabled: true, + }, + }, + []evm_2_evm_onramp.EVM2EVMOnRampTokenTransferFeeConfigArgs{ + { + Token: sourceLinkTokenAddress, + MinFeeUSDCents: 50, // $0.5 + MaxFeeUSDCents: 1_000_000_00, // $ 1 million + DeciBps: 5_0, // 5 bps + DestGasOverhead: 34_000, + DestBytesOverhead: 32, + AggregateRateLimitEnabled: true, + }, + }, + []evm_2_evm_onramp.EVM2EVMOnRampNopAndWeight{}, + ) + require.NoError(t, err) + onRamp, err := evm_2_evm_onramp.NewEVM2EVMOnRamp(onRampAddress, sourceChain) + require.NoError(t, err) + + _, err = sourceRouter.ApplyRampUpdates(sourceUser, []router.RouterOnRamp{{DestChainSelector: destChainSelector, OnRamp: onRampAddress}}, nil, nil) + require.NoError(t, err) + sourceChain.Commit() + + destPriceRegistryAddress, _, _, err := price_registry_1_2_0.DeployPriceRegistry( + destUser, + destChain, + nil, + []common.Address{destLinkTokenAddress, destWeth9addr}, + 60*60*24*14, // two weeks + ) + require.NoError(t, err) + destPriceRegistry, err := price_registry_1_2_0.NewPriceRegistry(destPriceRegistryAddress, destChain) + require.NoError(t, err) + + // Deploy commit store. 
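+	// CommitStoreHelper derives from CommitStore, so both bindings below are instantiated against the same deployed address.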
+ commitStoreAddress, _, _, err := commit_store_helper_1_2_0.DeployCommitStoreHelper( + destUser, // user + destChain, // client + commit_store_helper_1_2_0.CommitStoreStaticConfig{ + ChainSelector: destChainSelector, + SourceChainSelector: sourceChainSelector, + OnRamp: onRamp.Address(), + ArmProxy: destARMProxy.Address(), + }, + ) + require.NoError(t, err) + destChain.Commit() + commitStore, err := commit_store.NewCommitStore(commitStoreAddress, destChain) + require.NoError(t, err) + commitStoreHelper, err := commit_store_helper.NewCommitStoreHelper(commitStoreAddress, destChain) + require.NoError(t, err) + + offRampAddress, _, _, err := evm_2_evm_offramp.DeployEVM2EVMOffRamp( + destUser, + destChain, + evm_2_evm_offramp.EVM2EVMOffRampStaticConfig{ + CommitStore: commitStore.Address(), + ChainSelector: destChainSelector, + SourceChainSelector: sourceChainSelector, + OnRamp: onRampAddress, + PrevOffRamp: common.HexToAddress(""), + RmnProxy: armProxyDestAddress, // RMN, formerly ARM + TokenAdminRegistry: destTokenAdminRegistryAddress, + }, + evm_2_evm_offramp.RateLimiterConfig{ + IsEnabled: true, + Capacity: LinkUSDValue(100), + Rate: LinkUSDValue(1), + }, + ) + require.NoError(t, err) + offRamp, err := evm_2_evm_offramp.NewEVM2EVMOffRamp(offRampAddress, destChain) + require.NoError(t, err) + destChain.Commit() + + _, err = destPriceRegistry.ApplyPriceUpdatersUpdates(destUser, []common.Address{commitStoreAddress}, []common.Address{}) + require.NoError(t, err) + _, err = destRouter.ApplyRampUpdates( + destUser, + nil, + nil, + []router.RouterOffRamp{{SourceChainSelector: sourceChainSelector, OffRamp: offRampAddress}}, + ) + require.NoError(t, err) + + // Deploy 2 revertable (one SS one non-SS) + revertingMessageReceiver1Address, _, _, err := maybe_revert_message_receiver.DeployMaybeRevertMessageReceiver(destUser, destChain, false) + require.NoError(t, err) + revertingMessageReceiver1, _ := maybe_revert_message_receiver.NewMaybeRevertMessageReceiver(revertingMessageReceiver1Address, destChain) + revertingMessageReceiver2Address, _, _, err := maybe_revert_message_receiver.DeployMaybeRevertMessageReceiver(destUser, destChain, false) + require.NoError(t, err) + revertingMessageReceiver2, _ := maybe_revert_message_receiver.NewMaybeRevertMessageReceiver(revertingMessageReceiver2Address, destChain) + // Need to commit here, or we will hit the block gas limit when deploying the executor + sourceChain.Commit() + destChain.Commit() + + // Ensure we have at least finality blocks. 
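+	// Commit 50 empty blocks on each simulated chain so the setup transactions sit behind the finality/confirmation depth assumed by the tests.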
+ for i := 0; i < 50; i++ { + sourceChain.Commit() + destChain.Commit() + } + + source := SourceChain{ + Common: Common{ + ChainID: sourceChainID, + ChainSelector: sourceChainSelector, + User: sourceUser, + Chain: sourceChain, + LinkToken: sourceLinkToken, + LinkTokenPool: sourceLinkPool, + CustomToken: sourceCustomToken, + ARM: sourceARM, + ARMProxy: sourceARMProxy, + PriceRegistry: srcPriceRegistry, + WrappedNative: sourceWrapped, + WrappedNativePool: sourceWeth9Pool, + TokenAdminRegistry: sourceTokenAdminRegistry, + }, + Router: sourceRouter, + OnRamp: onRamp, + } + dest := DestinationChain{ + Common: Common{ + ChainID: destChainID, + ChainSelector: destChainSelector, + User: destUser, + Chain: destChain, + LinkToken: destLinkToken, + LinkTokenPool: destLinkPool, + CustomToken: destCustomToken, + ARM: destARM, + ARMProxy: destARMProxy, + PriceRegistry: destPriceRegistry, + WrappedNative: destWrapped, + WrappedNativePool: destWrappedPool, + TokenAdminRegistry: destTokenAdminRegistry, + }, + CommitStoreHelper: commitStoreHelper, + CommitStore: commitStore, + Router: destRouter, + OffRamp: offRamp, + Receivers: []MaybeRevertReceiver{{Receiver: revertingMessageReceiver1, Strict: false}, {Receiver: revertingMessageReceiver2, Strict: true}}, + } + + return CCIPContracts{ + Source: source, + Dest: dest, + } +} + +func (c *CCIPContracts) SendRequest(t *testing.T, msg router.ClientEVM2AnyMessage) *types.Transaction { + tx, err := c.Source.Router.CcipSend(c.Source.User, c.Dest.ChainSelector, msg) + require.NoError(t, err) + ConfirmTxs(t, []*types.Transaction{tx}, c.Source.Chain) + return tx +} + +func (c *CCIPContracts) AssertExecState(t *testing.T, log logpoller.Log, state MessageExecutionState, offRampOpts ...common.Address) { + var offRamp *evm_2_evm_offramp.EVM2EVMOffRamp + var err error + if len(offRampOpts) > 0 { + offRamp, err = evm_2_evm_offramp.NewEVM2EVMOffRamp(offRampOpts[0], c.Dest.Chain) + require.NoError(t, err) + } else { + require.NotNil(t, c.Dest.OffRamp, "no offRamp configured") + offRamp = c.Dest.OffRamp + } + executionStateChanged, err := offRamp.ParseExecutionStateChanged(log.ToGethLog()) + require.NoError(t, err) + if MessageExecutionState(executionStateChanged.State) != state { + t.Log("Execution failed", hexutil.Encode(executionStateChanged.ReturnData)) + t.Fail() + } +} + +func GetEVMExtraArgsV1(gasLimit *big.Int, strict bool) ([]byte, error) { + EVMV1Tag := []byte{0x97, 0xa6, 0x57, 0xc9} + + encodedArgs, err := utils.ABIEncode(`[{"type":"uint256"},{"type":"bool"}]`, gasLimit, strict) + if err != nil { + return nil, err + } + + return append(EVMV1Tag, encodedArgs...), nil +} + +func GetEVMExtraArgsV2(gasLimit *big.Int, allowOutOfOrder bool) ([]byte, error) { + // see Client.sol. + EVMV2Tag := hexutil.MustDecode("0x181dcf10") + + encodedArgs, err := utils.ABIEncode(`[{"type":"uint256"},{"type":"bool"}]`, gasLimit, allowOutOfOrder) + if err != nil { + return nil, err + } + + return append(EVMV2Tag, encodedArgs...), nil +} + +type ManualExecArgs struct { + SourceChainID, DestChainID uint64 + DestUser *bind.TransactOpts + SourceChain, DestChain bind.ContractBackend + SourceStartBlock *big.Int // the block in/after which failed ccip-send transaction was triggered + DestStartBlock uint64 // the start block for filtering ReportAccepted event (including the failed seq num) + // in destination chain. 
if not provided to be derived by ApproxDestStartBlock method + DestLatestBlockNum uint64 // current block number in destination + DestDeployedAt uint64 // destination block number for the initial destination contract deployment. + // Can be any number before the tx was reverted in destination chain. Preferably this needs to be set up with + // a value greater than zero to avoid performance issue in locating approximate destination block + SendReqLogIndex uint // log index of the CCIPSendRequested log in source chain + SendReqTxHash string // tx hash of the ccip-send transaction for which execution was reverted + CommitStore string + OnRamp string + OffRamp string + SeqNr uint64 + GasLimit *big.Int +} + +// ApproxDestStartBlock attempts to locate a block in destination chain with timestamp closest to the timestamp of the block +// in source chain in which ccip-send transaction was included +// it uses binary search to locate the block with the closest timestamp +// if the block located has a timestamp greater than the timestamp of mentioned source block +// it just returns the first block found with lesser timestamp of the source block +// providing a value of args.DestDeployedAt ensures better performance by reducing the range of block numbers to be traversed +func (args *ManualExecArgs) ApproxDestStartBlock() error { + sourceBlockHdr, err := args.SourceChain.HeaderByNumber(context.Background(), args.SourceStartBlock) + if err != nil { + return err + } + sendTxTime := sourceBlockHdr.Time + maxBlockNum := args.DestLatestBlockNum + // setting this to an approx value of 1000 considering destination chain would have at least 1000 blocks before the transaction started + minBlockNum := args.DestDeployedAt + closestBlockNum := uint64(math.Floor((float64(maxBlockNum) + float64(minBlockNum)) / 2)) + var closestBlockHdr *types.Header + closestBlockHdr, err = args.DestChain.HeaderByNumber(context.Background(), big.NewInt(int64(closestBlockNum))) + if err != nil { + return err + } + // to reduce the number of RPC calls increase the value of blockOffset + blockOffset := uint64(10) + for { + blockNum := closestBlockHdr.Number.Uint64() + if minBlockNum > maxBlockNum { + break + } + timeDiff := math.Abs(float64(closestBlockHdr.Time - sendTxTime)) + // break if the difference in timestamp is lesser than 1 minute + if timeDiff < 60 { + break + } else if closestBlockHdr.Time > sendTxTime { + maxBlockNum = blockNum - 1 + } else { + minBlockNum = blockNum + 1 + } + closestBlockNum = uint64(math.Floor((float64(maxBlockNum) + float64(minBlockNum)) / 2)) + closestBlockHdr, err = args.DestChain.HeaderByNumber(context.Background(), big.NewInt(int64(closestBlockNum))) + if err != nil { + return err + } + } + + for closestBlockHdr.Time > sendTxTime { + closestBlockNum = closestBlockNum - blockOffset + if closestBlockNum <= 0 { + return fmt.Errorf("approx destination blocknumber not found") + } + closestBlockHdr, err = args.DestChain.HeaderByNumber(context.Background(), big.NewInt(int64(closestBlockNum))) + if err != nil { + return err + } + } + args.DestStartBlock = closestBlockHdr.Number.Uint64() + fmt.Println("using approx destination start block number", args.DestStartBlock) + return nil +} + +func (args *ManualExecArgs) FindSeqNrFromCCIPSendRequested() (uint64, error) { + var seqNr uint64 + onRampContract, err := evm_2_evm_onramp.NewEVM2EVMOnRamp(common.HexToAddress(args.OnRamp), args.SourceChain) + if err != nil { + return seqNr, err + } + iterator, err := onRampContract.FilterCCIPSendRequested(&bind.FilterOpts{ 
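+		// Scan CCIPSendRequested logs starting from the block in/after which the failed ccip-send was included; End is left nil so the filter runs to the latest block.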
+ Start: args.SourceStartBlock.Uint64(), + }) + if err != nil { + return seqNr, err + } + for iterator.Next() { + if iterator.Event.Raw.Index == args.SendReqLogIndex && + iterator.Event.Raw.TxHash.Hex() == args.SendReqTxHash { + seqNr = iterator.Event.Message.SequenceNumber + break + } + } + if seqNr == 0 { + return seqNr, + fmt.Errorf("no CCIPSendRequested logs found for logIndex %d starting from block number %d", args.SendReqLogIndex, args.SourceStartBlock) + } + return seqNr, nil +} + +func (args *ManualExecArgs) ExecuteManually() (*types.Transaction, error) { + if args.SourceChainID == 0 || + args.DestChainID == 0 || + args.DestUser == nil { + return nil, fmt.Errorf("chain ids and owners are mandatory for source and dest chain") + } + if !common.IsHexAddress(args.CommitStore) || + !common.IsHexAddress(args.OffRamp) || + !common.IsHexAddress(args.OnRamp) { + return nil, fmt.Errorf("contract addresses must be valid hex address") + } + if args.SendReqTxHash == "" { + return nil, fmt.Errorf("tx hash of ccip-send request are required") + } + if args.SourceStartBlock == nil { + return nil, fmt.Errorf("must provide the value of source block in/after which ccip-send tx was included") + } + if args.SeqNr == 0 { + if args.SendReqLogIndex == 0 { + return nil, fmt.Errorf("must provide the value of log index of ccip-send request") + } + // locate seq nr from CCIPSendRequested log + seqNr, err := args.FindSeqNrFromCCIPSendRequested() + if err != nil { + return nil, err + } + args.SeqNr = seqNr + } + commitStore, err := commit_store.NewCommitStore(common.HexToAddress(args.CommitStore), args.DestChain) + if err != nil { + return nil, err + } + if args.DestStartBlock < 1 { + err = args.ApproxDestStartBlock() + if err != nil { + return nil, err + } + } + iterator, err := commitStore.FilterReportAccepted(&bind.FilterOpts{Start: args.DestStartBlock}) + if err != nil { + return nil, err + } + + var commitReport *commit_store.CommitStoreCommitReport + for iterator.Next() { + if iterator.Event.Report.Interval.Min <= args.SeqNr && iterator.Event.Report.Interval.Max >= args.SeqNr { + commitReport = &iterator.Event.Report + fmt.Println("Found root") + break + } + } + if commitReport == nil { + return nil, fmt.Errorf("unable to find seq num %d in commit report", args.SeqNr) + } + + return args.execute(commitReport) +} + +func (args *ManualExecArgs) execute(report *commit_store.CommitStoreCommitReport) (*types.Transaction, error) { + log.Info().Msg("Executing request manually") + seqNr := args.SeqNr + // Build a merkle tree for the report + mctx := hashutil.NewKeccak() + onRampContract, err := evm_2_evm_onramp_1_2_0.NewEVM2EVMOnRamp(common.HexToAddress(args.OnRamp), args.SourceChain) + if err != nil { + return nil, err + } + leafHasher := v1_2_0.NewLeafHasher(args.SourceChainID, args.DestChainID, common.HexToAddress(args.OnRamp), mctx, onRampContract) + if leafHasher == nil { + return nil, fmt.Errorf("unable to create leaf hasher") + } + + var leaves [][32]byte + var curr, prove int + var msgs []evm_2_evm_offramp.InternalEVM2EVMMessage + var manualExecGasLimits []*big.Int + var tokenData [][][]byte + sendRequestedIterator, err := onRampContract.FilterCCIPSendRequested(&bind.FilterOpts{ + Start: args.SourceStartBlock.Uint64(), + }) + if err != nil { + return nil, err + } + for sendRequestedIterator.Next() { + if sendRequestedIterator.Event.Message.SequenceNumber <= report.Interval.Max && + sendRequestedIterator.Event.Message.SequenceNumber >= report.Interval.Min { + fmt.Println("Found seq num", 
sendRequestedIterator.Event.Message.SequenceNumber, report.Interval) + hash, err2 := leafHasher.HashLeaf(sendRequestedIterator.Event.Raw) + if err2 != nil { + return nil, err2 + } + leaves = append(leaves, hash) + if sendRequestedIterator.Event.Message.SequenceNumber == seqNr { + fmt.Printf("Found proving %d %+v\n", curr, sendRequestedIterator.Event.Message) + var tokensAndAmounts []evm_2_evm_offramp.ClientEVMTokenAmount + for _, tokenAndAmount := range sendRequestedIterator.Event.Message.TokenAmounts { + tokensAndAmounts = append(tokensAndAmounts, evm_2_evm_offramp.ClientEVMTokenAmount{ + Token: tokenAndAmount.Token, + Amount: tokenAndAmount.Amount, + }) + } + msg := evm_2_evm_offramp.InternalEVM2EVMMessage{ + SourceChainSelector: sendRequestedIterator.Event.Message.SourceChainSelector, + Sender: sendRequestedIterator.Event.Message.Sender, + Receiver: sendRequestedIterator.Event.Message.Receiver, + SequenceNumber: sendRequestedIterator.Event.Message.SequenceNumber, + GasLimit: sendRequestedIterator.Event.Message.GasLimit, + Strict: sendRequestedIterator.Event.Message.Strict, + Nonce: sendRequestedIterator.Event.Message.Nonce, + FeeToken: sendRequestedIterator.Event.Message.FeeToken, + FeeTokenAmount: sendRequestedIterator.Event.Message.FeeTokenAmount, + Data: sendRequestedIterator.Event.Message.Data, + TokenAmounts: tokensAndAmounts, + SourceTokenData: sendRequestedIterator.Event.Message.SourceTokenData, + MessageId: sendRequestedIterator.Event.Message.MessageId, + } + msgs = append(msgs, msg) + if args.GasLimit != nil { + msg.GasLimit = args.GasLimit + } + manualExecGasLimits = append(manualExecGasLimits, msg.GasLimit) + var msgTokenData [][]byte + for range sendRequestedIterator.Event.Message.TokenAmounts { + msgTokenData = append(msgTokenData, []byte{}) + } + + tokenData = append(tokenData, msgTokenData) + prove = curr + } + curr++ + } + } + sendRequestedIterator.Close() + if msgs == nil { + return nil, fmt.Errorf("unable to find msg with seqNr %d", seqNr) + } + tree, err := merklemulti.NewTree(mctx, leaves) + if err != nil { + return nil, err + } + if tree.Root() != report.MerkleRoot { + return nil, fmt.Errorf("root doesn't match") + } + + proof, err := tree.Prove([]int{prove}) + if err != nil { + return nil, err + } + + offRampProof := evm_2_evm_offramp.InternalExecutionReport{ + Messages: msgs, + OffchainTokenData: tokenData, + Proofs: proof.Hashes, + ProofFlagBits: abihelpers.ProofFlagsToBits(proof.SourceFlags), + } + offRamp, err := evm_2_evm_offramp.NewEVM2EVMOffRamp(common.HexToAddress(args.OffRamp), args.DestChain) + if err != nil { + return nil, err + } + // Execute. 
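// --- Illustrative sketch (not part of this patch) ---
// ApproxDestStartBlock above bisects destination block numbers by timestamp. The same idea in
// isolation, simplified to return the latest block at or before a target timestamp; the function
// name, the blockTime lookup callback, and the error text are invented for this sketch, and only
// the standard library (fmt) is assumed.
func findBlockByTimestamp(lo, hi, target uint64, blockTime func(uint64) (uint64, error)) (uint64, error) {
	best, found := lo, false
	for lo <= hi {
		mid := lo + (hi-lo)/2
		ts, err := blockTime(mid) // stands in for an RPC HeaderByNumber call
		if err != nil {
			return 0, err
		}
		if ts <= target {
			best, found = mid, true
			lo = mid + 1 // a later block may still be at or before the target
		} else {
			if mid == 0 {
				break // avoid uint64 underflow of hi
			}
			hi = mid - 1 // overshot the target: search earlier blocks
		}
	}
	if !found {
		return 0, fmt.Errorf("no block at or before timestamp %d", target)
	}
	return best, nil
}
// --- end of sketch ---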
+ return offRamp.ManuallyExecute(args.DestUser, offRampProof, manualExecGasLimits) +} + +func (c *CCIPContracts) ExecuteMessage( + t *testing.T, + req logpoller.Log, + txHash common.Hash, + destStartBlock uint64, +) uint64 { + t.Log("Executing request manually") + sendReqReceipt, err := c.Source.Chain.TransactionReceipt(context.Background(), txHash) + require.NoError(t, err) + args := ManualExecArgs{ + SourceChainID: c.Source.ChainID, + DestChainID: c.Dest.ChainID, + DestUser: c.Dest.User, + SourceChain: c.Source.Chain, + DestChain: c.Dest.Chain, + SourceStartBlock: sendReqReceipt.BlockNumber, + DestStartBlock: destStartBlock, + DestLatestBlockNum: c.Dest.Chain.Blockchain().CurrentBlock().Number.Uint64(), + SendReqLogIndex: uint(req.LogIndex), + SendReqTxHash: txHash.String(), + CommitStore: c.Dest.CommitStore.Address().String(), + OnRamp: c.Source.OnRamp.Address().String(), + OffRamp: c.Dest.OffRamp.Address().String(), + } + tx, err := args.ExecuteManually() + require.NoError(t, err) + c.Dest.Chain.Commit() + c.Source.Chain.Commit() + rec, err := c.Dest.Chain.TransactionReceipt(context.Background(), tx.Hash()) + require.NoError(t, err) + require.Equal(t, uint64(1), rec.Status, "manual execution failed") + t.Logf("Manual Execution completed for seqNum %d", args.SeqNr) + return args.SeqNr +} + +func GetBalance(t *testing.T, chain bind.ContractBackend, tokenAddr common.Address, addr common.Address) *big.Int { + token, err := link_token_interface.NewLinkToken(tokenAddr, chain) + require.NoError(t, err) + bal, err := token.BalanceOf(nil, addr) + require.NoError(t, err) + return bal +} diff --git a/core/services/ocr2/plugins/ccip/testhelpers/config.go b/core/services/ocr2/plugins/ccip/testhelpers/config.go new file mode 100644 index 00000000000..f70f1954f18 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/testhelpers/config.go @@ -0,0 +1,73 @@ +// Package with set of configs that should be used only within tests suites + +package testhelpers + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/chainlink-common/pkg/config" + + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_5_0" +) + +var PermissionLessExecutionThresholdSeconds = uint32(FirstBlockAge.Seconds()) + +func (c *CCIPContracts) CreateDefaultCommitOnchainConfig(t *testing.T) []byte { + config, err := abihelpers.EncodeAbiStruct(ccipdata.CommitOnchainConfig{ + PriceRegistry: c.Dest.PriceRegistry.Address(), + }) + require.NoError(t, err) + return config +} + +func (c *CCIPContracts) CreateDefaultCommitOffchainConfig(t *testing.T) []byte { + return c.createCommitOffchainConfig(t, 10*time.Second, 5*time.Second) +} + +func (c *CCIPContracts) createCommitOffchainConfig(t *testing.T, feeUpdateHearBeat time.Duration, inflightCacheExpiry time.Duration) []byte { + config, err := NewCommitOffchainConfig( + *config.MustNewDuration(feeUpdateHearBeat), + 1, + 1, + *config.MustNewDuration(feeUpdateHearBeat), + 1, + *config.MustNewDuration(inflightCacheExpiry), + ).Encode() + require.NoError(t, err) + return config +} + +func (c *CCIPContracts) CreateDefaultExecOnchainConfig(t *testing.T) []byte { + config, err := abihelpers.EncodeAbiStruct(v1_5_0.ExecOnchainConfig{ + PermissionLessExecutionThresholdSeconds: PermissionLessExecutionThresholdSeconds, + Router: 
c.Dest.Router.Address(), + PriceRegistry: c.Dest.PriceRegistry.Address(), + MaxDataBytes: 1e5, + MaxNumberOfTokensPerMsg: 5, + MaxPoolReleaseOrMintGas: 200_000, + MaxTokenTransferGas: 100_000, + }) + require.NoError(t, err) + return config +} + +func (c *CCIPContracts) CreateDefaultExecOffchainConfig(t *testing.T) []byte { + return c.createExecOffchainConfig(t, 1*time.Minute, 1*time.Minute) +} + +func (c *CCIPContracts) createExecOffchainConfig(t *testing.T, inflightCacheExpiry time.Duration, rootSnoozeTime time.Duration) []byte { + config, err := NewExecOffchainConfig( + 1, + 5_000_000, + 0.07, + *config.MustNewDuration(inflightCacheExpiry), + *config.MustNewDuration(rootSnoozeTime), + ).Encode() + require.NoError(t, err) + return config +} diff --git a/core/services/ocr2/plugins/ccip/testhelpers/integration/chainlink.go b/core/services/ocr2/plugins/ccip/testhelpers/integration/chainlink.go new file mode 100644 index 00000000000..177ccf323b7 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/testhelpers/integration/chainlink.go @@ -0,0 +1,1035 @@ +package integrationtesthelpers + +import ( + "context" + "encoding/hex" + "fmt" + "math/big" + "net/http" + "net/http/httptest" + "strconv" + "strings" + "testing" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" + "github.com/ethereum/go-ethereum/common" + types3 "github.com/ethereum/go-ethereum/core/types" + "github.com/google/uuid" + "github.com/hashicorp/consul/sdk/freeport" + "github.com/jmoiron/sqlx" + "github.com/onsi/gomega" + "github.com/pkg/errors" + + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + "k8s.io/utils/pointer" //nolint:staticcheck + + "github.com/smartcontractkit/libocr/commontypes" + "github.com/smartcontractkit/libocr/offchainreporting2/confighelper" + types4 "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + + "github.com/smartcontractkit/chainlink-common/pkg/config" + "github.com/smartcontractkit/chainlink-common/pkg/loop" + "github.com/smartcontractkit/chainlink-common/pkg/utils/mailbox" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + coretypes "github.com/smartcontractkit/chainlink-common/pkg/types/core/mocks" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" + v2 "github.com/smartcontractkit/chainlink/v2/core/chains/evm/config/toml" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils" + evmUtils "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils/big" + "github.com/smartcontractkit/chainlink/v2/core/chains/legacyevm" + configv2 "github.com/smartcontractkit/chainlink/v2/core/config/toml" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/commit_store" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_offramp" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_onramp" + "github.com/smartcontractkit/chainlink/v2/core/internal/cltest/heavyweight" + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/logger/audit" + "github.com/smartcontractkit/chainlink/v2/core/services/chainlink" + feeds2 "github.com/smartcontractkit/chainlink/v2/core/services/feeds" + feedsMocks 
"github.com/smartcontractkit/chainlink/v2/core/services/feeds/mocks" + pb "github.com/smartcontractkit/chainlink/v2/core/services/feeds/proto" + "github.com/smartcontractkit/chainlink/v2/core/services/job" + "github.com/smartcontractkit/chainlink/v2/core/services/keystore" + "github.com/smartcontractkit/chainlink/v2/core/services/keystore/chaintype" + "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/csakey" + "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/ocr2key" + ksMocks "github.com/smartcontractkit/chainlink/v2/core/services/keystore/mocks" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_5_0" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/testhelpers" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/validate" + "github.com/smartcontractkit/chainlink/v2/core/services/ocrbootstrap" + evmrelay "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm" + clutils "github.com/smartcontractkit/chainlink/v2/core/utils" + "github.com/smartcontractkit/chainlink/v2/core/utils/crypto" + "github.com/smartcontractkit/chainlink/v2/plugins" +) + +const ( + execSpecTemplate = ` + type = "offchainreporting2" + schemaVersion = 1 + name = "ccip-exec-1" + externalJobID = "67ffad71-d90f-4fe3-b4e4-494924b707fb" + forwardingAllowed = false + maxTaskDuration = "0s" + contractID = "%s" + contractConfigConfirmations = 1 + contractConfigTrackerPollInterval = "20s" + ocrKeyBundleID = "%s" + relay = "evm" + pluginType = "ccip-execution" + transmitterID = "%s" + + [relayConfig] + chainID = 1_337 + + [pluginConfig] + destStartBlock = 50 + + [pluginConfig.USDCConfig] + AttestationAPI = "http://blah.com" + SourceMessageTransmitterAddress = "%s" + SourceTokenAddress = "%s" + AttestationAPITimeoutSeconds = 10 + ` + commitSpecTemplatePipeline = ` + type = "offchainreporting2" + schemaVersion = 1 + name = "ccip-commit-1" + externalJobID = "13c997cf-1a14-4ab7-9068-07ee6d2afa55" + forwardingAllowed = false + maxTaskDuration = "0s" + contractID = "%s" + contractConfigConfirmations = 1 + contractConfigTrackerPollInterval = "20s" + ocrKeyBundleID = "%s" + relay = "evm" + pluginType = "ccip-commit" + transmitterID = "%s" + + [relayConfig] + chainID = 1_337 + + [pluginConfig] + destStartBlock = 50 + offRamp = "%s" + tokenPricesUSDPipeline = """ + %s + """ + ` + commitSpecTemplateDynamicPriceGetter = ` + type = "offchainreporting2" + schemaVersion = 1 + name = "ccip-commit-1" + externalJobID = "13c997cf-1a14-4ab7-9068-07ee6d2afa55" + forwardingAllowed = false + maxTaskDuration = "0s" + contractID = "%s" + contractConfigConfirmations = 1 + contractConfigTrackerPollInterval = "20s" + ocrKeyBundleID = "%s" + relay = "evm" + pluginType = "ccip-commit" + transmitterID = "%s" + + [relayConfig] + chainID = 1_337 + + [pluginConfig] + destStartBlock = 50 + offRamp = "%s" + priceGetterConfig = """ + %s + """ + ` +) + +type Node struct { + App chainlink.Application + Transmitter common.Address + PaymentReceiver common.Address + KeyBundle ocr2key.KeyBundle +} + +func (node *Node) FindJobIDForContract(t *testing.T, addr 
common.Address) int32 { + jobs := node.App.JobSpawner().ActiveJobs() + for _, j := range jobs { + if j.Type == job.OffchainReporting2 && j.OCR2OracleSpec.ContractID == addr.Hex() { + return j.ID + } + } + t.Fatalf("Could not find job for contract %s", addr.Hex()) + return 0 +} + +func (node *Node) EventuallyNodeUsesUpdatedPriceRegistry(t *testing.T, ccipContracts CCIPIntegrationTestHarness) logpoller.Log { + c, err := node.App.GetRelayers().LegacyEVMChains().Get(strconv.FormatUint(ccipContracts.Dest.ChainID, 10)) + require.NoError(t, err) + var log logpoller.Log + gomega.NewGomegaWithT(t).Eventually(func() bool { + ccipContracts.Source.Chain.Commit() + ccipContracts.Dest.Chain.Commit() + log, err := c.LogPoller().LatestLogByEventSigWithConfs( + testutils.Context(t), + v1_0_0.UsdPerUnitGasUpdated, + ccipContracts.Dest.PriceRegistry.Address(), + 0, + ) + // err can be transient errors such as sql row set empty + if err != nil { + return false + } + return log != nil + }, testutils.WaitTimeout(t), 1*time.Second).Should(gomega.BeTrue(), "node is not using updated price registry %s", ccipContracts.Dest.PriceRegistry.Address().Hex()) + return log +} + +func (node *Node) EventuallyNodeUsesNewCommitConfig(t *testing.T, ccipContracts CCIPIntegrationTestHarness, commitCfg ccipdata.CommitOnchainConfig) logpoller.Log { + c, err := node.App.GetRelayers().LegacyEVMChains().Get(strconv.FormatUint(ccipContracts.Dest.ChainID, 10)) + require.NoError(t, err) + var log logpoller.Log + gomega.NewGomegaWithT(t).Eventually(func() bool { + ccipContracts.Source.Chain.Commit() + ccipContracts.Dest.Chain.Commit() + log, err := c.LogPoller().LatestLogByEventSigWithConfs( + testutils.Context(t), + evmrelay.OCR2AggregatorLogDecoder.EventSig(), + ccipContracts.Dest.CommitStore.Address(), + 0, + ) + require.NoError(t, err) + var latestCfg ccipdata.CommitOnchainConfig + if log != nil { + latestCfg, err = DecodeCommitOnChainConfig(log.Data) + require.NoError(t, err) + return latestCfg == commitCfg + } + return false + }, testutils.WaitTimeout(t), 1*time.Second).Should(gomega.BeTrue(), "node is using old cfg") + return log +} + +func (node *Node) EventuallyNodeUsesNewExecConfig(t *testing.T, ccipContracts CCIPIntegrationTestHarness, execCfg v1_5_0.ExecOnchainConfig) logpoller.Log { + c, err := node.App.GetRelayers().LegacyEVMChains().Get(strconv.FormatUint(ccipContracts.Dest.ChainID, 10)) + require.NoError(t, err) + var log logpoller.Log + gomega.NewGomegaWithT(t).Eventually(func() bool { + ccipContracts.Source.Chain.Commit() + ccipContracts.Dest.Chain.Commit() + log, err := c.LogPoller().LatestLogByEventSigWithConfs( + testutils.Context(t), + evmrelay.OCR2AggregatorLogDecoder.EventSig(), + ccipContracts.Dest.OffRamp.Address(), + 0, + ) + require.NoError(t, err) + var latestCfg v1_5_0.ExecOnchainConfig + if log != nil { + latestCfg, err = DecodeExecOnChainConfig(log.Data) + require.NoError(t, err) + return latestCfg == execCfg + } + return false + }, testutils.WaitTimeout(t), 1*time.Second).Should(gomega.BeTrue(), "node is using old cfg") + return log +} + +func (node *Node) EventuallyHasReqSeqNum(t *testing.T, ccipContracts *CCIPIntegrationTestHarness, onRamp common.Address, seqNum int) logpoller.Log { + c, err := node.App.GetRelayers().LegacyEVMChains().Get(strconv.FormatUint(ccipContracts.Source.ChainID, 10)) + require.NoError(t, err) + var log logpoller.Log + gomega.NewGomegaWithT(t).Eventually(func() bool { + ccipContracts.Source.Chain.Commit() + ccipContracts.Dest.Chain.Commit() + lgs, err := 
c.LogPoller().LogsDataWordRange( + testutils.Context(t), + v1_2_0.CCIPSendRequestEventSig, + onRamp, + v1_2_0.CCIPSendRequestSeqNumIndex, + abihelpers.EvmWord(uint64(seqNum)), + abihelpers.EvmWord(uint64(seqNum)), + 1, + ) + require.NoError(t, err) + t.Log("Send requested", len(lgs)) + if len(lgs) == 1 { + log = lgs[0] + return true + } + return false + }, testutils.WaitTimeout(t), 1*time.Second).Should(gomega.BeTrue(), "eventually has seq num") + return log +} + +func (node *Node) EventuallyHasExecutedSeqNums(t *testing.T, ccipContracts *CCIPIntegrationTestHarness, offRamp common.Address, minSeqNum int, maxSeqNum int) []logpoller.Log { + c, err := node.App.GetRelayers().LegacyEVMChains().Get(strconv.FormatUint(ccipContracts.Dest.ChainID, 10)) + require.NoError(t, err) + var logs []logpoller.Log + gomega.NewGomegaWithT(t).Eventually(func() bool { + ccipContracts.Source.Chain.Commit() + ccipContracts.Dest.Chain.Commit() + lgs, err := c.LogPoller().IndexedLogsTopicRange( + testutils.Context(t), + v1_0_0.ExecutionStateChangedEvent, + offRamp, + v1_0_0.ExecutionStateChangedSeqNrIndex, + abihelpers.EvmWord(uint64(minSeqNum)), + abihelpers.EvmWord(uint64(maxSeqNum)), + 1, + ) + require.NoError(t, err) + t.Logf("Have executed logs %d want %d", len(lgs), maxSeqNum-minSeqNum+1) + if len(lgs) == maxSeqNum-minSeqNum+1 { + logs = lgs + t.Logf("Seq Num %d-%d executed", minSeqNum, maxSeqNum) + return true + } + return false + }, testutils.WaitTimeout(t), 1*time.Second).Should(gomega.BeTrue(), "eventually has not executed seq num") + return logs +} + +func (node *Node) ConsistentlySeqNumHasNotBeenExecuted(t *testing.T, ccipContracts *CCIPIntegrationTestHarness, offRamp common.Address, seqNum int) logpoller.Log { + c, err := node.App.GetRelayers().LegacyEVMChains().Get(strconv.FormatUint(ccipContracts.Dest.ChainID, 10)) + require.NoError(t, err) + var log logpoller.Log + gomega.NewGomegaWithT(t).Consistently(func() bool { + ccipContracts.Source.Chain.Commit() + ccipContracts.Dest.Chain.Commit() + lgs, err := c.LogPoller().IndexedLogsTopicRange( + testutils.Context(t), + v1_0_0.ExecutionStateChangedEvent, + offRamp, + v1_0_0.ExecutionStateChangedSeqNrIndex, + abihelpers.EvmWord(uint64(seqNum)), + abihelpers.EvmWord(uint64(seqNum)), + 1, + ) + require.NoError(t, err) + t.Log("Executed logs", lgs) + if len(lgs) == 1 { + log = lgs[0] + return true + } + return false + }, 10*time.Second, 1*time.Second).Should(gomega.BeFalse(), "seq number got executed") + return log +} + +func (node *Node) AddJob(t *testing.T, spec *OCR2TaskJobSpec) { + specString, err := spec.String() + require.NoError(t, err) + ccipJob, err := validate.ValidatedOracleSpecToml( + testutils.Context(t), + node.App.GetConfig().OCR2(), + node.App.GetConfig().Insecure(), + specString, + // FIXME Ani + nil, + ) + require.NoError(t, err) + err = node.App.AddJobV2(context.Background(), &ccipJob) + require.NoError(t, err) +} + +func (node *Node) AddBootstrapJob(t *testing.T, spec *OCR2TaskJobSpec) { + specString, err := spec.String() + require.NoError(t, err) + ccipJob, err := ocrbootstrap.ValidatedBootstrapSpecToml(specString) + require.NoError(t, err) + err = node.App.AddJobV2(context.Background(), &ccipJob) + require.NoError(t, err) +} + +func (node *Node) AddJobsWithSpec(t *testing.T, jobSpec *OCR2TaskJobSpec) { + // set node specific values + jobSpec.OCR2OracleSpec.OCRKeyBundleID.SetValid(node.KeyBundle.ID()) + jobSpec.OCR2OracleSpec.TransmitterID.SetValid(node.Transmitter.Hex()) + node.AddJob(t, jobSpec) +} + +func setupNodeCCIP( + t *testing.T, 
+ owner *bind.TransactOpts, + port int64, + dbName string, + sourceChain *backends.SimulatedBackend, destChain *backends.SimulatedBackend, + sourceChainID *big.Int, destChainID *big.Int, + bootstrapPeerID string, + bootstrapPort int64, +) (chainlink.Application, string, common.Address, ocr2key.KeyBundle) { + trueRef, falseRef := true, false + + // Do not want to load fixtures as they contain a dummy chainID. + loglevel := configv2.LogLevel(zap.DebugLevel) + config, db := heavyweight.FullTestDBNoFixturesV2(t, func(c *chainlink.Config, _ *chainlink.Secrets) { + p2pAddresses := []string{ + fmt.Sprintf("127.0.0.1:%d", port), + } + c.Log.Level = &loglevel + c.Feature.UICSAKeys = &trueRef + c.Feature.FeedsManager = &trueRef + c.OCR.Enabled = &falseRef + c.OCR.DefaultTransactionQueueDepth = pointer.Uint32(200) + c.OCR2.Enabled = &trueRef + c.Feature.LogPoller = &trueRef + c.P2P.V2.Enabled = &trueRef + + dur, err := config.NewDuration(500 * time.Millisecond) + if err != nil { + panic(err) + } + c.P2P.V2.DeltaDial = &dur + + dur2, err := config.NewDuration(5 * time.Second) + if err != nil { + panic(err) + } + + c.P2P.V2.DeltaReconcile = &dur2 + c.P2P.V2.ListenAddresses = &p2pAddresses + c.P2P.V2.AnnounceAddresses = &p2pAddresses + + c.EVM = []*v2.EVMConfig{createConfigV2Chain(sourceChainID), createConfigV2Chain(destChainID)} + + if bootstrapPeerID != "" { + // Supply the bootstrap IP and port as a V2 peer address + c.P2P.V2.DefaultBootstrappers = &[]commontypes.BootstrapperLocator{ + { + PeerID: bootstrapPeerID, Addrs: []string{ + fmt.Sprintf("127.0.0.1:%d", bootstrapPort), + }, + }, + } + } + }) + + lggr := logger.TestLogger(t) + ctx := testutils.Context(t) + + // The in-memory geth sim does not let you create a custom ChainID, it will always be 1337. + // In particular this means that if you sign an eip155 tx, the chainID used MUST be 1337 + // and the CHAINID op code will always emit 1337. 
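// --- Illustrative sketch (not part of this patch) ---
// The config overrides above take addresses of local variables (trueRef, falseRef) or go through
// the k8s.io/utils/pointer helper (imported with a staticcheck nolint) to build *bool/*uint32
// values. A small generic helper expresses the same thing without the temporaries; the name ptr
// is invented here:
//
//	func ptr[T any](v T) *T { return &v }
//
//	// e.g. inside the override closure:
//	//   c.OCR2.Enabled = ptr(true)
//	//   c.OCR.DefaultTransactionQueueDepth = ptr(uint32(200))
// --- end of sketch ---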
To work around this to simulate a "multichain" + // test, we fake different chainIDs using the wrapped sim cltest.SimulatedBackend so the RPC + // appears to operate on different chainIDs and we use an EthKeyStoreSim wrapper which always + // signs 1337 see https://github.com/smartcontractkit/chainlink-ccip/blob/a24dd436810250a458d27d8bb3fb78096afeb79c/core/services/ocr2/plugins/ccip/testhelpers/simulated_backend.go#L35 + sourceClient := client.NewSimulatedBackendClient(t, sourceChain, sourceChainID) + destClient := client.NewSimulatedBackendClient(t, destChain, destChainID) + csaKeyStore := ksMocks.NewCSA(t) + + key, err := csakey.NewV2() + require.NoError(t, err) + csaKeyStore.On("GetAll").Return([]csakey.KeyV2{key}, nil) + keyStore := NewKsa(db, lggr, csaKeyStore) + + simEthKeyStore := testhelpers.EthKeyStoreSim{ + ETHKS: keyStore.Eth(), + CSAKS: keyStore.CSA(), + } + mailMon := mailbox.NewMonitor("CCIP", lggr.Named("Mailbox")) + evmOpts := chainlink.EVMFactoryConfig{ + ChainOpts: legacyevm.ChainOpts{ + AppConfig: config, + GenEthClient: func(chainID *big.Int) client.Client { + if chainID.String() == sourceChainID.String() { + return sourceClient + } else if chainID.String() == destChainID.String() { + return destClient + } + t.Fatalf("invalid chain ID %v", chainID.String()) + return nil + }, + MailMon: mailMon, + DS: db, + }, + CSAETHKeystore: simEthKeyStore, + } + loopRegistry := plugins.NewLoopRegistry(lggr.Named("LoopRegistry"), config.Tracing()) + relayerFactory := chainlink.RelayerFactory{ + Logger: lggr, + LoopRegistry: loopRegistry, + GRPCOpts: loop.GRPCOpts{}, + CapabilitiesRegistry: coretypes.NewCapabilitiesRegistry(t), + } + testCtx := testutils.Context(t) + // evm alway enabled for backward compatibility + initOps := []chainlink.CoreRelayerChainInitFunc{ + chainlink.InitEVM(testCtx, relayerFactory, evmOpts), + } + + relayChainInterops, err := chainlink.NewCoreRelayerChainInteroperators(initOps...) 
+ if err != nil { + t.Fatal(err) + } + + app, err := chainlink.NewApplication(chainlink.ApplicationOpts{ + Config: config, + DS: db, + KeyStore: keyStore, + RelayerChainInteroperators: relayChainInterops, + Logger: lggr, + ExternalInitiatorManager: nil, + CloseLogger: lggr.Sync, + UnrestrictedHTTPClient: &http.Client{}, + RestrictedHTTPClient: &http.Client{}, + AuditLogger: audit.NoopLogger, + MailMon: mailMon, + LoopRegistry: plugins.NewLoopRegistry(lggr, config.Tracing()), + }) + require.NoError(t, err) + require.NoError(t, app.GetKeyStore().Unlock(ctx, "password")) + _, err = app.GetKeyStore().P2P().Create(ctx) + require.NoError(t, err) + + p2pIDs, err := app.GetKeyStore().P2P().GetAll() + require.NoError(t, err) + require.Len(t, p2pIDs, 1) + peerID := p2pIDs[0].PeerID() + + _, err = app.GetKeyStore().Eth().Create(testCtx, destChainID) + require.NoError(t, err) + sendingKeys, err := app.GetKeyStore().Eth().EnabledKeysForChain(testCtx, destChainID) + require.NoError(t, err) + require.Len(t, sendingKeys, 1) + transmitter := sendingKeys[0].Address + s, err := app.GetKeyStore().Eth().GetState(testCtx, sendingKeys[0].ID(), destChainID) + require.NoError(t, err) + lggr.Debug(fmt.Sprintf("Transmitter address %s chainID %s", transmitter, s.EVMChainID.String())) + + // Fund the commitTransmitter address with some ETH + n, err := destChain.NonceAt(context.Background(), owner.From, nil) + require.NoError(t, err) + + tx := types3.NewTransaction(n, transmitter, big.NewInt(1000000000000000000), 21000, big.NewInt(1000000000), nil) + signedTx, err := owner.Signer(owner.From, tx) + require.NoError(t, err) + err = destChain.SendTransaction(context.Background(), signedTx) + require.NoError(t, err) + destChain.Commit() + + kb, err := app.GetKeyStore().OCR2().Create(ctx, chaintype.EVM) + require.NoError(t, err) + return app, peerID.Raw(), transmitter, kb +} + +func createConfigV2Chain(chainId *big.Int) *v2.EVMConfig { + // NOTE: For the executor jobs, the default of 500k is insufficient for a 3 message batch + defaultGasLimit := uint64(5000000) + tr := true + + sourceC := v2.Defaults((*evmUtils.Big)(chainId)) + sourceC.GasEstimator.LimitDefault = &defaultGasLimit + fixedPrice := "FixedPrice" + sourceC.GasEstimator.Mode = &fixedPrice + d, _ := config.NewDuration(100 * time.Millisecond) + sourceC.LogPollInterval = &d + fd := uint32(2) + sourceC.FinalityDepth = &fd + return &v2.EVMConfig{ + ChainID: (*evmUtils.Big)(chainId), + Enabled: &tr, + Chain: sourceC, + Nodes: v2.EVMNodes{&v2.Node{}}, + } +} + +type CCIPIntegrationTestHarness struct { + testhelpers.CCIPContracts + Nodes []Node + Bootstrap Node +} + +func SetupCCIPIntegrationTH(t *testing.T, sourceChainID, sourceChainSelector, destChainId, destChainSelector uint64) CCIPIntegrationTestHarness { + return CCIPIntegrationTestHarness{ + CCIPContracts: testhelpers.SetupCCIPContracts(t, sourceChainID, sourceChainSelector, destChainId, destChainSelector), + } +} + +func (c *CCIPIntegrationTestHarness) CreatePricesPipeline(t *testing.T) (string, *httptest.Server, *httptest.Server) { + linkUSD := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + _, err := w.Write([]byte(`{"UsdPerLink": "8000000000000000000"}`)) + require.NoError(t, err) + })) + t.Cleanup(linkUSD.Close) + + ethUSD := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + _, err := w.Write([]byte(`{"UsdPerETH": "1700000000000000000000"}`)) + require.NoError(t, err) + })) + t.Cleanup(ethUSD.Close) + + sourceWrappedNative, err := 
c.Source.Router.GetWrappedNative(nil) + require.NoError(t, err) + destWrappedNative, err := c.Dest.Router.GetWrappedNative(nil) + require.NoError(t, err) + tokenPricesUSDPipeline := fmt.Sprintf(` +// Price 1 +link [type=http method=GET url="%s"]; +link_parse [type=jsonparse path="UsdPerLink"]; +link->link_parse; +eth [type=http method=GET url="%s"]; +eth_parse [type=jsonparse path="UsdPerETH"]; +eth->eth_parse; +merge [type=merge left="{}" right="{\\\"%s\\\":$(link_parse), \\\"%s\\\":$(eth_parse), \\\"%s\\\":$(eth_parse)}"];`, + linkUSD.URL, ethUSD.URL, c.Dest.LinkToken.Address(), sourceWrappedNative, destWrappedNative) + + return tokenPricesUSDPipeline, linkUSD, ethUSD +} + +func (c *CCIPIntegrationTestHarness) AddAllJobs(t *testing.T, jobParams CCIPJobSpecParams) { + jobParams.OffRamp = c.Dest.OffRamp.Address() + + commitSpec, err := jobParams.CommitJobSpec() + require.NoError(t, err) + geExecutionSpec, err := jobParams.ExecutionJobSpec() + require.NoError(t, err) + nodes := c.Nodes + for _, node := range nodes { + node.AddJobsWithSpec(t, commitSpec) + node.AddJobsWithSpec(t, geExecutionSpec) + } +} + +func (c *CCIPIntegrationTestHarness) jobSpecProposal(t *testing.T, specTemplate string, f func() (*OCR2TaskJobSpec, error), feedsManagerId int64, version int32, opts ...any) feeds2.ProposeJobArgs { + spec, err := f() + require.NoError(t, err) + + args := []any{spec.OCR2OracleSpec.ContractID} + args = append(args, opts...) + + return feeds2.ProposeJobArgs{ + FeedsManagerID: feedsManagerId, + RemoteUUID: uuid.New(), + Multiaddrs: nil, + Version: version, + Spec: fmt.Sprintf(specTemplate, args...), + } +} + +func (c *CCIPIntegrationTestHarness) SetupFeedsManager(t *testing.T) { + ctx := testutils.Context(t) + for _, node := range c.Nodes { + f := node.App.GetFeedsService() + + managers, err := f.ListManagers(ctx) + require.NoError(t, err) + if len(managers) > 0 { + // Use at most one feeds manager, don't register if one already exists + continue + } + + secret := utils.RandomBytes32() + pkey, err := crypto.PublicKeyFromHex(hex.EncodeToString(secret[:])) + require.NoError(t, err) + + m := feeds2.RegisterManagerParams{ + Name: "CCIP", + URI: "http://localhost:8080", + PublicKey: *pkey, + } + + connManager := feedsMocks.NewConnectionsManager(t) + connManager.On("Connect", mock.Anything).Maybe() + connManager.On("GetClient", mock.Anything).Maybe().Return(NoopFeedsClient{}, nil) + connManager.On("Close").Maybe().Return() + connManager.On("IsConnected", mock.Anything).Maybe().Return(true) + f.Unsafe_SetConnectionsManager(connManager) + + _, err = f.RegisterManager(testutils.Context(t), m) + require.NoError(t, err) + } +} + +func (c *CCIPIntegrationTestHarness) ApproveJobSpecs(t *testing.T, jobParams CCIPJobSpecParams) { + ctx := testutils.Context(t) + + for _, node := range c.Nodes { + f := node.App.GetFeedsService() + managers, err := f.ListManagers(ctx) + require.NoError(t, err) + require.Len(t, managers, 1, "expected exactly one feeds manager") + + execSpec := c.jobSpecProposal( + t, + execSpecTemplate, + jobParams.ExecutionJobSpec, + managers[0].ID, + 1, + node.KeyBundle.ID(), + node.Transmitter.Hex(), + utils.RandomAddress().String(), + utils.RandomAddress().String(), + ) + execId, err := f.ProposeJob(ctx, &execSpec) + require.NoError(t, err) + + err = f.ApproveSpec(ctx, execId, true) + require.NoError(t, err) + + var commitSpec feeds2.ProposeJobArgs + if jobParams.TokenPricesUSDPipeline != "" { + commitSpec = c.jobSpecProposal( + t, + commitSpecTemplatePipeline, + jobParams.CommitJobSpec, + 
managers[0].ID, + 2, + node.KeyBundle.ID(), + node.Transmitter.Hex(), + jobParams.OffRamp.String(), + jobParams.TokenPricesUSDPipeline, + ) + } else { + commitSpec = c.jobSpecProposal( + t, + commitSpecTemplateDynamicPriceGetter, + jobParams.CommitJobSpec, + managers[0].ID, + 2, + node.KeyBundle.ID(), + node.Transmitter.Hex(), + jobParams.OffRamp.String(), + jobParams.PriceGetterConfig, + ) + } + + commitId, err := f.ProposeJob(ctx, &commitSpec) + require.NoError(t, err) + + err = f.ApproveSpec(ctx, commitId, true) + require.NoError(t, err) + } +} + +func (c *CCIPIntegrationTestHarness) AllNodesHaveReqSeqNum(t *testing.T, seqNum int, onRampOpts ...common.Address) logpoller.Log { + var log logpoller.Log + nodes := c.Nodes + var onRamp common.Address + if len(onRampOpts) > 0 { + onRamp = onRampOpts[0] + } else { + require.NotNil(t, c.Source.OnRamp, "no onramp configured") + onRamp = c.Source.OnRamp.Address() + } + for _, node := range nodes { + log = node.EventuallyHasReqSeqNum(t, c, onRamp, seqNum) + } + return log +} + +func (c *CCIPIntegrationTestHarness) AllNodesHaveExecutedSeqNums(t *testing.T, minSeqNum int, maxSeqNum int, offRampOpts ...common.Address) []logpoller.Log { + var logs []logpoller.Log + nodes := c.Nodes + var offRamp common.Address + + if len(offRampOpts) > 0 { + offRamp = offRampOpts[0] + } else { + require.NotNil(t, c.Dest.OffRamp, "no offramp configured") + offRamp = c.Dest.OffRamp.Address() + } + for _, node := range nodes { + logs = node.EventuallyHasExecutedSeqNums(t, c, offRamp, minSeqNum, maxSeqNum) + } + return logs +} + +func (c *CCIPIntegrationTestHarness) NoNodesHaveExecutedSeqNum(t *testing.T, seqNum int, offRampOpts ...common.Address) logpoller.Log { + var log logpoller.Log + nodes := c.Nodes + var offRamp common.Address + if len(offRampOpts) > 0 { + offRamp = offRampOpts[0] + } else { + require.NotNil(t, c.Dest.OffRamp, "no offramp configured") + offRamp = c.Dest.OffRamp.Address() + } + for _, node := range nodes { + log = node.ConsistentlySeqNumHasNotBeenExecuted(t, c, offRamp, seqNum) + } + return log +} + +func (c *CCIPIntegrationTestHarness) EventuallyCommitReportAccepted(t *testing.T, currentBlock uint64, commitStoreOpts ...common.Address) commit_store.CommitStoreCommitReport { + var commitStore *commit_store.CommitStore + var err error + if len(commitStoreOpts) > 0 { + commitStore, err = commit_store.NewCommitStore(commitStoreOpts[0], c.Dest.Chain) + require.NoError(t, err) + } else { + require.NotNil(t, c.Dest.CommitStore, "no commitStore configured") + commitStore = c.Dest.CommitStore + } + g := gomega.NewGomegaWithT(t) + var report commit_store.CommitStoreCommitReport + g.Eventually(func() bool { + it, err := commitStore.FilterReportAccepted(&bind.FilterOpts{Start: currentBlock}) + g.Expect(err).NotTo(gomega.HaveOccurred(), "Error filtering ReportAccepted event") + g.Expect(it.Next()).To(gomega.BeTrue(), "No ReportAccepted event found") + report = it.Event.Report + if report.MerkleRoot != [32]byte{} { + t.Log("Report Accepted by commitStore") + return true + } + return false + }, testutils.WaitTimeout(t), 1*time.Second).Should(gomega.BeTrue(), "report has not been committed") + return report +} + +func (c *CCIPIntegrationTestHarness) EventuallyExecutionStateChangedToSuccess(t *testing.T, seqNum []uint64, blockNum uint64, offRampOpts ...common.Address) { + var offRamp *evm_2_evm_offramp.EVM2EVMOffRamp + var err error + if len(offRampOpts) > 0 { + offRamp, err = evm_2_evm_offramp.NewEVM2EVMOffRamp(offRampOpts[0], c.Dest.Chain) + require.NoError(t, 
err) + } else { + require.NotNil(t, c.Dest.OffRamp, "no offRamp configured") + offRamp = c.Dest.OffRamp + } + gomega.NewGomegaWithT(t).Eventually(func() bool { + it, err := offRamp.FilterExecutionStateChanged(&bind.FilterOpts{Start: blockNum}, seqNum, [][32]byte{}) + require.NoError(t, err) + for it.Next() { + if cciptypes.MessageExecutionState(it.Event.State) == cciptypes.ExecutionStateSuccess { + t.Logf("ExecutionStateChanged event found for seqNum %d", it.Event.SequenceNumber) + return true + } + } + c.Source.Chain.Commit() + c.Dest.Chain.Commit() + return false + }, testutils.WaitTimeout(t), time.Second). + Should(gomega.BeTrue(), "ExecutionStateChanged Event") +} + +func (c *CCIPIntegrationTestHarness) EventuallyReportCommitted(t *testing.T, max int, commitStoreOpts ...common.Address) uint64 { + var commitStore *commit_store.CommitStore + var err error + var committedSeqNum uint64 + if len(commitStoreOpts) > 0 { + commitStore, err = commit_store.NewCommitStore(commitStoreOpts[0], c.Dest.Chain) + require.NoError(t, err) + } else { + require.NotNil(t, c.Dest.CommitStore, "no commitStore configured") + commitStore = c.Dest.CommitStore + } + gomega.NewGomegaWithT(t).Eventually(func() bool { + minSeqNum, err := commitStore.GetExpectedNextSequenceNumber(nil) + require.NoError(t, err) + c.Source.Chain.Commit() + c.Dest.Chain.Commit() + t.Log("next expected seq num reported", minSeqNum) + committedSeqNum = minSeqNum + return minSeqNum > uint64(max) + }, testutils.WaitTimeout(t), time.Second).Should(gomega.BeTrue(), "report has not been committed") + return committedSeqNum +} + +func (c *CCIPIntegrationTestHarness) EventuallySendRequested(t *testing.T, seqNum uint64, onRampOpts ...common.Address) { + var onRamp *evm_2_evm_onramp.EVM2EVMOnRamp + var err error + if len(onRampOpts) > 0 { + onRamp, err = evm_2_evm_onramp.NewEVM2EVMOnRamp(onRampOpts[0], c.Source.Chain) + require.NoError(t, err) + } else { + require.NotNil(t, c.Source.OnRamp, "no onRamp configured") + onRamp = c.Source.OnRamp + } + gomega.NewGomegaWithT(t).Eventually(func() bool { + it, err := onRamp.FilterCCIPSendRequested(nil) + require.NoError(t, err) + for it.Next() { + if it.Event.Message.SequenceNumber == seqNum { + t.Log("sendRequested generated for", seqNum) + return true + } + } + c.Source.Chain.Commit() + c.Dest.Chain.Commit() + return false + }, testutils.WaitTimeout(t), time.Second).Should(gomega.BeTrue(), "sendRequested has not been generated") +} + +func (c *CCIPIntegrationTestHarness) ConsistentlyReportNotCommitted(t *testing.T, max int, commitStoreOpts ...common.Address) { + var commitStore *commit_store.CommitStore + var err error + if len(commitStoreOpts) > 0 { + commitStore, err = commit_store.NewCommitStore(commitStoreOpts[0], c.Dest.Chain) + require.NoError(t, err) + } else { + require.NotNil(t, c.Dest.CommitStore, "no commitStore configured") + commitStore = c.Dest.CommitStore + } + gomega.NewGomegaWithT(t).Consistently(func() bool { + minSeqNum, err := commitStore.GetExpectedNextSequenceNumber(nil) + require.NoError(t, err) + c.Source.Chain.Commit() + c.Dest.Chain.Commit() + t.Log("min seq num reported", minSeqNum) + return minSeqNum > uint64(max) + }, testutils.WaitTimeout(t), time.Second).Should(gomega.BeFalse(), "report has been committed") +} + +func (c *CCIPIntegrationTestHarness) SetupAndStartNodes(ctx context.Context, t *testing.T, bootstrapNodePort int64) (Node, []Node, int64) { + appBootstrap, bootstrapPeerID, bootstrapTransmitter, bootstrapKb := setupNodeCCIP(t, c.Dest.User, bootstrapNodePort, + 
"bootstrap_ccip", c.Source.Chain, c.Dest.Chain, big.NewInt(0).SetUint64(c.Source.ChainID), + big.NewInt(0).SetUint64(c.Dest.ChainID), "", 0) + var ( + oracles []confighelper.OracleIdentityExtra + nodes []Node + ) + err := appBootstrap.Start(ctx) + require.NoError(t, err) + t.Cleanup(func() { + require.NoError(t, appBootstrap.Stop()) + }) + bootstrapNode := Node{ + App: appBootstrap, + Transmitter: bootstrapTransmitter, + KeyBundle: bootstrapKb, + } + // Set up the minimum 4 oracles all funded with destination ETH + for i := int64(0); i < 4; i++ { + app, peerID, transmitter, kb := setupNodeCCIP( + t, + c.Dest.User, + int64(freeport.GetOne(t)), + fmt.Sprintf("oracle_ccip%d", i), + c.Source.Chain, + c.Dest.Chain, + big.NewInt(0).SetUint64(c.Source.ChainID), + big.NewInt(0).SetUint64(c.Dest.ChainID), + bootstrapPeerID, + bootstrapNodePort, + ) + nodes = append(nodes, Node{ + App: app, + Transmitter: transmitter, + KeyBundle: kb, + }) + offchainPublicKey, _ := hex.DecodeString(strings.TrimPrefix(kb.OnChainPublicKey(), "0x")) + oracles = append(oracles, confighelper.OracleIdentityExtra{ + OracleIdentity: confighelper.OracleIdentity{ + OnchainPublicKey: offchainPublicKey, + TransmitAccount: types4.Account(transmitter.String()), + OffchainPublicKey: kb.OffchainPublicKey(), + PeerID: peerID, + }, + ConfigEncryptionPublicKey: kb.ConfigEncryptionPublicKey(), + }) + err = app.Start(ctx) + require.NoError(t, err) + t.Cleanup(func() { + require.NoError(t, app.Stop()) + }) + } + + c.Oracles = oracles + commitOnchainConfig := c.CreateDefaultCommitOnchainConfig(t) + commitOffchainConfig := c.CreateDefaultCommitOffchainConfig(t) + execOnchainConfig := c.CreateDefaultExecOnchainConfig(t) + execOffchainConfig := c.CreateDefaultExecOffchainConfig(t) + + configBlock := c.SetupOnchainConfig(t, commitOnchainConfig, commitOffchainConfig, execOnchainConfig, execOffchainConfig) + c.Nodes = nodes + c.Bootstrap = bootstrapNode + return bootstrapNode, nodes, configBlock +} + +func (c *CCIPIntegrationTestHarness) SetUpNodesAndJobs(t *testing.T, pricePipeline string, priceGetterConfig string, usdcAttestationAPI string) CCIPJobSpecParams { + // setup Jobs + ctx := context.Background() + // Starts nodes and configures them in the OCR contracts. + bootstrapNode, _, configBlock := c.SetupAndStartNodes(ctx, t, int64(freeport.GetOne(t))) + + jobParams := c.NewCCIPJobSpecParams(pricePipeline, priceGetterConfig, configBlock, usdcAttestationAPI) + + // Add the bootstrap job + c.Bootstrap.AddBootstrapJob(t, jobParams.BootstrapJob(c.Dest.CommitStore.Address().Hex())) + c.AddAllJobs(t, jobParams) + + // Replay for bootstrap. 
+ bc, err := bootstrapNode.App.GetRelayers().LegacyEVMChains().Get(strconv.FormatUint(c.Dest.ChainID, 10)) + require.NoError(t, err) + require.NoError(t, bc.LogPoller().Replay(context.Background(), configBlock)) + c.Dest.Chain.Commit() + + return jobParams +} +func DecodeCommitOnChainConfig(encoded []byte) (ccipdata.CommitOnchainConfig, error) { + var onchainConfig ccipdata.CommitOnchainConfig + unpacked, err := abihelpers.DecodeOCR2Config(encoded) + if err != nil { + return onchainConfig, err + } + onChainCfg := unpacked.OnchainConfig + onchainConfig, err = abihelpers.DecodeAbiStruct[ccipdata.CommitOnchainConfig](onChainCfg) + if err != nil { + return onchainConfig, err + } + return onchainConfig, nil +} + +func DecodeExecOnChainConfig(encoded []byte) (v1_5_0.ExecOnchainConfig, error) { + var onchainConfig v1_5_0.ExecOnchainConfig + unpacked, err := abihelpers.DecodeOCR2Config(encoded) + if err != nil { + return onchainConfig, errors.Wrap(err, "failed to unpack log data") + } + onChainCfg := unpacked.OnchainConfig + onchainConfig, err = abihelpers.DecodeAbiStruct[v1_5_0.ExecOnchainConfig](onChainCfg) + if err != nil { + return onchainConfig, err + } + return onchainConfig, nil +} + +type ksa struct { + keystore.Master + csa keystore.CSA +} + +func (k *ksa) CSA() keystore.CSA { + return k.csa +} + +func NewKsa(db *sqlx.DB, lggr logger.Logger, csa keystore.CSA) *ksa { + return &ksa{ + Master: keystore.New(db, clutils.FastScryptParams, lggr), + csa: csa, + } +} + +type NoopFeedsClient struct{} + +func (n NoopFeedsClient) ApprovedJob(context.Context, *pb.ApprovedJobRequest) (*pb.ApprovedJobResponse, error) { + return &pb.ApprovedJobResponse{}, nil +} + +func (n NoopFeedsClient) Healthcheck(context.Context, *pb.HealthcheckRequest) (*pb.HealthcheckResponse, error) { + return &pb.HealthcheckResponse{}, nil +} + +func (n NoopFeedsClient) UpdateNode(context.Context, *pb.UpdateNodeRequest) (*pb.UpdateNodeResponse, error) { + return &pb.UpdateNodeResponse{}, nil +} + +func (n NoopFeedsClient) RejectedJob(context.Context, *pb.RejectedJobRequest) (*pb.RejectedJobResponse, error) { + return &pb.RejectedJobResponse{}, nil +} + +func (n NoopFeedsClient) CancelledJob(context.Context, *pb.CancelledJobRequest) (*pb.CancelledJobResponse, error) { + return &pb.CancelledJobResponse{}, nil +} diff --git a/core/services/ocr2/plugins/ccip/testhelpers/integration/jobspec.go b/core/services/ocr2/plugins/ccip/testhelpers/integration/jobspec.go new file mode 100644 index 00000000000..961e26d1cef --- /dev/null +++ b/core/services/ocr2/plugins/ccip/testhelpers/integration/jobspec.go @@ -0,0 +1,334 @@ +package integrationtesthelpers + +import ( + "bytes" + "fmt" + "text/template" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/lib/pq" + + "github.com/smartcontractkit/chainlink-common/pkg/types" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils" + "github.com/smartcontractkit/chainlink/v2/core/services/job" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/pricegetter" + "github.com/smartcontractkit/chainlink/v2/core/services/relay" + "github.com/smartcontractkit/chainlink/v2/core/store/models" +) + +// OCR2TaskJobSpec represents an OCR2 job that is given to other nodes, meant to communicate with the bootstrap node, +// and provide their answers +type OCR2TaskJobSpec struct { + Name string `toml:"name"` + JobType string `toml:"type"` + MaxTaskDuration string 
`toml:"maxTaskDuration"` // Optional + ForwardingAllowed bool `toml:"forwardingAllowed"` + OCR2OracleSpec job.OCR2OracleSpec + ObservationSource string `toml:"observationSource"` // List of commands for the Chainlink node +} + +// Type returns the type of the job +func (o *OCR2TaskJobSpec) Type() string { return o.JobType } + +// String representation of the job +func (o *OCR2TaskJobSpec) String() (string, error) { + var feedID string + if o.OCR2OracleSpec.FeedID != nil { + feedID = o.OCR2OracleSpec.FeedID.Hex() + } + specWrap := struct { + Name string + JobType string + MaxTaskDuration string + ForwardingAllowed bool + ContractID string + FeedID string + Relay string + PluginType string + RelayConfig map[string]interface{} + PluginConfig map[string]interface{} + P2PV2Bootstrappers []string + OCRKeyBundleID string + MonitoringEndpoint string + TransmitterID string + BlockchainTimeout time.Duration + TrackerSubscribeInterval time.Duration + TrackerPollInterval time.Duration + ContractConfirmations uint16 + ObservationSource string + }{ + Name: o.Name, + JobType: o.JobType, + ForwardingAllowed: o.ForwardingAllowed, + MaxTaskDuration: o.MaxTaskDuration, + ContractID: o.OCR2OracleSpec.ContractID, + FeedID: feedID, + Relay: o.OCR2OracleSpec.Relay, + PluginType: string(o.OCR2OracleSpec.PluginType), + RelayConfig: o.OCR2OracleSpec.RelayConfig, + PluginConfig: o.OCR2OracleSpec.PluginConfig, + P2PV2Bootstrappers: o.OCR2OracleSpec.P2PV2Bootstrappers, + OCRKeyBundleID: o.OCR2OracleSpec.OCRKeyBundleID.String, + MonitoringEndpoint: o.OCR2OracleSpec.MonitoringEndpoint.String, + TransmitterID: o.OCR2OracleSpec.TransmitterID.String, + BlockchainTimeout: o.OCR2OracleSpec.BlockchainTimeout.Duration(), + ContractConfirmations: o.OCR2OracleSpec.ContractConfigConfirmations, + TrackerPollInterval: o.OCR2OracleSpec.ContractConfigTrackerPollInterval.Duration(), + ObservationSource: o.ObservationSource, + } + ocr2TemplateString := ` +type = "{{ .JobType }}" +name = "{{.Name}}" +forwardingAllowed = {{.ForwardingAllowed}} +{{if .MaxTaskDuration}} +maxTaskDuration = "{{ .MaxTaskDuration }}" {{end}} +{{if .PluginType}} +pluginType = "{{ .PluginType }}" {{end}} +relay = "{{.Relay}}" +schemaVersion = 1 +contractID = "{{.ContractID}}" +{{if .FeedID}} +feedID = "{{.FeedID}}" +{{end}} +{{if eq .JobType "offchainreporting2" }} +ocrKeyBundleID = "{{.OCRKeyBundleID}}" {{end}} +{{if eq .JobType "offchainreporting2" }} +transmitterID = "{{.TransmitterID}}" {{end}} +{{if .BlockchainTimeout}} +blockchainTimeout = "{{.BlockchainTimeout}}" +{{end}} +{{if .ContractConfirmations}} +contractConfigConfirmations = {{.ContractConfirmations}} +{{end}} +{{if .TrackerPollInterval}} +contractConfigTrackerPollInterval = "{{.TrackerPollInterval}}" +{{end}} +{{if .TrackerSubscribeInterval}} +contractConfigTrackerSubscribeInterval = "{{.TrackerSubscribeInterval}}" +{{end}} +{{if .P2PV2Bootstrappers}} +p2pv2Bootstrappers = [{{range .P2PV2Bootstrappers}}"{{.}}",{{end}}]{{end}} +{{if .MonitoringEndpoint}} +monitoringEndpoint = "{{.MonitoringEndpoint}}" {{end}} +{{if .ObservationSource}} +observationSource = """ +{{.ObservationSource}} +"""{{end}} +{{if eq .JobType "offchainreporting2" }} +[pluginConfig]{{range $key, $value := .PluginConfig}} +{{$key}} = {{$value}}{{end}} +{{end}} +[relayConfig]{{range $key, $value := .RelayConfig}} +{{$key}} = {{$value}}{{end}} +` + return MarshallTemplate(specWrap, "OCR2 Job", ocr2TemplateString) +} + +// MarshallTemplate Helper to marshall templates +func MarshallTemplate(jobSpec interface{}, name, templateString 
string) (string, error) { + var buf bytes.Buffer + tmpl, err := template.New(name).Parse(templateString) + if err != nil { + return "", err + } + err = tmpl.Execute(&buf, jobSpec) + if err != nil { + return "", err + } + return buf.String(), err +} + +type JobType string + +const ( + Commit JobType = "commit" + Execution JobType = "exec" + Boostrap JobType = "bootstrap" +) + +func JobName(jobType JobType, source string, destination, version string) string { + if version != "" { + return fmt.Sprintf("ccip-%s-%s-%s-%s", jobType, source, destination, version) + } + return fmt.Sprintf("ccip-%s-%s-%s", jobType, source, destination) +} + +type CCIPJobSpecParams struct { + Name string + Version string + OffRamp common.Address + CommitStore common.Address + SourceChainName string + DestChainName string + DestEvmChainId uint64 + TokenPricesUSDPipeline string + PriceGetterConfig string + SourceStartBlock uint64 + DestStartBlock uint64 + USDCAttestationAPI string + USDCConfig *config.USDCConfig + P2PV2Bootstrappers pq.StringArray +} + +func (params CCIPJobSpecParams) Validate() error { + if params.CommitStore == common.HexToAddress("0x0") { + return fmt.Errorf("must set commit store address") + } + return nil +} + +func (params CCIPJobSpecParams) ValidateCommitJobSpec() error { + commonErr := params.Validate() + if commonErr != nil { + return commonErr + } + if params.OffRamp == common.HexToAddress("0x0") { + return fmt.Errorf("OffRamp cannot be empty for execution job") + } + // Validate token prices config + // NB: only validate the dynamic price getter config if present since we could also be using the pipeline instead. + // NB: make this test mandatory once we switch to dynamic price getter only. + if params.PriceGetterConfig != "" { + if _, err := pricegetter.NewDynamicPriceGetterConfig(params.PriceGetterConfig); err != nil { + return fmt.Errorf("invalid price getter config: %w", err) + } + } + return nil +} + +func (params CCIPJobSpecParams) ValidateExecJobSpec() error { + commonErr := params.Validate() + if commonErr != nil { + return commonErr + } + if params.OffRamp == common.HexToAddress("0x0") { + return fmt.Errorf("OffRamp cannot be empty for execution job") + } + return nil +} + +// CommitJobSpec generates template for CCIP-relay job spec. 
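// --- Illustrative sketch (not part of this patch) ---
// OCR2TaskJobSpec.String and MarshallTemplate above render TOML job specs by executing a
// text/template against a wrapper struct. The same pattern in miniature; the type and function
// names below are invented, and only packages this file already imports (bytes, text/template)
// are used.
type miniSpec struct {
	Name       string
	JobType    string
	ContractID string
}

func renderMiniSpec(s miniSpec) (string, error) {
	const tmpl = `type = "{{ .JobType }}"
name = "{{ .Name }}"
schemaVersion = 1
contractID = "{{ .ContractID }}"
`
	t, err := template.New("mini-spec").Parse(tmpl)
	if err != nil {
		return "", err
	}
	var buf bytes.Buffer
	if err := t.Execute(&buf, s); err != nil {
		return "", err
	}
	return buf.String(), nil
}

// e.g. renderMiniSpec(miniSpec{Name: "ccip-commit-1", JobType: "offchainreporting2", ContractID: "0x..."})
// yields a TOML fragment analogous to the specs produced by CommitJobSpec/ExecutionJobSpec below.
// --- end of sketch ---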
+// OCRKeyBundleID,TransmitterID need to be set from the calling function +func (params CCIPJobSpecParams) CommitJobSpec() (*OCR2TaskJobSpec, error) { + err := params.ValidateCommitJobSpec() + if err != nil { + return nil, fmt.Errorf("invalid job spec params: %w", err) + } + + pluginConfig := map[string]interface{}{ + "offRamp": fmt.Sprintf(`"%s"`, params.OffRamp.Hex()), + } + if params.TokenPricesUSDPipeline != "" { + pluginConfig["tokenPricesUSDPipeline"] = fmt.Sprintf(`""" +%s +"""`, params.TokenPricesUSDPipeline) + } + if params.PriceGetterConfig != "" { + pluginConfig["priceGetterConfig"] = fmt.Sprintf(`""" +%s +"""`, params.PriceGetterConfig) + } + + ocrSpec := job.OCR2OracleSpec{ + Relay: relay.NetworkEVM, + PluginType: types.CCIPCommit, + ContractID: params.CommitStore.Hex(), + ContractConfigConfirmations: 1, + ContractConfigTrackerPollInterval: models.Interval(20 * time.Second), + P2PV2Bootstrappers: params.P2PV2Bootstrappers, + PluginConfig: pluginConfig, + RelayConfig: map[string]interface{}{ + "chainID": params.DestEvmChainId, + }, + } + if params.DestStartBlock > 0 { + ocrSpec.PluginConfig["destStartBlock"] = params.DestStartBlock + } + if params.SourceStartBlock > 0 { + ocrSpec.PluginConfig["sourceStartBlock"] = params.SourceStartBlock + } + return &OCR2TaskJobSpec{ + OCR2OracleSpec: ocrSpec, + JobType: "offchainreporting2", + Name: JobName(Commit, params.SourceChainName, params.DestChainName, params.Version), + }, nil +} + +// ExecutionJobSpec generates template for CCIP-execution job spec. +// OCRKeyBundleID,TransmitterID need to be set from the calling function +func (params CCIPJobSpecParams) ExecutionJobSpec() (*OCR2TaskJobSpec, error) { + err := params.ValidateExecJobSpec() + if err != nil { + return nil, err + } + ocrSpec := job.OCR2OracleSpec{ + Relay: relay.NetworkEVM, + PluginType: types.CCIPExecution, + ContractID: params.OffRamp.Hex(), + ContractConfigConfirmations: 1, + ContractConfigTrackerPollInterval: models.Interval(20 * time.Second), + + P2PV2Bootstrappers: params.P2PV2Bootstrappers, + PluginConfig: map[string]interface{}{}, + RelayConfig: map[string]interface{}{ + "chainID": params.DestEvmChainId, + }, + } + if params.DestStartBlock > 0 { + ocrSpec.PluginConfig["destStartBlock"] = params.DestStartBlock + } + if params.SourceStartBlock > 0 { + ocrSpec.PluginConfig["sourceStartBlock"] = params.SourceStartBlock + } + if params.USDCAttestationAPI != "" { + ocrSpec.PluginConfig["USDCConfig.AttestationAPI"] = fmt.Sprintf("\"%s\"", params.USDCAttestationAPI) + ocrSpec.PluginConfig["USDCConfig.SourceTokenAddress"] = fmt.Sprintf("\"%s\"", utils.RandomAddress().String()) + ocrSpec.PluginConfig["USDCConfig.SourceMessageTransmitterAddress"] = fmt.Sprintf("\"%s\"", utils.RandomAddress().String()) + ocrSpec.PluginConfig["USDCConfig.AttestationAPITimeoutSeconds"] = 5 + } + if params.USDCConfig != nil { + ocrSpec.PluginConfig["USDCConfig.AttestationAPI"] = fmt.Sprintf(`"%s"`, params.USDCConfig.AttestationAPI) + ocrSpec.PluginConfig["USDCConfig.SourceTokenAddress"] = fmt.Sprintf(`"%s"`, params.USDCConfig.SourceTokenAddress) + ocrSpec.PluginConfig["USDCConfig.SourceMessageTransmitterAddress"] = fmt.Sprintf(`"%s"`, params.USDCConfig.SourceMessageTransmitterAddress) + ocrSpec.PluginConfig["USDCConfig.AttestationAPITimeoutSeconds"] = params.USDCConfig.AttestationAPITimeoutSeconds + } + return &OCR2TaskJobSpec{ + OCR2OracleSpec: ocrSpec, + JobType: "offchainreporting2", + Name: JobName(Execution, params.SourceChainName, params.DestChainName, params.Version), + }, err +} + +func 
(params CCIPJobSpecParams) BootstrapJob(contractID string) *OCR2TaskJobSpec { + bootstrapSpec := job.OCR2OracleSpec{ + ContractID: contractID, + Relay: relay.NetworkEVM, + ContractConfigConfirmations: 1, + ContractConfigTrackerPollInterval: models.Interval(20 * time.Second), + RelayConfig: map[string]interface{}{ + "chainID": params.DestEvmChainId, + }, + } + return &OCR2TaskJobSpec{ + Name: fmt.Sprintf("%s-%s", Boostrap, params.DestChainName), + JobType: "bootstrap", + OCR2OracleSpec: bootstrapSpec, + } +} + +func (c *CCIPIntegrationTestHarness) NewCCIPJobSpecParams(tokenPricesUSDPipeline string, priceGetterConfig string, configBlock int64, usdcAttestationAPI string) CCIPJobSpecParams { + return CCIPJobSpecParams{ + CommitStore: c.Dest.CommitStore.Address(), + OffRamp: c.Dest.OffRamp.Address(), + DestEvmChainId: c.Dest.ChainID, + SourceChainName: "SimulatedSource", + DestChainName: "SimulatedDest", + TokenPricesUSDPipeline: tokenPricesUSDPipeline, + PriceGetterConfig: priceGetterConfig, + DestStartBlock: uint64(configBlock), + USDCAttestationAPI: usdcAttestationAPI, + } +} diff --git a/core/services/ocr2/plugins/ccip/testhelpers/offramp.go b/core/services/ocr2/plugins/ccip/testhelpers/offramp.go new file mode 100644 index 00000000000..d10e693325d --- /dev/null +++ b/core/services/ocr2/plugins/ccip/testhelpers/offramp.go @@ -0,0 +1,119 @@ +package testhelpers + +import ( + "sync" + "testing" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_offramp" + mock_contracts "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/mocks" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc" +) + +type FakeOffRamp struct { + *mock_contracts.EVM2EVMOffRampInterface + + rateLimiterState cciptypes.TokenBucketRateLimit + senderNonces map[common.Address]uint64 + tokenToPool map[common.Address]common.Address + dynamicConfig evm_2_evm_offramp.EVM2EVMOffRampDynamicConfig + sourceToDestTokens map[common.Address]common.Address + + mu sync.RWMutex +} + +func NewFakeOffRamp(t *testing.T) (*FakeOffRamp, common.Address) { + addr := utils.RandomAddress() + mockOffRamp := mock_contracts.NewEVM2EVMOffRampInterface(t) + mockOffRamp.On("Address").Return(addr).Maybe() + + offRamp := &FakeOffRamp{EVM2EVMOffRampInterface: mockOffRamp} + return offRamp, addr +} + +func (o *FakeOffRamp) CurrentRateLimiterState(opts *bind.CallOpts) (cciptypes.TokenBucketRateLimit, error) { + return getOffRampVal(o, func(o *FakeOffRamp) (cciptypes.TokenBucketRateLimit, error) { return o.rateLimiterState, nil }) +} + +func (o *FakeOffRamp) SetRateLimiterState(state cciptypes.TokenBucketRateLimit) { + setOffRampVal(o, func(o *FakeOffRamp) { o.rateLimiterState = state }) +} + +func (o *FakeOffRamp) GetSenderNonce(opts *bind.CallOpts, sender common.Address) (uint64, error) { + return getOffRampVal(o, func(o *FakeOffRamp) (uint64, error) { return o.senderNonces[sender], nil }) +} + +func (o *FakeOffRamp) SetSenderNonces(senderNonces map[cciptypes.Address]uint64) { + evmSenderNonces := make(map[common.Address]uint64) + for k, v := range senderNonces { + addrs, _ := ccipcalc.GenericAddrsToEvm(k) + evmSenderNonces[addrs[0]] = v + } + + setOffRampVal(o, func(o *FakeOffRamp) { o.senderNonces = 
evmSenderNonces }) +} + +func (o *FakeOffRamp) GetPoolByDestToken(opts *bind.CallOpts, destToken common.Address) (common.Address, error) { + return getOffRampVal(o, func(o *FakeOffRamp) (common.Address, error) { + addr, exists := o.tokenToPool[destToken] + if !exists { + return common.Address{}, errors.New("not found") + } + return addr, nil + }) +} + +func (o *FakeOffRamp) SetTokenPools(tokenToPool map[common.Address]common.Address) { + setOffRampVal(o, func(o *FakeOffRamp) { o.tokenToPool = tokenToPool }) +} + +func (o *FakeOffRamp) GetDynamicConfig(opts *bind.CallOpts) (evm_2_evm_offramp.EVM2EVMOffRampDynamicConfig, error) { + return getOffRampVal(o, func(o *FakeOffRamp) (evm_2_evm_offramp.EVM2EVMOffRampDynamicConfig, error) { + return o.dynamicConfig, nil + }) +} + +func (o *FakeOffRamp) SetDynamicConfig(cfg evm_2_evm_offramp.EVM2EVMOffRampDynamicConfig) { + setOffRampVal(o, func(o *FakeOffRamp) { o.dynamicConfig = cfg }) +} + +func (o *FakeOffRamp) SetSourceToDestTokens(m map[common.Address]common.Address) { + setOffRampVal(o, func(o *FakeOffRamp) { o.sourceToDestTokens = m }) +} + +func (o *FakeOffRamp) GetSupportedTokens(opts *bind.CallOpts) ([]common.Address, error) { + return getOffRampVal(o, func(o *FakeOffRamp) ([]common.Address, error) { + tks := make([]common.Address, 0, len(o.sourceToDestTokens)) + for tk := range o.sourceToDestTokens { + tks = append(tks, tk) + } + return tks, nil + }) +} + +func (o *FakeOffRamp) GetDestinationTokens(opts *bind.CallOpts) ([]common.Address, error) { + return getOffRampVal(o, func(o *FakeOffRamp) ([]common.Address, error) { + tokens := make([]common.Address, 0, len(o.sourceToDestTokens)) + for _, dst := range o.sourceToDestTokens { + tokens = append(tokens, dst) + } + return tokens, nil + }) +} + +func getOffRampVal[T any](o *FakeOffRamp, getter func(o *FakeOffRamp) (T, error)) (T, error) { + o.mu.RLock() + defer o.mu.RUnlock() + return getter(o) +} + +func setOffRampVal(o *FakeOffRamp, setter func(o *FakeOffRamp)) { + o.mu.Lock() + defer o.mu.Unlock() + setter(o) +} diff --git a/core/services/ocr2/plugins/ccip/testhelpers/simulated_backend.go b/core/services/ocr2/plugins/ccip/testhelpers/simulated_backend.go new file mode 100644 index 00000000000..ea91362aaae --- /dev/null +++ b/core/services/ocr2/plugins/ccip/testhelpers/simulated_backend.go @@ -0,0 +1,75 @@ +package testhelpers + +import ( + "context" + "math/big" + "testing" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + ethtypes "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/eth/ethconfig" + "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/chainlink/v2/core/services/keystore" +) + +// FirstBlockAge is used to compute first block's timestamp in SimulatedBackend (time.Now() - FirstBlockAge) +const FirstBlockAge = 24 * time.Hour + +func SetupChain(t *testing.T) (*backends.SimulatedBackend, *bind.TransactOpts) { + key, err := crypto.GenerateKey() + require.NoError(t, err) + user, err := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) + require.NoError(t, err) + chain := backends.NewSimulatedBackend(core.GenesisAlloc{ + user.From: {Balance: new(big.Int).Mul(big.NewInt(1000), big.NewInt(1e18))}}, + ethconfig.Defaults.Miner.GasCeil) + // CCIP relies on block timestamps, but SimulatedBackend uses by default clock starting from 
1970-01-01 + // This trick is used to move the clock closer to the current time. We set first block to be X hours ago. + // Tests create plenty of transactions so this number can't be too low, every new block mined will tick the clock, + // if you mine more than "X hours" transactions, SimulatedBackend will panic because generated timestamps will be in the future. + // IMPORTANT: Any adjustments to FirstBlockAge will automatically update PermissionLessExecutionThresholdSeconds in tests + blockTime := time.UnixMilli(int64(chain.Blockchain().CurrentHeader().Time)) + err = chain.AdjustTime(time.Since(blockTime) - FirstBlockAge) + require.NoError(t, err) + chain.Commit() + return chain, user +} + +type EthKeyStoreSim struct { + ETHKS keystore.Eth + CSAKS keystore.CSA +} + +func (ks EthKeyStoreSim) CSA() keystore.CSA { + return ks.CSAKS +} + +func (ks EthKeyStoreSim) Eth() keystore.Eth { + return ks.ETHKS +} + +func (ks EthKeyStoreSim) SignTx(address common.Address, tx *ethtypes.Transaction, chainID *big.Int) (*ethtypes.Transaction, error) { + if chainID.String() == "1000" { + // A terrible hack, just for the multichain test. All simulation clients run on chainID 1337. + // We let the DestChainSelector actually use 1337 to make sure the offchainConfig digests are properly generated. + return ks.ETHKS.SignTx(context.Background(), address, tx, big.NewInt(1337)) + } + return ks.ETHKS.SignTx(context.Background(), address, tx, chainID) +} + +var _ keystore.Eth = EthKeyStoreSim{}.ETHKS + +func ConfirmTxs(t *testing.T, txs []*ethtypes.Transaction, chain *backends.SimulatedBackend) { + chain.Commit() + for _, tx := range txs { + rec, err := bind.WaitMined(context.Background(), chain, tx) + require.NoError(t, err) + require.Equal(t, uint64(1), rec.Status) + } +} diff --git a/core/services/ocr2/plugins/ccip/testhelpers/structfields.go b/core/services/ocr2/plugins/ccip/testhelpers/structfields.go new file mode 100644 index 00000000000..88e0fffa672 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/testhelpers/structfields.go @@ -0,0 +1,44 @@ +package testhelpers + +import ( + "fmt" + "reflect" + "strings" +) + +// FindStructFieldsOfCertainType recursively iterates over struct fields and returns all the fields of the provided type. +func FindStructFieldsOfCertainType(targetType string, v any) []string { + typesAndFields := TypesAndFields("", reflect.ValueOf(v)) + results := make([]string, 0) + for _, field := range typesAndFields { + if strings.Contains(field, targetType) { + results = append(results, field) + } + } + return results +} + +// TypesAndFields will find and return all the fields and their types of the provided value. +// NOTE: This is not intended for production use, it's a helper method for tests. +func TypesAndFields(prefix string, v reflect.Value) []string { + results := make([]string, 0) + + s := v + typeOfT := s.Type() + for i := 0; i < s.NumField(); i++ { + f := s.Field(i) + typeAndName := fmt.Sprintf("%s%s %v", prefix, f.Type(), typeOfT.Field(i).Name) + results = append(results, typeAndName) + + if f.Kind().String() == "ptr" { + results = append(results, TypesAndFields(typeOfT.Field(i).Name, f.Elem())...) + } + + if f.Kind().String() == "struct" { + x1 := reflect.ValueOf(f.Interface()) + results = append(results, TypesAndFields(typeOfT.Field(i).Name, x1)...) 
+ } + } + + return results +} diff --git a/core/services/ocr2/plugins/ccip/testhelpers/testhelpers_1_4_0/ccip_contracts_1_4_0.go b/core/services/ocr2/plugins/ccip/testhelpers/testhelpers_1_4_0/ccip_contracts_1_4_0.go new file mode 100644 index 00000000000..4ea5bb18d7e --- /dev/null +++ b/core/services/ocr2/plugins/ccip/testhelpers/testhelpers_1_4_0/ccip_contracts_1_4_0.go @@ -0,0 +1,1585 @@ +package testhelpers_1_4_0 + +import ( + "context" + "fmt" + "math" + "math/big" + "testing" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core/types" + "github.com/pkg/errors" + "github.com/rs/zerolog/log" + "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/libocr/offchainreporting2/confighelper" + ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2/types" + ocr2types "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + + "github.com/smartcontractkit/chainlink-common/pkg/config" + "github.com/smartcontractkit/chainlink-common/pkg/hashutil" + "github.com/smartcontractkit/chainlink-common/pkg/merklemulti" + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/arm_proxy_contract" + burn_mint_token_pool "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/burn_mint_token_pool_1_4_0" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/commit_store_1_2_0" + evm_2_evm_offramp "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_offramp_1_2_0" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_onramp_1_2_0" + evm_2_evm_onramp "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_onramp_1_2_0" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/lock_release_token_pool_1_0_0" + lock_release_token_pool "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/lock_release_token_pool_1_4_0" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/maybe_revert_message_receiver" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/mock_arm_contract" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/price_registry_1_2_0" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/router" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/weth9" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/link_token_interface" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/shared/generated/burn_mint_erc677" + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers" + ccipconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0" + 
"github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/testhelpers" +) + +var ( + // Source + SourcePool = "source Link pool" + SourcePriceRegistry = "source PriceRegistry" + OnRamp = "onramp" + OnRampNative = "onramp-native" + SourceRouter = "source router" + + // Dest + OffRamp = "offramp" + DestPool = "dest Link pool" + + Receiver = "receiver" + Sender = "sender" + Link = func(amount int64) *big.Int { return new(big.Int).Mul(big.NewInt(1e18), big.NewInt(amount)) } + HundredLink = Link(100) + LinkUSDValue = func(amount int64) *big.Int { return new(big.Int).Mul(big.NewInt(1e18), big.NewInt(amount)) } + SourceChainID = uint64(1000) + SourceChainSelector = uint64(11787463284727550157) + DestChainID = uint64(1337) + DestChainSelector = uint64(3379446385462418246) +) + +// Backwards compat, in principle these statuses are version dependent +// TODO: Adjust integration tests to be version agnostic using readers +var ( + ExecutionStateSuccess = MessageExecutionState(cciptypes.ExecutionStateSuccess) + ExecutionStateFailure = MessageExecutionState(cciptypes.ExecutionStateFailure) +) + +type MessageExecutionState cciptypes.MessageExecutionState +type CommitOffchainConfig struct { + v1_2_0.JSONCommitOffchainConfig +} + +func (c CommitOffchainConfig) Encode() ([]byte, error) { + return ccipconfig.EncodeOffchainConfig(c.JSONCommitOffchainConfig) +} + +func NewCommitOffchainConfig( + GasPriceHeartBeat config.Duration, + DAGasPriceDeviationPPB uint32, + ExecGasPriceDeviationPPB uint32, + TokenPriceHeartBeat config.Duration, + TokenPriceDeviationPPB uint32, + InflightCacheExpiry config.Duration) CommitOffchainConfig { + return CommitOffchainConfig{v1_2_0.JSONCommitOffchainConfig{ + GasPriceHeartBeat: GasPriceHeartBeat, + DAGasPriceDeviationPPB: DAGasPriceDeviationPPB, + ExecGasPriceDeviationPPB: ExecGasPriceDeviationPPB, + TokenPriceHeartBeat: TokenPriceHeartBeat, + TokenPriceDeviationPPB: TokenPriceDeviationPPB, + InflightCacheExpiry: InflightCacheExpiry, + }} +} + +type CommitOnchainConfig struct { + ccipdata.CommitOnchainConfig +} + +func NewCommitOnchainConfig( + PriceRegistry common.Address, +) CommitOnchainConfig { + return CommitOnchainConfig{ccipdata.CommitOnchainConfig{ + PriceRegistry: PriceRegistry, + }} +} + +type ExecOnchainConfig struct { + v1_2_0.ExecOnchainConfig +} + +func NewExecOnchainConfig( + PermissionLessExecutionThresholdSeconds uint32, + Router common.Address, + PriceRegistry common.Address, + MaxNumberOfTokensPerMsg uint16, + MaxDataBytes uint32, + MaxPoolReleaseOrMintGas uint32, +) ExecOnchainConfig { + return ExecOnchainConfig{v1_2_0.ExecOnchainConfig{ + PermissionLessExecutionThresholdSeconds: PermissionLessExecutionThresholdSeconds, + Router: Router, + PriceRegistry: PriceRegistry, + MaxNumberOfTokensPerMsg: MaxNumberOfTokensPerMsg, + MaxDataBytes: MaxDataBytes, + MaxPoolReleaseOrMintGas: MaxPoolReleaseOrMintGas, + }} +} + +type ExecOffchainConfig struct { + v1_2_0.JSONExecOffchainConfig +} + +func (c ExecOffchainConfig) Encode() ([]byte, error) { + return ccipconfig.EncodeOffchainConfig(c.JSONExecOffchainConfig) +} + +func NewExecOffchainConfig( + DestOptimisticConfirmations uint32, + BatchGasLimit uint32, + RelativeBoostPerWaitHour float64, + InflightCacheExpiry config.Duration, + RootSnoozeTime config.Duration, +) ExecOffchainConfig { + return ExecOffchainConfig{v1_2_0.JSONExecOffchainConfig{ + DestOptimisticConfirmations: DestOptimisticConfirmations, + BatchGasLimit: BatchGasLimit, + RelativeBoostPerWaitHour: RelativeBoostPerWaitHour, + 
InflightCacheExpiry: InflightCacheExpiry, + RootSnoozeTime: RootSnoozeTime, + }} +} + +type MaybeRevertReceiver struct { + Receiver *maybe_revert_message_receiver.MaybeRevertMessageReceiver + Strict bool +} + +type Common struct { + ChainID uint64 + ChainSelector uint64 + User *bind.TransactOpts + Chain *backends.SimulatedBackend + LinkToken *link_token_interface.LinkToken + LinkTokenPool *lock_release_token_pool.LockReleaseTokenPool + CustomToken *link_token_interface.LinkToken + WrappedNative *weth9.WETH9 + WrappedNativePool *lock_release_token_pool_1_0_0.LockReleaseTokenPool + ARM *mock_arm_contract.MockARMContract + ARMProxy *arm_proxy_contract.ARMProxyContract + PriceRegistry *price_registry_1_2_0.PriceRegistry +} + +type SourceChain struct { + Common + Router *router.Router + OnRamp *evm_2_evm_onramp.EVM2EVMOnRamp +} + +type DestinationChain struct { + Common + + CommitStore *commit_store_1_2_0.CommitStore + Router *router.Router + OffRamp *evm_2_evm_offramp.EVM2EVMOffRamp + Receivers []MaybeRevertReceiver +} + +type OCR2Config struct { + Signers []common.Address + Transmitters []common.Address + F uint8 + OnchainConfig []byte + OffchainConfigVersion uint64 + OffchainConfig []byte +} + +type BalanceAssertion struct { + Name string + Address common.Address + Expected string + Getter func(t *testing.T, addr common.Address) *big.Int + Within string +} + +type BalanceReq struct { + Name string + Addr common.Address + Getter func(t *testing.T, addr common.Address) *big.Int +} + +type CCIPContracts struct { + Source SourceChain + Dest DestinationChain + Oracles []confighelper.OracleIdentityExtra + + commitOCRConfig, execOCRConfig *OCR2Config +} + +func (c *CCIPContracts) DeployNewOffRamp(t *testing.T) { + prevOffRamp := common.HexToAddress("") + if c.Dest.OffRamp != nil { + prevOffRamp = c.Dest.OffRamp.Address() + } + offRampAddress, _, _, err := evm_2_evm_offramp.DeployEVM2EVMOffRamp( + c.Dest.User, + c.Dest.Chain, + evm_2_evm_offramp.EVM2EVMOffRampStaticConfig{ + CommitStore: c.Dest.CommitStore.Address(), + ChainSelector: c.Dest.ChainSelector, + SourceChainSelector: c.Source.ChainSelector, + OnRamp: c.Source.OnRamp.Address(), + PrevOffRamp: prevOffRamp, + ArmProxy: c.Dest.ARMProxy.Address(), + }, + []common.Address{c.Source.LinkToken.Address()}, // source tokens + []common.Address{c.Dest.LinkTokenPool.Address()}, // pools + evm_2_evm_offramp.RateLimiterConfig{ + IsEnabled: true, + Capacity: LinkUSDValue(100), + Rate: LinkUSDValue(1), + }, + ) + require.NoError(t, err) + c.Dest.Chain.Commit() + + c.Dest.OffRamp, err = evm_2_evm_offramp.NewEVM2EVMOffRamp(offRampAddress, c.Dest.Chain) + require.NoError(t, err) + + c.Dest.Chain.Commit() + c.Source.Chain.Commit() +} + +func (c *CCIPContracts) EnableOffRamp(t *testing.T) { + _, err := c.Dest.Router.ApplyRampUpdates(c.Dest.User, nil, nil, []router.RouterOffRamp{{SourceChainSelector: SourceChainSelector, OffRamp: c.Dest.OffRamp.Address()}}) + require.NoError(t, err) + c.Dest.Chain.Commit() + + onChainConfig := c.CreateDefaultExecOnchainConfig(t) + offChainConfig := c.CreateDefaultExecOffchainConfig(t) + + c.SetupExecOCR2Config(t, onChainConfig, offChainConfig) +} + +func (c *CCIPContracts) EnableCommitStore(t *testing.T) { + onChainConfig := c.CreateDefaultCommitOnchainConfig(t) + offChainConfig := c.CreateDefaultCommitOffchainConfig(t) + + c.SetupCommitOCR2Config(t, onChainConfig, offChainConfig) + + _, err := c.Dest.PriceRegistry.ApplyPriceUpdatersUpdates(c.Dest.User, []common.Address{c.Dest.CommitStore.Address()}, []common.Address{}) + 
require.NoError(t, err) + c.Dest.Chain.Commit() +} + +func (c *CCIPContracts) DeployNewOnRamp(t *testing.T) { + t.Log("Deploying new onRamp") + // find the last onRamp + prevOnRamp := common.HexToAddress("") + if c.Source.OnRamp != nil { + prevOnRamp = c.Source.OnRamp.Address() + } + onRampAddress, _, _, err := evm_2_evm_onramp.DeployEVM2EVMOnRamp( + c.Source.User, // user + c.Source.Chain, // client + evm_2_evm_onramp.EVM2EVMOnRampStaticConfig{ + LinkToken: c.Source.LinkToken.Address(), + ChainSelector: c.Source.ChainSelector, + DestChainSelector: c.Dest.ChainSelector, + DefaultTxGasLimit: 200_000, + MaxNopFeesJuels: big.NewInt(0).Mul(big.NewInt(100_000_000), big.NewInt(1e18)), + PrevOnRamp: prevOnRamp, + ArmProxy: c.Source.ARM.Address(), // ARM + }, + evm_2_evm_onramp.EVM2EVMOnRampDynamicConfig{ + Router: c.Source.Router.Address(), + MaxNumberOfTokensPerMsg: 5, + DestGasOverhead: 350_000, + DestGasPerPayloadByte: 16, + DestDataAvailabilityOverheadGas: 33_596, + DestGasPerDataAvailabilityByte: 16, + DestDataAvailabilityMultiplierBps: 6840, // 0.684 + PriceRegistry: c.Source.PriceRegistry.Address(), + MaxDataBytes: 1e5, + MaxPerMsgGasLimit: 4_000_000, + }, + []evm_2_evm_onramp.InternalPoolUpdate{ + { + Token: c.Source.LinkToken.Address(), + Pool: c.Source.LinkTokenPool.Address(), + }, + }, + evm_2_evm_onramp.RateLimiterConfig{ + IsEnabled: true, + Capacity: LinkUSDValue(100), + Rate: LinkUSDValue(1), + }, + []evm_2_evm_onramp.EVM2EVMOnRampFeeTokenConfigArgs{ + { + Token: c.Source.LinkToken.Address(), + NetworkFeeUSDCents: 1_00, + GasMultiplierWeiPerEth: 1e18, + PremiumMultiplierWeiPerEth: 9e17, + Enabled: true, + }, + { + Token: c.Source.WrappedNative.Address(), + NetworkFeeUSDCents: 1_00, + GasMultiplierWeiPerEth: 1e18, + PremiumMultiplierWeiPerEth: 1e18, + Enabled: true, + }, + }, + []evm_2_evm_onramp.EVM2EVMOnRampTokenTransferFeeConfigArgs{ + { + Token: c.Source.LinkToken.Address(), + MinFeeUSDCents: 50, // $0.5 + MaxFeeUSDCents: 1_000_000_00, // $ 1 million + DeciBps: 5_0, // 5 bps + DestGasOverhead: 34_000, + DestBytesOverhead: 32, + }, + }, + []evm_2_evm_onramp.EVM2EVMOnRampNopAndWeight{}, + ) + + require.NoError(t, err) + c.Source.Chain.Commit() + c.Dest.Chain.Commit() + c.Source.OnRamp, err = evm_2_evm_onramp.NewEVM2EVMOnRamp(onRampAddress, c.Source.Chain) + require.NoError(t, err) + c.Source.Chain.Commit() + c.Dest.Chain.Commit() +} + +func (c *CCIPContracts) EnableOnRamp(t *testing.T) { + t.Log("Setting onRamp on source router") + _, err := c.Source.Router.ApplyRampUpdates(c.Source.User, []router.RouterOnRamp{{DestChainSelector: c.Dest.ChainSelector, OnRamp: c.Source.OnRamp.Address()}}, nil, nil) + require.NoError(t, err) + c.Source.Chain.Commit() + c.Dest.Chain.Commit() +} + +func (c *CCIPContracts) DeployNewCommitStore(t *testing.T) { + commitStoreAddress, _, _, err := commit_store_1_2_0.DeployCommitStore( + c.Dest.User, // user + c.Dest.Chain, // client + commit_store_1_2_0.CommitStoreStaticConfig{ + ChainSelector: c.Dest.ChainSelector, + SourceChainSelector: c.Source.ChainSelector, + OnRamp: c.Source.OnRamp.Address(), + ArmProxy: c.Dest.ARMProxy.Address(), + }, + ) + require.NoError(t, err) + c.Dest.Chain.Commit() + // since CommitStoreHelper derives from CommitStore, it's safe to instantiate both on same address + c.Dest.CommitStore, err = commit_store_1_2_0.NewCommitStore(commitStoreAddress, c.Dest.Chain) + require.NoError(t, err) +} + +func (c *CCIPContracts) DeployNewPriceRegistry(t *testing.T) { + t.Log("Deploying new Price Registry") + destPricesAddress, _, _, err := 
price_registry_1_2_0.DeployPriceRegistry( + c.Dest.User, + c.Dest.Chain, + []common.Address{c.Dest.CommitStore.Address()}, + []common.Address{c.Dest.LinkToken.Address()}, + 60*60*24*14, // two weeks + ) + require.NoError(t, err) + c.Source.Chain.Commit() + c.Dest.Chain.Commit() + c.Dest.PriceRegistry, err = price_registry_1_2_0.NewPriceRegistry(destPricesAddress, c.Dest.Chain) + require.NoError(t, err) + + priceUpdates := price_registry_1_2_0.InternalPriceUpdates{ + TokenPriceUpdates: []price_registry_1_2_0.InternalTokenPriceUpdate{ + { + SourceToken: c.Dest.LinkToken.Address(), + UsdPerToken: big.NewInt(8e18), // 8usd + }, + { + SourceToken: c.Dest.WrappedNative.Address(), + UsdPerToken: big.NewInt(1e18), // 1usd + }, + }, + GasPriceUpdates: []price_registry_1_2_0.InternalGasPriceUpdate{ + { + DestChainSelector: c.Source.ChainSelector, + UsdPerUnitGas: big.NewInt(2000e9), // $2000 per eth * 1gwei = 2000e9 + }, + }, + } + _, err = c.Dest.PriceRegistry.UpdatePrices(c.Dest.User, priceUpdates) + require.NoError(t, err) + + c.Source.Chain.Commit() + c.Dest.Chain.Commit() + + t.Logf("New Price Registry deployed at %s", destPricesAddress.String()) +} + +func (c *CCIPContracts) SetNopsOnRamp(t *testing.T, nopsAndWeights []evm_2_evm_onramp.EVM2EVMOnRampNopAndWeight) { + tx, err := c.Source.OnRamp.SetNops(c.Source.User, nopsAndWeights) + require.NoError(t, err) + c.Source.Chain.Commit() + _, err = bind.WaitMined(context.Background(), c.Source.Chain, tx) + require.NoError(t, err) +} + +func (c *CCIPContracts) GetSourceLinkBalance(t *testing.T, addr common.Address) *big.Int { + return GetBalance(t, c.Source.Chain, c.Source.LinkToken.Address(), addr) +} + +func (c *CCIPContracts) GetDestLinkBalance(t *testing.T, addr common.Address) *big.Int { + return GetBalance(t, c.Dest.Chain, c.Dest.LinkToken.Address(), addr) +} + +func (c *CCIPContracts) GetSourceWrappedTokenBalance(t *testing.T, addr common.Address) *big.Int { + return GetBalance(t, c.Source.Chain, c.Source.WrappedNative.Address(), addr) +} + +func (c *CCIPContracts) GetDestWrappedTokenBalance(t *testing.T, addr common.Address) *big.Int { + return GetBalance(t, c.Dest.Chain, c.Dest.WrappedNative.Address(), addr) +} + +func (c *CCIPContracts) AssertBalances(t *testing.T, bas []BalanceAssertion) { + for _, b := range bas { + actual := b.Getter(t, b.Address) + t.Log("Checking balance for", b.Name, "at", b.Address.Hex(), "got", actual) + require.NotNil(t, actual, "%v getter return nil", b.Name) + if b.Within == "" { + require.Equal(t, b.Expected, actual.String(), "wrong balance for %s got %s want %s", b.Name, actual, b.Expected) + } else { + bi, _ := big.NewInt(0).SetString(b.Expected, 10) + withinI, _ := big.NewInt(0).SetString(b.Within, 10) + high := big.NewInt(0).Add(bi, withinI) + low := big.NewInt(0).Sub(bi, withinI) + require.Equal(t, -1, actual.Cmp(high), "wrong balance for %s got %s outside expected range [%s, %s]", b.Name, actual, low, high) + require.Equal(t, 1, actual.Cmp(low), "wrong balance for %s got %s outside expected range [%s, %s]", b.Name, actual, low, high) + } + } +} + +func AccountToAddress(accounts []ocr2types.Account) (addresses []common.Address, err error) { + for _, signer := range accounts { + bytes, err := hexutil.Decode(string(signer)) + if err != nil { + return []common.Address{}, errors.Wrap(err, fmt.Sprintf("given address is not valid %s", signer)) + } + if len(bytes) != 20 { + return []common.Address{}, errors.Errorf("address is not 20 bytes %s", signer) + } + addresses = append(addresses, 
common.BytesToAddress(bytes)) + } + return addresses, nil +} + +func OnchainPublicKeyToAddress(publicKeys []ocrtypes.OnchainPublicKey) (addresses []common.Address, err error) { + for _, signer := range publicKeys { + if len(signer) != 20 { + return []common.Address{}, errors.Errorf("address is not 20 bytes %s", signer) + } + addresses = append(addresses, common.BytesToAddress(signer)) + } + return addresses, nil +} + +func (c *CCIPContracts) DeriveOCR2Config(t *testing.T, oracles []confighelper.OracleIdentityExtra, rawOnchainConfig []byte, rawOffchainConfig []byte) *OCR2Config { + signers, transmitters, threshold, onchainConfig, offchainConfigVersion, offchainConfig, err := confighelper.ContractSetConfigArgsForTests( + 2*time.Second, // deltaProgress + 1*time.Second, // deltaResend + 1*time.Second, // deltaRound + 500*time.Millisecond, // deltaGrace + 2*time.Second, // deltaStage + 3, + []int{1, 1, 1, 1}, + oracles, + rawOffchainConfig, + 50*time.Millisecond, // Max duration query + 1*time.Second, // Max duration observation + 100*time.Millisecond, + 100*time.Millisecond, + 100*time.Millisecond, + 1, // faults + rawOnchainConfig, + ) + require.NoError(t, err) + lggr := logger.TestLogger(t) + lggr.Infow("Setting Config on Oracle Contract", + "signers", signers, + "transmitters", transmitters, + "threshold", threshold, + "onchainConfig", onchainConfig, + "encodedConfigVersion", offchainConfigVersion, + ) + signerAddresses, err := OnchainPublicKeyToAddress(signers) + require.NoError(t, err) + transmitterAddresses, err := AccountToAddress(transmitters) + require.NoError(t, err) + + return &OCR2Config{ + Signers: signerAddresses, + Transmitters: transmitterAddresses, + F: threshold, + OnchainConfig: onchainConfig, + OffchainConfigVersion: offchainConfigVersion, + OffchainConfig: offchainConfig, + } +} + +func (c *CCIPContracts) SetupCommitOCR2Config(t *testing.T, commitOnchainConfig, commitOffchainConfig []byte) { + c.commitOCRConfig = c.DeriveOCR2Config(t, c.Oracles, commitOnchainConfig, commitOffchainConfig) + // Set the DON on the commit store + _, err := c.Dest.CommitStore.SetOCR2Config( + c.Dest.User, + c.commitOCRConfig.Signers, + c.commitOCRConfig.Transmitters, + c.commitOCRConfig.F, + c.commitOCRConfig.OnchainConfig, + c.commitOCRConfig.OffchainConfigVersion, + c.commitOCRConfig.OffchainConfig, + ) + require.NoError(t, err) + c.Dest.Chain.Commit() +} + +func (c *CCIPContracts) SetupExecOCR2Config(t *testing.T, execOnchainConfig, execOffchainConfig []byte) { + c.execOCRConfig = c.DeriveOCR2Config(t, c.Oracles, execOnchainConfig, execOffchainConfig) + // Same DON on the offramp + _, err := c.Dest.OffRamp.SetOCR2Config( + c.Dest.User, + c.execOCRConfig.Signers, + c.execOCRConfig.Transmitters, + c.execOCRConfig.F, + c.execOCRConfig.OnchainConfig, + c.execOCRConfig.OffchainConfigVersion, + c.execOCRConfig.OffchainConfig, + ) + require.NoError(t, err) + c.Dest.Chain.Commit() +} + +func (c *CCIPContracts) SetupOnchainConfig(t *testing.T, commitOnchainConfig, commitOffchainConfig, execOnchainConfig, execOffchainConfig []byte) int64 { + // Note We do NOT set the payees, payment is done in the OCR2Base implementation + blockBeforeConfig, err := c.Dest.Chain.BlockByNumber(context.Background(), nil) + require.NoError(t, err) + + c.SetupCommitOCR2Config(t, commitOnchainConfig, commitOffchainConfig) + c.SetupExecOCR2Config(t, execOnchainConfig, execOffchainConfig) + + return blockBeforeConfig.Number().Int64() +} + +func (c *CCIPContracts) SetupLockAndMintTokenPool( + sourceTokenAddress 
common.Address, + wrappedTokenName, + wrappedTokenSymbol string) (common.Address, *burn_mint_erc677.BurnMintERC677, error) { + // Deploy dest token & pool + destTokenAddress, _, _, err := burn_mint_erc677.DeployBurnMintERC677(c.Dest.User, c.Dest.Chain, wrappedTokenName, wrappedTokenSymbol, 18, big.NewInt(0)) + if err != nil { + return [20]byte{}, nil, err + } + c.Dest.Chain.Commit() + + destToken, err := burn_mint_erc677.NewBurnMintERC677(destTokenAddress, c.Dest.Chain) + if err != nil { + return [20]byte{}, nil, err + } + + destPoolAddress, _, destPool, err := burn_mint_token_pool.DeployBurnMintTokenPool( + c.Dest.User, + c.Dest.Chain, + destTokenAddress, + []common.Address{}, // pool originalSender allowList + c.Dest.ARMProxy.Address(), + c.Dest.Router.Address(), + ) + if err != nil { + return [20]byte{}, nil, err + } + c.Dest.Chain.Commit() + + _, err = destToken.GrantMintAndBurnRoles(c.Dest.User, destPoolAddress) + if err != nil { + return [20]byte{}, nil, err + } + + _, err = destPool.ApplyChainUpdates(c.Dest.User, + []burn_mint_token_pool.TokenPoolChainUpdate{ + { + RemoteChainSelector: c.Source.ChainSelector, + Allowed: true, + OutboundRateLimiterConfig: burn_mint_token_pool.RateLimiterConfig{ + IsEnabled: true, + Capacity: HundredLink, + Rate: big.NewInt(1e18), + }, + InboundRateLimiterConfig: burn_mint_token_pool.RateLimiterConfig{ + IsEnabled: true, + Capacity: HundredLink, + Rate: big.NewInt(1e18), + }, + }, + }) + if err != nil { + return [20]byte{}, nil, err + } + c.Dest.Chain.Commit() + + sourcePoolAddress, _, sourcePool, err := lock_release_token_pool.DeployLockReleaseTokenPool( + c.Source.User, + c.Source.Chain, + sourceTokenAddress, + []common.Address{}, // empty allowList at deploy time indicates pool has no original sender restrictions + c.Source.ARMProxy.Address(), + true, + c.Source.Router.Address(), + ) + if err != nil { + return [20]byte{}, nil, err + } + c.Source.Chain.Commit() + + // set onRamp as valid caller for source pool + _, err = sourcePool.ApplyChainUpdates(c.Source.User, []lock_release_token_pool.TokenPoolChainUpdate{ + { + RemoteChainSelector: c.Dest.ChainSelector, + Allowed: true, + OutboundRateLimiterConfig: lock_release_token_pool.RateLimiterConfig{ + IsEnabled: true, + Capacity: HundredLink, + Rate: big.NewInt(1e18), + }, + InboundRateLimiterConfig: lock_release_token_pool.RateLimiterConfig{ + IsEnabled: true, + Capacity: HundredLink, + Rate: big.NewInt(1e18), + }, + }, + }) + if err != nil { + return [20]byte{}, nil, err + } + c.Source.Chain.Commit() + + wrappedNativeAddress, err := c.Source.Router.GetWrappedNative(nil) + if err != nil { + return [20]byte{}, nil, err + } + + //native token is used as fee token + _, err = c.Source.PriceRegistry.UpdatePrices(c.Source.User, price_registry_1_2_0.InternalPriceUpdates{ + TokenPriceUpdates: []price_registry_1_2_0.InternalTokenPriceUpdate{ + { + SourceToken: sourceTokenAddress, + UsdPerToken: big.NewInt(5), + }, + }, + GasPriceUpdates: []price_registry_1_2_0.InternalGasPriceUpdate{}, + }) + if err != nil { + return [20]byte{}, nil, err + } + c.Source.Chain.Commit() + + _, err = c.Source.PriceRegistry.ApplyFeeTokensUpdates(c.Source.User, []common.Address{wrappedNativeAddress}, nil) + if err != nil { + return [20]byte{}, nil, err + } + c.Source.Chain.Commit() + + // add new token pool created above + _, err = c.Source.OnRamp.ApplyPoolUpdates(c.Source.User, nil, []evm_2_evm_onramp.InternalPoolUpdate{ + { + Token: sourceTokenAddress, + Pool: sourcePoolAddress, + }, + }) + if err != nil { + return [20]byte{}, 
nil, err + } + + _, err = c.Dest.OffRamp.ApplyPoolUpdates(c.Dest.User, nil, []evm_2_evm_offramp.InternalPoolUpdate{ + { + Token: sourceTokenAddress, + Pool: destPoolAddress, + }, + }) + if err != nil { + return [20]byte{}, nil, err + } + c.Dest.Chain.Commit() + + return sourcePoolAddress, destToken, err +} + +func (c *CCIPContracts) SendMessage(t *testing.T, gasLimit, tokenAmount *big.Int, receiverAddr common.Address) { + extraArgs, err := GetEVMExtraArgsV1(gasLimit, false) + require.NoError(t, err) + msg := router.ClientEVM2AnyMessage{ + Receiver: MustEncodeAddress(t, receiverAddr), + Data: []byte("hello"), + TokenAmounts: []router.ClientEVMTokenAmount{ + { + Token: c.Source.LinkToken.Address(), + Amount: tokenAmount, + }, + }, + FeeToken: c.Source.LinkToken.Address(), + ExtraArgs: extraArgs, + } + fee, err := c.Source.Router.GetFee(nil, c.Dest.ChainSelector, msg) + require.NoError(t, err) + // Currently no overhead and 1gwei dest gas price. So fee is simply gasLimit * gasPrice. + // require.Equal(t, new(big.Int).Mul(gasLimit, gasPrice).String(), fee.String()) + // Approve the fee amount + the token amount + _, err = c.Source.LinkToken.Approve(c.Source.User, c.Source.Router.Address(), new(big.Int).Add(fee, tokenAmount)) + require.NoError(t, err) + c.Source.Chain.Commit() + c.SendRequest(t, msg) +} + +func GetBalances(t *testing.T, brs []BalanceReq) (map[string]*big.Int, error) { + m := make(map[string]*big.Int) + for _, br := range brs { + m[br.Name] = br.Getter(t, br.Addr) + if m[br.Name] == nil { + return nil, fmt.Errorf("%v getter return nil", br.Name) + } + } + return m, nil +} + +func MustAddBigInt(a *big.Int, b string) *big.Int { + bi, _ := big.NewInt(0).SetString(b, 10) + return big.NewInt(0).Add(a, bi) +} + +func MustSubBigInt(a *big.Int, b string) *big.Int { + bi, _ := big.NewInt(0).SetString(b, 10) + return big.NewInt(0).Sub(a, bi) +} + +func MustEncodeAddress(t *testing.T, address common.Address) []byte { + bts, err := utils.ABIEncode(`[{"type":"address"}]`, address) + require.NoError(t, err) + return bts +} + +func SetupCCIPContracts(t *testing.T, sourceChainID, sourceChainSelector, destChainID, destChainSelector uint64) CCIPContracts { + sourceChain, sourceUser := testhelpers.SetupChain(t) + destChain, destUser := testhelpers.SetupChain(t) + + armSourceAddress, _, _, err := mock_arm_contract.DeployMockARMContract( + sourceUser, + sourceChain, + ) + require.NoError(t, err) + sourceARM, err := mock_arm_contract.NewMockARMContract(armSourceAddress, sourceChain) + require.NoError(t, err) + armProxySourceAddress, _, _, err := arm_proxy_contract.DeployARMProxyContract( + sourceUser, + sourceChain, + armSourceAddress, + ) + require.NoError(t, err) + sourceARMProxy, err := arm_proxy_contract.NewARMProxyContract(armProxySourceAddress, sourceChain) + require.NoError(t, err) + sourceChain.Commit() + + armDestAddress, _, _, err := mock_arm_contract.DeployMockARMContract( + destUser, + destChain, + ) + require.NoError(t, err) + armProxyDestAddress, _, _, err := arm_proxy_contract.DeployARMProxyContract( + destUser, + destChain, + armDestAddress, + ) + require.NoError(t, err) + destChain.Commit() + destARM, err := mock_arm_contract.NewMockARMContract(armDestAddress, destChain) + require.NoError(t, err) + destARMProxy, err := arm_proxy_contract.NewARMProxyContract(armProxyDestAddress, destChain) + require.NoError(t, err) + + // Deploy link token and pool on source chain + sourceLinkTokenAddress, _, _, err := link_token_interface.DeployLinkToken(sourceUser, sourceChain) + require.NoError(t, 
err) + sourceChain.Commit() + sourceLinkToken, err := link_token_interface.NewLinkToken(sourceLinkTokenAddress, sourceChain) + require.NoError(t, err) + + // Create router + sourceWeth9addr, _, _, err := weth9.DeployWETH9(sourceUser, sourceChain) + require.NoError(t, err) + sourceWrapped, err := weth9.NewWETH9(sourceWeth9addr, sourceChain) + require.NoError(t, err) + + sourceRouterAddress, _, _, err := router.DeployRouter(sourceUser, sourceChain, sourceWeth9addr, armProxySourceAddress) + require.NoError(t, err) + sourceRouter, err := router.NewRouter(sourceRouterAddress, sourceChain) + require.NoError(t, err) + sourceChain.Commit() + + sourceWeth9PoolAddress, _, _, err := lock_release_token_pool_1_0_0.DeployLockReleaseTokenPool( + sourceUser, + sourceChain, + sourceWeth9addr, + []common.Address{}, + armProxySourceAddress, + ) + require.NoError(t, err) + sourceChain.Commit() + + sourceWeth9Pool, err := lock_release_token_pool_1_0_0.NewLockReleaseTokenPool(sourceWeth9PoolAddress, sourceChain) + require.NoError(t, err) + + sourcePoolAddress, _, _, err := lock_release_token_pool.DeployLockReleaseTokenPool( + sourceUser, + sourceChain, + sourceLinkTokenAddress, + []common.Address{}, + armProxySourceAddress, + true, + sourceRouterAddress, + ) + require.NoError(t, err) + sourceChain.Commit() + sourcePool, err := lock_release_token_pool.NewLockReleaseTokenPool(sourcePoolAddress, sourceChain) + require.NoError(t, err) + + // Deploy custom token pool source + sourceCustomTokenAddress, _, _, err := link_token_interface.DeployLinkToken(sourceUser, sourceChain) // Just re-use this, it's an ERC20. + require.NoError(t, err) + sourceCustomToken, err := link_token_interface.NewLinkToken(sourceCustomTokenAddress, sourceChain) + require.NoError(t, err) + destChain.Commit() + + // Deploy custom token pool dest + destCustomTokenAddress, _, _, err := link_token_interface.DeployLinkToken(destUser, destChain) // Just re-use this, it's an ERC20. 
+ require.NoError(t, err) + destCustomToken, err := link_token_interface.NewLinkToken(destCustomTokenAddress, destChain) + require.NoError(t, err) + destChain.Commit() + + // Deploy and configure onramp + sourcePricesAddress, _, _, err := price_registry_1_2_0.DeployPriceRegistry( + sourceUser, + sourceChain, + nil, + []common.Address{sourceLinkTokenAddress, sourceWeth9addr}, + 60*60*24*14, // two weeks + ) + require.NoError(t, err) + + srcPriceRegistry, err := price_registry_1_2_0.NewPriceRegistry(sourcePricesAddress, sourceChain) + require.NoError(t, err) + + _, err = srcPriceRegistry.UpdatePrices(sourceUser, price_registry_1_2_0.InternalPriceUpdates{ + TokenPriceUpdates: []price_registry_1_2_0.InternalTokenPriceUpdate{ + { + SourceToken: sourceLinkTokenAddress, + UsdPerToken: new(big.Int).Mul(big.NewInt(1e18), big.NewInt(20)), + }, + { + SourceToken: sourceWeth9addr, + UsdPerToken: new(big.Int).Mul(big.NewInt(1e18), big.NewInt(2000)), + }, + }, + GasPriceUpdates: []price_registry_1_2_0.InternalGasPriceUpdate{ + { + DestChainSelector: destChainSelector, + UsdPerUnitGas: big.NewInt(20000e9), + }, + }, + }) + require.NoError(t, err) + + onRampAddress, _, _, err := evm_2_evm_onramp.DeployEVM2EVMOnRamp( + sourceUser, // user + sourceChain, // client + evm_2_evm_onramp.EVM2EVMOnRampStaticConfig{ + LinkToken: sourceLinkTokenAddress, + ChainSelector: sourceChainSelector, + DestChainSelector: destChainSelector, + DefaultTxGasLimit: 200_000, + MaxNopFeesJuels: big.NewInt(0).Mul(big.NewInt(100_000_000), big.NewInt(1e18)), + PrevOnRamp: common.HexToAddress(""), + ArmProxy: armProxySourceAddress, // ARM + }, + evm_2_evm_onramp.EVM2EVMOnRampDynamicConfig{ + Router: sourceRouterAddress, + MaxNumberOfTokensPerMsg: 5, + DestGasOverhead: 350_000, + DestGasPerPayloadByte: 16, + DestDataAvailabilityOverheadGas: 33_596, + DestGasPerDataAvailabilityByte: 16, + DestDataAvailabilityMultiplierBps: 6840, // 0.684 + PriceRegistry: sourcePricesAddress, + MaxDataBytes: 1e5, + MaxPerMsgGasLimit: 4_000_000, + }, + []evm_2_evm_onramp.InternalPoolUpdate{ + { + Token: sourceLinkTokenAddress, + Pool: sourcePoolAddress, + }, + { + Token: sourceWeth9addr, + Pool: sourceWeth9PoolAddress, + }, + }, + evm_2_evm_onramp.RateLimiterConfig{ + IsEnabled: true, + Capacity: LinkUSDValue(100), + Rate: LinkUSDValue(1), + }, + []evm_2_evm_onramp.EVM2EVMOnRampFeeTokenConfigArgs{ + { + Token: sourceLinkTokenAddress, + NetworkFeeUSDCents: 1_00, + GasMultiplierWeiPerEth: 1e18, + PremiumMultiplierWeiPerEth: 9e17, + Enabled: true, + }, + { + Token: sourceWeth9addr, + NetworkFeeUSDCents: 1_00, + GasMultiplierWeiPerEth: 1e18, + PremiumMultiplierWeiPerEth: 1e18, + Enabled: true, + }, + }, + []evm_2_evm_onramp.EVM2EVMOnRampTokenTransferFeeConfigArgs{ + { + Token: sourceLinkTokenAddress, + MinFeeUSDCents: 50, // $0.5 + MaxFeeUSDCents: 1_000_000_00, // $ 1 million + DeciBps: 5_0, // 5 bps + DestGasOverhead: 34_000, + DestBytesOverhead: 32, + }, + }, + []evm_2_evm_onramp.EVM2EVMOnRampNopAndWeight{}, + ) + require.NoError(t, err) + onRamp, err := evm_2_evm_onramp.NewEVM2EVMOnRamp(onRampAddress, sourceChain) + require.NoError(t, err) + _, err = sourcePool.ApplyChainUpdates( + sourceUser, + []lock_release_token_pool.TokenPoolChainUpdate{{ + RemoteChainSelector: DestChainSelector, + Allowed: true, + OutboundRateLimiterConfig: lock_release_token_pool.RateLimiterConfig{ + IsEnabled: true, + Capacity: HundredLink, + Rate: big.NewInt(1e18), + }, + InboundRateLimiterConfig: lock_release_token_pool.RateLimiterConfig{ + IsEnabled: true, + Capacity: 
HundredLink, + Rate: big.NewInt(1e18), + }, + }}, + ) + require.NoError(t, err) + _, err = sourceWeth9Pool.ApplyRampUpdates(sourceUser, + []lock_release_token_pool_1_0_0.TokenPoolRampUpdate{{Ramp: onRampAddress, Allowed: true, + RateLimiterConfig: lock_release_token_pool_1_0_0.RateLimiterConfig{ + IsEnabled: true, + Capacity: HundredLink, + Rate: big.NewInt(1e18), + }, + }}, + []lock_release_token_pool_1_0_0.TokenPoolRampUpdate{}, + ) + require.NoError(t, err) + sourceChain.Commit() + _, err = sourceRouter.ApplyRampUpdates(sourceUser, []router.RouterOnRamp{{DestChainSelector: destChainSelector, OnRamp: onRampAddress}}, nil, nil) + require.NoError(t, err) + sourceChain.Commit() + + destWethaddr, _, _, err := weth9.DeployWETH9(destUser, destChain) + require.NoError(t, err) + destWrapped, err := weth9.NewWETH9(destWethaddr, destChain) + require.NoError(t, err) + + // Create dest router + destRouterAddress, _, _, err := router.DeployRouter(destUser, destChain, destWethaddr, armProxyDestAddress) + require.NoError(t, err) + destChain.Commit() + destRouter, err := router.NewRouter(destRouterAddress, destChain) + require.NoError(t, err) + + // Deploy link token and pool on destination chain + destLinkTokenAddress, _, _, err := link_token_interface.DeployLinkToken(destUser, destChain) + require.NoError(t, err) + destChain.Commit() + destLinkToken, err := link_token_interface.NewLinkToken(destLinkTokenAddress, destChain) + require.NoError(t, err) + destPoolAddress, _, _, err := lock_release_token_pool.DeployLockReleaseTokenPool( + destUser, + destChain, + destLinkTokenAddress, + []common.Address{}, + armProxyDestAddress, + true, + destRouterAddress, + ) + require.NoError(t, err) + destChain.Commit() + destPool, err := lock_release_token_pool.NewLockReleaseTokenPool(destPoolAddress, destChain) + require.NoError(t, err) + destChain.Commit() + + // Float the offramp pool + o, err := destPool.Owner(nil) + require.NoError(t, err) + require.Equal(t, destUser.From.String(), o.String()) + _, err = destPool.SetRebalancer(destUser, destUser.From) + require.NoError(t, err) + _, err = destLinkToken.Approve(destUser, destPoolAddress, Link(200)) + require.NoError(t, err) + _, err = destPool.ProvideLiquidity(destUser, Link(200)) + require.NoError(t, err) + destChain.Commit() + + destWrappedPoolAddress, _, _, err := lock_release_token_pool_1_0_0.DeployLockReleaseTokenPool( + destUser, + destChain, + destWethaddr, + []common.Address{}, + armProxyDestAddress, + ) + require.NoError(t, err) + destWrappedPool, err := lock_release_token_pool_1_0_0.NewLockReleaseTokenPool(destWrappedPoolAddress, destChain) + require.NoError(t, err) + + poolFloatValue := big.NewInt(1e18) + + destUser.Value = poolFloatValue + _, err = destWrapped.Deposit(destUser) + require.NoError(t, err) + destChain.Commit() + destUser.Value = nil + + _, err = destWrapped.Transfer(destUser, destWrappedPool.Address(), poolFloatValue) + require.NoError(t, err) + destChain.Commit() + + // Deploy and configure ge offramp. + destPricesAddress, _, _, err := price_registry_1_2_0.DeployPriceRegistry( + destUser, + destChain, + nil, + []common.Address{destLinkTokenAddress}, + 60*60*24*14, // two weeks + ) + require.NoError(t, err) + destPriceRegistry, err := price_registry_1_2_0.NewPriceRegistry(destPricesAddress, destChain) + require.NoError(t, err) + + // Deploy commit store. 
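+ // The commit store holds the merkle roots committed for source-chain messages; the offramp deployed below verifies execution proofs against those roots.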
+ commitStoreAddress, _, _, err := commit_store_1_2_0.DeployCommitStore( + destUser, // user + destChain, // client + commit_store_1_2_0.CommitStoreStaticConfig{ + ChainSelector: destChainSelector, + SourceChainSelector: sourceChainSelector, + OnRamp: onRamp.Address(), + ArmProxy: destARMProxy.Address(), + }, + ) + require.NoError(t, err) + destChain.Commit() + commitStore, err := commit_store_1_2_0.NewCommitStore(commitStoreAddress, destChain) + require.NoError(t, err) + + offRampAddress, _, _, err := evm_2_evm_offramp.DeployEVM2EVMOffRamp( + destUser, + destChain, + evm_2_evm_offramp.EVM2EVMOffRampStaticConfig{ + CommitStore: commitStore.Address(), + ChainSelector: destChainSelector, + SourceChainSelector: sourceChainSelector, + OnRamp: onRampAddress, + PrevOffRamp: common.HexToAddress(""), + ArmProxy: armProxyDestAddress, + }, + []common.Address{sourceLinkTokenAddress, sourceWeth9addr}, + []common.Address{destPoolAddress, destWrappedPool.Address()}, + evm_2_evm_offramp.RateLimiterConfig{ + IsEnabled: true, + Capacity: LinkUSDValue(100), + Rate: LinkUSDValue(1), + }, + ) + require.NoError(t, err) + offRamp, err := evm_2_evm_offramp.NewEVM2EVMOffRamp(offRampAddress, destChain) + require.NoError(t, err) + _, err = destPool.ApplyChainUpdates(destUser, + []lock_release_token_pool.TokenPoolChainUpdate{{ + RemoteChainSelector: sourceChainSelector, + Allowed: true, + OutboundRateLimiterConfig: lock_release_token_pool.RateLimiterConfig{ + IsEnabled: true, + Capacity: HundredLink, + Rate: big.NewInt(1e18), + }, + InboundRateLimiterConfig: lock_release_token_pool.RateLimiterConfig{ + IsEnabled: true, + Capacity: HundredLink, + Rate: big.NewInt(1e18), + }, + }}, + ) + require.NoError(t, err) + + _, err = destWrappedPool.ApplyRampUpdates(destUser, + []lock_release_token_pool_1_0_0.TokenPoolRampUpdate{}, + []lock_release_token_pool_1_0_0.TokenPoolRampUpdate{{ + Ramp: offRampAddress, + Allowed: true, + RateLimiterConfig: lock_release_token_pool_1_0_0.RateLimiterConfig{ + IsEnabled: true, + Capacity: HundredLink, + Rate: big.NewInt(1e18), + }, + }}, + ) + require.NoError(t, err) + + destChain.Commit() + _, err = destPriceRegistry.ApplyPriceUpdatersUpdates(destUser, []common.Address{commitStoreAddress}, []common.Address{}) + require.NoError(t, err) + _, err = destRouter.ApplyRampUpdates(destUser, nil, + nil, []router.RouterOffRamp{{SourceChainSelector: sourceChainSelector, OffRamp: offRampAddress}}) + require.NoError(t, err) + + // Deploy 2 revertable (one SS one non-SS) + revertingMessageReceiver1Address, _, _, err := maybe_revert_message_receiver.DeployMaybeRevertMessageReceiver(destUser, destChain, false) + require.NoError(t, err) + revertingMessageReceiver1, _ := maybe_revert_message_receiver.NewMaybeRevertMessageReceiver(revertingMessageReceiver1Address, destChain) + revertingMessageReceiver2Address, _, _, err := maybe_revert_message_receiver.DeployMaybeRevertMessageReceiver(destUser, destChain, false) + require.NoError(t, err) + revertingMessageReceiver2, _ := maybe_revert_message_receiver.NewMaybeRevertMessageReceiver(revertingMessageReceiver2Address, destChain) + // Need to commit here, or we will hit the block gas limit when deploying the executor + sourceChain.Commit() + destChain.Commit() + + // Ensure we have at least finality blocks. 
+ for i := 0; i < 50; i++ { + sourceChain.Commit() + destChain.Commit() + } + + source := SourceChain{ + Common: Common{ + ChainID: sourceChainID, + ChainSelector: sourceChainSelector, + User: sourceUser, + Chain: sourceChain, + LinkToken: sourceLinkToken, + LinkTokenPool: sourcePool, + CustomToken: sourceCustomToken, + ARM: sourceARM, + ARMProxy: sourceARMProxy, + PriceRegistry: srcPriceRegistry, + WrappedNative: sourceWrapped, + WrappedNativePool: sourceWeth9Pool, + }, + Router: sourceRouter, + OnRamp: onRamp, + } + dest := DestinationChain{ + Common: Common{ + ChainID: destChainID, + ChainSelector: destChainSelector, + User: destUser, + Chain: destChain, + LinkToken: destLinkToken, + LinkTokenPool: destPool, + CustomToken: destCustomToken, + ARM: destARM, + ARMProxy: destARMProxy, + PriceRegistry: destPriceRegistry, + WrappedNative: destWrapped, + WrappedNativePool: destWrappedPool, + }, + CommitStore: commitStore, + Router: destRouter, + OffRamp: offRamp, + Receivers: []MaybeRevertReceiver{{Receiver: revertingMessageReceiver1, Strict: false}, {Receiver: revertingMessageReceiver2, Strict: true}}, + } + + return CCIPContracts{ + Source: source, + Dest: dest, + } +} + +func (c *CCIPContracts) SendRequest(t *testing.T, msg router.ClientEVM2AnyMessage) *types.Transaction { + tx, err := c.Source.Router.CcipSend(c.Source.User, c.Dest.ChainSelector, msg) + require.NoError(t, err) + testhelpers.ConfirmTxs(t, []*types.Transaction{tx}, c.Source.Chain) + return tx +} + +func (c *CCIPContracts) AssertExecState(t *testing.T, log logpoller.Log, state MessageExecutionState, offRampOpts ...common.Address) { + var offRamp *evm_2_evm_offramp.EVM2EVMOffRamp + var err error + if len(offRampOpts) > 0 { + offRamp, err = evm_2_evm_offramp.NewEVM2EVMOffRamp(offRampOpts[0], c.Dest.Chain) + require.NoError(t, err) + } else { + require.NotNil(t, c.Dest.OffRamp, "no offRamp configured") + offRamp = c.Dest.OffRamp + } + executionStateChanged, err := offRamp.ParseExecutionStateChanged(log.ToGethLog()) + require.NoError(t, err) + if MessageExecutionState(executionStateChanged.State) != state { + t.Log("Execution failed", hexutil.Encode(executionStateChanged.ReturnData)) + t.Fail() + } +} + +func GetEVMExtraArgsV1(gasLimit *big.Int, strict bool) ([]byte, error) { + EVMV1Tag := []byte{0x97, 0xa6, 0x57, 0xc9} + + encodedArgs, err := utils.ABIEncode(`[{"type":"uint256"},{"type":"bool"}]`, gasLimit, strict) + if err != nil { + return nil, err + } + + return append(EVMV1Tag, encodedArgs...), nil +} + +type ManualExecArgs struct { + SourceChainID, DestChainID uint64 + DestUser *bind.TransactOpts + SourceChain, DestChain bind.ContractBackend + SourceStartBlock *big.Int // the block in/after which failed ccip-send transaction was triggered + DestStartBlock uint64 // the start block for filtering ReportAccepted event (including the failed seq num) + // in destination chain. if not provided to be derived by ApproxDestStartBlock method + DestLatestBlockNum uint64 // current block number in destination + DestDeployedAt uint64 // destination block number for the initial destination contract deployment. + // Can be any number before the tx was reverted in destination chain. 
Preferably set this to
+ // a value greater than zero to avoid a performance hit when locating the approximate destination block
+ SendReqLogIndex uint // log index of the CCIPSendRequested log in source chain
+ SendReqTxHash string // tx hash of the ccip-send transaction for which execution was reverted
+ CommitStore string
+ OnRamp string
+ OffRamp string
+ SeqNr uint64
+ GasLimit *big.Int
+}
+
+// ApproxDestStartBlock attempts to locate the destination-chain block whose timestamp is closest to the timestamp of the
+// source-chain block in which the ccip-send transaction was included.
+// It uses binary search to find the block with the closest timestamp;
+// if the block it lands on has a timestamp greater than that of the source block,
+// it falls back to the first block found with a timestamp lower than the source block's.
+// Providing a value for args.DestDeployedAt improves performance by reducing the range of block numbers to be traversed.
+func (args *ManualExecArgs) ApproxDestStartBlock() error {
+ sourceBlockHdr, err := args.SourceChain.HeaderByNumber(context.Background(), args.SourceStartBlock)
+ if err != nil {
+ return err
+ }
+ sendTxTime := sourceBlockHdr.Time
+ maxBlockNum := args.DestLatestBlockNum
+ // DestDeployedAt serves as the lower search bound; an approximate non-zero value (e.g. 1000, assuming the destination chain had at least 1000 blocks before the transaction started) narrows the range
+ minBlockNum := args.DestDeployedAt
+ closestBlockNum := uint64(math.Floor((float64(maxBlockNum) + float64(minBlockNum)) / 2))
+ var closestBlockHdr *types.Header
+ closestBlockHdr, err = args.DestChain.HeaderByNumber(context.Background(), big.NewInt(int64(closestBlockNum)))
+ if err != nil {
+ return err
+ }
+ // to reduce the number of RPC calls, increase the value of blockOffset
+ blockOffset := uint64(10)
+ for {
+ blockNum := closestBlockHdr.Number.Uint64()
+ if minBlockNum > maxBlockNum {
+ break
+ }
+ timeDiff := math.Abs(float64(closestBlockHdr.Time - sendTxTime))
+ // break if the difference in timestamps is less than 1 minute
+ if timeDiff < 60 {
+ break
+ } else if closestBlockHdr.Time > sendTxTime {
+ maxBlockNum = blockNum - 1
+ } else {
+ minBlockNum = blockNum + 1
+ }
+ closestBlockNum = uint64(math.Floor((float64(maxBlockNum) + float64(minBlockNum)) / 2))
+ closestBlockHdr, err = args.DestChain.HeaderByNumber(context.Background(), big.NewInt(int64(closestBlockNum)))
+ if err != nil {
+ return err
+ }
+ }
+
+ for closestBlockHdr.Time > sendTxTime {
+ closestBlockNum = closestBlockNum - blockOffset
+ if closestBlockNum <= 0 {
+ return fmt.Errorf("approx destination blocknumber not found")
+ }
+ closestBlockHdr, err = args.DestChain.HeaderByNumber(context.Background(), big.NewInt(int64(closestBlockNum)))
+ if err != nil {
+ return err
+ }
+ }
+ args.DestStartBlock = closestBlockHdr.Number.Uint64()
+ fmt.Println("using approx destination start block number", args.DestStartBlock)
+ return nil
+}
+
+func (args *ManualExecArgs) FindSeqNrFromCCIPSendRequested() (uint64, error) {
+ var seqNr uint64
+ onRampContract, err := evm_2_evm_onramp.NewEVM2EVMOnRamp(common.HexToAddress(args.OnRamp), args.SourceChain)
+ if err != nil {
+ return seqNr, err
+ }
+ iterator, err := onRampContract.FilterCCIPSendRequested(&bind.FilterOpts{
+ Start: args.SourceStartBlock.Uint64(),
+ })
+ if err != nil {
+ return seqNr, err
+ }
+ for iterator.Next() {
+ if iterator.Event.Raw.Index == args.SendReqLogIndex &&
+ iterator.Event.Raw.TxHash.Hex() == args.SendReqTxHash {
+ seqNr = iterator.Event.Message.SequenceNumber
+ break
+ }
+ }
+ if 
seqNr == 0 {
+ return seqNr,
+ fmt.Errorf("no CCIPSendRequested logs found for logIndex %d starting from block number %d", args.SendReqLogIndex, args.SourceStartBlock)
+ }
+ return seqNr, nil
+}
+
+func (args *ManualExecArgs) ExecuteManually() (*types.Transaction, error) {
+ if args.SourceChainID == 0 ||
+ args.DestChainID == 0 ||
+ args.DestUser == nil {
+ return nil, fmt.Errorf("chain IDs and owners are mandatory for both source and dest chains")
+ }
+ if !common.IsHexAddress(args.CommitStore) ||
+ !common.IsHexAddress(args.OffRamp) ||
+ !common.IsHexAddress(args.OnRamp) {
+ return nil, fmt.Errorf("contract addresses must be valid hex addresses")
+ }
+ if args.SendReqTxHash == "" {
+ return nil, fmt.Errorf("tx hash of the ccip-send request is required")
+ }
+ if args.SourceStartBlock == nil {
+ return nil, fmt.Errorf("must provide the source block in/after which the ccip-send tx was included")
+ }
+ if args.SeqNr == 0 {
+ if args.SendReqLogIndex == 0 {
+ return nil, fmt.Errorf("must provide the log index of the ccip-send request")
+ }
+ // locate seq nr from the CCIPSendRequested log
+ seqNr, err := args.FindSeqNrFromCCIPSendRequested()
+ if err != nil {
+ return nil, err
+ }
+ args.SeqNr = seqNr
+ }
+ commitStore, err := commit_store_1_2_0.NewCommitStore(common.HexToAddress(args.CommitStore), args.DestChain)
+ if err != nil {
+ return nil, err
+ }
+ if args.DestStartBlock < 1 {
+ err = args.ApproxDestStartBlock()
+ if err != nil {
+ return nil, err
+ }
+ }
+ iterator, err := commitStore.FilterReportAccepted(&bind.FilterOpts{Start: args.DestStartBlock})
+ if err != nil {
+ return nil, err
+ }
+
+ var commitReport *commit_store_1_2_0.CommitStoreCommitReport
+ for iterator.Next() {
+ if iterator.Event.Report.Interval.Min <= args.SeqNr && iterator.Event.Report.Interval.Max >= args.SeqNr {
+ commitReport = &iterator.Event.Report
+ fmt.Println("Found root")
+ break
+ }
+ }
+ if commitReport == nil {
+ return nil, fmt.Errorf("unable to find seq num %d in commit report", args.SeqNr)
+ }
+
+ return args.execute(commitReport)
+}
+
+func (args *ManualExecArgs) execute(report *commit_store_1_2_0.CommitStoreCommitReport) (*types.Transaction, error) {
+ log.Info().Msg("Executing request manually")
+ seqNr := args.SeqNr
+ // Build a merkle tree for the report
+ mctx := hashutil.NewKeccak()
+ onRampContract, err := evm_2_evm_onramp_1_2_0.NewEVM2EVMOnRamp(common.HexToAddress(args.OnRamp), args.SourceChain)
+ if err != nil {
+ return nil, err
+ }
+ leafHasher := v1_2_0.NewLeafHasher(args.SourceChainID, args.DestChainID, common.HexToAddress(args.OnRamp), mctx, onRampContract)
+ if leafHasher == nil {
+ return nil, fmt.Errorf("unable to create leaf hasher")
+ }
+
+ var leaves [][32]byte
+ var curr, prove int
+ var msgs []evm_2_evm_offramp.InternalEVM2EVMMessage
+ var manualExecGasLimits []*big.Int
+ var tokenData [][][]byte
+ sendRequestedIterator, err := onRampContract.FilterCCIPSendRequested(&bind.FilterOpts{
+ Start: args.SourceStartBlock.Uint64(),
+ })
+ if err != nil {
+ return nil, err
+ }
+ for sendRequestedIterator.Next() {
+ if sendRequestedIterator.Event.Message.SequenceNumber <= report.Interval.Max &&
+ sendRequestedIterator.Event.Message.SequenceNumber >= report.Interval.Min {
+ fmt.Println("Found seq num", sendRequestedIterator.Event.Message.SequenceNumber, report.Interval)
+ hash, err2 := leafHasher.HashLeaf(sendRequestedIterator.Event.Raw)
+ if err2 != nil {
+ return nil, err2
+ }
+ leaves = append(leaves, hash)
+ if sendRequestedIterator.Event.Message.SequenceNumber == seqNr {
+ 
fmt.Printf("Found proving %d %+v\n", curr, sendRequestedIterator.Event.Message) + var tokensAndAmounts []evm_2_evm_offramp.ClientEVMTokenAmount + for _, tokenAndAmount := range sendRequestedIterator.Event.Message.TokenAmounts { + tokensAndAmounts = append(tokensAndAmounts, evm_2_evm_offramp.ClientEVMTokenAmount{ + Token: tokenAndAmount.Token, + Amount: tokenAndAmount.Amount, + }) + } + msg := evm_2_evm_offramp.InternalEVM2EVMMessage{ + SourceChainSelector: sendRequestedIterator.Event.Message.SourceChainSelector, + Sender: sendRequestedIterator.Event.Message.Sender, + Receiver: sendRequestedIterator.Event.Message.Receiver, + SequenceNumber: sendRequestedIterator.Event.Message.SequenceNumber, + GasLimit: sendRequestedIterator.Event.Message.GasLimit, + Strict: sendRequestedIterator.Event.Message.Strict, + Nonce: sendRequestedIterator.Event.Message.Nonce, + FeeToken: sendRequestedIterator.Event.Message.FeeToken, + FeeTokenAmount: sendRequestedIterator.Event.Message.FeeTokenAmount, + Data: sendRequestedIterator.Event.Message.Data, + TokenAmounts: tokensAndAmounts, + SourceTokenData: sendRequestedIterator.Event.Message.SourceTokenData, + MessageId: sendRequestedIterator.Event.Message.MessageId, + } + msgs = append(msgs, msg) + if args.GasLimit != nil { + msg.GasLimit = args.GasLimit + } + manualExecGasLimits = append(manualExecGasLimits, msg.GasLimit) + var msgTokenData [][]byte + for range sendRequestedIterator.Event.Message.TokenAmounts { + msgTokenData = append(msgTokenData, []byte{}) + } + + tokenData = append(tokenData, msgTokenData) + prove = curr + } + curr++ + } + } + sendRequestedIterator.Close() + if msgs == nil { + return nil, fmt.Errorf("unable to find msg with seqNr %d", seqNr) + } + tree, err := merklemulti.NewTree(mctx, leaves) + if err != nil { + return nil, err + } + if tree.Root() != report.MerkleRoot { + return nil, fmt.Errorf("root doesn't match") + } + + proof, err := tree.Prove([]int{prove}) + if err != nil { + return nil, err + } + + offRampProof := evm_2_evm_offramp.InternalExecutionReport{ + Messages: msgs, + OffchainTokenData: tokenData, + Proofs: proof.Hashes, + ProofFlagBits: abihelpers.ProofFlagsToBits(proof.SourceFlags), + } + offRamp, err := evm_2_evm_offramp.NewEVM2EVMOffRamp(common.HexToAddress(args.OffRamp), args.DestChain) + if err != nil { + return nil, err + } + // Execute. 
+ return offRamp.ManuallyExecute(args.DestUser, offRampProof, manualExecGasLimits) +} + +func (c *CCIPContracts) ExecuteMessage( + t *testing.T, + req logpoller.Log, + txHash common.Hash, + destStartBlock uint64, +) uint64 { + t.Log("Executing request manually") + sendReqReceipt, err := c.Source.Chain.TransactionReceipt(context.Background(), txHash) + require.NoError(t, err) + args := ManualExecArgs{ + SourceChainID: c.Source.ChainID, + DestChainID: c.Dest.ChainID, + DestUser: c.Dest.User, + SourceChain: c.Source.Chain, + DestChain: c.Dest.Chain, + SourceStartBlock: sendReqReceipt.BlockNumber, + DestStartBlock: destStartBlock, + DestLatestBlockNum: c.Dest.Chain.Blockchain().CurrentBlock().Number.Uint64(), + SendReqLogIndex: uint(req.LogIndex), + SendReqTxHash: txHash.String(), + CommitStore: c.Dest.CommitStore.Address().String(), + OnRamp: c.Source.OnRamp.Address().String(), + OffRamp: c.Dest.OffRamp.Address().String(), + } + tx, err := args.ExecuteManually() + require.NoError(t, err) + c.Dest.Chain.Commit() + c.Source.Chain.Commit() + rec, err := c.Dest.Chain.TransactionReceipt(context.Background(), tx.Hash()) + require.NoError(t, err) + require.Equal(t, uint64(1), rec.Status, "manual execution failed") + t.Logf("Manual Execution completed for seqNum %d", args.SeqNr) + return args.SeqNr +} + +func GetBalance(t *testing.T, chain bind.ContractBackend, tokenAddr common.Address, addr common.Address) *big.Int { + token, err := link_token_interface.NewLinkToken(tokenAddr, chain) + require.NoError(t, err) + bal, err := token.BalanceOf(nil, addr) + require.NoError(t, err) + return bal +} diff --git a/core/services/ocr2/plugins/ccip/testhelpers/testhelpers_1_4_0/chainlink.go b/core/services/ocr2/plugins/ccip/testhelpers/testhelpers_1_4_0/chainlink.go new file mode 100644 index 00000000000..25be1c2a9a9 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/testhelpers/testhelpers_1_4_0/chainlink.go @@ -0,0 +1,1045 @@ +package testhelpers_1_4_0 + +import ( + "context" + "encoding/hex" + "fmt" + "math/big" + "net/http" + "net/http/httptest" + "strconv" + "strings" + "testing" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" + "github.com/ethereum/go-ethereum/common" + types3 "github.com/ethereum/go-ethereum/core/types" + "github.com/google/uuid" + "github.com/hashicorp/consul/sdk/freeport" + "github.com/jmoiron/sqlx" + "github.com/onsi/gomega" + "github.com/pkg/errors" + + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + "k8s.io/utils/pointer" //nolint:staticcheck + + "github.com/smartcontractkit/libocr/commontypes" + "github.com/smartcontractkit/libocr/offchainreporting2/confighelper" + types4 "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + + "github.com/smartcontractkit/chainlink-common/pkg/config" + "github.com/smartcontractkit/chainlink-common/pkg/loop" + "github.com/smartcontractkit/chainlink-common/pkg/utils/mailbox" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + coretypes "github.com/smartcontractkit/chainlink-common/pkg/types/core/mocks" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" + v2 "github.com/smartcontractkit/chainlink/v2/core/chains/evm/config/toml" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils" + evmUtils "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils/big" + 
"github.com/smartcontractkit/chainlink/v2/core/chains/legacyevm" + configv2 "github.com/smartcontractkit/chainlink/v2/core/config/toml" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/commit_store_1_2_0" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_offramp_1_2_0" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/evm_2_evm_onramp_1_2_0" + "github.com/smartcontractkit/chainlink/v2/core/internal/cltest/heavyweight" + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/logger/audit" + "github.com/smartcontractkit/chainlink/v2/core/services/chainlink" + feeds2 "github.com/smartcontractkit/chainlink/v2/core/services/feeds" + feedsMocks "github.com/smartcontractkit/chainlink/v2/core/services/feeds/mocks" + pb "github.com/smartcontractkit/chainlink/v2/core/services/feeds/proto" + "github.com/smartcontractkit/chainlink/v2/core/services/job" + "github.com/smartcontractkit/chainlink/v2/core/services/keystore" + "github.com/smartcontractkit/chainlink/v2/core/services/keystore/chaintype" + "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/csakey" + "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/ocr2key" + ksMocks "github.com/smartcontractkit/chainlink/v2/core/services/keystore/mocks" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_0_0" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/testhelpers" + integrationtesthelpers "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/testhelpers/integration" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/validate" + "github.com/smartcontractkit/chainlink/v2/core/services/ocrbootstrap" + evmrelay "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm" + clutils "github.com/smartcontractkit/chainlink/v2/core/utils" + "github.com/smartcontractkit/chainlink/v2/core/utils/crypto" + "github.com/smartcontractkit/chainlink/v2/plugins" +) + +const ( + execSpecTemplate = ` + type = "offchainreporting2" + schemaVersion = 1 + name = "ccip-exec-1" + externalJobID = "67ffad71-d90f-4fe3-b4e4-494924b707fb" + forwardingAllowed = false + maxTaskDuration = "0s" + contractID = "%s" + contractConfigConfirmations = 1 + contractConfigTrackerPollInterval = "20s" + ocrKeyBundleID = "%s" + relay = "evm" + pluginType = "ccip-execution" + transmitterID = "%s" + + [relayConfig] + chainID = 1_337 + + [pluginConfig] + destStartBlock = 50 + + [pluginConfig.USDCConfig] + AttestationAPI = "http://blah.com" + SourceMessageTransmitterAddress = "%s" + SourceTokenAddress = "%s" + AttestationAPITimeoutSeconds = 10 + ` + commitSpecTemplatePipeline = ` + type = "offchainreporting2" + schemaVersion = 1 + name = "ccip-commit-1" + externalJobID = "13c997cf-1a14-4ab7-9068-07ee6d2afa55" + forwardingAllowed = false + maxTaskDuration = "0s" + contractID = "%s" + contractConfigConfirmations = 1 + contractConfigTrackerPollInterval = "20s" + ocrKeyBundleID = "%s" + relay = "evm" + pluginType = "ccip-commit" + transmitterID = "%s" + + [relayConfig] + 
chainID = 1_337 + + [pluginConfig] + destStartBlock = 50 + offRamp = "%s" + tokenPricesUSDPipeline = """ + %s + """ + ` + commitSpecTemplateDynamicPriceGetter = ` + type = "offchainreporting2" + schemaVersion = 1 + name = "ccip-commit-1" + externalJobID = "13c997cf-1a14-4ab7-9068-07ee6d2afa55" + forwardingAllowed = false + maxTaskDuration = "0s" + contractID = "%s" + contractConfigConfirmations = 1 + contractConfigTrackerPollInterval = "20s" + ocrKeyBundleID = "%s" + relay = "evm" + pluginType = "ccip-commit" + transmitterID = "%s" + + [relayConfig] + chainID = 1_337 + + [pluginConfig] + destStartBlock = 50 + offRamp = "%s" + priceGetterConfig = """ + %s + """ + ` +) + +type Node struct { + App chainlink.Application + Transmitter common.Address + PaymentReceiver common.Address + KeyBundle ocr2key.KeyBundle +} + +func (node *Node) FindJobIDForContract(t *testing.T, addr common.Address) int32 { + jobs := node.App.JobSpawner().ActiveJobs() + for _, j := range jobs { + if j.Type == job.OffchainReporting2 && j.OCR2OracleSpec.ContractID == addr.Hex() { + return j.ID + } + } + t.Fatalf("Could not find job for contract %s", addr.Hex()) + return 0 +} + +func (node *Node) EventuallyNodeUsesUpdatedPriceRegistry(t *testing.T, ccipContracts CCIPIntegrationTestHarness) logpoller.Log { + c, err := node.App.GetRelayers().LegacyEVMChains().Get(strconv.FormatUint(ccipContracts.Dest.ChainID, 10)) + require.NoError(t, err) + var log logpoller.Log + gomega.NewGomegaWithT(t).Eventually(func() bool { + ccipContracts.Source.Chain.Commit() + ccipContracts.Dest.Chain.Commit() + log, err := c.LogPoller().LatestLogByEventSigWithConfs( + testutils.Context(t), + v1_0_0.UsdPerUnitGasUpdated, + ccipContracts.Dest.PriceRegistry.Address(), + 0, + ) + // err can be transient errors such as sql row set empty + if err != nil { + return false + } + return log != nil + }, testutils.WaitTimeout(t), 1*time.Second).Should(gomega.BeTrue(), "node is not using updated price registry %s", ccipContracts.Dest.PriceRegistry.Address().Hex()) + return log +} + +func (node *Node) EventuallyNodeUsesNewCommitConfig(t *testing.T, ccipContracts CCIPIntegrationTestHarness, commitCfg ccipdata.CommitOnchainConfig) logpoller.Log { + c, err := node.App.GetRelayers().LegacyEVMChains().Get(strconv.FormatUint(ccipContracts.Dest.ChainID, 10)) + require.NoError(t, err) + var log logpoller.Log + gomega.NewGomegaWithT(t).Eventually(func() bool { + ccipContracts.Source.Chain.Commit() + ccipContracts.Dest.Chain.Commit() + log, err := c.LogPoller().LatestLogByEventSigWithConfs( + testutils.Context(t), + evmrelay.OCR2AggregatorLogDecoder.EventSig(), + ccipContracts.Dest.CommitStore.Address(), + 0, + ) + require.NoError(t, err) + var latestCfg ccipdata.CommitOnchainConfig + if log != nil { + latestCfg, err = DecodeCommitOnChainConfig(log.Data) + require.NoError(t, err) + return latestCfg == commitCfg + } + return false + }, testutils.WaitTimeout(t), 1*time.Second).Should(gomega.BeTrue(), "node is using old cfg") + return log +} + +func (node *Node) EventuallyNodeUsesNewExecConfig(t *testing.T, ccipContracts CCIPIntegrationTestHarness, execCfg v1_2_0.ExecOnchainConfig) logpoller.Log { + c, err := node.App.GetRelayers().LegacyEVMChains().Get(strconv.FormatUint(ccipContracts.Dest.ChainID, 10)) + require.NoError(t, err) + var log logpoller.Log + gomega.NewGomegaWithT(t).Eventually(func() bool { + ccipContracts.Source.Chain.Commit() + ccipContracts.Dest.Chain.Commit() + log, err := c.LogPoller().LatestLogByEventSigWithConfs( + testutils.Context(t), + 
evmrelay.OCR2AggregatorLogDecoder.EventSig(), + ccipContracts.Dest.OffRamp.Address(), + 0, + ) + require.NoError(t, err) + var latestCfg v1_2_0.ExecOnchainConfig + if log != nil { + latestCfg, err = DecodeExecOnChainConfig(log.Data) + require.NoError(t, err) + return latestCfg == execCfg + } + return false + }, testutils.WaitTimeout(t), 1*time.Second).Should(gomega.BeTrue(), "node is using old cfg") + return log +} + +func (node *Node) EventuallyHasReqSeqNum(t *testing.T, ccipContracts *CCIPIntegrationTestHarness, onRamp common.Address, seqNum int) logpoller.Log { + c, err := node.App.GetRelayers().LegacyEVMChains().Get(strconv.FormatUint(ccipContracts.Source.ChainID, 10)) + require.NoError(t, err) + var log logpoller.Log + gomega.NewGomegaWithT(t).Eventually(func() bool { + ccipContracts.Source.Chain.Commit() + ccipContracts.Dest.Chain.Commit() + lgs, err := c.LogPoller().LogsDataWordRange( + testutils.Context(t), + v1_2_0.CCIPSendRequestEventSig, + onRamp, + v1_2_0.CCIPSendRequestSeqNumIndex, + abihelpers.EvmWord(uint64(seqNum)), + abihelpers.EvmWord(uint64(seqNum)), + 1, + ) + require.NoError(t, err) + t.Log("Send requested", len(lgs)) + if len(lgs) == 1 { + log = lgs[0] + return true + } + return false + }, testutils.WaitTimeout(t), 1*time.Second).Should(gomega.BeTrue(), "eventually has seq num") + return log +} + +func (node *Node) EventuallyHasExecutedSeqNums(t *testing.T, ccipContracts *CCIPIntegrationTestHarness, offRamp common.Address, minSeqNum int, maxSeqNum int) []logpoller.Log { + c, err := node.App.GetRelayers().LegacyEVMChains().Get(strconv.FormatUint(ccipContracts.Dest.ChainID, 10)) + require.NoError(t, err) + var logs []logpoller.Log + gomega.NewGomegaWithT(t).Eventually(func() bool { + ccipContracts.Source.Chain.Commit() + ccipContracts.Dest.Chain.Commit() + lgs, err := c.LogPoller().IndexedLogsTopicRange( + testutils.Context(t), + v1_0_0.ExecutionStateChangedEvent, + offRamp, + v1_0_0.ExecutionStateChangedSeqNrIndex, + abihelpers.EvmWord(uint64(minSeqNum)), + abihelpers.EvmWord(uint64(maxSeqNum)), + 1, + ) + require.NoError(t, err) + t.Logf("Have executed logs %d want %d", len(lgs), maxSeqNum-minSeqNum+1) + if len(lgs) == maxSeqNum-minSeqNum+1 { + logs = lgs + t.Logf("Seq Num %d-%d executed", minSeqNum, maxSeqNum) + return true + } + return false + }, testutils.WaitTimeout(t), 1*time.Second).Should(gomega.BeTrue(), "eventually has not executed seq num") + return logs +} + +func (node *Node) ConsistentlySeqNumHasNotBeenExecuted(t *testing.T, ccipContracts *CCIPIntegrationTestHarness, offRamp common.Address, seqNum int) logpoller.Log { + c, err := node.App.GetRelayers().LegacyEVMChains().Get(strconv.FormatUint(ccipContracts.Dest.ChainID, 10)) + require.NoError(t, err) + var log logpoller.Log + gomega.NewGomegaWithT(t).Consistently(func() bool { + ccipContracts.Source.Chain.Commit() + ccipContracts.Dest.Chain.Commit() + lgs, err := c.LogPoller().IndexedLogsTopicRange( + testutils.Context(t), + v1_0_0.ExecutionStateChangedEvent, + offRamp, + v1_0_0.ExecutionStateChangedSeqNrIndex, + abihelpers.EvmWord(uint64(seqNum)), + abihelpers.EvmWord(uint64(seqNum)), + 1, + ) + require.NoError(t, err) + t.Log("Executed logs", lgs) + if len(lgs) == 1 { + log = lgs[0] + return true + } + return false + }, 10*time.Second, 1*time.Second).Should(gomega.BeFalse(), "seq number got executed") + return log +} + +func (node *Node) AddJob(t *testing.T, spec *integrationtesthelpers.OCR2TaskJobSpec) { + specString, err := spec.String() + require.NoError(t, err) + ccipJob, err := 
validate.ValidatedOracleSpecToml( + testutils.Context(t), + node.App.GetConfig().OCR2(), + node.App.GetConfig().Insecure(), + specString, + // FIXME Ani + nil, + ) + require.NoError(t, err) + err = node.App.AddJobV2(context.Background(), &ccipJob) + require.NoError(t, err) +} + +func (node *Node) AddBootstrapJob(t *testing.T, spec *integrationtesthelpers.OCR2TaskJobSpec) { + specString, err := spec.String() + require.NoError(t, err) + ccipJob, err := ocrbootstrap.ValidatedBootstrapSpecToml(specString) + require.NoError(t, err) + err = node.App.AddJobV2(context.Background(), &ccipJob) + require.NoError(t, err) +} + +func (node *Node) AddJobsWithSpec(t *testing.T, jobSpec *integrationtesthelpers.OCR2TaskJobSpec) { + // set node specific values + jobSpec.OCR2OracleSpec.OCRKeyBundleID.SetValid(node.KeyBundle.ID()) + jobSpec.OCR2OracleSpec.TransmitterID.SetValid(node.Transmitter.Hex()) + node.AddJob(t, jobSpec) +} + +func setupNodeCCIP( + t *testing.T, + owner *bind.TransactOpts, + port int64, + dbName string, + sourceChain *backends.SimulatedBackend, destChain *backends.SimulatedBackend, + sourceChainID *big.Int, destChainID *big.Int, + bootstrapPeerID string, + bootstrapPort int64, +) (chainlink.Application, string, common.Address, ocr2key.KeyBundle) { + trueRef, falseRef := true, false + + // Do not want to load fixtures as they contain a dummy chainID. + loglevel := configv2.LogLevel(zap.DebugLevel) + config, db := heavyweight.FullTestDBNoFixturesV2(t, func(c *chainlink.Config, _ *chainlink.Secrets) { + p2pAddresses := []string{ + fmt.Sprintf("127.0.0.1:%d", port), + } + c.Log.Level = &loglevel + c.Feature.UICSAKeys = &trueRef + c.Feature.FeedsManager = &trueRef + c.OCR.Enabled = &falseRef + c.OCR.DefaultTransactionQueueDepth = pointer.Uint32(200) + c.OCR2.Enabled = &trueRef + c.Feature.LogPoller = &trueRef + c.P2P.V2.Enabled = &trueRef + + dur, err := config.NewDuration(500 * time.Millisecond) + if err != nil { + panic(err) + } + c.P2P.V2.DeltaDial = &dur + + dur2, err := config.NewDuration(5 * time.Second) + if err != nil { + panic(err) + } + + c.P2P.V2.DeltaReconcile = &dur2 + c.P2P.V2.ListenAddresses = &p2pAddresses + c.P2P.V2.AnnounceAddresses = &p2pAddresses + + c.EVM = []*v2.EVMConfig{createConfigV2Chain(sourceChainID), createConfigV2Chain(destChainID)} + + if bootstrapPeerID != "" { + // Supply the bootstrap IP and port as a V2 peer address + c.P2P.V2.DefaultBootstrappers = &[]commontypes.BootstrapperLocator{ + { + PeerID: bootstrapPeerID, Addrs: []string{ + fmt.Sprintf("127.0.0.1:%d", bootstrapPort), + }, + }, + } + } + }) + + lggr := logger.TestLogger(t) + + // The in-memory geth sim does not let you create a custom ChainID, it will always be 1337. + // In particular this means that if you sign an eip155 tx, the chainID used MUST be 1337 + // and the CHAINID op code will always emit 1337. 
To work around this to simulate a "multichain" + // test, we fake different chainIDs using the wrapped sim cltest.SimulatedBackend so the RPC + // appears to operate on different chainIDs and we use an EthKeyStoreSim wrapper which always + // signs 1337 see https://github.com/smartcontractkit/chainlink-ccip/blob/a24dd436810250a458d27d8bb3fb78096afeb79c/core/services/ocr2/plugins/ccip/testhelpers/simulated_backend.go#L35 + sourceClient := client.NewSimulatedBackendClient(t, sourceChain, sourceChainID) + destClient := client.NewSimulatedBackendClient(t, destChain, destChainID) + csaKeyStore := ksMocks.NewCSA(t) + + key, err := csakey.NewV2() + require.NoError(t, err) + csaKeyStore.On("GetAll").Return([]csakey.KeyV2{key}, nil) + keyStore := NewKsa(db, lggr, csaKeyStore) + + simEthKeyStore := testhelpers.EthKeyStoreSim{ + ETHKS: keyStore.Eth(), + CSAKS: keyStore.CSA(), + } + mailMon := mailbox.NewMonitor("CCIP", lggr.Named("Mailbox")) + evmOpts := chainlink.EVMFactoryConfig{ + ChainOpts: legacyevm.ChainOpts{ + AppConfig: config, + GenEthClient: func(chainID *big.Int) client.Client { + if chainID.String() == sourceChainID.String() { + return sourceClient + } else if chainID.String() == destChainID.String() { + return destClient + } + t.Fatalf("invalid chain ID %v", chainID.String()) + return nil + }, + MailMon: mailMon, + DS: db, + }, + CSAETHKeystore: simEthKeyStore, + } + loopRegistry := plugins.NewLoopRegistry(lggr.Named("LoopRegistry"), config.Tracing()) + relayerFactory := chainlink.RelayerFactory{ + Logger: lggr, + LoopRegistry: loopRegistry, + GRPCOpts: loop.GRPCOpts{}, + CapabilitiesRegistry: coretypes.NewCapabilitiesRegistry(t), + } + testCtx := testutils.Context(t) + // evm alway enabled for backward compatibility + initOps := []chainlink.CoreRelayerChainInitFunc{ + chainlink.InitEVM(testCtx, relayerFactory, evmOpts), + } + + relayChainInterops, err := chainlink.NewCoreRelayerChainInteroperators(initOps...) 
+ if err != nil { + t.Fatal(err) + } + + app, err := chainlink.NewApplication(chainlink.ApplicationOpts{ + Config: config, + DS: db, + KeyStore: keyStore, + RelayerChainInteroperators: relayChainInterops, + Logger: lggr, + ExternalInitiatorManager: nil, + CloseLogger: lggr.Sync, + UnrestrictedHTTPClient: &http.Client{}, + RestrictedHTTPClient: &http.Client{}, + AuditLogger: audit.NoopLogger, + MailMon: mailMon, + LoopRegistry: plugins.NewLoopRegistry(lggr, config.Tracing()), + }) + ctx := testutils.Context(t) + require.NoError(t, err) + require.NoError(t, app.GetKeyStore().Unlock(ctx, "password")) + _, err = app.GetKeyStore().P2P().Create(ctx) + require.NoError(t, err) + + p2pIDs, err := app.GetKeyStore().P2P().GetAll() + require.NoError(t, err) + require.Len(t, p2pIDs, 1) + peerID := p2pIDs[0].PeerID() + + _, err = app.GetKeyStore().Eth().Create(testCtx, destChainID) + require.NoError(t, err) + sendingKeys, err := app.GetKeyStore().Eth().EnabledKeysForChain(testCtx, destChainID) + require.NoError(t, err) + require.Len(t, sendingKeys, 1) + transmitter := sendingKeys[0].Address + s, err := app.GetKeyStore().Eth().GetState(testCtx, sendingKeys[0].ID(), destChainID) + require.NoError(t, err) + lggr.Debug(fmt.Sprintf("Transmitter address %s chainID %s", transmitter, s.EVMChainID.String())) + + // Fund the commitTransmitter address with some ETH + n, err := destChain.NonceAt(context.Background(), owner.From, nil) + require.NoError(t, err) + + tx := types3.NewTransaction(n, transmitter, big.NewInt(1000000000000000000), 21000, big.NewInt(1000000000), nil) + signedTx, err := owner.Signer(owner.From, tx) + require.NoError(t, err) + err = destChain.SendTransaction(context.Background(), signedTx) + require.NoError(t, err) + destChain.Commit() + + kb, err := app.GetKeyStore().OCR2().Create(ctx, chaintype.EVM) + require.NoError(t, err) + return app, peerID.Raw(), transmitter, kb +} + +func createConfigV2Chain(chainId *big.Int) *v2.EVMConfig { + // NOTE: For the executor jobs, the default of 500k is insufficient for a 3 message batch + defaultGasLimit := uint64(5000000) + tr := true + + sourceC := v2.Defaults((*evmUtils.Big)(chainId)) + sourceC.GasEstimator.LimitDefault = &defaultGasLimit + fixedPrice := "FixedPrice" + sourceC.GasEstimator.Mode = &fixedPrice + d, _ := config.NewDuration(100 * time.Millisecond) + sourceC.LogPollInterval = &d + fd := uint32(2) + sourceC.FinalityDepth = &fd + return &v2.EVMConfig{ + ChainID: (*evmUtils.Big)(chainId), + Enabled: &tr, + Chain: sourceC, + Nodes: v2.EVMNodes{&v2.Node{}}, + } +} + +type CCIPIntegrationTestHarness struct { + CCIPContracts + Nodes []Node + Bootstrap Node +} + +func SetupCCIPIntegrationTH(t *testing.T, sourceChainID, sourceChainSelector, destChainId, destChainSelector uint64) CCIPIntegrationTestHarness { + return CCIPIntegrationTestHarness{ + CCIPContracts: SetupCCIPContracts(t, sourceChainID, sourceChainSelector, destChainId, destChainSelector), + } +} + +func (c *CCIPIntegrationTestHarness) CreatePricesPipeline(t *testing.T) (string, *httptest.Server, *httptest.Server) { + linkUSD := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + _, err := w.Write([]byte(`{"UsdPerLink": "8000000000000000000"}`)) + require.NoError(t, err) + })) + ethUSD := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + _, err := w.Write([]byte(`{"UsdPerETH": "1700000000000000000000"}`)) + require.NoError(t, err) + })) + sourceWrappedNative, err := c.Source.Router.GetWrappedNative(nil) + require.NoError(t, 
err) + destWrappedNative, err := c.Dest.Router.GetWrappedNative(nil) + require.NoError(t, err) + tokenPricesUSDPipeline := fmt.Sprintf(` +// Price 1 +link [type=http method=GET url="%s"]; +link_parse [type=jsonparse path="UsdPerLink"]; +link->link_parse; +eth [type=http method=GET url="%s"]; +eth_parse [type=jsonparse path="UsdPerETH"]; +eth->eth_parse; +merge [type=merge left="{}" right="{\\\"%s\\\":$(link_parse), \\\"%s\\\":$(eth_parse), \\\"%s\\\":$(eth_parse)}"];`, + linkUSD.URL, ethUSD.URL, c.Dest.LinkToken.Address(), sourceWrappedNative, destWrappedNative) + + return tokenPricesUSDPipeline, linkUSD, ethUSD +} + +func (c *CCIPIntegrationTestHarness) AddAllJobs(t *testing.T, jobParams integrationtesthelpers.CCIPJobSpecParams) { + jobParams.OffRamp = c.Dest.OffRamp.Address() + + commitSpec, err := jobParams.CommitJobSpec() + require.NoError(t, err) + geExecutionSpec, err := jobParams.ExecutionJobSpec() + require.NoError(t, err) + nodes := c.Nodes + for _, node := range nodes { + node.AddJobsWithSpec(t, commitSpec) + node.AddJobsWithSpec(t, geExecutionSpec) + } +} + +func (c *CCIPIntegrationTestHarness) jobSpecProposal(t *testing.T, specTemplate string, f func() (*integrationtesthelpers.OCR2TaskJobSpec, error), feedsManagerId int64, version int32, opts ...any) feeds2.ProposeJobArgs { + spec, err := f() + require.NoError(t, err) + + args := []any{spec.OCR2OracleSpec.ContractID} + args = append(args, opts...) + + return feeds2.ProposeJobArgs{ + FeedsManagerID: feedsManagerId, + RemoteUUID: uuid.New(), + Multiaddrs: nil, + Version: version, + Spec: fmt.Sprintf(specTemplate, args...), + } +} + +func (c *CCIPIntegrationTestHarness) SetupFeedsManager(t *testing.T) { + ctx := testutils.Context(t) + for _, node := range c.Nodes { + f := node.App.GetFeedsService() + + managers, err := f.ListManagers(ctx) + require.NoError(t, err) + if len(managers) > 0 { + // Use at most one feeds manager, don't register if one already exists + continue + } + + secret := utils.RandomBytes32() + pkey, err := crypto.PublicKeyFromHex(hex.EncodeToString(secret[:])) + require.NoError(t, err) + + m := feeds2.RegisterManagerParams{ + Name: "CCIP", + URI: "http://localhost:8080", + PublicKey: *pkey, + } + + _, err = f.RegisterManager(testutils.Context(t), m) + require.NoError(t, err) + + connManager := feedsMocks.NewConnectionsManager(t) + connManager.On("GetClient", mock.Anything).Maybe().Return(NoopFeedsClient{}, nil) + connManager.On("Close").Maybe().Return() + connManager.On("IsConnected", mock.Anything).Maybe().Return(true) + f.Unsafe_SetConnectionsManager(connManager) + } +} + +func (c *CCIPIntegrationTestHarness) ApproveJobSpecs(t *testing.T, jobParams integrationtesthelpers.CCIPJobSpecParams) { + ctx := testutils.Context(t) + + for _, node := range c.Nodes { + f := node.App.GetFeedsService() + managers, err := f.ListManagers(ctx) + require.NoError(t, err) + require.Len(t, managers, 1, "expected exactly one feeds manager") + + execSpec := c.jobSpecProposal( + t, + execSpecTemplate, + jobParams.ExecutionJobSpec, + managers[0].ID, + 1, + node.KeyBundle.ID(), + node.Transmitter.Hex(), + utils.RandomAddress().String(), + utils.RandomAddress().String(), + ) + execId, err := f.ProposeJob(ctx, &execSpec) + require.NoError(t, err) + + err = f.ApproveSpec(ctx, execId, true) + require.NoError(t, err) + + var commitSpec feeds2.ProposeJobArgs + if jobParams.TokenPricesUSDPipeline != "" { + commitSpec = c.jobSpecProposal( + t, + commitSpecTemplatePipeline, + jobParams.CommitJobSpec, + managers[0].ID, + 2, + 
node.KeyBundle.ID(), + node.Transmitter.Hex(), + jobParams.OffRamp.String(), + jobParams.TokenPricesUSDPipeline, + ) + } else { + commitSpec = c.jobSpecProposal( + t, + commitSpecTemplateDynamicPriceGetter, + jobParams.CommitJobSpec, + managers[0].ID, + 2, + node.KeyBundle.ID(), + node.Transmitter.Hex(), + jobParams.OffRamp.String(), + jobParams.PriceGetterConfig, + ) + } + + commitId, err := f.ProposeJob(ctx, &commitSpec) + require.NoError(t, err) + + err = f.ApproveSpec(ctx, commitId, true) + require.NoError(t, err) + } +} + +func (c *CCIPIntegrationTestHarness) AllNodesHaveReqSeqNum(t *testing.T, seqNum int, onRampOpts ...common.Address) logpoller.Log { + var log logpoller.Log + nodes := c.Nodes + var onRamp common.Address + if len(onRampOpts) > 0 { + onRamp = onRampOpts[0] + } else { + require.NotNil(t, c.Source.OnRamp, "no onramp configured") + onRamp = c.Source.OnRamp.Address() + } + for _, node := range nodes { + log = node.EventuallyHasReqSeqNum(t, c, onRamp, seqNum) + } + return log +} + +func (c *CCIPIntegrationTestHarness) AllNodesHaveExecutedSeqNums(t *testing.T, minSeqNum int, maxSeqNum int, offRampOpts ...common.Address) []logpoller.Log { + var logs []logpoller.Log + nodes := c.Nodes + var offRamp common.Address + + if len(offRampOpts) > 0 { + offRamp = offRampOpts[0] + } else { + require.NotNil(t, c.Dest.OffRamp, "no offramp configured") + offRamp = c.Dest.OffRamp.Address() + } + for _, node := range nodes { + logs = node.EventuallyHasExecutedSeqNums(t, c, offRamp, minSeqNum, maxSeqNum) + } + return logs +} + +func (c *CCIPIntegrationTestHarness) NoNodesHaveExecutedSeqNum(t *testing.T, seqNum int, offRampOpts ...common.Address) logpoller.Log { + var log logpoller.Log + nodes := c.Nodes + var offRamp common.Address + if len(offRampOpts) > 0 { + offRamp = offRampOpts[0] + } else { + require.NotNil(t, c.Dest.OffRamp, "no offramp configured") + offRamp = c.Dest.OffRamp.Address() + } + for _, node := range nodes { + log = node.ConsistentlySeqNumHasNotBeenExecuted(t, c, offRamp, seqNum) + } + return log +} + +func (c *CCIPIntegrationTestHarness) EventuallyCommitReportAccepted(t *testing.T, currentBlock uint64, commitStoreOpts ...common.Address) commit_store_1_2_0.CommitStoreCommitReport { + var commitStore *commit_store_1_2_0.CommitStore + var err error + if len(commitStoreOpts) > 0 { + commitStore, err = commit_store_1_2_0.NewCommitStore(commitStoreOpts[0], c.Dest.Chain) + require.NoError(t, err) + } else { + require.NotNil(t, c.Dest.CommitStore, "no commitStore configured") + commitStore = c.Dest.CommitStore + } + g := gomega.NewGomegaWithT(t) + var report commit_store_1_2_0.CommitStoreCommitReport + g.Eventually(func() bool { + it, err := commitStore.FilterReportAccepted(&bind.FilterOpts{Start: currentBlock}) + g.Expect(err).NotTo(gomega.HaveOccurred(), "Error filtering ReportAccepted event") + g.Expect(it.Next()).To(gomega.BeTrue(), "No ReportAccepted event found") + report = it.Event.Report + if report.MerkleRoot != [32]byte{} { + t.Log("Report Accepted by commitStore") + return true + } + return false + }, testutils.WaitTimeout(t), 1*time.Second).Should(gomega.BeTrue(), "report has not been committed") + return report +} + +func (c *CCIPIntegrationTestHarness) EventuallyExecutionStateChangedToSuccess(t *testing.T, seqNum []uint64, blockNum uint64, offRampOpts ...common.Address) { + var offRamp *evm_2_evm_offramp_1_2_0.EVM2EVMOffRamp + var err error + if len(offRampOpts) > 0 { + offRamp, err = evm_2_evm_offramp_1_2_0.NewEVM2EVMOffRamp(offRampOpts[0], c.Dest.Chain) + 
require.NoError(t, err) + } else { + require.NotNil(t, c.Dest.OffRamp, "no offRamp configured") + offRamp = c.Dest.OffRamp + } + gomega.NewGomegaWithT(t).Eventually(func() bool { + it, err := offRamp.FilterExecutionStateChanged(&bind.FilterOpts{Start: blockNum}, seqNum, [][32]byte{}) + require.NoError(t, err) + for it.Next() { + if cciptypes.MessageExecutionState(it.Event.State) == cciptypes.ExecutionStateSuccess { + t.Logf("ExecutionStateChanged event found for seqNum %d", it.Event.SequenceNumber) + return true + } + } + c.Source.Chain.Commit() + c.Dest.Chain.Commit() + return false + }, testutils.WaitTimeout(t), time.Second). + Should(gomega.BeTrue(), "ExecutionStateChanged Event") +} + +func (c *CCIPIntegrationTestHarness) EventuallyReportCommitted(t *testing.T, max int, commitStoreOpts ...common.Address) uint64 { + var commitStore *commit_store_1_2_0.CommitStore + var err error + var committedSeqNum uint64 + if len(commitStoreOpts) > 0 { + commitStore, err = commit_store_1_2_0.NewCommitStore(commitStoreOpts[0], c.Dest.Chain) + require.NoError(t, err) + } else { + require.NotNil(t, c.Dest.CommitStore, "no commitStore configured") + commitStore = c.Dest.CommitStore + } + gomega.NewGomegaWithT(t).Eventually(func() bool { + minSeqNum, err := commitStore.GetExpectedNextSequenceNumber(nil) + require.NoError(t, err) + c.Source.Chain.Commit() + c.Dest.Chain.Commit() + t.Log("next expected seq num reported", minSeqNum) + committedSeqNum = minSeqNum + return minSeqNum > uint64(max) + }, testutils.WaitTimeout(t), time.Second).Should(gomega.BeTrue(), "report has not been committed") + return committedSeqNum +} + +func (c *CCIPIntegrationTestHarness) EventuallySendRequested(t *testing.T, seqNum uint64, onRampOpts ...common.Address) { + var onRamp *evm_2_evm_onramp_1_2_0.EVM2EVMOnRamp + var err error + if len(onRampOpts) > 0 { + onRamp, err = evm_2_evm_onramp_1_2_0.NewEVM2EVMOnRamp(onRampOpts[0], c.Source.Chain) + require.NoError(t, err) + } else { + require.NotNil(t, c.Source.OnRamp, "no onRamp configured") + onRamp = c.Source.OnRamp + } + gomega.NewGomegaWithT(t).Eventually(func() bool { + it, err := onRamp.FilterCCIPSendRequested(nil) + require.NoError(t, err) + for it.Next() { + if it.Event.Message.SequenceNumber == seqNum { + t.Log("sendRequested generated for", seqNum) + return true + } + } + c.Source.Chain.Commit() + c.Dest.Chain.Commit() + return false + }, testutils.WaitTimeout(t), time.Second).Should(gomega.BeTrue(), "sendRequested has not been generated") +} + +func (c *CCIPIntegrationTestHarness) ConsistentlyReportNotCommitted(t *testing.T, max int, commitStoreOpts ...common.Address) { + var commitStore *commit_store_1_2_0.CommitStore + var err error + if len(commitStoreOpts) > 0 { + commitStore, err = commit_store_1_2_0.NewCommitStore(commitStoreOpts[0], c.Dest.Chain) + require.NoError(t, err) + } else { + require.NotNil(t, c.Dest.CommitStore, "no commitStore configured") + commitStore = c.Dest.CommitStore + } + gomega.NewGomegaWithT(t).Consistently(func() bool { + minSeqNum, err := commitStore.GetExpectedNextSequenceNumber(nil) + require.NoError(t, err) + c.Source.Chain.Commit() + c.Dest.Chain.Commit() + t.Log("min seq num reported", minSeqNum) + return minSeqNum > uint64(max) + }, testutils.WaitTimeout(t), time.Second).Should(gomega.BeFalse(), "report has been committed") +} + +func (c *CCIPIntegrationTestHarness) SetupAndStartNodes(ctx context.Context, t *testing.T, bootstrapNodePort int64) (Node, []Node, int64) { + appBootstrap, bootstrapPeerID, bootstrapTransmitter, bootstrapKb := 
setupNodeCCIP(t, c.Dest.User, bootstrapNodePort, + "bootstrap_ccip", c.Source.Chain, c.Dest.Chain, big.NewInt(0).SetUint64(c.Source.ChainID), + big.NewInt(0).SetUint64(c.Dest.ChainID), "", 0) + var ( + oracles []confighelper.OracleIdentityExtra + nodes []Node + ) + err := appBootstrap.Start(ctx) + require.NoError(t, err) + t.Cleanup(func() { + require.NoError(t, appBootstrap.Stop()) + }) + bootstrapNode := Node{ + App: appBootstrap, + Transmitter: bootstrapTransmitter, + KeyBundle: bootstrapKb, + } + // Set up the minimum 4 oracles all funded with destination ETH + for i := int64(0); i < 4; i++ { + app, peerID, transmitter, kb := setupNodeCCIP( + t, + c.Dest.User, + int64(freeport.GetOne(t)), + fmt.Sprintf("oracle_ccip%d", i), + c.Source.Chain, + c.Dest.Chain, + big.NewInt(0).SetUint64(c.Source.ChainID), + big.NewInt(0).SetUint64(c.Dest.ChainID), + bootstrapPeerID, + bootstrapNodePort, + ) + nodes = append(nodes, Node{ + App: app, + Transmitter: transmitter, + KeyBundle: kb, + }) + offchainPublicKey, _ := hex.DecodeString(strings.TrimPrefix(kb.OnChainPublicKey(), "0x")) + oracles = append(oracles, confighelper.OracleIdentityExtra{ + OracleIdentity: confighelper.OracleIdentity{ + OnchainPublicKey: offchainPublicKey, + TransmitAccount: types4.Account(transmitter.String()), + OffchainPublicKey: kb.OffchainPublicKey(), + PeerID: peerID, + }, + ConfigEncryptionPublicKey: kb.ConfigEncryptionPublicKey(), + }) + err = app.Start(ctx) + require.NoError(t, err) + t.Cleanup(func() { + require.NoError(t, app.Stop()) + }) + } + + c.Oracles = oracles + commitOnchainConfig := c.CreateDefaultCommitOnchainConfig(t) + commitOffchainConfig := c.CreateDefaultCommitOffchainConfig(t) + execOnchainConfig := c.CreateDefaultExecOnchainConfig(t) + execOffchainConfig := c.CreateDefaultExecOffchainConfig(t) + + configBlock := c.SetupOnchainConfig(t, commitOnchainConfig, commitOffchainConfig, execOnchainConfig, execOffchainConfig) + c.Nodes = nodes + c.Bootstrap = bootstrapNode + return bootstrapNode, nodes, configBlock +} + +func (c *CCIPIntegrationTestHarness) SetUpNodesAndJobs(t *testing.T, pricePipeline string, priceGetterConfig string, usdcAttestationAPI string) integrationtesthelpers.CCIPJobSpecParams { + // setup Jobs + ctx := context.Background() + // Starts nodes and configures them in the OCR contracts. + bootstrapNode, _, configBlock := c.SetupAndStartNodes(ctx, t, int64(freeport.GetOne(t))) + + jobParams := c.NewCCIPJobSpecParams(pricePipeline, priceGetterConfig, configBlock, usdcAttestationAPI) + + // Add the bootstrap job + c.Bootstrap.AddBootstrapJob(t, jobParams.BootstrapJob(c.Dest.CommitStore.Address().Hex())) + c.AddAllJobs(t, jobParams) + + // Replay for bootstrap. 
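+	// Replaying the log poller from the block in which the OCR config was set lets the bootstrap
+	// node index the configuration logs that were emitted before its job was added.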
+ bc, err := bootstrapNode.App.GetRelayers().LegacyEVMChains().Get(strconv.FormatUint(c.Dest.ChainID, 10)) + require.NoError(t, err) + require.NoError(t, bc.LogPoller().Replay(context.Background(), configBlock)) + c.Dest.Chain.Commit() + + return jobParams +} + +func (c *CCIPIntegrationTestHarness) NewCCIPJobSpecParams(tokenPricesUSDPipeline string, priceGetterConfig string, configBlock int64, usdcAttestationAPI string) integrationtesthelpers.CCIPJobSpecParams { + return integrationtesthelpers.CCIPJobSpecParams{ + CommitStore: c.Dest.CommitStore.Address(), + OffRamp: c.Dest.OffRamp.Address(), + DestEvmChainId: c.Dest.ChainID, + SourceChainName: "SimulatedSource", + DestChainName: "SimulatedDest", + TokenPricesUSDPipeline: tokenPricesUSDPipeline, + PriceGetterConfig: priceGetterConfig, + DestStartBlock: uint64(configBlock), + USDCAttestationAPI: usdcAttestationAPI, + } +} + +func DecodeCommitOnChainConfig(encoded []byte) (ccipdata.CommitOnchainConfig, error) { + var onchainConfig ccipdata.CommitOnchainConfig + unpacked, err := abihelpers.DecodeOCR2Config(encoded) + if err != nil { + return onchainConfig, err + } + onChainCfg := unpacked.OnchainConfig + onchainConfig, err = abihelpers.DecodeAbiStruct[ccipdata.CommitOnchainConfig](onChainCfg) + if err != nil { + return onchainConfig, err + } + return onchainConfig, nil +} + +func DecodeExecOnChainConfig(encoded []byte) (v1_2_0.ExecOnchainConfig, error) { + var onchainConfig v1_2_0.ExecOnchainConfig + unpacked, err := abihelpers.DecodeOCR2Config(encoded) + if err != nil { + return onchainConfig, errors.Wrap(err, "failed to unpack log data") + } + onChainCfg := unpacked.OnchainConfig + onchainConfig, err = abihelpers.DecodeAbiStruct[v1_2_0.ExecOnchainConfig](onChainCfg) + if err != nil { + return onchainConfig, err + } + return onchainConfig, nil +} + +type ksa struct { + keystore.Master + csa keystore.CSA +} + +func (k *ksa) CSA() keystore.CSA { + return k.csa +} + +func NewKsa(db *sqlx.DB, lggr logger.Logger, csa keystore.CSA) *ksa { + return &ksa{ + Master: keystore.New(db, clutils.FastScryptParams, lggr), + csa: csa, + } +} + +type NoopFeedsClient struct{} + +func (n NoopFeedsClient) ApprovedJob(context.Context, *pb.ApprovedJobRequest) (*pb.ApprovedJobResponse, error) { + return &pb.ApprovedJobResponse{}, nil +} + +func (n NoopFeedsClient) Healthcheck(context.Context, *pb.HealthcheckRequest) (*pb.HealthcheckResponse, error) { + return &pb.HealthcheckResponse{}, nil +} + +func (n NoopFeedsClient) UpdateNode(context.Context, *pb.UpdateNodeRequest) (*pb.UpdateNodeResponse, error) { + return &pb.UpdateNodeResponse{}, nil +} + +func (n NoopFeedsClient) RejectedJob(context.Context, *pb.RejectedJobRequest) (*pb.RejectedJobResponse, error) { + return &pb.RejectedJobResponse{}, nil +} + +func (n NoopFeedsClient) CancelledJob(context.Context, *pb.CancelledJobRequest) (*pb.CancelledJobResponse, error) { + return &pb.CancelledJobResponse{}, nil +} diff --git a/core/services/ocr2/plugins/ccip/testhelpers/testhelpers_1_4_0/config_1_4_0.go b/core/services/ocr2/plugins/ccip/testhelpers/testhelpers_1_4_0/config_1_4_0.go new file mode 100644 index 00000000000..751ae5c1a92 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/testhelpers/testhelpers_1_4_0/config_1_4_0.go @@ -0,0 +1,73 @@ +// Package with set of configs that should be used only within tests suites + +package testhelpers_1_4_0 + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/smartcontractkit/chainlink-common/pkg/config" + + 
"github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/v1_2_0" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/testhelpers" +) + +var PermissionLessExecutionThresholdSeconds = uint32(testhelpers.FirstBlockAge.Seconds()) + +func (c *CCIPContracts) CreateDefaultCommitOnchainConfig(t *testing.T) []byte { + config, err := abihelpers.EncodeAbiStruct(ccipdata.CommitOnchainConfig{ + PriceRegistry: c.Dest.PriceRegistry.Address(), + }) + require.NoError(t, err) + return config +} + +func (c *CCIPContracts) CreateDefaultCommitOffchainConfig(t *testing.T) []byte { + return c.createCommitOffchainConfig(t, 10*time.Second, 5*time.Second) +} + +func (c *CCIPContracts) createCommitOffchainConfig(t *testing.T, feeUpdateHearBeat time.Duration, inflightCacheExpiry time.Duration) []byte { + config, err := NewCommitOffchainConfig( + *config.MustNewDuration(feeUpdateHearBeat), + 1, + 1, + *config.MustNewDuration(feeUpdateHearBeat), + 1, + *config.MustNewDuration(inflightCacheExpiry), + ).Encode() + require.NoError(t, err) + return config +} + +func (c *CCIPContracts) CreateDefaultExecOnchainConfig(t *testing.T) []byte { + config, err := abihelpers.EncodeAbiStruct(v1_2_0.ExecOnchainConfig{ + PermissionLessExecutionThresholdSeconds: PermissionLessExecutionThresholdSeconds, + Router: c.Dest.Router.Address(), + PriceRegistry: c.Dest.PriceRegistry.Address(), + MaxDataBytes: 1e5, + MaxNumberOfTokensPerMsg: 5, + MaxPoolReleaseOrMintGas: 200_000, + }) + require.NoError(t, err) + return config +} + +func (c *CCIPContracts) CreateDefaultExecOffchainConfig(t *testing.T) []byte { + return c.createExecOffchainConfig(t, 1*time.Minute, 1*time.Minute) +} + +func (c *CCIPContracts) createExecOffchainConfig(t *testing.T, inflightCacheExpiry time.Duration, rootSnoozeTime time.Duration) []byte { + config, err := NewExecOffchainConfig( + 1, + 5_000_000, + 0.07, + *config.MustNewDuration(inflightCacheExpiry), + *config.MustNewDuration(rootSnoozeTime), + ).Encode() + require.NoError(t, err) + return config +} diff --git a/core/services/ocr2/plugins/ccip/tokendata/bgworker.go b/core/services/ocr2/plugins/ccip/tokendata/bgworker.go new file mode 100644 index 00000000000..1a74ab2305b --- /dev/null +++ b/core/services/ocr2/plugins/ccip/tokendata/bgworker.go @@ -0,0 +1,213 @@ +package tokendata + +import ( + "context" + "fmt" + "strconv" + "sync" + "time" + + "github.com/patrickmn/go-cache" + + "github.com/smartcontractkit/chainlink-common/pkg/services" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + "github.com/smartcontractkit/chainlink/v2/core/services/job" +) + +type msgResult struct { + TokenAmountIndex int + Err error + Data []byte +} + +type Worker interface { + job.ServiceCtx + // AddJobsFromMsgs will include the provided msgs for background processing. + AddJobsFromMsgs(ctx context.Context, msgs []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta) + + // GetMsgTokenData returns the token data for the provided msg. If data are not ready it keeps waiting + // until they get ready. Important: Make sure to pass a proper context with timeout. 
+ GetMsgTokenData(ctx context.Context, msg cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta) ([][]byte, error) + + GetReaders() map[cciptypes.Address]Reader +} + +type BackgroundWorker struct { + tokenDataReaders map[cciptypes.Address]Reader + numWorkers int + jobsChan chan cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta + resultsCache *cache.Cache + timeoutDur time.Duration + + services.StateMachine + wg *sync.WaitGroup + backgroundCtx context.Context //nolint:containedctx + backgroundCancel context.CancelFunc +} + +func NewBackgroundWorker( + tokenDataReaders map[cciptypes.Address]Reader, + numWorkers int, + timeoutDur time.Duration, + expirationDur time.Duration, +) *BackgroundWorker { + if expirationDur == 0 { + expirationDur = 24 * time.Hour + } + + ctx, cancel := context.WithCancel(context.Background()) + return &BackgroundWorker{ + tokenDataReaders: tokenDataReaders, + numWorkers: numWorkers, + jobsChan: make(chan cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta, numWorkers*100), + resultsCache: cache.New(expirationDur, expirationDur/2), + timeoutDur: timeoutDur, + + wg: new(sync.WaitGroup), + backgroundCtx: ctx, + backgroundCancel: cancel, + } +} + +func (w *BackgroundWorker) Start(context.Context) error { + return w.StateMachine.StartOnce("Token BackgroundWorker", func() error { + for i := 0; i < w.numWorkers; i++ { + w.wg.Add(1) + w.run() + } + return nil + }) +} + +func (w *BackgroundWorker) Close() error { + return w.StateMachine.StopOnce("Token BackgroundWorker", func() error { + w.backgroundCancel() + w.wg.Wait() + return nil + }) +} + +func (w *BackgroundWorker) AddJobsFromMsgs(ctx context.Context, msgs []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta) { + w.wg.Add(1) + go func() { + defer w.wg.Done() + for _, msg := range msgs { + select { + case <-w.backgroundCtx.Done(): + return + case <-ctx.Done(): + return + default: + if len(msg.TokenAmounts) > 0 { + w.jobsChan <- msg + } + } + } + }() +} + +func (w *BackgroundWorker) GetReaders() map[cciptypes.Address]Reader { + return w.tokenDataReaders +} + +func (w *BackgroundWorker) GetMsgTokenData(ctx context.Context, msg cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta) ([][]byte, error) { + res, err := w.getMsgTokenData(ctx, msg.SequenceNumber) + if err != nil { + return nil, err + } + + tokenDatas := make([][]byte, len(msg.TokenAmounts)) + for _, r := range res { + if r.Err != nil { + return nil, r.Err + } + if r.TokenAmountIndex < 0 || r.TokenAmountIndex >= len(msg.TokenAmounts) { + return nil, fmt.Errorf("token data index inconsistency") + } + tokenDatas[r.TokenAmountIndex] = r.Data + } + + return tokenDatas, nil +} + +func (w *BackgroundWorker) run() { + go func() { + defer w.wg.Done() + for { + select { + case <-w.backgroundCtx.Done(): + return + case msg := <-w.jobsChan: + w.workOnMsg(w.backgroundCtx, msg) + } + } + }() +} + +func (w *BackgroundWorker) workOnMsg(ctx context.Context, msg cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta) { + results := make([]msgResult, 0, len(msg.TokenAmounts)) + + cachedTokenData := make(map[int]msgResult) // tokenAmount index -> token data + if cachedData, exists := w.getFromCache(msg.SequenceNumber); exists { + for _, r := range cachedData { + cachedTokenData[r.TokenAmountIndex] = r + } + } + + for i, token := range msg.TokenAmounts { + offchainTokenDataProvider, exists := w.tokenDataReaders[token.Token] + if !exists { + // No token data required + continue + } + + // if the result exists in the cache and there wasn't any error keep the existing result + if cachedResult, exists := 
cachedTokenData[i]; exists && cachedResult.Err == nil { + results = append(results, cachedResult) + continue + } + + // if there was any error or if the data do not exist in the cache make a call to the provider + timeoutCtx, cf := context.WithTimeout(ctx, w.timeoutDur) + tknData, err := offchainTokenDataProvider.ReadTokenData(timeoutCtx, msg, i) + cf() + results = append(results, msgResult{ + TokenAmountIndex: i, + Err: err, + Data: tknData, + }) + } + + w.resultsCache.Set(strconv.FormatUint(msg.SequenceNumber, 10), results, cache.DefaultExpiration) +} + +func (w *BackgroundWorker) getMsgTokenData(ctx context.Context, seqNum uint64) ([]msgResult, error) { + if msgTokenData, exists := w.getFromCache(seqNum); exists { + return msgTokenData, nil + } + + ctx, cf := context.WithTimeout(ctx, w.timeoutDur) + defer cf() + + // wait until the results are ready or until context timeout is reached + tick := time.NewTicker(100 * time.Millisecond) + for { + select { + case <-ctx.Done(): + return nil, context.DeadlineExceeded + case <-tick.C: + if msgTokenData, exists := w.getFromCache(seqNum); exists { + return msgTokenData, nil + } + } + } +} + +func (w *BackgroundWorker) getFromCache(seqNum uint64) ([]msgResult, bool) { + rawResult, found := w.resultsCache.Get(strconv.FormatUint(seqNum, 10)) + if !found { + return nil, false + } + return rawResult.([]msgResult), true +} diff --git a/core/services/ocr2/plugins/ccip/tokendata/bgworker_test.go b/core/services/ocr2/plugins/ccip/tokendata/bgworker_test.go new file mode 100644 index 00000000000..5d505363ac7 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/tokendata/bgworker_test.go @@ -0,0 +1,188 @@ +package tokendata_test + +import ( + "context" + "fmt" + "math/rand" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils" + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/tokendata" +) + +func TestBackgroundWorker(t *testing.T) { + ctx := testutils.Context(t) + + const numTokens = 100 + const numWorkers = 20 + const numMessages = 40 + const maxReaderLatencyMS = 200 + const percentOfTokensWithoutTokenData = 10 + + tokens := make([]cciptypes.Address, numTokens) + readers := make(map[cciptypes.Address]*tokendata.MockReader, numTokens) + tokenDataReaders := make(map[cciptypes.Address]tokendata.Reader, numTokens) + tokenData := make(map[cciptypes.Address][]byte) + delays := make(map[cciptypes.Address]time.Duration) + + for i := range tokens { + tokens[i] = cciptypes.Address(utils.RandomAddress().String()) + readers[tokens[i]] = tokendata.NewMockReader(t) + if rand.Intn(100) >= percentOfTokensWithoutTokenData { + tokenDataReaders[tokens[i]] = readers[tokens[i]] + tokenData[tokens[i]] = []byte(fmt.Sprintf("...token %x data...", tokens[i])) + } + + // specify a random latency for the reader implementation + readerLatency := rand.Intn(maxReaderLatencyMS) + delays[tokens[i]] = time.Duration(readerLatency) * time.Millisecond + } + w := tokendata.NewBackgroundWorker(tokenDataReaders, numWorkers, 5*time.Second, time.Hour) + require.NoError(t, w.Start(ctx)) + + msgs := make([]cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta, numMessages) + for i := range msgs { + tk := tokens[i%len(tokens)] + + msgs[i] = 
cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{ + EVM2EVMMessage: cciptypes.EVM2EVMMessage{ + SequenceNumber: uint64(i + 1), + TokenAmounts: []cciptypes.TokenAmount{{Token: tk}}, + }, + } + + reader := readers[tk] + reader.On("ReadTokenData", mock.Anything, msgs[i], 0).Run(func(args mock.Arguments) { + time.Sleep(delays[tk]) + }).Return(tokenData[tk], nil).Maybe() + } + + w.AddJobsFromMsgs(ctx, msgs) + // processing of the messages should have started at this point + + tStart := time.Now() + for _, msg := range msgs { + b, err := w.GetMsgTokenData(ctx, msg) // fetched from provider + assert.NoError(t, err) + assert.Equal(t, tokenData[msg.TokenAmounts[0].Token], b[0]) + } + assert.True(t, time.Since(tStart) < 600*time.Millisecond) + assert.True(t, time.Since(tStart) > 200*time.Millisecond) + + tStart = time.Now() + for _, msg := range msgs { + b, err := w.GetMsgTokenData(ctx, msg) // fetched from cache + assert.NoError(t, err) + assert.Equal(t, tokenData[msg.TokenAmounts[0].Token], b[0]) + } + assert.True(t, time.Since(tStart) < 200*time.Millisecond) + + w.AddJobsFromMsgs(ctx, msgs) // same messages are added but they should already be in cache + tStart = time.Now() + for _, msg := range msgs { + b, err := w.GetMsgTokenData(ctx, msg) + assert.NoError(t, err) + assert.Equal(t, tokenData[msg.TokenAmounts[0].Token], b[0]) + } + assert.True(t, time.Since(tStart) < 200*time.Millisecond) + + require.NoError(t, w.Close()) +} + +func TestBackgroundWorker_RetryOnErrors(t *testing.T) { + ctx := testutils.Context(t) + + tk1 := cciptypes.Address(utils.RandomAddress().String()) + tk2 := cciptypes.Address(utils.RandomAddress().String()) + + rdr1 := tokendata.NewMockReader(t) + rdr2 := tokendata.NewMockReader(t) + + w := tokendata.NewBackgroundWorker(map[cciptypes.Address]tokendata.Reader{ + tk1: rdr1, + tk2: rdr2, + }, 10, 5*time.Second, time.Hour) + require.NoError(t, w.Start(ctx)) + + msgs := []cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{ + {EVM2EVMMessage: cciptypes.EVM2EVMMessage{ + SequenceNumber: uint64(1), + TokenAmounts: []cciptypes.TokenAmount{{Token: tk1}}, + }}, + {EVM2EVMMessage: cciptypes.EVM2EVMMessage{ + SequenceNumber: uint64(2), + TokenAmounts: []cciptypes.TokenAmount{{Token: tk2}}, + }}, + } + + rdr1.On("ReadTokenData", mock.Anything, msgs[0], 0). + Return([]byte("some data"), nil).Once() + + // reader2 returns an error + rdr2.On("ReadTokenData", mock.Anything, msgs[1], 0). + Return(nil, fmt.Errorf("some err")).Once() + + w.AddJobsFromMsgs(ctx, msgs) + // processing of the messages should have started at this point + + tokenData, err := w.GetMsgTokenData(ctx, msgs[0]) + assert.NoError(t, err) + assert.Equal(t, []byte("some data"), tokenData[0]) + + _, err = w.GetMsgTokenData(ctx, msgs[1]) + assert.Error(t, err) + assert.Errorf(t, err, "some error") + + // we make the second reader to return data + rdr2.On("ReadTokenData", mock.Anything, msgs[1], 0). 
+ Return([]byte("some other data"), nil).Once() + + // add the jobs again, at this point jobs that previously returned + // an error are removed from the cache + w.AddJobsFromMsgs(ctx, msgs) + + // since reader1 returned some data before, we expect to get the cached result + tokenData, err = w.GetMsgTokenData(ctx, msgs[0]) + assert.NoError(t, err) + assert.Equal(t, []byte("some data"), tokenData[0]) + + // wait some time for msg2 to be re-processed and error overwritten + time.Sleep(20 * time.Millisecond) // todo: improve the test + + // for reader2 that returned an error before we expect to get data now + tokenData, err = w.GetMsgTokenData(ctx, msgs[1]) + assert.NoError(t, err) + assert.Equal(t, []byte("some other data"), tokenData[0]) + + require.NoError(t, w.Close()) +} + +func TestBackgroundWorker_Timeout(t *testing.T) { + ctx := testutils.Context(t) + + tk1 := cciptypes.Address(utils.RandomAddress().String()) + tk2 := cciptypes.Address(utils.RandomAddress().String()) + + rdr1 := tokendata.NewMockReader(t) + rdr2 := tokendata.NewMockReader(t) + + w := tokendata.NewBackgroundWorker( + map[cciptypes.Address]tokendata.Reader{tk1: rdr1, tk2: rdr2}, 10, 5*time.Second, time.Hour) + require.NoError(t, w.Start(ctx)) + + ctx, cf := context.WithTimeout(ctx, 500*time.Millisecond) + defer cf() + + _, err := w.GetMsgTokenData(ctx, cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{ + EVM2EVMMessage: cciptypes.EVM2EVMMessage{SequenceNumber: 1}}, + ) + assert.Error(t, err) + require.NoError(t, w.Close()) +} diff --git a/core/services/ocr2/plugins/ccip/tokendata/http/http_client.go b/core/services/ocr2/plugins/ccip/tokendata/http/http_client.go new file mode 100644 index 00000000000..79ec21b1b83 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/tokendata/http/http_client.go @@ -0,0 +1,48 @@ +package http + +import ( + "context" + "io" + "net/http" + "time" + + "github.com/pkg/errors" + + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/tokendata" +) + +type IHttpClient interface { + // Get issue a GET request to the given url and return the response body and status code. + Get(ctx context.Context, url string, timeout time.Duration) ([]byte, int, http.Header, error) +} + +type HttpClient struct { +} + +func (s *HttpClient) Get(ctx context.Context, url string, timeout time.Duration) ([]byte, int, http.Header, error) { + // Use a timeout to guard against attestation API hanging, causing observation timeout and failing to make any progress. 
+ timeoutCtx, cancel := context.WithTimeoutCause(ctx, timeout, tokendata.ErrTimeout) + defer cancel() + req, err := http.NewRequestWithContext(timeoutCtx, http.MethodGet, url, nil) + if err != nil { + return nil, http.StatusBadRequest, nil, err + } + req.Header.Add("accept", "application/json") + res, err := http.DefaultClient.Do(req) + if err != nil { + if errors.Is(err, context.DeadlineExceeded) { + return nil, http.StatusRequestTimeout, nil, tokendata.ErrTimeout + } + // On error, res is nil in most cases, do not read res.StatusCode, return BadRequest + return nil, http.StatusBadRequest, nil, err + } + defer res.Body.Close() + + // Explicitly signal if the API is being rate limited + if res.StatusCode == http.StatusTooManyRequests { + return nil, res.StatusCode, res.Header, tokendata.ErrRateLimit + } + + body, err := io.ReadAll(res.Body) + return body, res.StatusCode, res.Header, err +} diff --git a/core/services/ocr2/plugins/ccip/tokendata/http/observed_http_client.go b/core/services/ocr2/plugins/ccip/tokendata/http/observed_http_client.go new file mode 100644 index 00000000000..d8fb9b1c576 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/tokendata/http/observed_http_client.go @@ -0,0 +1,69 @@ +package http + +import ( + "context" + "net/http" + "strconv" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" +) + +var ( + usdcLatencyBuckets = []float64{ + float64(10 * time.Millisecond), + float64(25 * time.Millisecond), + float64(50 * time.Millisecond), + float64(75 * time.Millisecond), + float64(100 * time.Millisecond), + float64(250 * time.Millisecond), + float64(500 * time.Millisecond), + float64(750 * time.Millisecond), + float64(1 * time.Second), + float64(2 * time.Second), + float64(3 * time.Second), + float64(4 * time.Second), + float64(5 * time.Second), + } + usdcClientHistogram = promauto.NewHistogramVec(prometheus.HistogramOpts{ + Name: "ccip_usdc_client_request_total", + Help: "Latency of calls to the USDC client", + Buckets: usdcLatencyBuckets, + }, []string{"status", "success"}) +) + +type ObservedIHttpClient struct { + IHttpClient + histogram *prometheus.HistogramVec +} + +// NewObservedIHttpClient Create a new ObservedIHttpClient with the USDC client metric. +func NewObservedIHttpClient(origin IHttpClient) *ObservedIHttpClient { + return NewObservedIHttpClientWithMetric(origin, usdcClientHistogram) +} + +func NewObservedIHttpClientWithMetric(origin IHttpClient, histogram *prometheus.HistogramVec) *ObservedIHttpClient { + return &ObservedIHttpClient{ + IHttpClient: origin, + histogram: histogram, + } +} + +func (o *ObservedIHttpClient) Get(ctx context.Context, url string, timeout time.Duration) ([]byte, int, http.Header, error) { + return withObservedHttpClient(o.histogram, func() ([]byte, int, http.Header, error) { + return o.IHttpClient.Get(ctx, url, timeout) + }) +} + +func withObservedHttpClient[T any](histogram *prometheus.HistogramVec, contract func() (T, int, http.Header, error)) (T, int, http.Header, error) { + contractExecutionStarted := time.Now() + value, status, headers, err := contract() + histogram. + WithLabelValues( + strconv.FormatInt(int64(status), 10), + strconv.FormatBool(err == nil), + ). 
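+		// Observe records the elapsed wall-clock time in nanoseconds (float64 of a time.Duration),
+		// which matches the bucket boundaries above, themselves defined as float64(duration).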
+ Observe(float64(time.Since(contractExecutionStarted))) + return value, status, headers, err +} diff --git a/core/services/ocr2/plugins/ccip/tokendata/observability/usdc_client_test.go b/core/services/ocr2/plugins/ccip/tokendata/observability/usdc_client_test.go new file mode 100644 index 00000000000..0567b725a8b --- /dev/null +++ b/core/services/ocr2/plugins/ccip/tokendata/observability/usdc_client_test.go @@ -0,0 +1,151 @@ +package observability + +import ( + "context" + "encoding/json" + "math/big" + "math/rand" + "net/http" + "net/http/httptest" + "net/url" + "testing" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + io_prometheus_client "github.com/prometheus/client_model/go" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils" + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/mocks" + http2 "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/tokendata/http" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/tokendata/usdc" +) + +type expected struct { + status string + result string + count int +} + +func TestUSDCClientMonitoring(t *testing.T) { + tests := []struct { + name string + server *httptest.Server + requests int + expected []expected + }{ + { + name: "success", + server: newSuccessServer(t), + requests: 5, + expected: []expected{ + {"200", "true", 5}, + {"429", "false", 0}, + }, + }, + { + name: "rate_limited", + server: newRateLimitedServer(), + requests: 26, + expected: []expected{ + {"200", "true", 0}, + {"429", "false", 1}, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + testMonitoring(t, test.name, test.server, test.requests, test.expected, logger.TestLogger(t)) + }) + } +} + +func testMonitoring(t *testing.T, name string, server *httptest.Server, requests int, expected []expected, log logger.Logger) { + server.Start() + defer server.Close() + attestationURI, err := url.ParseRequestURI(server.URL) + require.NoError(t, err) + + // Define test histogram (avoid side effects from other tests if using the real usdcHistogram). + histogram := promauto.NewHistogramVec(prometheus.HistogramOpts{ + Name: "test_client_histogram_" + name, + Help: "Latency of calls to the USDC mock client", + Buckets: []float64{float64(250 * time.Millisecond), float64(1 * time.Second), float64(5 * time.Second)}, + }, []string{"status", "success"}) + + // Mock USDC reader. + usdcReader := mocks.NewUSDCReader(t) + msgBody := []byte{0xb0, 0xd1} + usdcReader.On("GetUSDCMessagePriorToLogIndexInTx", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(msgBody, nil) + + // Service with monitored http client. 
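+	// The plain HttpClient is wrapped with the test histogram via NewObservedIHttpClientWithMetric and
+	// injected into the USDC token data reader, so each attestation call below records one latency
+	// sample labelled with the HTTP status code and a success flag.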
+ usdcTokenAddr := utils.RandomAddress() + observedHttpClient := http2.NewObservedIHttpClientWithMetric(&http2.HttpClient{}, histogram) + tokenDataReaderDefault := usdc.NewUSDCTokenDataReader(log, usdcReader, attestationURI, 0, usdcTokenAddr, usdc.APIIntervalRateLimitDisabled) + tokenDataReader := usdc.NewUSDCTokenDataReaderWithHttpClient(*tokenDataReaderDefault, observedHttpClient, usdcTokenAddr, usdc.APIIntervalRateLimitDisabled) + require.NotNil(t, tokenDataReader) + + for i := 0; i < requests; i++ { + _, _ = tokenDataReader.ReadTokenData(context.Background(), cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{ + EVM2EVMMessage: cciptypes.EVM2EVMMessage{ + TokenAmounts: []cciptypes.TokenAmount{ + { + Token: ccipcalc.EvmAddrToGeneric(usdcTokenAddr), + Amount: big.NewInt(rand.Int63()), + }, + }, + }, + }, 0) + } + + // Check that the metrics are updated as expected. + for _, e := range expected { + assert.Equal(t, e.count, counterFromHistogramByLabels(t, histogram, e.status, e.result)) + } +} + +func counterFromHistogramByLabels(t *testing.T, histogramVec *prometheus.HistogramVec, labels ...string) int { + observer, err := histogramVec.GetMetricWithLabelValues(labels...) + require.NoError(t, err) + + metricCh := make(chan prometheus.Metric, 1) + observer.(prometheus.Histogram).Collect(metricCh) + close(metricCh) + + metric := <-metricCh + pb := &io_prometheus_client.Metric{} + err = metric.Write(pb) + require.NoError(t, err) + + return int(pb.GetHistogram().GetSampleCount()) +} + +func newSuccessServer(t *testing.T) *httptest.Server { + return httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + response := struct { + Status string `json:"status"` + Attestation string `json:"attestation"` + }{ + Status: "complete", + Attestation: "720502893578a89a8a87982982ef781c18b193", + } + responseBytes, err := json.Marshal(response) + require.NoError(t, err) + _, err = w.Write(responseBytes) + require.NoError(t, err) + })) +} + +func newRateLimitedServer() *httptest.Server { + return httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusTooManyRequests) + })) +} diff --git a/core/services/ocr2/plugins/ccip/tokendata/reader.go b/core/services/ocr2/plugins/ccip/tokendata/reader.go new file mode 100644 index 00000000000..16646bc7c5e --- /dev/null +++ b/core/services/ocr2/plugins/ccip/tokendata/reader.go @@ -0,0 +1,19 @@ +package tokendata + +import ( + "errors" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" +) + +var ( + ErrNotReady = errors.New("token data not ready") + ErrRateLimit = errors.New("token data API is being rate limited") + ErrTimeout = errors.New("token data API timed out") + ErrRequestsBlocked = errors.New("requests are currently blocked") +) + +// Reader is an interface for fetching offchain token data +type Reader interface { + cciptypes.TokenDataReader +} diff --git a/core/services/ocr2/plugins/ccip/tokendata/reader_mock.go b/core/services/ocr2/plugins/ccip/tokendata/reader_mock.go new file mode 100644 index 00000000000..39166d61590 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/tokendata/reader_mock.go @@ -0,0 +1,143 @@ +// Code generated by mockery v2.43.2. DO NOT EDIT. 
+ +package tokendata + +import ( + context "context" + + ccip "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + + mock "github.com/stretchr/testify/mock" +) + +// MockReader is an autogenerated mock type for the Reader type +type MockReader struct { + mock.Mock +} + +type MockReader_Expecter struct { + mock *mock.Mock +} + +func (_m *MockReader) EXPECT() *MockReader_Expecter { + return &MockReader_Expecter{mock: &_m.Mock} +} + +// Close provides a mock function with given fields: +func (_m *MockReader) Close() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Close") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockReader_Close_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Close' +type MockReader_Close_Call struct { + *mock.Call +} + +// Close is a helper method to define mock.On call +func (_e *MockReader_Expecter) Close() *MockReader_Close_Call { + return &MockReader_Close_Call{Call: _e.mock.On("Close")} +} + +func (_c *MockReader_Close_Call) Run(run func()) *MockReader_Close_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockReader_Close_Call) Return(_a0 error) *MockReader_Close_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockReader_Close_Call) RunAndReturn(run func() error) *MockReader_Close_Call { + _c.Call.Return(run) + return _c +} + +// ReadTokenData provides a mock function with given fields: ctx, msg, tokenIndex +func (_m *MockReader) ReadTokenData(ctx context.Context, msg ccip.EVM2EVMOnRampCCIPSendRequestedWithMeta, tokenIndex int) ([]byte, error) { + ret := _m.Called(ctx, msg, tokenIndex) + + if len(ret) == 0 { + panic("no return value specified for ReadTokenData") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, ccip.EVM2EVMOnRampCCIPSendRequestedWithMeta, int) ([]byte, error)); ok { + return rf(ctx, msg, tokenIndex) + } + if rf, ok := ret.Get(0).(func(context.Context, ccip.EVM2EVMOnRampCCIPSendRequestedWithMeta, int) []byte); ok { + r0 = rf(ctx, msg, tokenIndex) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, ccip.EVM2EVMOnRampCCIPSendRequestedWithMeta, int) error); ok { + r1 = rf(ctx, msg, tokenIndex) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockReader_ReadTokenData_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ReadTokenData' +type MockReader_ReadTokenData_Call struct { + *mock.Call +} + +// ReadTokenData is a helper method to define mock.On call +// - ctx context.Context +// - msg ccip.EVM2EVMOnRampCCIPSendRequestedWithMeta +// - tokenIndex int +func (_e *MockReader_Expecter) ReadTokenData(ctx interface{}, msg interface{}, tokenIndex interface{}) *MockReader_ReadTokenData_Call { + return &MockReader_ReadTokenData_Call{Call: _e.mock.On("ReadTokenData", ctx, msg, tokenIndex)} +} + +func (_c *MockReader_ReadTokenData_Call) Run(run func(ctx context.Context, msg ccip.EVM2EVMOnRampCCIPSendRequestedWithMeta, tokenIndex int)) *MockReader_ReadTokenData_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(ccip.EVM2EVMOnRampCCIPSendRequestedWithMeta), args[2].(int)) + }) + return _c +} + +func (_c *MockReader_ReadTokenData_Call) Return(tokenData []byte, err error) *MockReader_ReadTokenData_Call { + _c.Call.Return(tokenData, 
err) + return _c +} + +func (_c *MockReader_ReadTokenData_Call) RunAndReturn(run func(context.Context, ccip.EVM2EVMOnRampCCIPSendRequestedWithMeta, int) ([]byte, error)) *MockReader_ReadTokenData_Call { + _c.Call.Return(run) + return _c +} + +// NewMockReader creates a new instance of MockReader. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewMockReader(t interface { + mock.TestingT + Cleanup(func()) +}) *MockReader { + mock := &MockReader{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/core/services/ocr2/plugins/ccip/tokendata/usdc/usdc.go b/core/services/ocr2/plugins/ccip/tokendata/usdc/usdc.go new file mode 100644 index 00000000000..fe3a86d2aff --- /dev/null +++ b/core/services/ocr2/plugins/ccip/tokendata/usdc/usdc.go @@ -0,0 +1,339 @@ +package usdc + +import ( + "context" + "encoding/hex" + "encoding/json" + "fmt" + "net/url" + "strconv" + "strings" + "sync" + "time" + + "github.com/pkg/errors" + "golang.org/x/time/rate" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils" + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/tokendata" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/tokendata/http" +) + +const ( + apiVersion = "v1" + attestationPath = "attestations" + defaultAttestationTimeout = 5 * time.Second + + // defaultCoolDownDurationSec defines the default time to wait after getting rate limited. + // this value is only used if the 429 response does not contain the Retry-After header + defaultCoolDownDuration = 5 * time.Minute + + // maxCoolDownDuration defines the maximum duration we can wait till firing the next request + maxCoolDownDuration = 10 * time.Minute + + // defaultRequestInterval defines the rate in requests per second that the attestation API can be called. + // this is set according to the APIs documentated 10 requests per second rate limit. + defaultRequestInterval = 100 * time.Millisecond + + // APIIntervalRateLimitDisabled is a special value to disable the rate limiting. + APIIntervalRateLimitDisabled = -1 + // APIIntervalRateLimitDefault is a special value to select the default rate limit interval. + APIIntervalRateLimitDefault = 0 +) + +type attestationStatus string + +const ( + attestationStatusSuccess attestationStatus = "complete" + attestationStatusPending attestationStatus = "pending_confirmations" +) + +var ( + ErrUnknownResponse = errors.New("unexpected response from attestation API") +) + +// messageAndAttestation has to match the onchain struct `MessageAndAttestation` in the +// USDC token pool. 
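+// It is ABI-encoded as a single tuple of two dynamic `bytes` fields (message, attestation), as
+// described by AbiString below; encodeMessageAndAttestation produces this encoding as the token
+// data returned by ReadTokenData.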
+type messageAndAttestation struct { + Message []byte + Attestation []byte +} + +func (m messageAndAttestation) AbiString() string { + return ` + [{ + "components": [ + {"name": "message", "type": "bytes"}, + {"name": "attestation", "type": "bytes"} + ], + "type": "tuple" + }]` +} + +func (m messageAndAttestation) Validate() error { + if len(m.Message) == 0 { + return errors.New("message must be non-empty") + } + if len(m.Attestation) == 0 { + return errors.New("attestation must be non-empty") + } + return nil +} + +type TokenDataReader struct { + lggr logger.Logger + usdcReader ccipdata.USDCReader + httpClient http.IHttpClient + attestationApi *url.URL + attestationApiTimeout time.Duration + usdcTokenAddress common.Address + rate *rate.Limiter + + // coolDownUntil defines whether requests are blocked or not. + coolDownUntil time.Time + coolDownMu *sync.RWMutex +} + +type attestationResponse struct { + Status attestationStatus `json:"status"` + Attestation string `json:"attestation"` + Error string `json:"error"` +} + +var _ tokendata.Reader = &TokenDataReader{} + +func NewUSDCTokenDataReader( + lggr logger.Logger, + usdcReader ccipdata.USDCReader, + usdcAttestationApi *url.URL, + usdcAttestationApiTimeoutSeconds int, + usdcTokenAddress common.Address, + requestInterval time.Duration, +) *TokenDataReader { + timeout := time.Duration(usdcAttestationApiTimeoutSeconds) * time.Second + if usdcAttestationApiTimeoutSeconds == 0 { + timeout = defaultAttestationTimeout + } + + if requestInterval == APIIntervalRateLimitDisabled { + requestInterval = 0 + } else if requestInterval == APIIntervalRateLimitDefault { + requestInterval = defaultRequestInterval + } + + return &TokenDataReader{ + lggr: lggr, + usdcReader: usdcReader, + httpClient: http.NewObservedIHttpClient(&http.HttpClient{}), + attestationApi: usdcAttestationApi, + attestationApiTimeout: timeout, + usdcTokenAddress: usdcTokenAddress, + coolDownMu: &sync.RWMutex{}, + rate: rate.NewLimiter(rate.Every(requestInterval), 1), + } +} + +func NewUSDCTokenDataReaderWithHttpClient( + origin TokenDataReader, + httpClient http.IHttpClient, + usdcTokenAddress common.Address, + requestInterval time.Duration, +) *TokenDataReader { + return &TokenDataReader{ + lggr: origin.lggr, + usdcReader: origin.usdcReader, + httpClient: httpClient, + attestationApi: origin.attestationApi, + attestationApiTimeout: origin.attestationApiTimeout, + coolDownMu: origin.coolDownMu, + usdcTokenAddress: usdcTokenAddress, + rate: rate.NewLimiter(rate.Every(requestInterval), 1), + } +} + +// ReadTokenData queries the USDC attestation API to construct a message and +// attestation response. When called back to back, or multiple times +// concurrently, responses are delayed according how the request interval is +// configured. +func (s *TokenDataReader) ReadTokenData(ctx context.Context, msg cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta, tokenIndex int) ([]byte, error) { + if tokenIndex < 0 || tokenIndex >= len(msg.TokenAmounts) { + return nil, fmt.Errorf("token index out of bounds") + } + + if s.inCoolDownPeriod() { + // rate limiting cool-down period, we prevent new requests from being sent + return nil, tokendata.ErrRequestsBlocked + } + + if s.rate != nil { + // Wait blocks until it the attestation API can be called or the + // context is Done. 
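+		// The limiter is built with rate.Every(requestInterval) and a burst of 1, so concurrent callers
+		// are serialised to at most one attestation request per interval; Wait returns an error right
+		// away if the context is cancelled or its deadline cannot accommodate the wait.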
+ if waitErr := s.rate.Wait(ctx); waitErr != nil { + return nil, fmt.Errorf("usdc rate limiting error: %w", waitErr) + } + } + + messageBody, err := s.getUSDCMessageBody(ctx, msg, tokenIndex) + if err != nil { + return []byte{}, errors.Wrap(err, "failed getting the USDC message body") + } + + msgID := hexutil.Encode(msg.MessageID[:]) + msgBody := hexutil.Encode(messageBody) + s.lggr.Infow("Calling attestation API", "messageBodyHash", msgBody, "messageID", msgID) + + // The attestation API expects the hash of the message body + attestationResp, err := s.callAttestationApi(ctx, utils.Keccak256Fixed(messageBody)) + if err != nil { + return []byte{}, errors.Wrap(err, "failed calling usdc attestation API ") + } + + s.lggr.Infow("Got response from attestation API", "messageID", msgID, + "attestationStatus", attestationResp.Status, "attestation", attestationResp.Attestation, + "attestationError", attestationResp.Error) + + switch attestationResp.Status { + case attestationStatusSuccess: + // The USDC pool needs a combination of the message body and the attestation + messageAndAttestation, err := encodeMessageAndAttestation(messageBody, attestationResp.Attestation) + if err != nil { + return nil, fmt.Errorf("failed to encode messageAndAttestation : %w", err) + } + return messageAndAttestation, nil + case attestationStatusPending: + return nil, tokendata.ErrNotReady + default: + s.lggr.Errorw("Unexpected response from attestation API", "attestationResp", attestationResp) + return nil, ErrUnknownResponse + } +} + +// encodeMessageAndAttestation encodes the message body and attestation into a single byte array +// that is readable onchain. +func encodeMessageAndAttestation(messageBody []byte, attestation string) ([]byte, error) { + attestationBytes, err := hex.DecodeString(strings.TrimPrefix(attestation, "0x")) + if err != nil { + return nil, fmt.Errorf("failed to decode response attestation: %w", err) + } + + return abihelpers.EncodeAbiStruct[messageAndAttestation](messageAndAttestation{ + Message: messageBody, + Attestation: attestationBytes, + }) +} + +func (s *TokenDataReader) getUSDCMessageBody( + ctx context.Context, + msg cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta, + tokenIndex int, +) ([]byte, error) { + usdcTokenEndOffset, err := s.getUsdcTokenEndOffset(msg, tokenIndex) + if err != nil { + return nil, fmt.Errorf("get usdc token %d end offset: %w", tokenIndex, err) + } + + parsedMsgBody, err := s.usdcReader.GetUSDCMessagePriorToLogIndexInTx(ctx, int64(msg.LogIndex), usdcTokenEndOffset, msg.TxHash) + if err != nil { + return []byte{}, err + } + + s.lggr.Infow("Got USDC message body", "messageBody", hexutil.Encode(parsedMsgBody), "messageID", hexutil.Encode(msg.MessageID[:])) + return parsedMsgBody, nil +} + +func (s *TokenDataReader) getUsdcTokenEndOffset(msg cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta, tokenIndex int) (int, error) { + if tokenIndex >= len(msg.TokenAmounts) || tokenIndex < 0 { + return 0, fmt.Errorf("invalid token index %d for msg with %d tokens", tokenIndex, len(msg.TokenAmounts)) + } + + if msg.TokenAmounts[tokenIndex].Token != ccipcalc.EvmAddrToGeneric(s.usdcTokenAddress) { + return 0, fmt.Errorf("the specified token index %d is not a usdc token", tokenIndex) + } + + usdcTokenEndOffset := 0 + for i := tokenIndex + 1; i < len(msg.TokenAmounts); i++ { + evmTokenAddr, err := ccipcalc.GenericAddrToEvm(msg.TokenAmounts[i].Token) + if err != nil { + continue + } + + if evmTokenAddr == s.usdcTokenAddress { + usdcTokenEndOffset++ + } + } + + return usdcTokenEndOffset, 
nil +} + +// callAttestationApi calls the USDC attestation API with the given USDC message hash. +// The attestation service rate limit is 10 requests per second. If you exceed 10 requests +// per second, the service blocks all API requests for the next 5 minutes and returns an +// HTTP 429 response. +// +// Documentation: +// +// https://developers.circle.com/stablecoins/reference/getattestation +// https://developers.circle.com/stablecoins/docs/transfer-usdc-on-testnet-from-ethereum-to-avalanche +func (s *TokenDataReader) callAttestationApi(ctx context.Context, usdcMessageHash [32]byte) (attestationResponse, error) { + body, _, headers, err := s.httpClient.Get( + ctx, + fmt.Sprintf("%s/%s/%s/0x%x", s.attestationApi, apiVersion, attestationPath, usdcMessageHash), + s.attestationApiTimeout, + ) + switch { + case errors.Is(err, tokendata.ErrRateLimit): + coolDownDuration := defaultCoolDownDuration + if retryAfterHeader, exists := headers["Retry-After"]; exists && len(retryAfterHeader) > 0 { + if retryAfterSec, errParseInt := strconv.ParseInt(retryAfterHeader[0], 10, 64); errParseInt == nil { + coolDownDuration = time.Duration(retryAfterSec) * time.Second + } + } + s.setCoolDownPeriod(coolDownDuration) + + // Explicitly signal if the API is being rate limited + return attestationResponse{}, tokendata.ErrRateLimit + case err != nil: + return attestationResponse{}, fmt.Errorf("request error: %w", err) + } + + var response attestationResponse + err = json.Unmarshal(body, &response) + if err != nil { + return attestationResponse{}, err + } + if response.Error != "" { + return attestationResponse{}, fmt.Errorf("attestation API error: %s", response.Error) + } + if response.Status == "" { + return attestationResponse{}, fmt.Errorf("invalid attestation response: %s", string(body)) + } + return response, nil +} + +func (s *TokenDataReader) setCoolDownPeriod(d time.Duration) { + s.coolDownMu.Lock() + if d > maxCoolDownDuration { + d = maxCoolDownDuration + } + s.coolDownUntil = time.Now().Add(d) + s.coolDownMu.Unlock() +} + +func (s *TokenDataReader) inCoolDownPeriod() bool { + s.coolDownMu.RLock() + defer s.coolDownMu.RUnlock() + return time.Now().Before(s.coolDownUntil) +} + +func (s *TokenDataReader) Close() error { + return nil +} diff --git a/core/services/ocr2/plugins/ccip/tokendata/usdc/usdc_blackbox_test.go b/core/services/ocr2/plugins/ccip/tokendata/usdc/usdc_blackbox_test.go new file mode 100644 index 00000000000..95b309ff74e --- /dev/null +++ b/core/services/ocr2/plugins/ccip/tokendata/usdc/usdc_blackbox_test.go @@ -0,0 +1,119 @@ +package usdc_test + +import ( + "context" + "encoding/hex" + "encoding/json" + "net/http" + "net/http/httptest" + "net/url" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils" + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc" + ccipdatamocks "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/mocks" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/tokendata" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/tokendata/usdc" +) + +type attestationResponse struct { + Status string `json:"status"` + 
Attestation string `json:"attestation"` +} + +func TestUSDCReader_ReadTokenData(t *testing.T) { + tests := []struct { + name string + attestationResponse attestationResponse + expectedError error + }{ + { + name: "status complete", + attestationResponse: attestationResponse{ + Status: "complete", + Attestation: "0x9049623e91719ef2aa63c55f357be2529b0e7122ae552c18aff8db58b4633c4d3920ff03d3a6d1ddf11f06bf64d7fd60d45447ac81f527ba628877dc5ca759651b08ffae25a6d3b1411749765244f0a1c131cbfe04430d687a2e12fd9d2e6dc08e118ad95d94ad832332cf3c4f7a4f3da0baa803b7be024b02db81951c0f0714de1b", + }, + expectedError: nil, + }, + { + name: "status pending", + attestationResponse: attestationResponse{ + Status: "pending_confirmations", + Attestation: "720502893578a89a8a87982982ef781c18b193", + }, + expectedError: tokendata.ErrNotReady, + }, + { + name: "status invalid", + attestationResponse: attestationResponse{ + Status: "invalid", + Attestation: "720502893578a89a8a87982982ef781c18b193", + }, + expectedError: usdc.ErrUnknownResponse, + }, + } + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + // Message is the bytes itself from MessageSend(bytes message) + // i.e. ABI parsed. + message := "0x0000000000000001000000020000000000048d71000000000000000000000000eb08f243e5d3fcff26a9e38ae5520a669f4019d000000000000000000000000023a04d5935ed8bc8e3eb78db3541f0abfb001c6e0000000000000000000000006cb3ed9b441eb674b58495c8b3324b59faff5243000000000000000000000000000000005425890298aed601595a70ab815c96711a31bc65000000000000000000000000ab4f961939bfe6a93567cc57c59eed7084ce2131000000000000000000000000000000000000000000000000000000000000271000000000000000000000000035e08285cfed1ef159236728f843286c55fc0861" + expectedMessageAndAttestation := "0x00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000016000000000000000000000000000000000000000000000000000000000000000f80000000000000001000000020000000000048d71000000000000000000000000eb08f243e5d3fcff26a9e38ae5520a669f4019d000000000000000000000000023a04d5935ed8bc8e3eb78db3541f0abfb001c6e0000000000000000000000006cb3ed9b441eb674b58495c8b3324b59faff5243000000000000000000000000000000005425890298aed601595a70ab815c96711a31bc65000000000000000000000000ab4f961939bfe6a93567cc57c59eed7084ce2131000000000000000000000000000000000000000000000000000000000000271000000000000000000000000035e08285cfed1ef159236728f843286c55fc0861000000000000000000000000000000000000000000000000000000000000000000000000000000829049623e91719ef2aa63c55f357be2529b0e7122ae552c18aff8db58b4633c4d3920ff03d3a6d1ddf11f06bf64d7fd60d45447ac81f527ba628877dc5ca759651b08ffae25a6d3b1411749765244f0a1c131cbfe04430d687a2e12fd9d2e6dc08e118ad95d94ad832332cf3c4f7a4f3da0baa803b7be024b02db81951c0f0714de1b000000000000000000000000000000000000000000000000000000000000" + lggr := logger.TestLogger(t) + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + messageHash := utils.Keccak256Fixed(hexutil.MustDecode(message)) + expectedUrl := "/v1/attestations/0x" + hex.EncodeToString(messageHash[:]) + require.Equal(t, expectedUrl, r.URL.Path) + + responseBytes, err2 := json.Marshal(test.attestationResponse) + require.NoError(t, err2) + + _, err2 = w.Write(responseBytes) + require.NoError(t, err2) + })) + + defer ts.Close() + + seqNum := uint64(23825) + txHash := utils.RandomBytes32() + logIndex := int64(4) + + usdcReader := ccipdatamocks.USDCReader{} + 
usdcReader.On("GetUSDCMessagePriorToLogIndexInTx", + mock.Anything, + logIndex, + 0, + common.Hash(txHash).String(), + ).Return(hexutil.MustDecode(message), nil) + attestationURI, err := url.ParseRequestURI(ts.URL) + require.NoError(t, err) + + addr := utils.RandomAddress() + usdcService := usdc.NewUSDCTokenDataReader(lggr, &usdcReader, attestationURI, 0, addr, usdc.APIIntervalRateLimitDisabled) + msgAndAttestation, err := usdcService.ReadTokenData(context.Background(), cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{ + EVM2EVMMessage: cciptypes.EVM2EVMMessage{ + SequenceNumber: seqNum, + TokenAmounts: []cciptypes.TokenAmount{{Token: ccipcalc.EvmAddrToGeneric(addr), Amount: nil}}, + }, + TxHash: cciptypes.Hash(txHash).String(), + LogIndex: uint(logIndex), + }, 0) + if test.expectedError != nil { + require.Error(t, err) + require.Equal(t, test.expectedError, err) + return + } + require.NoError(t, err) + // Expected attestation for parsed body. + require.Equal(t, expectedMessageAndAttestation, hexutil.Encode(msgAndAttestation)) + }) + } +} diff --git a/core/services/ocr2/plugins/ccip/tokendata/usdc/usdc_test.go b/core/services/ocr2/plugins/ccip/tokendata/usdc/usdc_test.go new file mode 100644 index 00000000000..c4221b2dc0f --- /dev/null +++ b/core/services/ocr2/plugins/ccip/tokendata/usdc/usdc_test.go @@ -0,0 +1,423 @@ +package usdc + +import ( + "context" + "encoding/json" + "math/big" + "math/rand" + "net/http" + "net/http/httptest" + "net/url" + "strings" + "sync" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller/mocks" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils" + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipcalc" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata" + ccipdatamocks "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/internal/ccipdata/mocks" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/tokendata" +) + +var ( + mockMsgTransmitter = utils.RandomAddress() +) + +func TestUSDCReader_callAttestationApi(t *testing.T) { + t.Skipf("Skipping test because it uses the real USDC attestation API") + usdcMessageHash := "912f22a13e9ccb979b621500f6952b2afd6e75be7eadaed93fc2625fe11c52a2" + attestationURI, err := url.ParseRequestURI("https://iris-api-sandbox.circle.com") + require.NoError(t, err) + lggr := logger.TestLogger(t) + usdcReader, _ := ccipdata.NewUSDCReader(lggr, "job_123", mockMsgTransmitter, nil, false) + usdcService := NewUSDCTokenDataReader(lggr, usdcReader, attestationURI, 0, common.Address{}, APIIntervalRateLimitDisabled) + + attestation, err := usdcService.callAttestationApi(context.Background(), [32]byte(common.FromHex(usdcMessageHash))) + require.NoError(t, err) + + require.Equal(t, attestationStatusPending, attestation.Status) + require.Equal(t, "PENDING", attestation.Attestation) +} + +func TestUSDCReader_callAttestationApiMock(t *testing.T) { + response := attestationResponse{ + Status: attestationStatusSuccess, + Attestation: "720502893578a89a8a87982982ef781c18b193", + } + + ts := getMockUSDCEndpoint(t, response) + defer ts.Close() + attestationURI, err 
:= url.ParseRequestURI(ts.URL) + require.NoError(t, err) + + lggr := logger.TestLogger(t) + lp := mocks.NewLogPoller(t) + usdcReader, _ := ccipdata.NewUSDCReader(lggr, "job_123", mockMsgTransmitter, lp, false) + usdcService := NewUSDCTokenDataReader(lggr, usdcReader, attestationURI, 0, common.Address{}, APIIntervalRateLimitDisabled) + attestation, err := usdcService.callAttestationApi(context.Background(), utils.RandomBytes32()) + require.NoError(t, err) + + require.Equal(t, response.Status, attestation.Status) + require.Equal(t, response.Attestation, attestation.Attestation) +} + +func TestUSDCReader_callAttestationApiMockError(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + getTs func() *httptest.Server + parentTimeoutSeconds int + customTimeoutSeconds int + expectedError error + }{ + { + name: "server error", + getTs: func() *httptest.Server { + return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusInternalServerError) + })) + }, + parentTimeoutSeconds: 60, + expectedError: nil, + }, + { + name: "default timeout", + getTs: func() *httptest.Server { + response := attestationResponse{ + Status: attestationStatusSuccess, + Attestation: "720502893578a89a8a87982982ef781c18b193", + } + responseBytes, _ := json.Marshal(response) + + return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + time.Sleep(defaultAttestationTimeout + time.Second) + _, err := w.Write(responseBytes) + require.NoError(t, err) + })) + }, + parentTimeoutSeconds: 60, + expectedError: tokendata.ErrTimeout, + }, + { + name: "custom timeout", + getTs: func() *httptest.Server { + response := attestationResponse{ + Status: attestationStatusSuccess, + Attestation: "720502893578a89a8a87982982ef781c18b193", + } + responseBytes, _ := json.Marshal(response) + + return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + time.Sleep(2*time.Second + time.Second) + _, err := w.Write(responseBytes) + require.NoError(t, err) + })) + }, + parentTimeoutSeconds: 60, + customTimeoutSeconds: 2, + expectedError: tokendata.ErrTimeout, + }, + { + name: "error response", + getTs: func() *httptest.Server { + response := attestationResponse{ + Error: "some error", + } + responseBytes, _ := json.Marshal(response) + + return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + _, err := w.Write(responseBytes) + require.NoError(t, err) + })) + }, + parentTimeoutSeconds: 60, + expectedError: nil, + }, + { + name: "invalid status", + getTs: func() *httptest.Server { + response := attestationResponse{ + Status: "", + Attestation: "720502893578a89a8a87982982ef781c18b193", + } + responseBytes, _ := json.Marshal(response) + + return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + _, err := w.Write(responseBytes) + require.NoError(t, err) + })) + }, + parentTimeoutSeconds: 60, + expectedError: nil, + }, + { + name: "rate limit", + getTs: func() *httptest.Server { + return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusTooManyRequests) + })) + }, + parentTimeoutSeconds: 60, + expectedError: tokendata.ErrRateLimit, + }, + { + name: "parent context timeout", + getTs: func() *httptest.Server { + return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + time.Sleep(defaultAttestationTimeout + time.Second) + })) + }, + parentTimeoutSeconds: 1, + 
expectedError: nil, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ts := test.getTs() + defer ts.Close() + + attestationURI, err := url.ParseRequestURI(ts.URL) + require.NoError(t, err) + + lggr := logger.TestLogger(t) + lp := mocks.NewLogPoller(t) + usdcReader, _ := ccipdata.NewUSDCReader(lggr, "job_123", mockMsgTransmitter, lp, false) + usdcService := NewUSDCTokenDataReader(lggr, usdcReader, attestationURI, test.customTimeoutSeconds, common.Address{}, APIIntervalRateLimitDisabled) + lp.On("RegisterFilter", mock.Anything, mock.Anything).Return(nil) + require.NoError(t, usdcReader.RegisterFilters()) + + parentCtx, cancel := context.WithTimeout(context.Background(), time.Duration(test.parentTimeoutSeconds)*time.Second) + defer cancel() + + _, err = usdcService.callAttestationApi(parentCtx, utils.RandomBytes32()) + require.Error(t, err) + + if test.expectedError != nil { + require.True(t, errors.Is(err, test.expectedError)) + } + lp.On("UnregisterFilter", mock.Anything, mock.Anything).Return(nil) + require.NoError(t, usdcReader.Close()) + }) + } +} + +func getMockUSDCEndpoint(t *testing.T, response attestationResponse) *httptest.Server { + responseBytes, err := json.Marshal(response) + require.NoError(t, err) + + return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + _, err := w.Write(responseBytes) + require.NoError(t, err) + })) +} + +func TestGetUSDCMessageBody(t *testing.T) { + expectedBody := []byte("0x0000000000000001000000020000000000048d71000000000000000000000000eb08f243e5d3fcff26a9e38ae5520a669f4019d000000000000000000000000023a04d5935ed8bc8e3eb78db3541f0abfb001c6e0000000000000000000000006cb3ed9b441eb674b58495c8b3324b59faff5243000000000000000000000000000000005425890298aed601595a70ab815c96711a31bc65000000000000000000000000ab4f961939bfe6a93567cc57c59eed7084ce2131000000000000000000000000000000000000000000000000000000000000271000000000000000000000000035e08285cfed1ef159236728f843286c55fc0861") + usdcReader := ccipdatamocks.USDCReader{} + usdcReader.On("GetUSDCMessagePriorToLogIndexInTx", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(expectedBody, nil) + + usdcTokenAddr := utils.RandomAddress() + lggr := logger.TestLogger(t) + usdcService := NewUSDCTokenDataReader(lggr, &usdcReader, nil, 0, usdcTokenAddr, APIIntervalRateLimitDisabled) + + // Make the first call and assert the underlying function is called + body, err := usdcService.getUSDCMessageBody(context.Background(), cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{ + EVM2EVMMessage: cciptypes.EVM2EVMMessage{ + TokenAmounts: []cciptypes.TokenAmount{ + { + Token: ccipcalc.EvmAddrToGeneric(usdcTokenAddr), + Amount: big.NewInt(rand.Int63()), + }, + }, + }, + }, 0) + require.NoError(t, err) + require.Equal(t, body, expectedBody) + + usdcReader.AssertNumberOfCalls(t, "GetUSDCMessagePriorToLogIndexInTx", 1) +} + +func TestTokenDataReader_getUsdcTokenEndOffset(t *testing.T) { + usdcToken := utils.RandomAddress() + nonUsdcToken := utils.RandomAddress() + + multipleTokens := []common.Address{ + usdcToken, // 2 + nonUsdcToken, + nonUsdcToken, + usdcToken, // 1 + usdcToken, // 0 + nonUsdcToken, + } + + testCases := []struct { + name string + tokens []common.Address + tokenIndex int + expOffset int + expErr bool + }{ + {name: "one non usdc token", tokens: []common.Address{nonUsdcToken}, tokenIndex: 0, expOffset: 0, expErr: true}, + {name: "one usdc token", tokens: []common.Address{usdcToken}, tokenIndex: 0, expOffset: 0, expErr: false}, + {name: "one 
usdc token wrong index", tokens: []common.Address{usdcToken}, tokenIndex: 1, expOffset: 0, expErr: true}, + {name: "multiple tokens 1", tokens: multipleTokens, tokenIndex: 0, expOffset: 2}, + {name: "multiple tokens - non usdc selected", tokens: multipleTokens, tokenIndex: 2, expErr: true}, + {name: "multiple tokens 2", tokens: multipleTokens, tokenIndex: 3, expOffset: 1}, + {name: "multiple tokens 3", tokens: multipleTokens, tokenIndex: 4, expOffset: 0}, + {name: "multiple tokens not found", tokens: multipleTokens, tokenIndex: 5, expErr: true}, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + r := &TokenDataReader{usdcTokenAddress: usdcToken} + tokenAmounts := make([]cciptypes.TokenAmount, len(tc.tokens)) + for i := range tokenAmounts { + tokenAmounts[i] = cciptypes.TokenAmount{ + Token: ccipcalc.EvmAddrToGeneric(tc.tokens[i]), + Amount: big.NewInt(rand.Int63()), + } + } + msg := cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{EVM2EVMMessage: cciptypes.EVM2EVMMessage{TokenAmounts: tokenAmounts}} + offset, err := r.getUsdcTokenEndOffset(msg, tc.tokenIndex) + if tc.expErr { + assert.Error(t, err) + return + } + assert.NoError(t, err) + assert.Equal(t, tc.expOffset, offset) + }) + } +} + +func TestUSDCReader_rateLimiting(t *testing.T) { + testCases := []struct { + name string + requests uint64 + rateConfig time.Duration + testDuration time.Duration + timeout time.Duration + err string + }{ + { + name: "no rate limit when disabled", + requests: 10, + rateConfig: APIIntervalRateLimitDisabled, + testDuration: 1 * time.Millisecond, + }, + { + name: "yes rate limited with default config", + requests: 5, + rateConfig: APIIntervalRateLimitDefault, + testDuration: 4 * defaultRequestInterval, + }, + { + name: "yes rate limited with config", + requests: 10, + rateConfig: 50 * time.Millisecond, + testDuration: 9 * 50 * time.Millisecond, + }, + { + name: "timeout after first request", + requests: 5, + rateConfig: 100 * time.Millisecond, + testDuration: 1 * time.Millisecond, + timeout: 1 * time.Millisecond, + err: "usdc rate limiting error:", + }, + { + name: "timeout after second request", + requests: 5, + rateConfig: 100 * time.Millisecond, + testDuration: 100 * time.Millisecond, + timeout: 150 * time.Millisecond, + err: "usdc rate limiting error:", + }, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + response := attestationResponse{ + Status: attestationStatusSuccess, + Attestation: "720502893578a89a8a87982982ef781c18b193", + } + + ts := getMockUSDCEndpoint(t, response) + defer ts.Close() + attestationURI, err := url.ParseRequestURI(ts.URL) + require.NoError(t, err) + + lggr := logger.TestLogger(t) + lp := mocks.NewLogPoller(t) + usdcReader, _ := ccipdata.NewUSDCReader(lggr, "job_123", mockMsgTransmitter, lp, false) + usdcService := NewUSDCTokenDataReader(lggr, usdcReader, attestationURI, 0, utils.RandomAddress(), tc.rateConfig) + + ctx := context.Background() + if tc.timeout > 0 { + var cf context.CancelFunc + ctx, cf = context.WithTimeout(ctx, tc.timeout) + defer cf() + } + + trigger := make(chan struct{}) + errorChan := make(chan error, tc.requests) + wg := sync.WaitGroup{} + for i := 0; i < int(tc.requests); i++ { + wg.Add(1) + go func() { + defer wg.Done() + + <-trigger + _, err := usdcService.ReadTokenData(ctx, cciptypes.EVM2EVMOnRampCCIPSendRequestedWithMeta{ + EVM2EVMMessage: cciptypes.EVM2EVMMessage{ + TokenAmounts: []cciptypes.TokenAmount{{Token: ccipcalc.EvmAddrToGeneric(utils.ZeroAddress), Amount: 
nil}}, // trigger failure due to wrong address + }, + }, 0) + + errorChan <- err + }() + } + + // Start the test + start := time.Now() + close(trigger) + + // Wait for requests to complete + wg.Wait() + finish := time.Now() + close(errorChan) + + // Collect errors + errorFound := false + for err := range errorChan { + if tc.err != "" && strings.Contains(err.Error(), tc.err) { + errorFound = true + } else if err != nil && !strings.Contains(err.Error(), "get usdc token 0 end offset") { + // Ignore that one error, it's expected because of how mocking is used. + // Anything else is unexpected. + require.Fail(t, "unexpected error", err) + } + } + + if tc.err != "" { + assert.True(t, errorFound) + } + assert.WithinDuration(t, start.Add(tc.testDuration), finish, 50*time.Millisecond) + }) + } +} diff --git a/core/services/ocr2/plugins/ccip/transactions.rlp b/core/services/ocr2/plugins/ccip/transactions.rlp new file mode 100644 index 0000000000000000000000000000000000000000..96cfc2f48238eef5e38307f425334221e5a48903 GIT binary patch literal 115794 zcmeI5TZmRw6vxkLnz2U5%QT~oI{E2%ziW6)s$*7Ggpol(B}^BCh#=7l?B!@yLf40) zD2fl%E|5{FJtV}z%Zq_obbaaz3HfuAMX9I_x^t`-KARt?*-?5#t|mJz>hOJ2L7>#HYj|8wuMaa&ezxPQxa8$UVI(V2gd?mxQc z$#oAW@~a}2)BRPko+R@9Nus%bkla3fswQ5L?!Rz(=&4`M{`M*V>R`0ntHFMex;^9UQ@4V>Jv2eiJE$!rannipRB1*(bT7E z>eDp!>6-csO?{@OK1)-dt*Ot^)aPpI^ECDOn!3&(ntEF%_wUH%dRHOWb-%s(GWq@) zxm=%L$n~XiImt5lf|a>kUscHUyX11RCX+9CD3|Mx7IJ;PTuwG-@&%i6xxTfK>#xe? zsA=eMd<>Z@8zTl@^uK!ZV^<#25`8|^__#>C= ze-~ms)%@`?9h}Mccjj`vr;zI!eWueg`Tn`NTwhek^=rj)lHQn!7o@l4a{bOiuHP$` zlk|a1ydZrf7wgHOid;`L_0}bt`%|g@-(NxLYxmUEKY#e`Gp8rtdiwW++aKDoaK@|s zw62K7{z>{=RdLS+Qz!bK|9RKh&(ocYcfIocq0=v{-Fo1)3toJ#kmho|wMy%))9mJ!^OOv~?fbeaD>#zB~SGZAnk-;Z65_kUnty@adx; zy-n}uam(>`yg&d1f+xU#?;B+XeEmnYe>7bN?AO@u=d7IjQ~dg%1%HbD;m_dL3I+s$ zC-x8P5)Kl~Z|TET!I5Cs9Efo4MZlhE?e zpW^p_vEWa!Kl~ZRYQc~o5Ig~*fo4MZlhE?epY7!MEVSTHu|ND7{93_)AP@xswmi)e z@F(Hqp+Cj%A8Ns$Vt@EEiq(Q4K_GYnL<7x)@F$_=p+Cj%uWiAfVt@EE__cxoK_ChO zL<7x)@F$_=p+7sw@mXlWpJIRbGm6!MAweK`0&IDjCE!oO$wPmNKcC5hKgIs=XYgwU z1A;&l1c(Nj3E@vd%R_&PKflm|KgIs=XB4XiLxMo?1c(Nj3E@vd%R_&PKi}7aKgIs= zXYgwU1A;&l1c(Nj3E@vd%R_&TAjfB+1%HbD;m;^m3x))N;0dthX_kOL2`3Nz*-4Ji zLJR&B`@^5XuN4dk0#OiP%hN0Ye-cg}`tu5Md=^^pr`R96iaa~wH73oZCl><@nizg93H2t+}EEl;xq z{7E=@=+9nqd=^^pr`R9Kkt%hN0Ye-cg}`m;ig&q53S6#K)U z!LJnz2m(S!ltZVt@EEiq(Q4K_GYnY*w*(4SMu@mXlWpJIRbGm6!M zAweK`0&IDjCE!oO$wPlmBgbc<1%HbD;m_dL3I+s$CCSS=V51cE2PmZw<){v@0{^ydt6d=^^pr`R9<41TR(KoE$609&4B3HXz6^3b0% z$?;ie!JlG(_%n*tf+0a5cmiyBnkC>*!pTE_&LYQWp#^`6{o&8x*9ry%fhY*D+tRt};|c-TlgRBRR5YgvwxaMVW3Sd-6u8 z493)y=|*x~)(DkBZ&jIYB*!-*bx`S7=j+)CMLkl#_f^#SdUjG?kJRszYwCPGJ2k6E z>i20?b-tdJzr9D2%Fv4bR)U6(g>Bo)wwd=NG_{2LS=AGrc5`I*H#;$GRTzKGP%6Jj8)O~ zg(_R-Zm5*8D!MUOWy{=6wK7&kH)pDBnY*Q0#>!}bzZF;L?k^Pg&{U0nz~hvs24vF8 zfDirq0DRs4nt?C)x+UPYJDy+gbxXi)cRat~>z07q?s$H|*DV3J-SPZ_uUi6cyW{x< zU$+F@cE|GzzHSM)?T+UceBBao+a1p@__`(FwmY6*@O4YTZFfAs;Omxv+wORN!PhMT zx83plg0EWwZoA|81z)!W+;+$F3%+g%xb2ST7ku3kaN8ZvFZjA8;I=!SU+{HHz-@Ot zzu@bZfZOhPe!d22B2VYv8!-m9zeQzgjuebLq<`58ko< f%sW4PJ^b+#k36<=Maz-pBVSzc_B*Fe4&U$(Vy%t% literal 0 HcmV?d00001 diff --git a/core/services/ocr2/plugins/ccip/transmitter/transmitter.go b/core/services/ocr2/plugins/ccip/transmitter/transmitter.go new file mode 100644 index 00000000000..3e2962b33a9 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/transmitter/transmitter.go @@ -0,0 +1,143 @@ +package transmitter + +import ( + "context" + "fmt" + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + + commontypes "github.com/smartcontractkit/chainlink-common/pkg/types" + 
"github.com/smartcontractkit/chainlink/v2/common/txmgr/types" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/txmgr" + statuschecker "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/statuschecker" +) + +type roundRobinKeystore interface { + GetRoundRobinAddress(ctx context.Context, chainID *big.Int, addresses ...common.Address) (address common.Address, err error) +} + +type txManager interface { + CreateTransaction(ctx context.Context, txRequest txmgr.TxRequest) (tx txmgr.Tx, err error) + GetTransactionStatus(ctx context.Context, transactionID string) (state commontypes.TransactionStatus, err error) +} + +type Transmitter interface { + CreateEthTransaction(ctx context.Context, toAddress common.Address, payload []byte, txMeta *txmgr.TxMeta) error + FromAddress() common.Address +} + +type transmitter struct { + txm txManager + fromAddresses []common.Address + gasLimit uint64 + effectiveTransmitterAddress common.Address + strategy types.TxStrategy + checker txmgr.TransmitCheckerSpec + chainID *big.Int + keystore roundRobinKeystore + statuschecker statuschecker.CCIPTransactionStatusChecker // Used for CCIP's idempotency key generation +} + +// NewTransmitter creates a new eth transmitter +func NewTransmitter( + txm txManager, + fromAddresses []common.Address, + gasLimit uint64, + effectiveTransmitterAddress common.Address, + strategy types.TxStrategy, + checker txmgr.TransmitCheckerSpec, + chainID *big.Int, + keystore roundRobinKeystore, +) (Transmitter, error) { + // Ensure that a keystore is provided. + if keystore == nil { + return nil, errors.New("nil keystore provided to transmitter") + } + + return &transmitter{ + txm: txm, + fromAddresses: fromAddresses, + gasLimit: gasLimit, + effectiveTransmitterAddress: effectiveTransmitterAddress, + strategy: strategy, + checker: checker, + chainID: chainID, + keystore: keystore, + }, nil +} + +func NewTransmitterWithStatusChecker( + txm txManager, + fromAddresses []common.Address, + gasLimit uint64, + effectiveTransmitterAddress common.Address, + strategy types.TxStrategy, + checker txmgr.TransmitCheckerSpec, + chainID *big.Int, + keystore roundRobinKeystore, +) (Transmitter, error) { + t, err := NewTransmitter(txm, fromAddresses, gasLimit, effectiveTransmitterAddress, strategy, checker, chainID, keystore) + + if err != nil { + return nil, err + } + + transmitter, ok := t.(*transmitter) + if !ok { + return nil, errors.New("failed to type assert Transmitter to *transmitter") + } + transmitter.statuschecker = statuschecker.NewTxmStatusChecker(txm.GetTransactionStatus) + + return transmitter, nil +} + +func (t *transmitter) CreateEthTransaction(ctx context.Context, toAddress common.Address, payload []byte, txMeta *txmgr.TxMeta) error { + roundRobinFromAddress, err := t.keystore.GetRoundRobinAddress(ctx, t.chainID, t.fromAddresses...) 
+ if err != nil { + return fmt.Errorf("skipped OCR transmission, error getting round-robin address: %w", err) + } + + var idempotencyKey *string + + // Define idempotency key for CCIP Execution Plugin + if len(txMeta.MessageIDs) == 1 && t.statuschecker != nil { + messageId := txMeta.MessageIDs[0] + _, count, err1 := t.statuschecker.CheckMessageStatus(ctx, messageId) + + if err1 != nil { + return errors.Wrap(err, "skipped OCR transmission, error getting message status") + } + idempotencyKey = func() *string { + s := fmt.Sprintf("%s-%d", messageId, count+1) + return &s + }() + } + + _, err = t.txm.CreateTransaction(ctx, txmgr.TxRequest{ + IdempotencyKey: idempotencyKey, + FromAddress: roundRobinFromAddress, + ToAddress: toAddress, + EncodedPayload: payload, + FeeLimit: t.gasLimit, + ForwarderAddress: t.forwarderAddress(), + Strategy: t.strategy, + Checker: t.checker, + Meta: txMeta, + }) + return errors.Wrap(err, "skipped OCR transmission") +} + +func (t *transmitter) FromAddress() common.Address { + return t.effectiveTransmitterAddress +} + +func (t *transmitter) forwarderAddress() common.Address { + for _, a := range t.fromAddresses { + if a == t.effectiveTransmitterAddress { + return common.Address{} + } + } + return t.effectiveTransmitterAddress +} diff --git a/core/services/ocr2/plugins/ccip/transmitter/transmitter_test.go b/core/services/ocr2/plugins/ccip/transmitter/transmitter_test.go new file mode 100644 index 00000000000..d177f1baa5c --- /dev/null +++ b/core/services/ocr2/plugins/ccip/transmitter/transmitter_test.go @@ -0,0 +1,282 @@ +package transmitter + +import ( + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + ubig "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils/big" + + "github.com/smartcontractkit/chainlink-common/pkg/sqlutil" + "github.com/smartcontractkit/chainlink-common/pkg/types" + commontxmmocks "github.com/smartcontractkit/chainlink/v2/common/txmgr/types/mocks" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/txmgr" + txmmocks "github.com/smartcontractkit/chainlink/v2/core/chains/evm/txmgr/mocks" + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest" + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/keystore" + "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/ethkey" + "github.com/smartcontractkit/chainlink/v2/core/services/ocrcommon" + + "github.com/smartcontractkit/chainlink/v2/core/utils" +) + +var ( + FixtureChainID = *testutils.FixtureChainID + Password = testutils.Password +) + +func newMockTxStrategy(t *testing.T) *commontxmmocks.TxStrategy { + return commontxmmocks.NewTxStrategy(t) +} + +func Test_DefaultTransmitter_CreateEthTransaction(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + ethKeyStore := NewKeyStore(t, db).Eth() + + _, fromAddress := MustInsertRandomKey(t, ethKeyStore) + + gasLimit := uint64(1000) + chainID := big.NewInt(0) + effectiveTransmitterAddress := fromAddress + toAddress := testutils.NewAddress() + payload := []byte{1, 2, 3} + txm := txmmocks.NewMockEvmTxManager(t) + strategy := newMockTxStrategy(t) + + transmitter, err := ocrcommon.NewTransmitter( + txm, + []common.Address{fromAddress}, + gasLimit, + effectiveTransmitterAddress, + strategy, + txmgr.TransmitCheckerSpec{}, + chainID, + ethKeyStore, + ) 
+ require.NoError(t, err) + + txm.On("CreateTransaction", mock.Anything, txmgr.TxRequest{ + FromAddress: fromAddress, + ToAddress: toAddress, + EncodedPayload: payload, + FeeLimit: gasLimit, + ForwarderAddress: common.Address{}, + Meta: nil, + Strategy: strategy, + }).Return(txmgr.Tx{}, nil).Once() + require.NoError(t, transmitter.CreateEthTransaction(testutils.Context(t), toAddress, payload, nil)) +} + +func Test_DefaultTransmitter_Forwarding_Enabled_CreateEthTransaction(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + ethKeyStore := NewKeyStore(t, db).Eth() + + _, fromAddress := MustInsertRandomKey(t, ethKeyStore) + _, fromAddress2 := MustInsertRandomKey(t, ethKeyStore) + + gasLimit := uint64(1000) + chainID := big.NewInt(0) + effectiveTransmitterAddress := common.Address{} + toAddress := testutils.NewAddress() + payload := []byte{1, 2, 3} + txm := txmmocks.NewMockEvmTxManager(t) + strategy := newMockTxStrategy(t) + + transmitter, err := ocrcommon.NewTransmitter( + txm, + []common.Address{fromAddress, fromAddress2}, + gasLimit, + effectiveTransmitterAddress, + strategy, + txmgr.TransmitCheckerSpec{}, + chainID, + ethKeyStore, + ) + require.NoError(t, err) + + txm.On("CreateTransaction", mock.Anything, txmgr.TxRequest{ + FromAddress: fromAddress, + ToAddress: toAddress, + EncodedPayload: payload, + FeeLimit: gasLimit, + ForwarderAddress: common.Address{}, + Meta: nil, + Strategy: strategy, + }).Return(txmgr.Tx{}, nil).Once() + txm.On("CreateTransaction", mock.Anything, txmgr.TxRequest{ + FromAddress: fromAddress2, + ToAddress: toAddress, + EncodedPayload: payload, + FeeLimit: gasLimit, + ForwarderAddress: common.Address{}, + Meta: nil, + Strategy: strategy, + }).Return(txmgr.Tx{}, nil).Once() + require.NoError(t, transmitter.CreateEthTransaction(testutils.Context(t), toAddress, payload, nil)) + require.NoError(t, transmitter.CreateEthTransaction(testutils.Context(t), toAddress, payload, nil)) +} + +func Test_DefaultTransmitter_Forwarding_Enabled_CreateEthTransaction_Round_Robin_Error(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + ethKeyStore := NewKeyStore(t, db).Eth() + + fromAddress := common.Address{} + + gasLimit := uint64(1000) + chainID := big.NewInt(0) + effectiveTransmitterAddress := common.Address{} + toAddress := testutils.NewAddress() + payload := []byte{1, 2, 3} + txm := txmmocks.NewMockEvmTxManager(t) + strategy := newMockTxStrategy(t) + + transmitter, err := ocrcommon.NewTransmitter( + txm, + []common.Address{fromAddress}, + gasLimit, + effectiveTransmitterAddress, + strategy, + txmgr.TransmitCheckerSpec{}, + chainID, + ethKeyStore, + ) + require.NoError(t, err) + require.Error(t, transmitter.CreateEthTransaction(testutils.Context(t), toAddress, payload, nil)) +} + +func Test_DefaultTransmitter_Forwarding_Enabled_CreateEthTransaction_No_Keystore_Error(t *testing.T) { + t.Parallel() + + db := pgtest.NewSqlxDB(t) + ethKeyStore := NewKeyStore(t, db).Eth() + + _, fromAddress := MustInsertRandomKey(t, ethKeyStore) + _, fromAddress2 := MustInsertRandomKey(t, ethKeyStore) + + gasLimit := uint64(1000) + chainID := big.NewInt(0) + effectiveTransmitterAddress := common.Address{} + txm := txmmocks.NewMockEvmTxManager(t) + strategy := newMockTxStrategy(t) + + _, err := ocrcommon.NewTransmitter( + txm, + []common.Address{fromAddress, fromAddress2}, + gasLimit, + effectiveTransmitterAddress, + strategy, + txmgr.TransmitCheckerSpec{}, + chainID, + nil, + ) + require.Error(t, err) +} + +func Test_Transmitter_With_StatusChecker_CreateEthTransaction(t *testing.T) 
{ + t.Parallel() + + db := pgtest.NewSqlxDB(t) + ethKeyStore := NewKeyStore(t, db).Eth() + + _, fromAddress := MustInsertRandomKey(t, ethKeyStore) + + gasLimit := uint64(1000) + chainID := big.NewInt(0) + effectiveTransmitterAddress := fromAddress + txm := txmmocks.NewMockEvmTxManager(t) + strategy := newMockTxStrategy(t) + toAddress := testutils.NewAddress() + payload := []byte{1, 2, 3} + idempotencyKey := "1-0" + txMeta := &txmgr.TxMeta{MessageIDs: []string{"1"}} + + transmitter, err := NewTransmitterWithStatusChecker( + txm, + []common.Address{fromAddress}, + gasLimit, + effectiveTransmitterAddress, + strategy, + txmgr.TransmitCheckerSpec{}, + chainID, + ethKeyStore, + ) + require.NoError(t, err) + + // This case is for when the message ID was not found in the status checker + txm.On("GetTransactionStatus", mock.Anything, idempotencyKey).Return(types.Unknown, errors.New("dummy")).Once() + + txm.On("CreateTransaction", mock.Anything, txmgr.TxRequest{ + IdempotencyKey: &idempotencyKey, + FromAddress: fromAddress, + ToAddress: toAddress, + EncodedPayload: payload, + FeeLimit: gasLimit, + ForwarderAddress: common.Address{}, + Meta: txMeta, + Strategy: strategy, + }).Return(txmgr.Tx{}, nil).Once() + + require.NoError(t, transmitter.CreateEthTransaction(testutils.Context(t), toAddress, payload, txMeta)) + txm.AssertExpectations(t) +} + +func NewKeyStore(t testing.TB, ds sqlutil.DataSource) keystore.Master { + ctx := testutils.Context(t) + keystore := keystore.NewInMemory(ds, utils.FastScryptParams, logger.TestLogger(t)) + require.NoError(t, keystore.Unlock(ctx, Password)) + return keystore +} + +type RandomKey struct { + Nonce int64 + Disabled bool + + chainIDs []ubig.Big // nil: Fixture, set empty for none +} + +func (r RandomKey) MustInsert(t testing.TB, keystore keystore.Eth) (ethkey.KeyV2, common.Address) { + ctx := testutils.Context(t) + chainIDs := r.chainIDs + if chainIDs == nil { + chainIDs = []ubig.Big{*ubig.New(&FixtureChainID)} + } + + key := MustGenerateRandomKey(t) + keystore.XXXTestingOnlyAdd(ctx, key) + + for _, cid := range chainIDs { + require.NoError(t, keystore.Add(ctx, key.Address, cid.ToInt())) + require.NoError(t, keystore.Enable(ctx, key.Address, cid.ToInt())) + if r.Disabled { + require.NoError(t, keystore.Disable(ctx, key.Address, cid.ToInt())) + } + } + + return key, key.Address +} + +func MustInsertRandomKey(t testing.TB, keystore keystore.Eth, chainIDs ...ubig.Big) (ethkey.KeyV2, common.Address) { + r := RandomKey{} + if len(chainIDs) > 0 { + r.chainIDs = chainIDs + } + return r.MustInsert(t, keystore) +} + +func MustGenerateRandomKey(t testing.TB) ethkey.KeyV2 { + key, err := ethkey.NewV2() + require.NoError(t, err) + return key +} diff --git a/core/services/ocr2/plugins/ccip/vars.go b/core/services/ocr2/plugins/ccip/vars.go new file mode 100644 index 00000000000..a44f5e41d66 --- /dev/null +++ b/core/services/ocr2/plugins/ccip/vars.go @@ -0,0 +1,14 @@ +package ccip + +import ( + "github.com/pkg/errors" +) + +const ( + MaxQueryLength = 0 // empty for both plugins + MaxObservationLength = 250_000 // plugins's Observation should make sure to cap to this limit + CommitPluginLabel = "commit" + ExecPluginLabel = "exec" +) + +var ErrChainIsNotHealthy = errors.New("lane processing is stopped because of healthcheck failure, please see crit logs") diff --git a/core/services/ocr2/validate/validate.go b/core/services/ocr2/validate/validate.go index 2993a67114e..8d98a282674 100644 --- a/core/services/ocr2/validate/validate.go +++ b/core/services/ocr2/validate/validate.go @@ 
-6,6 +6,7 @@ import ( "errors" "fmt" "os/exec" + "strings" "github.com/lib/pq" "github.com/pelletier/go-toml" @@ -19,9 +20,11 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/config/env" "github.com/smartcontractkit/chainlink/v2/core/services/job" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config" lloconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/llo/config" mercuryconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/mercury/config" "github.com/smartcontractkit/chainlink/v2/core/services/ocrcommon" + "github.com/smartcontractkit/chainlink/v2/core/services/pipeline" "github.com/smartcontractkit/chainlink/v2/core/services/relay" "github.com/smartcontractkit/chainlink/v2/plugins" ) @@ -115,6 +118,10 @@ func validateSpec(ctx context.Context, tree *toml.Tree, spec job.Job, rc plugins return nil case types.Mercury: return validateOCR2MercurySpec(spec.OCR2OracleSpec.PluginConfig, *spec.OCR2OracleSpec.FeedID) + case types.CCIPExecution: + return validateOCR2CCIPExecutionSpec(spec.OCR2OracleSpec.PluginConfig) + case types.CCIPCommit: + return validateOCR2CCIPCommitSpec(spec.OCR2OracleSpec.PluginConfig) case types.LLO: return validateOCR2LLOSpec(spec.OCR2OracleSpec.PluginConfig) case types.GenericPlugin: @@ -313,11 +320,61 @@ func validateOCR2MercurySpec(jsonConfig job.JSONConfig, feedId [32]byte) error { var pluginConfig mercuryconfig.PluginConfig err := json.Unmarshal(jsonConfig.Bytes(), &pluginConfig) if err != nil { - return pkgerrors.Wrap(err, "error while unmarshaling plugin config") + return pkgerrors.Wrap(err, "error while unmarshalling plugin config") } return pkgerrors.Wrap(mercuryconfig.ValidatePluginConfig(pluginConfig, feedId), "Mercury PluginConfig is invalid") } +func validateOCR2CCIPExecutionSpec(jsonConfig job.JSONConfig) error { + if jsonConfig == nil { + return errors.New("pluginConfig is empty") + } + var cfg config.ExecPluginJobSpecConfig + err := json.Unmarshal(jsonConfig.Bytes(), &cfg) + if err != nil { + return pkgerrors.Wrap(err, "error while unmarshalling plugin config") + } + if cfg.USDCConfig != (config.USDCConfig{}) { + return cfg.USDCConfig.ValidateUSDCConfig() + } + return nil +} + +func validateOCR2CCIPCommitSpec(jsonConfig job.JSONConfig) error { + if jsonConfig == nil { + return errors.New("pluginConfig is empty") + } + var cfg config.CommitPluginJobSpecConfig + err := json.Unmarshal(jsonConfig.Bytes(), &cfg) + if err != nil { + return pkgerrors.Wrap(err, "error while unmarshalling plugin config") + } + + // Ensure that either the tokenPricesUSDPipeline or the priceGetterConfig is set, but not both. + emptyPipeline := strings.Trim(cfg.TokenPricesUSDPipeline, "\n\t ") == "" + emptyPriceGetter := cfg.PriceGetterConfig == nil + if emptyPipeline && emptyPriceGetter { + return fmt.Errorf("either tokenPricesUSDPipeline or priceGetterConfig must be set") + } + if !emptyPipeline && !emptyPriceGetter { + return fmt.Errorf("only one of tokenPricesUSDPipeline or priceGetterConfig must be set: %s and %v", cfg.TokenPricesUSDPipeline, cfg.PriceGetterConfig) + } + + if !emptyPipeline { + _, err = pipeline.Parse(cfg.TokenPricesUSDPipeline) + if err != nil { + return pkgerrors.Wrap(err, "invalid token prices pipeline") + } + } else { + // Validate prices config (like it was done for the pipeline). 
+ if emptyPriceGetter { + return pkgerrors.New("priceGetterConfig is empty") + } + } + + return nil +} + func validateOCR2LLOSpec(jsonConfig job.JSONConfig) error { var pluginConfig lloconfig.PluginConfig err := json.Unmarshal(jsonConfig.Bytes(), &pluginConfig) diff --git a/core/services/relay/evm/ccip.go b/core/services/relay/evm/ccip.go new file mode 100644 index 00000000000..34a732e1454 --- /dev/null +++ b/core/services/relay/evm/ccip.go @@ -0,0 +1,205 @@ +package evm + +import ( + "context" + "fmt" + "math/big" + "time" + + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/abihelpers" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" + "github.com/smartcontractkit/chainlink/v2/core/logger" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip" + ccipconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/prices" + + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" +) + +var _ cciptypes.CommitStoreReader = (*IncompleteSourceCommitStoreReader)(nil) +var _ cciptypes.CommitStoreReader = (*IncompleteDestCommitStoreReader)(nil) + +// IncompleteSourceCommitStoreReader is an implementation of CommitStoreReader with the only valid methods being +// GasPriceEstimator, ChangeConfig, and OffchainConfig +type IncompleteSourceCommitStoreReader struct { + estimator gas.EvmFeeEstimator + gasPriceEstimator *prices.DAGasPriceEstimator + sourceMaxGasPrice *big.Int + offchainConfig cciptypes.CommitOffchainConfig +} + +func NewIncompleteSourceCommitStoreReader(estimator gas.EvmFeeEstimator, sourceMaxGasPrice *big.Int) *IncompleteSourceCommitStoreReader { + return &IncompleteSourceCommitStoreReader{ + estimator: estimator, + sourceMaxGasPrice: sourceMaxGasPrice, + } +} + +func (i *IncompleteSourceCommitStoreReader) ChangeConfig(ctx context.Context, onchainConfig []byte, offchainConfig []byte) (cciptypes.Address, error) { + onchainConfigParsed, err := abihelpers.DecodeAbiStruct[ccip.CommitOnchainConfig](onchainConfig) + if err != nil { + return "", err + } + + offchainConfigParsed, err := ccipconfig.DecodeOffchainConfig[ccip.JSONCommitOffchainConfigV1_2_0](offchainConfig) + if err != nil { + return "", err + } + + i.gasPriceEstimator = prices.NewDAGasPriceEstimator( + i.estimator, + i.sourceMaxGasPrice, + int64(offchainConfigParsed.ExecGasPriceDeviationPPB), + int64(offchainConfigParsed.DAGasPriceDeviationPPB), + ) + i.offchainConfig = ccip.NewCommitOffchainConfig( + offchainConfigParsed.ExecGasPriceDeviationPPB, + offchainConfigParsed.GasPriceHeartBeat.Duration(), + offchainConfigParsed.TokenPriceDeviationPPB, + offchainConfigParsed.TokenPriceHeartBeat.Duration(), + offchainConfigParsed.InflightCacheExpiry.Duration(), + offchainConfigParsed.PriceReportingDisabled, + ) + + return cciptypes.Address(onchainConfigParsed.PriceRegistry.String()), nil +} + +func (i *IncompleteSourceCommitStoreReader) DecodeCommitReport(ctx context.Context, report []byte) (cciptypes.CommitStoreReport, error) { + return cciptypes.CommitStoreReport{}, fmt.Errorf("invalid usage of IncompleteSourceCommitStoreReader") +} + +func (i *IncompleteSourceCommitStoreReader) EncodeCommitReport(ctx context.Context, report cciptypes.CommitStoreReport) ([]byte, error) { + return []byte{}, fmt.Errorf("invalid usage of 
IncompleteSourceCommitStoreReader") +} + +// GasPriceEstimator returns an ExecGasPriceEstimator to satisfy the GasPriceEstimatorCommit interface, +// with deviationPPB values hardcoded to 0 when this implementation is first constructed. +// When ChangeConfig is called, another call to this method must be made to fetch a GasPriceEstimator with updated values +func (i *IncompleteSourceCommitStoreReader) GasPriceEstimator(ctx context.Context) (cciptypes.GasPriceEstimatorCommit, error) { + return i.gasPriceEstimator, nil +} + +func (i *IncompleteSourceCommitStoreReader) GetAcceptedCommitReportsGteTimestamp(ctx context.Context, ts time.Time, confirmations int) ([]cciptypes.CommitStoreReportWithTxMeta, error) { + return nil, fmt.Errorf("invalid usage of IncompleteSourceCommitStoreReader") +} + +func (i *IncompleteSourceCommitStoreReader) GetCommitReportMatchingSeqNum(ctx context.Context, seqNum uint64, confirmations int) ([]cciptypes.CommitStoreReportWithTxMeta, error) { + return nil, fmt.Errorf("invalid usage of IncompleteSourceCommitStoreReader") +} + +func (i *IncompleteSourceCommitStoreReader) GetCommitStoreStaticConfig(ctx context.Context) (cciptypes.CommitStoreStaticConfig, error) { + return cciptypes.CommitStoreStaticConfig{}, fmt.Errorf("invalid usage of IncompleteSourceCommitStoreReader") +} + +func (i *IncompleteSourceCommitStoreReader) GetExpectedNextSequenceNumber(ctx context.Context) (uint64, error) { + return 0, fmt.Errorf("invalid usage of IncompleteSourceCommitStoreReader") +} + +func (i *IncompleteSourceCommitStoreReader) GetLatestPriceEpochAndRound(ctx context.Context) (uint64, error) { + return 0, fmt.Errorf("invalid usage of IncompleteSourceCommitStoreReader") +} + +func (i *IncompleteSourceCommitStoreReader) IsBlessed(ctx context.Context, root [32]byte) (bool, error) { + return false, fmt.Errorf("invalid usage of IncompleteSourceCommitStoreReader") +} + +func (i *IncompleteSourceCommitStoreReader) IsDestChainHealthy(ctx context.Context) (bool, error) { + return false, fmt.Errorf("invalid usage of IncompleteSourceCommitStoreReader") +} + +func (i *IncompleteSourceCommitStoreReader) IsDown(ctx context.Context) (bool, error) { + return false, fmt.Errorf("invalid usage of IncompleteSourceCommitStoreReader") +} + +func (i *IncompleteSourceCommitStoreReader) OffchainConfig(ctx context.Context) (cciptypes.CommitOffchainConfig, error) { + return i.offchainConfig, nil +} + +func (i *IncompleteSourceCommitStoreReader) VerifyExecutionReport(ctx context.Context, report cciptypes.ExecReport) (bool, error) { + return false, fmt.Errorf("invalid usage of IncompleteSourceCommitStoreReader") +} + +func (i *IncompleteSourceCommitStoreReader) Close() error { + return fmt.Errorf("invalid usage of IncompleteSourceCommitStoreReader") +} + +// IncompleteDestCommitStoreReader is an implementation of CommitStoreReader with all valid methods except +// GasPriceEstimator, ChangeConfig, and OffchainConfig. 
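Before the destination-side counterpart defined below, a minimal usage sketch of the source-side reader above. It illustrates the ordering constraint called out in the GasPriceEstimator comment: ChangeConfig has to run before GasPriceEstimator or OffchainConfig return anything meaningful. The helper name and the way the config bytes are obtained are assumptions made for illustration, not part of this change.

package evm

import (
	"context"
	"math/big"

	cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip"
	"github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas"
)

// sketch only: sourceCommitStoreUsage is a hypothetical helper; the config
// bytes are assumed to have been fetched from the commit store elsewhere.
func sourceCommitStoreUsage(ctx context.Context, estimator gas.EvmFeeEstimator, maxGasPrice *big.Int, onchainCfg, offchainCfg []byte) (cciptypes.GasPriceEstimatorCommit, error) {
	reader := NewIncompleteSourceCommitStoreReader(estimator, maxGasPrice)
	// ChangeConfig must run first: it is what builds the DA gas price
	// estimator and the offchain config from the decoded config bytes.
	if _, err := reader.ChangeConfig(ctx, onchainCfg, offchainCfg); err != nil {
		return nil, err
	}
	// Only after ChangeConfig does GasPriceEstimator return a usable value.
	return reader.GasPriceEstimator(ctx)
}
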
+type IncompleteDestCommitStoreReader struct { + cs cciptypes.CommitStoreReader +} + +func NewIncompleteDestCommitStoreReader(lggr logger.Logger, versionFinder ccip.VersionFinder, address cciptypes.Address, ec client.Client, lp logpoller.LogPoller) (*IncompleteDestCommitStoreReader, error) { + cs, err := ccip.NewCommitStoreReader(lggr, versionFinder, address, ec, lp) + if err != nil { + return nil, err + } + + return &IncompleteDestCommitStoreReader{ + cs: cs, + }, nil +} + +func (i *IncompleteDestCommitStoreReader) ChangeConfig(ctx context.Context, onchainConfig []byte, offchainConfig []byte) (cciptypes.Address, error) { + return "", fmt.Errorf("invalid usage of IncompleteDestCommitStoreReader") +} + +func (i *IncompleteDestCommitStoreReader) DecodeCommitReport(ctx context.Context, report []byte) (cciptypes.CommitStoreReport, error) { + return i.cs.DecodeCommitReport(ctx, report) +} + +func (i *IncompleteDestCommitStoreReader) EncodeCommitReport(ctx context.Context, report cciptypes.CommitStoreReport) ([]byte, error) { + return i.cs.EncodeCommitReport(ctx, report) +} + +func (i *IncompleteDestCommitStoreReader) GasPriceEstimator(ctx context.Context) (cciptypes.GasPriceEstimatorCommit, error) { + return nil, fmt.Errorf("invalid usage of IncompleteDestCommitStoreReader") +} + +func (i *IncompleteDestCommitStoreReader) GetAcceptedCommitReportsGteTimestamp(ctx context.Context, ts time.Time, confirmations int) ([]cciptypes.CommitStoreReportWithTxMeta, error) { + return i.cs.GetAcceptedCommitReportsGteTimestamp(ctx, ts, confirmations) +} + +func (i *IncompleteDestCommitStoreReader) GetCommitReportMatchingSeqNum(ctx context.Context, seqNum uint64, confirmations int) ([]cciptypes.CommitStoreReportWithTxMeta, error) { + return i.cs.GetCommitReportMatchingSeqNum(ctx, seqNum, confirmations) +} + +func (i *IncompleteDestCommitStoreReader) GetCommitStoreStaticConfig(ctx context.Context) (cciptypes.CommitStoreStaticConfig, error) { + return i.cs.GetCommitStoreStaticConfig(ctx) +} + +func (i *IncompleteDestCommitStoreReader) GetExpectedNextSequenceNumber(ctx context.Context) (uint64, error) { + return i.cs.GetExpectedNextSequenceNumber(ctx) +} + +func (i *IncompleteDestCommitStoreReader) GetLatestPriceEpochAndRound(ctx context.Context) (uint64, error) { + return i.cs.GetLatestPriceEpochAndRound(ctx) +} + +func (i *IncompleteDestCommitStoreReader) IsBlessed(ctx context.Context, root [32]byte) (bool, error) { + return i.cs.IsBlessed(ctx, root) +} + +func (i *IncompleteDestCommitStoreReader) IsDestChainHealthy(ctx context.Context) (bool, error) { + return i.cs.IsDestChainHealthy(ctx) +} + +func (i *IncompleteDestCommitStoreReader) IsDown(ctx context.Context) (bool, error) { + return i.cs.IsDown(ctx) +} + +func (i *IncompleteDestCommitStoreReader) OffchainConfig(ctx context.Context) (cciptypes.CommitOffchainConfig, error) { + return cciptypes.CommitOffchainConfig{}, fmt.Errorf("invalid usage of IncompleteDestCommitStoreReader") +} + +func (i *IncompleteDestCommitStoreReader) VerifyExecutionReport(ctx context.Context, report cciptypes.ExecReport) (bool, error) { + return i.cs.VerifyExecutionReport(ctx, report) +} + +func (i *IncompleteDestCommitStoreReader) Close() error { + return i.cs.Close() +} diff --git a/core/services/relay/evm/ccip_test.go b/core/services/relay/evm/ccip_test.go new file mode 100644 index 00000000000..8c0bfe182e1 --- /dev/null +++ b/core/services/relay/evm/ccip_test.go @@ -0,0 +1,18 @@ +package evm + +import ( + "math/big" + "testing" + + "github.com/stretchr/testify/assert" +) + +func 
Test_CCIPSubjectUUID(t *testing.T) { + // We want the function to be + // (1) an actual function (i.e., deterministic) + assert.Equal(t, chainToUUID(big.NewInt(1)), chainToUUID(big.NewInt(1))) + // (2) injective (produce different results for different inputs) + assert.NotEqual(t, chainToUUID(big.NewInt(1)), chainToUUID(big.NewInt(2))) + // (3) stable across runs + assert.Equal(t, "c980e777-c95c-577b-83f6-ceb26a1a982d", chainToUUID(big.NewInt(1)).String()) +} diff --git a/core/services/relay/evm/commit_provider.go b/core/services/relay/evm/commit_provider.go new file mode 100644 index 00000000000..fe3e327c7f2 --- /dev/null +++ b/core/services/relay/evm/commit_provider.go @@ -0,0 +1,309 @@ +package evm + +import ( + "context" + "fmt" + "math/big" + + "go.uber.org/multierr" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + + commontypes "github.com/smartcontractkit/chainlink-common/pkg/types" + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/router" + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip" +) + +var _ commontypes.CCIPCommitProvider = (*SrcCommitProvider)(nil) +var _ commontypes.CCIPCommitProvider = (*DstCommitProvider)(nil) + +type SrcCommitProvider struct { + lggr logger.Logger + startBlock uint64 + client client.Client + lp logpoller.LogPoller + estimator gas.EvmFeeEstimator + maxGasPrice *big.Int + + // these values will be lazily initialized + seenOnRampAddress *cciptypes.Address + seenSourceChainSelector *uint64 + seenDestChainSelector *uint64 +} + +func NewSrcCommitProvider( + lggr logger.Logger, + startBlock uint64, + client client.Client, + lp logpoller.LogPoller, + srcEstimator gas.EvmFeeEstimator, + maxGasPrice *big.Int, +) commontypes.CCIPCommitProvider { + return &SrcCommitProvider{ + lggr: lggr, + startBlock: startBlock, + client: client, + lp: lp, + estimator: srcEstimator, + maxGasPrice: maxGasPrice, + } +} + +type DstCommitProvider struct { + lggr logger.Logger + versionFinder ccip.VersionFinder + startBlock uint64 + client client.Client + lp logpoller.LogPoller + contractTransmitter *contractTransmitter + configWatcher *configWatcher + gasEstimator gas.EvmFeeEstimator + maxGasPrice big.Int + + // these values will be lazily initialized + seenCommitStoreAddress *cciptypes.Address + seenOffRampAddress *cciptypes.Address +} + +func NewDstCommitProvider( + lggr logger.Logger, + versionFinder ccip.VersionFinder, + startBlock uint64, + client client.Client, + lp logpoller.LogPoller, + gasEstimator gas.EvmFeeEstimator, + maxGasPrice big.Int, + contractTransmitter contractTransmitter, + configWatcher *configWatcher, +) commontypes.CCIPCommitProvider { + return &DstCommitProvider{ + lggr: lggr, + versionFinder: versionFinder, + startBlock: startBlock, + client: client, + lp: lp, + contractTransmitter: &contractTransmitter, + configWatcher: configWatcher, + gasEstimator: gasEstimator, + maxGasPrice: maxGasPrice, + } +} + +func (P *SrcCommitProvider) Name() string { + return "CCIPCommitProvider.SrcRelayerProvider" +} + +// Close is called when the job that created this provider is deleted. 
+// At this time, any of the methods on the provider may or may not have been called. +// If NewOnRampReader has not been called, their corresponding +// Close methods will be expected to error. +func (P *SrcCommitProvider) Close() error { + versionFinder := ccip.NewEvmVersionFinder() + + unregisterFuncs := make([]func() error, 0, 2) + unregisterFuncs = append(unregisterFuncs, func() error { + // avoid panic in the case NewOnRampReader wasn't called + if P.seenOnRampAddress == nil { + return nil + } + return ccip.CloseOnRampReader(P.lggr, versionFinder, *P.seenSourceChainSelector, *P.seenDestChainSelector, *P.seenOnRampAddress, P.lp, P.client) + }) + + var multiErr error + for _, fn := range unregisterFuncs { + if err := fn(); err != nil { + multiErr = multierr.Append(multiErr, err) + } + } + return multiErr +} + +func (P *SrcCommitProvider) Ready() error { + return nil +} + +func (P *SrcCommitProvider) HealthReport() map[string]error { + return make(map[string]error) +} + +func (P *SrcCommitProvider) OffchainConfigDigester() ocrtypes.OffchainConfigDigester { + // TODO CCIP-2494 + // "OffchainConfigDigester called on SrcCommitProvider. Valid on DstCommitProvider." + return UnimplementedOffchainConfigDigester{} +} + +func (P *SrcCommitProvider) ContractConfigTracker() ocrtypes.ContractConfigTracker { + // // TODO CCIP-2494 + // "ContractConfigTracker called on SrcCommitProvider. Valid on DstCommitProvider.") + return UnimplementedContractConfigTracker{} +} + +func (P *SrcCommitProvider) ContractTransmitter() ocrtypes.ContractTransmitter { + // // TODO CCIP-2494 + // "ContractTransmitter called on SrcCommitProvider. Valid on DstCommitProvider." + return UnimplementedContractTransmitter{} +} + +func (P *SrcCommitProvider) ChainReader() commontypes.ContractReader { + return nil +} + +func (P *SrcCommitProvider) Codec() commontypes.Codec { + return nil +} + +func (P *DstCommitProvider) Name() string { + return "CCIPCommitProvider.DstRelayerProvider" +} + +func (P *DstCommitProvider) Close() error { + versionFinder := ccip.NewEvmVersionFinder() + + unregisterFuncs := make([]func() error, 0, 2) + unregisterFuncs = append(unregisterFuncs, func() error { + if P.seenCommitStoreAddress == nil { + return nil + } + return ccip.CloseCommitStoreReader(P.lggr, versionFinder, *P.seenCommitStoreAddress, P.client, P.lp) + }) + unregisterFuncs = append(unregisterFuncs, func() error { + if P.seenOffRampAddress == nil { + return nil + } + return ccip.CloseOffRampReader(P.lggr, versionFinder, *P.seenOffRampAddress, P.client, P.lp, nil, big.NewInt(0)) + }) + + var multiErr error + for _, fn := range unregisterFuncs { + if err := fn(); err != nil { + multiErr = multierr.Append(multiErr, err) + } + } + return multiErr +} + +func (P *DstCommitProvider) Ready() error { + return nil +} + +func (P *DstCommitProvider) HealthReport() map[string]error { + return make(map[string]error) +} + +func (P *DstCommitProvider) OffchainConfigDigester() ocrtypes.OffchainConfigDigester { + return P.configWatcher.OffchainConfigDigester() +} + +func (P *DstCommitProvider) ContractConfigTracker() ocrtypes.ContractConfigTracker { + return P.configWatcher.ContractConfigTracker() +} + +func (P *DstCommitProvider) ContractTransmitter() ocrtypes.ContractTransmitter { + return P.contractTransmitter +} + +func (P *DstCommitProvider) ChainReader() commontypes.ContractReader { + return nil +} + +func (P *DstCommitProvider) Codec() commontypes.Codec { + return nil +} + +func (P *SrcCommitProvider) Start(ctx context.Context) error { + if 
P.startBlock != 0 { + P.lggr.Infow("start replaying src chain", "fromBlock", P.startBlock) + return P.lp.Replay(ctx, int64(P.startBlock)) + } + return nil +} + +func (P *DstCommitProvider) Start(ctx context.Context) error { + if P.startBlock != 0 { + P.lggr.Infow("start replaying dst chain", "fromBlock", P.startBlock) + return P.lp.Replay(ctx, int64(P.startBlock)) + } + return nil +} + +func (P *SrcCommitProvider) NewPriceGetter(ctx context.Context) (priceGetter cciptypes.PriceGetter, err error) { + return nil, fmt.Errorf("can't construct a price getter from one relayer") +} + +func (P *DstCommitProvider) NewPriceGetter(ctx context.Context) (priceGetter cciptypes.PriceGetter, err error) { + return nil, fmt.Errorf("can't construct a price getter from one relayer") +} + +func (P *SrcCommitProvider) NewCommitStoreReader(ctx context.Context, commitStoreAddress cciptypes.Address) (commitStoreReader cciptypes.CommitStoreReader, err error) { + commitStoreReader = NewIncompleteSourceCommitStoreReader(P.estimator, P.maxGasPrice) + return +} + +func (P *DstCommitProvider) NewCommitStoreReader(ctx context.Context, commitStoreAddress cciptypes.Address) (commitStoreReader cciptypes.CommitStoreReader, err error) { + P.seenCommitStoreAddress = &commitStoreAddress + + versionFinder := ccip.NewEvmVersionFinder() + commitStoreReader, err = NewIncompleteDestCommitStoreReader(P.lggr, versionFinder, commitStoreAddress, P.client, P.lp) + return +} + +func (P *SrcCommitProvider) NewOnRampReader(ctx context.Context, onRampAddress cciptypes.Address, sourceChainSelector uint64, destChainSelector uint64) (onRampReader cciptypes.OnRampReader, err error) { + P.seenOnRampAddress = &onRampAddress + P.seenSourceChainSelector = &sourceChainSelector + P.seenDestChainSelector = &destChainSelector + + versionFinder := ccip.NewEvmVersionFinder() + onRampReader, err = ccip.NewOnRampReader(P.lggr, versionFinder, sourceChainSelector, destChainSelector, onRampAddress, P.lp, P.client) + return +} + +func (P *DstCommitProvider) NewOnRampReader(ctx context.Context, onRampAddress cciptypes.Address, sourceChainSelector uint64, destChainSelector uint64) (onRampReader cciptypes.OnRampReader, err error) { + return nil, fmt.Errorf("invalid: NewOnRampReader called for DstCommitProvider.NewOnRampReader should be called on SrcCommitProvider") +} + +func (P *SrcCommitProvider) NewOffRampReader(ctx context.Context, offRampAddr cciptypes.Address) (offRampReader cciptypes.OffRampReader, err error) { + return nil, fmt.Errorf("invalid: NewOffRampReader called for SrcCommitProvider. NewOffRampReader should be called on DstCommitProvider") +} + +func (P *DstCommitProvider) NewOffRampReader(ctx context.Context, offRampAddr cciptypes.Address) (offRampReader cciptypes.OffRampReader, err error) { + offRampReader, err = ccip.NewOffRampReader(P.lggr, P.versionFinder, offRampAddr, P.client, P.lp, P.gasEstimator, &P.maxGasPrice, true) + return +} + +func (P *SrcCommitProvider) NewPriceRegistryReader(ctx context.Context, addr cciptypes.Address) (priceRegistryReader cciptypes.PriceRegistryReader, err error) { + return nil, fmt.Errorf("invalid: NewPriceRegistryReader called for SrcCommitProvider. 
NewOffRampReader should be called on DstCommitProvider") +} + +func (P *DstCommitProvider) NewPriceRegistryReader(ctx context.Context, addr cciptypes.Address) (priceRegistryReader cciptypes.PriceRegistryReader, err error) { + destPriceRegistry := ccip.NewEvmPriceRegistry(P.lp, P.client, P.lggr, ccip.CommitPluginLabel) + priceRegistryReader, err = destPriceRegistry.NewPriceRegistryReader(ctx, addr) + return +} + +func (P *SrcCommitProvider) SourceNativeToken(ctx context.Context, sourceRouterAddr cciptypes.Address) (cciptypes.Address, error) { + sourceRouterAddrHex, err := ccip.GenericAddrToEvm(sourceRouterAddr) + if err != nil { + return "", err + } + sourceRouter, err := router.NewRouter(sourceRouterAddrHex, P.client) + if err != nil { + return "", err + } + sourceNative, err := sourceRouter.GetWrappedNative(&bind.CallOpts{Context: ctx}) + if err != nil { + return "", err + } + + return ccip.EvmAddrToGeneric(sourceNative), nil +} + +func (P *DstCommitProvider) SourceNativeToken(ctx context.Context, sourceRouterAddr cciptypes.Address) (cciptypes.Address, error) { + return "", fmt.Errorf("invalid: SourceNativeToken called for DstCommitProvider. SourceNativeToken should be called on SrcCommitProvider") +} diff --git a/core/services/relay/evm/evm.go b/core/services/relay/evm/evm.go index a0782380b5b..e310464a556 100644 --- a/core/services/relay/evm/evm.go +++ b/core/services/relay/evm/evm.go @@ -1,13 +1,24 @@ package evm import ( + "bytes" "context" + "crypto/sha256" "encoding/json" "errors" "fmt" + "math/big" "strings" "sync" + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/ccipcommit" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/ccipexec" + ccipconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config" + cciptransmitter "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/transmitter" + "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/common" "github.com/google/uuid" @@ -72,6 +83,57 @@ func init() { var _ commontypes.Relayer = &Relayer{} //nolint:staticcheck +// The current PluginProvider interface does not support an error return. This was fine up until CCIP. +// CCIP is the first product to introduce the idea of incomplete implementations of a provider based on +// what chain (for CCIP, src or dest) the provider is created for. The Unimplemented* implementations below allow us to return +// a non nil value, which is hopefully a better developer experience should you find yourself using the right methods +// but on the *wrong* provider. 
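A minimal sketch of what the Unimplemented* types defined just below buy a caller (the function name here is invented for illustration): asking the wrong provider for an OCR component still returns a non-nil value, and the mistake shows up as a descriptive error when the method is actually used, rather than as a nil-pointer panic.

package evm

import (
	"fmt"

	ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
)

// sketch only: digestOnWrongProvider is a hypothetical caller that asked the
// source-side provider for an OCR component; it gets a usable value back and
// the mistake surfaces as an error at call time.
func digestOnWrongProvider(cfg ocrtypes.ContractConfig) {
	digester := UnimplementedOffchainConfigDigester{}
	if _, err := digester.ConfigDigest(cfg); err != nil {
		fmt.Println("expected:", err) // "unimplemented for this relayer"
	}
}
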
+ +// [UnimplementedOffchainConfigDigester] satisfies the OCR OffchainConfigDigester interface +type UnimplementedOffchainConfigDigester struct{} + +func (e UnimplementedOffchainConfigDigester) ConfigDigest(config ocrtypes.ContractConfig) (ocrtypes.ConfigDigest, error) { + return ocrtypes.ConfigDigest{}, fmt.Errorf("unimplemented for this relayer") +} + +func (e UnimplementedOffchainConfigDigester) ConfigDigestPrefix() (ocrtypes.ConfigDigestPrefix, error) { + return 0, fmt.Errorf("unimplemented for this relayer") +} + +// [UnimplementedContractConfigTracker] satisfies the OCR ContractConfigTracker interface +type UnimplementedContractConfigTracker struct{} + +func (u UnimplementedContractConfigTracker) Notify() <-chan struct{} { + return nil +} + +func (u UnimplementedContractConfigTracker) LatestConfigDetails(ctx context.Context) (changedInBlock uint64, configDigest ocrtypes.ConfigDigest, err error) { + return 0, ocrtypes.ConfigDigest{}, fmt.Errorf("unimplemented for this relayer") +} + +func (u UnimplementedContractConfigTracker) LatestConfig(ctx context.Context, changedInBlock uint64) (ocrtypes.ContractConfig, error) { + return ocrtypes.ContractConfig{}, fmt.Errorf("unimplemented for this relayer") +} + +func (u UnimplementedContractConfigTracker) LatestBlockHeight(ctx context.Context) (blockHeight uint64, err error) { + return 0, fmt.Errorf("unimplemented for this relayer") +} + +// [UnimplementedContractTransmitter] satisfies the OCR ContractTransmitter interface +type UnimplementedContractTransmitter struct{} + +func (u UnimplementedContractTransmitter) Transmit(context.Context, ocrtypes.ReportContext, ocrtypes.Report, []ocrtypes.AttributedOnchainSignature) error { + return fmt.Errorf("unimplemented for this relayer") +} + +func (u UnimplementedContractTransmitter) FromAccount() (ocrtypes.Account, error) { + return "", fmt.Errorf("unimplemented for this relayer") +} + +func (u UnimplementedContractTransmitter) LatestConfigDigestAndEpoch(ctx context.Context) (configDigest ocrtypes.ConfigDigest, epoch uint32, err error) { + return ocrtypes.ConfigDigest{}, 0, fmt.Errorf("unimplemented for this relayer") +} + type Relayer struct { ds sqlutil.DataSource chain legacyevm.Chain @@ -618,6 +680,17 @@ func generateTransmitterFrom(ctx context.Context, rargs commontypes.RelayArgs, e configWatcher.chain.ID(), ethKeystore, ) + case commontypes.CCIPExecution: + transmitter, err = cciptransmitter.NewTransmitterWithStatusChecker( + configWatcher.chain.TxManager(), + fromAddresses, + gasLimit, + effectiveTransmitterAddress, + strategy, + checker, + configWatcher.chain.ID(), + ethKeystore, + ) default: transmitter, err = ocrcommon.NewTransmitter( configWatcher.chain.TxManager(), @@ -734,12 +807,158 @@ func (r *Relayer) NewAutomationProvider(rargs commontypes.RelayArgs, pargs commo return ocr2keeperRelayer.NewOCR2KeeperProvider(rargs, pargs) } -func (r *Relayer) NewCCIPCommitProvider(_ commontypes.RelayArgs, _ commontypes.PluginArgs) (commontypes.CCIPCommitProvider, error) { - return nil, errors.New("ccip.commit is not supported for evm") +func chainToUUID(chainID *big.Int) uuid.UUID { + // See https://www.rfc-editor.org/rfc/rfc4122.html#section-4.1.3 for the list of supported versions. + const VersionSHA1 = 5 + var buf bytes.Buffer + buf.WriteString("CCIP:") + buf.Write(chainID.Bytes()) + // We use SHA-256 instead of SHA-1 because the former has better collision resistance. + // The UUID will contain only the first 16 bytes of the hash. 
+ // You can't say which algorithms was used just by looking at the UUID bytes. + return uuid.NewHash(sha256.New(), uuid.NameSpaceOID, buf.Bytes(), VersionSHA1) } -func (r *Relayer) NewCCIPExecProvider(_ commontypes.RelayArgs, _ commontypes.PluginArgs) (commontypes.CCIPExecProvider, error) { - return nil, errors.New("ccip.exec is not supported for evm") +// NewCCIPCommitProvider constructs a provider of type CCIPCommitProvider. Since this is happening in the Relayer, +// which lives in a separate process from delegate which is requesting a provider, we need to wire in through pargs +// which *type* (impl) of CCIPCommitProvider should be created. CCIP is currently a special case where the provider has a +// subset of implementations of the complete interface as certain contracts in a CCIP lane are only deployed on the src +// chain or on the dst chain. This results in the two implementations of providers: a src and dst implementation. +func (r *Relayer) NewCCIPCommitProvider(rargs commontypes.RelayArgs, pargs commontypes.PluginArgs) (commontypes.CCIPCommitProvider, error) { + // TODO https://smartcontract-it.atlassian.net/browse/BCF-2887 + ctx := context.Background() + + versionFinder := ccip.NewEvmVersionFinder() + + var commitPluginConfig ccipconfig.CommitPluginConfig + err := json.Unmarshal(pargs.PluginConfig, &commitPluginConfig) + if err != nil { + return nil, err + } + sourceStartBlock := commitPluginConfig.SourceStartBlock + destStartBlock := commitPluginConfig.DestStartBlock + + // The src chain implementation of this provider does not need a configWatcher or contractTransmitter; + // bail early. + if commitPluginConfig.IsSourceProvider { + return NewSrcCommitProvider( + r.lggr, + sourceStartBlock, + r.chain.Client(), + r.chain.LogPoller(), + r.chain.GasEstimator(), + r.chain.Config().EVM().GasEstimator().PriceMax().ToInt(), + ), nil + } + + relayOpts := types.NewRelayOpts(rargs) + configWatcher, err := newStandardConfigProvider(ctx, r.lggr, r.chain, relayOpts) + if err != nil { + return nil, err + } + address := common.HexToAddress(relayOpts.ContractID) + typ, ver, err := ccipconfig.TypeAndVersion(address, r.chain.Client()) + if err != nil { + return nil, err + } + fn, err := ccipcommit.CommitReportToEthTxMeta(typ, ver) + if err != nil { + return nil, err + } + subjectID := chainToUUID(configWatcher.chain.ID()) + contractTransmitter, err := newOnChainContractTransmitter(ctx, r.lggr, rargs, r.ks.Eth(), configWatcher, configTransmitterOpts{ + subjectID: &subjectID, + }, OCR2AggregatorTransmissionContractABI, WithReportToEthMetadata(fn), WithRetention(0)) + if err != nil { + return nil, err + } + + return NewDstCommitProvider( + r.lggr, + versionFinder, + destStartBlock, + r.chain.Client(), + r.chain.LogPoller(), + r.chain.GasEstimator(), + *r.chain.Config().EVM().GasEstimator().PriceMax().ToInt(), + *contractTransmitter, + configWatcher, + ), nil +} + +// NewCCIPExecProvider constructs a provider of type CCIPExecProvider. Since this is happening in the Relayer, +// which lives in a separate process from delegate which is requesting a provider, we need to wire in through pargs +// which *type* (impl) of CCIPExecProvider should be created. CCIP is currently a special case where the provider has a +// subset of implementations of the complete interface as certain contracts in a CCIP lane are only deployed on the src +// chain or on the dst chain. This results in the two implementations of providers: a src and dst implementation. 
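Before the exec-side constructor that the comment above describes, a small aside on chainToUUID, defined earlier in this file: both NewCCIPCommitProvider and NewCCIPExecProvider derive the transmitter subject ID deterministically from the chain ID alone, so repeated provider constructions for the same chain share one subject. A hedged sketch, with an invented helper name:

package evm

import (
	"fmt"
	"math/big"
)

// sketch only: printSubjectIDs is an illustrative helper, not part of this
// change. The subject ID handed to the contract transmitter is a pure
// function of the chain ID.
func printSubjectIDs() {
	for _, id := range []int64{1, 1337} {
		fmt.Printf("chain %d -> subject %s\n", id, chainToUUID(big.NewInt(id)))
	}
}
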
+func (r *Relayer) NewCCIPExecProvider(rargs commontypes.RelayArgs, pargs commontypes.PluginArgs) (commontypes.CCIPExecProvider, error) { + // TODO https://smartcontract-it.atlassian.net/browse/BCF-2887 + ctx := context.Background() + + versionFinder := ccip.NewEvmVersionFinder() + + var execPluginConfig ccipconfig.ExecPluginConfig + err := json.Unmarshal(pargs.PluginConfig, &execPluginConfig) + if err != nil { + return nil, err + } + + usdcConfig := execPluginConfig.USDCConfig + + // The src chain implementation of this provider does not need a configWatcher or contractTransmitter; + // bail early. + if execPluginConfig.IsSourceProvider { + return NewSrcExecProvider( + r.lggr, + versionFinder, + r.chain.Client(), + r.chain.GasEstimator(), + r.chain.Config().EVM().GasEstimator().PriceMax().ToInt(), + r.chain.LogPoller(), + execPluginConfig.SourceStartBlock, + execPluginConfig.JobID, + usdcConfig.AttestationAPI, + int(usdcConfig.AttestationAPITimeoutSeconds), + usdcConfig.AttestationAPIIntervalMilliseconds, + usdcConfig.SourceMessageTransmitterAddress, + ) + } + + relayOpts := types.NewRelayOpts(rargs) + configWatcher, err := newStandardConfigProvider(ctx, r.lggr, r.chain, relayOpts) + if err != nil { + return nil, err + } + address := common.HexToAddress(relayOpts.ContractID) + typ, ver, err := ccipconfig.TypeAndVersion(address, r.chain.Client()) + if err != nil { + return nil, err + } + fn, err := ccipexec.ExecReportToEthTxMeta(ctx, typ, ver) + if err != nil { + return nil, err + } + subjectID := chainToUUID(configWatcher.chain.ID()) + contractTransmitter, err := newOnChainContractTransmitter(ctx, r.lggr, rargs, r.ks.Eth(), configWatcher, configTransmitterOpts{ + subjectID: &subjectID, + }, OCR2AggregatorTransmissionContractABI, WithReportToEthMetadata(fn), WithRetention(0)) + if err != nil { + return nil, err + } + + return NewDstExecProvider( + r.lggr, + versionFinder, + r.chain.Client(), + r.chain.LogPoller(), + execPluginConfig.DestStartBlock, + contractTransmitter, + configWatcher, + r.chain.GasEstimator(), + *r.chain.Config().EVM().GasEstimator().PriceMax().ToInt(), + r.chain.TxManager(), + cciptypes.Address(rargs.ContractID), + ) } var _ commontypes.MedianProvider = (*medianProvider)(nil) diff --git a/core/services/relay/evm/exec_provider.go b/core/services/relay/evm/exec_provider.go new file mode 100644 index 00000000000..ae3ce56532a --- /dev/null +++ b/core/services/relay/evm/exec_provider.go @@ -0,0 +1,391 @@ +package evm + +import ( + "context" + "fmt" + "math/big" + "net/url" + "time" + + "go.uber.org/multierr" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + + "github.com/smartcontractkit/chainlink-common/pkg/types" + commontypes "github.com/smartcontractkit/chainlink-common/pkg/types" + cciptypes "github.com/smartcontractkit/chainlink-common/pkg/types/ccip" + + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/gas" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/txmgr" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/router" + "github.com/smartcontractkit/chainlink/v2/core/logger" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip" + "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/tokendata/usdc" +) + +type 
SrcExecProvider struct { + lggr logger.Logger + versionFinder ccip.VersionFinder + client client.Client + lp logpoller.LogPoller + startBlock uint64 + estimator gas.EvmFeeEstimator + maxGasPrice *big.Int + usdcReader *ccip.USDCReaderImpl + usdcAttestationAPI string + usdcAttestationAPITimeoutSeconds int + usdcAttestationAPIIntervalMilliseconds int + usdcSrcMsgTransmitterAddr common.Address + + // these values are nil and are updated for Close() + seenOnRampAddress *cciptypes.Address + seenSourceChainSelector *uint64 + seenDestChainSelector *uint64 +} + +func NewSrcExecProvider( + lggr logger.Logger, + versionFinder ccip.VersionFinder, + client client.Client, + estimator gas.EvmFeeEstimator, + maxGasPrice *big.Int, + lp logpoller.LogPoller, + startBlock uint64, + jobID string, + usdcAttestationAPI string, + usdcAttestationAPITimeoutSeconds int, + usdcAttestationAPIIntervalMilliseconds int, + usdcSrcMsgTransmitterAddr common.Address, +) (commontypes.CCIPExecProvider, error) { + var usdcReader *ccip.USDCReaderImpl + var err error + if usdcAttestationAPI != "" { + usdcReader, err = ccip.NewUSDCReader(lggr, jobID, usdcSrcMsgTransmitterAddr, lp, true) + if err != nil { + return nil, fmt.Errorf("new usdc reader: %w", err) + } + } + + return &SrcExecProvider{ + lggr: lggr, + versionFinder: versionFinder, + client: client, + estimator: estimator, + maxGasPrice: maxGasPrice, + lp: lp, + startBlock: startBlock, + usdcReader: usdcReader, + usdcAttestationAPI: usdcAttestationAPI, + usdcAttestationAPITimeoutSeconds: usdcAttestationAPITimeoutSeconds, + usdcAttestationAPIIntervalMilliseconds: usdcAttestationAPIIntervalMilliseconds, + usdcSrcMsgTransmitterAddr: usdcSrcMsgTransmitterAddr, + }, nil +} + +func (s *SrcExecProvider) Name() string { + return "CCIP.SrcExecProvider" +} + +func (s *SrcExecProvider) Start(ctx context.Context) error { + if s.startBlock != 0 { + s.lggr.Infow("start replaying src chain", "fromBlock", s.startBlock) + return s.lp.Replay(ctx, int64(s.startBlock)) + } + return nil +} + +// Close is called when the job that created this provider is closed. +func (s *SrcExecProvider) Close() error { + versionFinder := ccip.NewEvmVersionFinder() + + unregisterFuncs := make([]func() error, 0, 2) + unregisterFuncs = append(unregisterFuncs, func() error { + // avoid panic in the case NewOnRampReader wasn't called + if s.seenOnRampAddress == nil { + return nil + } + return ccip.CloseOnRampReader(s.lggr, versionFinder, *s.seenSourceChainSelector, *s.seenDestChainSelector, *s.seenOnRampAddress, s.lp, s.client) + }) + unregisterFuncs = append(unregisterFuncs, func() error { + if s.usdcAttestationAPI == "" { + return nil + } + return ccip.CloseUSDCReader(s.lggr, s.lggr.Name(), s.usdcSrcMsgTransmitterAddr, s.lp) + }) + var multiErr error + for _, fn := range unregisterFuncs { + if err := fn(); err != nil { + multiErr = multierr.Append(multiErr, err) + } + } + return multiErr +} + +func (s *SrcExecProvider) Ready() error { + return nil +} + +func (s *SrcExecProvider) HealthReport() map[string]error { + return make(map[string]error) +} + +func (s *SrcExecProvider) OffchainConfigDigester() ocrtypes.OffchainConfigDigester { + // TODO CCIP-2494 + // OffchainConfigDigester called on SrcExecProvider. It should only be called on DstExecProvider + return UnimplementedOffchainConfigDigester{} +} + +func (s *SrcExecProvider) ContractConfigTracker() ocrtypes.ContractConfigTracker { + // TODO CCIP-2494 + // "ContractConfigTracker called on SrcExecProvider. 
It should only be called on DstExecProvider + return UnimplementedContractConfigTracker{} +} + +func (s *SrcExecProvider) ContractTransmitter() ocrtypes.ContractTransmitter { + // TODO CCIP-2494 + // "ContractTransmitter called on SrcExecProvider. It should only be called on DstExecProvider + return UnimplementedContractTransmitter{} +} + +func (s *SrcExecProvider) ChainReader() commontypes.ContractReader { + return nil +} + +func (s *SrcExecProvider) Codec() commontypes.Codec { + return nil +} + +func (s *SrcExecProvider) GetTransactionStatus(ctx context.Context, transactionID string) (types.TransactionStatus, error) { + return 0, fmt.Errorf("invalid: GetTransactionStatus called on SrcExecProvider. It should only be called on DstExecProvider") +} + +func (s *SrcExecProvider) NewCommitStoreReader(ctx context.Context, addr cciptypes.Address) (commitStoreReader cciptypes.CommitStoreReader, err error) { + commitStoreReader = NewIncompleteSourceCommitStoreReader(s.estimator, s.maxGasPrice) + return +} + +func (s *SrcExecProvider) NewOffRampReader(ctx context.Context, addr cciptypes.Address) (cciptypes.OffRampReader, error) { + return nil, fmt.Errorf("invalid: NewOffRampReader called on SrcExecProvider. Valid on DstExecProvider") +} + +func (s *SrcExecProvider) NewOnRampReader(ctx context.Context, onRampAddress cciptypes.Address, sourceChainSelector uint64, destChainSelector uint64) (onRampReader cciptypes.OnRampReader, err error) { + s.seenOnRampAddress = &onRampAddress + + versionFinder := ccip.NewEvmVersionFinder() + onRampReader, err = ccip.NewOnRampReader(s.lggr, versionFinder, sourceChainSelector, destChainSelector, onRampAddress, s.lp, s.client) + return +} + +func (s *SrcExecProvider) NewPriceRegistryReader(ctx context.Context, addr cciptypes.Address) (priceRegistryReader cciptypes.PriceRegistryReader, err error) { + srcPriceRegistry := ccip.NewEvmPriceRegistry(s.lp, s.client, s.lggr, ccip.ExecPluginLabel) + priceRegistryReader, err = srcPriceRegistry.NewPriceRegistryReader(ctx, addr) + return +} + +func (s *SrcExecProvider) NewTokenDataReader(ctx context.Context, tokenAddress cciptypes.Address) (tokenDataReader cciptypes.TokenDataReader, err error) { + attestationURI, err2 := url.ParseRequestURI(s.usdcAttestationAPI) + if err2 != nil { + return nil, fmt.Errorf("failed to parse USDC attestation API: %w", err2) + } + tokenAddr, err2 := ccip.GenericAddrToEvm(tokenAddress) + if err2 != nil { + return nil, fmt.Errorf("failed to parse token address: %w", err2) + } + tokenDataReader = usdc.NewUSDCTokenDataReader( + s.lggr, + s.usdcReader, + attestationURI, + s.usdcAttestationAPITimeoutSeconds, + tokenAddr, + time.Duration(s.usdcAttestationAPIIntervalMilliseconds)*time.Millisecond, + ) + return +} + +func (s *SrcExecProvider) NewTokenPoolBatchedReader(ctx context.Context, offRampAddr cciptypes.Address, sourceChainSelector uint64) (cciptypes.TokenPoolBatchedReader, error) { + return nil, fmt.Errorf("invalid: NewTokenPoolBatchedReader called on SrcExecProvider. 
It should only be called on DstExecProvdier") +} + +func (s *SrcExecProvider) SourceNativeToken(ctx context.Context, sourceRouterAddr cciptypes.Address) (cciptypes.Address, error) { + sourceRouterAddrHex, err := ccip.GenericAddrToEvm(sourceRouterAddr) + if err != nil { + return "", err + } + sourceRouter, err := router.NewRouter(sourceRouterAddrHex, s.client) + if err != nil { + return "", err + } + sourceNative, err := sourceRouter.GetWrappedNative(&bind.CallOpts{Context: ctx}) + if err != nil { + return "", err + } + + return ccip.EvmAddrToGeneric(sourceNative), nil +} + +type DstExecProvider struct { + lggr logger.Logger + versionFinder ccip.VersionFinder + client client.Client + lp logpoller.LogPoller + startBlock uint64 + contractTransmitter *contractTransmitter + configWatcher *configWatcher + gasEstimator gas.EvmFeeEstimator + maxGasPrice big.Int + txm txmgr.TxManager + offRampAddress cciptypes.Address + + // these values are nil and are updated for Close() + seenCommitStoreAddr *cciptypes.Address +} + +func NewDstExecProvider( + lggr logger.Logger, + versionFinder ccip.VersionFinder, + client client.Client, + lp logpoller.LogPoller, + startBlock uint64, + contractTransmitter *contractTransmitter, + configWatcher *configWatcher, + gasEstimator gas.EvmFeeEstimator, + maxGasPrice big.Int, + txm txmgr.TxManager, + offRampAddress cciptypes.Address, +) (commontypes.CCIPExecProvider, error) { + return &DstExecProvider{ + lggr: lggr, + versionFinder: versionFinder, + client: client, + lp: lp, + startBlock: startBlock, + contractTransmitter: contractTransmitter, + configWatcher: configWatcher, + gasEstimator: gasEstimator, + maxGasPrice: maxGasPrice, + txm: txm, + offRampAddress: offRampAddress, + }, nil +} + +func (d *DstExecProvider) Name() string { + return "CCIP.DestRelayerExecProvider" +} + +func (d *DstExecProvider) Start(ctx context.Context) error { + if d.startBlock != 0 { + d.lggr.Infow("start replaying dst chain", "fromBlock", d.startBlock) + return d.lp.Replay(ctx, int64(d.startBlock)) + } + return nil +} + +// Close is called when the job that created this provider is deleted +// At this time, any of the methods on the provider may or may not have been called. +// If NewOnRampReader and NewCommitStoreReader have not been called, their corresponding +// Close methods will be expected to error. 
+func (d *DstExecProvider) Close() error { + versionFinder := ccip.NewEvmVersionFinder() + + unregisterFuncs := make([]func() error, 0, 2) + unregisterFuncs = append(unregisterFuncs, func() error { + if d.seenCommitStoreAddr == nil { + return nil + } + return ccip.CloseCommitStoreReader(d.lggr, versionFinder, *d.seenCommitStoreAddr, d.client, d.lp) + }) + unregisterFuncs = append(unregisterFuncs, func() error { + return ccip.CloseOffRampReader(d.lggr, versionFinder, d.offRampAddress, d.client, d.lp, nil, big.NewInt(0)) + }) + + var multiErr error + for _, fn := range unregisterFuncs { + if err := fn(); err != nil { + multiErr = multierr.Append(multiErr, err) + } + } + return multiErr +} + +func (d *DstExecProvider) Ready() error { + return nil +} + +func (d *DstExecProvider) HealthReport() map[string]error { + return make(map[string]error) +} + +func (d *DstExecProvider) OffchainConfigDigester() ocrtypes.OffchainConfigDigester { + return d.configWatcher.OffchainConfigDigester() +} + +func (d *DstExecProvider) ContractConfigTracker() ocrtypes.ContractConfigTracker { + return d.configWatcher.ContractConfigTracker() +} + +func (d *DstExecProvider) ContractTransmitter() ocrtypes.ContractTransmitter { + return d.contractTransmitter +} + +func (d *DstExecProvider) ChainReader() commontypes.ContractReader { + return nil +} + +func (d *DstExecProvider) Codec() commontypes.Codec { + return nil +} + +func (d *DstExecProvider) GetTransactionStatus(ctx context.Context, transactionID string) (types.TransactionStatus, error) { + return d.txm.GetTransactionStatus(ctx, transactionID) +} + +func (d *DstExecProvider) NewCommitStoreReader(ctx context.Context, addr cciptypes.Address) (commitStoreReader cciptypes.CommitStoreReader, err error) { + d.seenCommitStoreAddr = &addr + + versionFinder := ccip.NewEvmVersionFinder() + commitStoreReader, err = NewIncompleteDestCommitStoreReader(d.lggr, versionFinder, addr, d.client, d.lp) + return +} + +func (d *DstExecProvider) NewOffRampReader(ctx context.Context, offRampAddress cciptypes.Address) (offRampReader cciptypes.OffRampReader, err error) { + offRampReader, err = ccip.NewOffRampReader(d.lggr, d.versionFinder, offRampAddress, d.client, d.lp, d.gasEstimator, &d.maxGasPrice, true) + return +} + +func (d *DstExecProvider) NewOnRampReader(ctx context.Context, addr cciptypes.Address, sourceChainSelector uint64, destChainSelector uint64) (cciptypes.OnRampReader, error) { + return nil, fmt.Errorf("invalid: NewOnRampReader called on DstExecProvider. It should only be called on SrcExecProvider") +} + +func (d *DstExecProvider) NewPriceRegistryReader(ctx context.Context, addr cciptypes.Address) (priceRegistryReader cciptypes.PriceRegistryReader, err error) { + destPriceRegistry := ccip.NewEvmPriceRegistry(d.lp, d.client, d.lggr, ccip.ExecPluginLabel) + priceRegistryReader, err = destPriceRegistry.NewPriceRegistryReader(ctx, addr) + return +} + +func (d *DstExecProvider) NewTokenDataReader(ctx context.Context, tokenAddress cciptypes.Address) (cciptypes.TokenDataReader, error) { + return nil, fmt.Errorf("invalid: NewTokenDataReader called on DstExecProvider. 
It should only be called on SrcExecProvider") +} + +func (d *DstExecProvider) NewTokenPoolBatchedReader(ctx context.Context, offRampAddress cciptypes.Address, sourceChainSelector uint64) (tokenPoolBatchedReader cciptypes.TokenPoolBatchedReader, err error) { + batchCaller := ccip.NewDynamicLimitedBatchCaller( + d.lggr, + d.client, + uint(ccip.DefaultRpcBatchSizeLimit), + uint(ccip.DefaultRpcBatchBackOffMultiplier), + uint(ccip.DefaultMaxParallelRpcCalls), + ) + + tokenPoolBatchedReader, err = ccip.NewEVMTokenPoolBatchedReader(d.lggr, sourceChainSelector, offRampAddress, batchCaller) + if err != nil { + return nil, fmt.Errorf("new token pool batched reader: %w", err) + } + return +} + +func (d *DstExecProvider) SourceNativeToken(ctx context.Context, addr cciptypes.Address) (cciptypes.Address, error) { + return "", fmt.Errorf("invalid: SourceNativeToken called on DstExecProvider. It should only be called on SrcExecProvider") +} diff --git a/core/services/relay/evm/statuschecker/mocks/ccip_transaction_status_checker.go b/core/services/relay/evm/statuschecker/mocks/ccip_transaction_status_checker.go new file mode 100644 index 00000000000..9bd59ccf4ef --- /dev/null +++ b/core/services/relay/evm/statuschecker/mocks/ccip_transaction_status_checker.go @@ -0,0 +1,104 @@ +// Code generated by mockery v2.43.2. DO NOT EDIT. + +package mocks + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" + + types "github.com/smartcontractkit/chainlink-common/pkg/types" +) + +// CCIPTransactionStatusChecker is an autogenerated mock type for the CCIPTransactionStatusChecker type +type CCIPTransactionStatusChecker struct { + mock.Mock +} + +type CCIPTransactionStatusChecker_Expecter struct { + mock *mock.Mock +} + +func (_m *CCIPTransactionStatusChecker) EXPECT() *CCIPTransactionStatusChecker_Expecter { + return &CCIPTransactionStatusChecker_Expecter{mock: &_m.Mock} +} + +// CheckMessageStatus provides a mock function with given fields: ctx, msgID +func (_m *CCIPTransactionStatusChecker) CheckMessageStatus(ctx context.Context, msgID string) ([]types.TransactionStatus, int, error) { + ret := _m.Called(ctx, msgID) + + if len(ret) == 0 { + panic("no return value specified for CheckMessageStatus") + } + + var r0 []types.TransactionStatus + var r1 int + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, string) ([]types.TransactionStatus, int, error)); ok { + return rf(ctx, msgID) + } + if rf, ok := ret.Get(0).(func(context.Context, string) []types.TransactionStatus); ok { + r0 = rf(ctx, msgID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]types.TransactionStatus) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string) int); ok { + r1 = rf(ctx, msgID) + } else { + r1 = ret.Get(1).(int) + } + + if rf, ok := ret.Get(2).(func(context.Context, string) error); ok { + r2 = rf(ctx, msgID) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// CCIPTransactionStatusChecker_CheckMessageStatus_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CheckMessageStatus' +type CCIPTransactionStatusChecker_CheckMessageStatus_Call struct { + *mock.Call +} + +// CheckMessageStatus is a helper method to define mock.On call +// - ctx context.Context +// - msgID string +func (_e *CCIPTransactionStatusChecker_Expecter) CheckMessageStatus(ctx interface{}, msgID interface{}) *CCIPTransactionStatusChecker_CheckMessageStatus_Call { + return &CCIPTransactionStatusChecker_CheckMessageStatus_Call{Call: _e.mock.On("CheckMessageStatus", ctx, msgID)} 
+}
+
+func (_c *CCIPTransactionStatusChecker_CheckMessageStatus_Call) Run(run func(ctx context.Context, msgID string)) *CCIPTransactionStatusChecker_CheckMessageStatus_Call {
+	_c.Call.Run(func(args mock.Arguments) {
+		run(args[0].(context.Context), args[1].(string))
+	})
+	return _c
+}
+
+func (_c *CCIPTransactionStatusChecker_CheckMessageStatus_Call) Return(transactionStatuses []types.TransactionStatus, retryCounter int, err error) *CCIPTransactionStatusChecker_CheckMessageStatus_Call {
+	_c.Call.Return(transactionStatuses, retryCounter, err)
+	return _c
+}
+
+func (_c *CCIPTransactionStatusChecker_CheckMessageStatus_Call) RunAndReturn(run func(context.Context, string) ([]types.TransactionStatus, int, error)) *CCIPTransactionStatusChecker_CheckMessageStatus_Call {
+	_c.Call.Return(run)
+	return _c
+}
+
+// NewCCIPTransactionStatusChecker creates a new instance of CCIPTransactionStatusChecker. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewCCIPTransactionStatusChecker(t interface {
+	mock.TestingT
+	Cleanup(func())
+}) *CCIPTransactionStatusChecker {
+	mock := &CCIPTransactionStatusChecker{}
+	mock.Mock.Test(t)
+
+	t.Cleanup(func() { mock.AssertExpectations(t) })
+
+	return mock
+}
diff --git a/core/services/relay/evm/statuschecker/txm_status_checker.go b/core/services/relay/evm/statuschecker/txm_status_checker.go
new file mode 100644
index 00000000000..f22e6d78b9f
--- /dev/null
+++ b/core/services/relay/evm/statuschecker/txm_status_checker.go
@@ -0,0 +1,54 @@
+package statuschecker
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/smartcontractkit/chainlink-common/pkg/types"
+)
+
+// CCIPTransactionStatusChecker is an interface that defines the method for checking the status of a transaction.
+// CheckMessageStatus checks the status of a transaction for a given message ID.
+// It returns a list of transaction statuses, the retry counter, and an error if any occurred during the process.
+//
+
+type CCIPTransactionStatusChecker interface {
+	CheckMessageStatus(ctx context.Context, msgID string) (transactionStatuses []types.TransactionStatus, retryCounter int, err error)
+}
+
+type TxmStatusChecker struct {
+	getTransactionStatus func(ctx context.Context, transactionID string) (types.TransactionStatus, error)
+}
+
+func NewTxmStatusChecker(getTransactionStatus func(ctx context.Context, transactionID string) (types.TransactionStatus, error)) *TxmStatusChecker {
+	return &TxmStatusChecker{getTransactionStatus: getTransactionStatus}
+}
+
+// CheckMessageStatus checks the status of a message by checking the status of all transactions associated with the message ID.
+// It returns a slice of all statuses and the number of transactions found (-1 if none).
+// The key will follow the format: <msgID>-<counter>. TXM will be queried for each key until a NotFound error is returned.
+// The goal is to find all transactions associated with a message ID and snooze messages if they are fatal in the Execution Plugin. 
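A hedged usage sketch of the scan described above (the implementation follows): the checker probes idempotency keys of the form <msgID>-<counter> ("msg-0", "msg-1", ...) until the underlying lookup reports the transaction as not found. The helper and its stub data below are invented for illustration.

package statuschecker

import (
	"context"
	"fmt"

	"github.com/smartcontractkit/chainlink-common/pkg/types"
)

// sketch only: exampleScan and its stub data are invented for illustration.
// The stub knows about "msg-0" and "msg-1"; the lookup for "msg-2" fails with
// a not-found style error, which ends the scan.
func exampleScan(ctx context.Context) ([]types.TransactionStatus, int, error) {
	known := map[string]types.TransactionStatus{
		"msg-0": types.Finalized,
		"msg-1": types.Failed,
	}
	checker := NewTxmStatusChecker(func(_ context.Context, id string) (types.TransactionStatus, error) {
		if s, ok := known[id]; ok {
			return s, nil
		}
		return types.Unknown, fmt.Errorf("failed to find transaction with IdempotencyKey %s", id)
	})
	// Returns [Finalized, Failed], a counter of 1 (index of the last key found), and a nil error.
	return checker.CheckMessageStatus(ctx, "msg")
}
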
+func (tsc *TxmStatusChecker) CheckMessageStatus(ctx context.Context, msgID string) ([]types.TransactionStatus, int, error) { + var counter int + const maxStatuses = 1000 // Cap the number of statuses to avoid infinite loop + + allStatuses := make([]types.TransactionStatus, 0) + + for { + transactionID := fmt.Sprintf("%s-%d", msgID, counter) + status, err := tsc.getTransactionStatus(ctx, transactionID) + if err != nil && status == types.Unknown { + // If the status is unknown and err not nil, it means the transaction was not found + break + } + allStatuses = append(allStatuses, status) + counter++ + + // Break the loop if the cap is reached + if counter >= maxStatuses { + return allStatuses, counter - 1, fmt.Errorf("maximum number of statuses reached, possible infinite loop") + } + } + + return allStatuses, counter - 1, nil +} diff --git a/core/services/relay/evm/statuschecker/txm_status_checker_test.go b/core/services/relay/evm/statuschecker/txm_status_checker_test.go new file mode 100644 index 00000000000..456d07e7a7d --- /dev/null +++ b/core/services/relay/evm/statuschecker/txm_status_checker_test.go @@ -0,0 +1,103 @@ +package statuschecker + +import ( + "context" + "errors" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + + "github.com/smartcontractkit/chainlink-common/pkg/types" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/txmgr/mocks" + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" +) + +func Test_CheckMessageStatus(t *testing.T) { + testutils.SkipShort(t, "") + ctx := context.Background() + mockTxManager := mocks.NewMockEvmTxManager(t) + checker := NewTxmStatusChecker(mockTxManager.GetTransactionStatus) + + msgID := "test-message-id" + + // Define test cases + testCases := []struct { + name string + setupMock func() + expectedStatus []types.TransactionStatus + expectedCounter int + expectedError error + }{ + { + name: "No transactions found", + setupMock: func() { + mockTxManager.Mock = mock.Mock{} + mockTxManager.On("GetTransactionStatus", ctx, "test-message-id-0").Return(types.Unknown, errors.New("failed to find transaction with IdempotencyKey test-message-id-0")) + }, + expectedStatus: []types.TransactionStatus{}, + expectedCounter: -1, + expectedError: nil, + }, + { + name: "Single transaction found", + setupMock: func() { + mockTxManager.Mock = mock.Mock{} + mockTxManager.On("GetTransactionStatus", ctx, "test-message-id-0").Return(types.Finalized, nil) + mockTxManager.On("GetTransactionStatus", ctx, "test-message-id-1").Return(types.Unknown, errors.New("failed to find transaction with IdempotencyKey test-message-id-1")) + }, + expectedStatus: []types.TransactionStatus{types.Finalized}, + expectedCounter: 0, + expectedError: nil, + }, + { + name: "Multiple transactions found", + setupMock: func() { + mockTxManager.Mock = mock.Mock{} + mockTxManager.On("GetTransactionStatus", ctx, "test-message-id-0").Return(types.Finalized, nil) + mockTxManager.On("GetTransactionStatus", ctx, "test-message-id-1").Return(types.Failed, nil) + mockTxManager.On("GetTransactionStatus", ctx, "test-message-id-2").Return(types.Unknown, errors.New("failed to find transaction with IdempotencyKey test-message-id-2")) + }, + expectedStatus: []types.TransactionStatus{types.Finalized, types.Failed}, + expectedCounter: 1, + expectedError: nil, + }, + { + name: "Unknown status without nil (in progress)", + setupMock: func() { + mockTxManager.Mock = mock.Mock{} + mockTxManager.On("GetTransactionStatus", ctx, 
"test-message-id-0").Return(types.Unknown, nil) + mockTxManager.On("GetTransactionStatus", ctx, "test-message-id-1").Return(types.Unknown, errors.New("failed to find transaction with IdempotencyKey test-message-id-1")) + }, + expectedStatus: []types.TransactionStatus{types.Unknown}, + expectedCounter: 0, + expectedError: nil, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + tc.setupMock() + statuses, counter, err := checker.CheckMessageStatus(ctx, msgID) + assert.Equal(t, tc.expectedStatus, statuses) + assert.Equal(t, tc.expectedCounter, counter) + assert.Equal(t, tc.expectedError, err) + mockTxManager.AssertExpectations(t) + }) + } +} + +func Test_FailForMoreThan1000Retries(t *testing.T) { + ctx := context.Background() + mockTxManager := mocks.NewMockEvmTxManager(t) + checker := NewTxmStatusChecker(mockTxManager.GetTransactionStatus) + + for i := 0; i < 1000; i++ { + mockTxManager.On("GetTransactionStatus", ctx, fmt.Sprintf("test-message-id-%d", i)).Return(types.Finalized, nil) + } + + msgID := "test-message-id" + _, _, err := checker.CheckMessageStatus(ctx, msgID) + assert.EqualError(t, err, "maximum number of statuses reached, possible infinite loop") +} diff --git a/core/services/synchronization/common.go b/core/services/synchronization/common.go index 5f469c055d4..bfb9fba6de6 100644 --- a/core/services/synchronization/common.go +++ b/core/services/synchronization/common.go @@ -16,6 +16,8 @@ const ( OCR TelemetryType = "ocr" OCR2Automation TelemetryType = "ocr2-automation" OCR2Functions TelemetryType = "ocr2-functions" + OCR2CCIPCommit TelemetryType = "ocr2-ccip-commit" + OCR2CCIPExec TelemetryType = "ocr2-ccip-exec" OCR2Threshold TelemetryType = "ocr2-threshold" OCR2S4 TelemetryType = "ocr2-s4" OCR2Median TelemetryType = "ocr2-median" diff --git a/core/web/resolver/testdata/config-empty-effective.toml b/core/web/resolver/testdata/config-empty-effective.toml index 1bad3fd91c6..f1325d824ea 100644 --- a/core/web/resolver/testdata/config-empty-effective.toml +++ b/core/web/resolver/testdata/config-empty-effective.toml @@ -6,6 +6,7 @@ ShutdownGracePeriod = '5s' FeedsManager = true LogPoller = false UICSAKeys = false +CCIP = true [Database] DefaultIdleInTxSessionTimeout = '1h0m0s' diff --git a/core/web/resolver/testdata/config-full.toml b/core/web/resolver/testdata/config-full.toml index 1672eb1b41d..9421e6198ee 100644 --- a/core/web/resolver/testdata/config-full.toml +++ b/core/web/resolver/testdata/config-full.toml @@ -6,6 +6,7 @@ ShutdownGracePeriod = '10s' FeedsManager = true LogPoller = true UICSAKeys = true +CCIP = true [Database] DefaultIdleInTxSessionTimeout = '1m0s' diff --git a/core/web/resolver/testdata/config-multi-chain-effective.toml b/core/web/resolver/testdata/config-multi-chain-effective.toml index 0e12af9a7e4..1c4093cbfca 100644 --- a/core/web/resolver/testdata/config-multi-chain-effective.toml +++ b/core/web/resolver/testdata/config-multi-chain-effective.toml @@ -6,6 +6,7 @@ ShutdownGracePeriod = '5s' FeedsManager = true LogPoller = false UICSAKeys = false +CCIP = true [Database] DefaultIdleInTxSessionTimeout = '1h0m0s' diff --git a/docs/CONFIG.md b/docs/CONFIG.md index 5caab7614e8..47935390ce8 100644 --- a/docs/CONFIG.md +++ b/docs/CONFIG.md @@ -51,6 +51,7 @@ ShutdownGracePeriod is the maximum time allowed to shut down gracefully. If exce FeedsManager = true # Default LogPoller = false # Default UICSAKeys = false # Default +CCIP = true # Default ``` @@ -72,6 +73,12 @@ UICSAKeys = false # Default ``` UICSAKeys enables CSA Keys in the UI. 
+### CCIP +```toml +CCIP = true # Default +``` +CCIP enables the CCIP service. + ## Database ```toml [Database] diff --git a/go.mod b/go.mod index 45e0b62d52e..4b216ddc0d4 100644 --- a/go.mod +++ b/go.mod @@ -9,11 +9,12 @@ require ( github.com/NethermindEth/juno v0.3.1 github.com/NethermindEth/starknet.go v0.7.1-0.20240401080518-34a506f3cfdb github.com/XSAM/otelsql v0.27.0 - github.com/avast/retry-go/v4 v4.5.1 + github.com/avast/retry-go/v4 v4.6.0 github.com/btcsuite/btcd/btcec/v2 v2.3.2 github.com/cometbft/cometbft v0.37.2 github.com/cosmos/cosmos-sdk v0.47.4 github.com/danielkov/gin-helmet v0.0.0-20171108135313-1387e224435e + github.com/deckarep/golang-set/v2 v2.3.0 github.com/dominikbraun/graph v0.23.0 github.com/esote/minmaxheap v1.0.0 github.com/ethereum/go-ethereum v1.13.8 @@ -67,6 +68,7 @@ require ( github.com/prometheus/prometheus v0.48.1 github.com/robfig/cron/v3 v3.0.1 github.com/rogpeppe/go-internal v1.12.0 + github.com/rs/zerolog v1.30.0 github.com/scylladb/go-reflectx v1.0.1 github.com/shirou/gopsutil/v3 v3.24.3 github.com/shopspring/decimal v1.4.0 @@ -92,6 +94,7 @@ require ( github.com/umbracle/ethgo v0.1.3 github.com/unrolled/secure v1.13.0 github.com/urfave/cli v1.22.14 + github.com/wk8/go-ordered-map/v2 v2.1.8 go.dedis.ch/fixbuf v1.0.3 go.dedis.ch/kyber/v3 v3.1.0 go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin v0.49.0 @@ -112,6 +115,7 @@ require ( google.golang.org/protobuf v1.34.2 gopkg.in/guregu/null.v4 v4.0.0 gopkg.in/natefinch/lumberjack.v2 v2.2.1 + k8s.io/utils v0.0.0-20230711102312-30195339c3c7 ) require ( @@ -174,7 +178,6 @@ require ( github.com/crate-crypto/go-kzg-4844 v0.7.0 // indirect github.com/danieljoos/wincred v1.1.2 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect - github.com/deckarep/golang-set/v2 v2.3.0 // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect github.com/dfuse-io/logging v0.0.0-20210109005628-b97a57253f70 // indirect github.com/dgraph-io/badger/v2 v2.2007.4 // indirect @@ -312,7 +315,6 @@ require ( github.com/tyler-smith/go-bip39 v1.1.0 // indirect github.com/umbracle/fastrlp v0.0.0-20220527094140-59d5dd30e722 // indirect github.com/valyala/fastjson v1.4.1 // indirect - github.com/wk8/go-ordered-map/v2 v2.1.8 // indirect github.com/x448/float16 v0.8.4 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect github.com/zondax/hid v0.9.1 // indirect diff --git a/go.sum b/go.sum index 4a6b294c122..6b0ec5aa5c8 100644 --- a/go.sum +++ b/go.sum @@ -150,8 +150,8 @@ github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmV github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/avast/retry-go/v4 v4.5.1 h1:AxIx0HGi4VZ3I02jr78j5lZ3M6x1E0Ivxa6b0pUUh7o= -github.com/avast/retry-go/v4 v4.5.1/go.mod h1:/sipNsvNB3RRuT5iNcb6h73nw3IBmXJ/H3XrCQYSOpc= +github.com/avast/retry-go/v4 v4.6.0 h1:K9xNA+KeB8HHc2aWFuLb25Offp+0iVRXEvFx8IinRJA= +github.com/avast/retry-go/v4 v4.6.0/go.mod h1:gvWlPhBVsvBbLkVGDg/KwvBv0bEkCOLRRSHKIr2PyOE= github.com/aws/aws-sdk-go v1.22.1/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.23.20/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.45.25 h1:c4fLlh5sLdK2DCRTY1z0hyuJZU4ygxX8m1FswL6/nF4= @@ -264,6 +264,7 @@ 
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3Ee github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cosmos/btcutil v1.0.5 h1:t+ZFcX77LpKtDBhjucvnOH8C2l2ioGsBNEQ3jef8xFk= github.com/cosmos/btcutil v1.0.5/go.mod h1:IyB7iuqZMJlthe2tkIFL33xPyzbFYP0XVdS8P5lUPis= @@ -491,6 +492,7 @@ github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 h1:ZpnhV/YsD2/4cESfV5+Hoeu/iUR3ruzNvZ+yQfO03a0= github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= @@ -1093,6 +1095,7 @@ github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99 github.com/rs/cors v1.8.3 h1:O+qNyWn7Z+F9M0ILBHgMVPuB1xTOucVd5gtaYyXBpRo= github.com/rs/cors v1.8.3/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= +github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= github.com/rs/zerolog v1.30.0 h1:SymVODrcRsaRaSInD9yQtKbtWqwsfoPcRff/oRXLj4c= @@ -1911,6 +1914,8 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= +k8s.io/utils v0.0.0-20230711102312-30195339c3c7 h1:ZgnF1KZsYxWIifwSNZFZgNtWE89WI5yiP5WwlfDoIyc= +k8s.io/utils v0.0.0-20230711102312-30195339c3c7/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= modernc.org/gc/v3 v3.0.0-20240107210532-573471604cb6 h1:5D53IMaUuA5InSeMu9eJtlQXS2NxAhyWQvkKEgXZhHI= modernc.org/gc/v3 v3.0.0-20240107210532-573471604cb6/go.mod h1:Qz0X07sNOR1jWYCrJMEnbW/X55x206Q7Vt4mz6/wHp4= modernc.org/libc v1.41.0 h1:g9YAc6BkKlgORsUWj+JwqoB1wU3o4DE3bM3yvA3k+Gk= diff --git a/integration-tests/go.mod b/integration-tests/go.mod index 8f9652099b1..0c0ce337695 100644 --- a/integration-tests/go.mod +++ b/integration-tests/go.mod @@ -6,7 +6,7 @@ go 1.22.5 replace github.com/smartcontractkit/chainlink/v2 => ../ require ( - github.com/avast/retry-go/v4 v4.5.1 + github.com/avast/retry-go/v4 v4.6.0 github.com/barkimedes/go-deepcopy v0.0.0-20220514131651-17c30cfc62df github.com/chaos-mesh/chaos-mesh/api v0.0.0-20240709130330-9f4feec7553f 
github.com/cli/go-gh/v2 v2.0.0 diff --git a/integration-tests/go.sum b/integration-tests/go.sum index bca92f4a97c..8b7bd1d2a14 100644 --- a/integration-tests/go.sum +++ b/integration-tests/go.sum @@ -207,8 +207,8 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3d github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/avast/retry-go v3.0.0+incompatible h1:4SOWQ7Qs+oroOTQOYnAHqelpCO0biHSxpiH9JdtuBj0= github.com/avast/retry-go v3.0.0+incompatible/go.mod h1:XtSnn+n/sHqQIpZ10K1qAevBhOOCWBLXXy3hyiqqBrY= -github.com/avast/retry-go/v4 v4.5.1 h1:AxIx0HGi4VZ3I02jr78j5lZ3M6x1E0Ivxa6b0pUUh7o= -github.com/avast/retry-go/v4 v4.5.1/go.mod h1:/sipNsvNB3RRuT5iNcb6h73nw3IBmXJ/H3XrCQYSOpc= +github.com/avast/retry-go/v4 v4.6.0 h1:K9xNA+KeB8HHc2aWFuLb25Offp+0iVRXEvFx8IinRJA= +github.com/avast/retry-go/v4 v4.6.0/go.mod h1:gvWlPhBVsvBbLkVGDg/KwvBv0bEkCOLRRSHKIr2PyOE= github.com/aws/aws-sdk-go v1.22.1/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.23.20/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= diff --git a/integration-tests/load/go.mod b/integration-tests/load/go.mod index f0554f2c725..4395a3ce486 100644 --- a/integration-tests/load/go.mod +++ b/integration-tests/load/go.mod @@ -76,7 +76,7 @@ require ( github.com/armon/go-metrics v0.4.1 // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect github.com/avast/retry-go v3.0.0+incompatible // indirect - github.com/avast/retry-go/v4 v4.5.1 // indirect + github.com/avast/retry-go/v4 v4.6.0 // indirect github.com/aws/aws-sdk-go v1.45.25 // indirect github.com/aws/constructs-go/constructs/v10 v10.1.255 // indirect github.com/aws/jsii-runtime-go v1.75.0 // indirect diff --git a/integration-tests/load/go.sum b/integration-tests/load/go.sum index f31a11d389d..40152875414 100644 --- a/integration-tests/load/go.sum +++ b/integration-tests/load/go.sum @@ -207,8 +207,8 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3d github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/avast/retry-go v3.0.0+incompatible h1:4SOWQ7Qs+oroOTQOYnAHqelpCO0biHSxpiH9JdtuBj0= github.com/avast/retry-go v3.0.0+incompatible/go.mod h1:XtSnn+n/sHqQIpZ10K1qAevBhOOCWBLXXy3hyiqqBrY= -github.com/avast/retry-go/v4 v4.5.1 h1:AxIx0HGi4VZ3I02jr78j5lZ3M6x1E0Ivxa6b0pUUh7o= -github.com/avast/retry-go/v4 v4.5.1/go.mod h1:/sipNsvNB3RRuT5iNcb6h73nw3IBmXJ/H3XrCQYSOpc= +github.com/avast/retry-go/v4 v4.6.0 h1:K9xNA+KeB8HHc2aWFuLb25Offp+0iVRXEvFx8IinRJA= +github.com/avast/retry-go/v4 v4.6.0/go.mod h1:gvWlPhBVsvBbLkVGDg/KwvBv0bEkCOLRRSHKIr2PyOE= github.com/aws/aws-sdk-go v1.22.1/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.23.20/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= diff --git a/testdata/scripts/node/validate/default.txtar b/testdata/scripts/node/validate/default.txtar index 1063d9c2a5a..ff8b4889c49 100644 --- a/testdata/scripts/node/validate/default.txtar +++ b/testdata/scripts/node/validate/default.txtar @@ -18,6 +18,7 @@ ShutdownGracePeriod = '5s' FeedsManager = true LogPoller = false UICSAKeys = false +CCIP = true [Database] DefaultIdleInTxSessionTimeout = '1h0m0s' diff --git 
a/testdata/scripts/node/validate/disk-based-logging-disabled.txtar b/testdata/scripts/node/validate/disk-based-logging-disabled.txtar index 56ce1ea7ba8..f4dd43cb900 100644 --- a/testdata/scripts/node/validate/disk-based-logging-disabled.txtar +++ b/testdata/scripts/node/validate/disk-based-logging-disabled.txtar @@ -62,6 +62,7 @@ ShutdownGracePeriod = '5s' FeedsManager = true LogPoller = false UICSAKeys = false +CCIP = true [Database] DefaultIdleInTxSessionTimeout = '1h0m0s' diff --git a/testdata/scripts/node/validate/disk-based-logging-no-dir.txtar b/testdata/scripts/node/validate/disk-based-logging-no-dir.txtar index e534c67a2f3..75a6ae36418 100644 --- a/testdata/scripts/node/validate/disk-based-logging-no-dir.txtar +++ b/testdata/scripts/node/validate/disk-based-logging-no-dir.txtar @@ -62,6 +62,7 @@ ShutdownGracePeriod = '5s' FeedsManager = true LogPoller = false UICSAKeys = false +CCIP = true [Database] DefaultIdleInTxSessionTimeout = '1h0m0s' diff --git a/testdata/scripts/node/validate/disk-based-logging.txtar b/testdata/scripts/node/validate/disk-based-logging.txtar index 29bc189e561..97bae5a84b6 100644 --- a/testdata/scripts/node/validate/disk-based-logging.txtar +++ b/testdata/scripts/node/validate/disk-based-logging.txtar @@ -62,6 +62,7 @@ ShutdownGracePeriod = '5s' FeedsManager = true LogPoller = false UICSAKeys = false +CCIP = true [Database] DefaultIdleInTxSessionTimeout = '1h0m0s' diff --git a/testdata/scripts/node/validate/invalid-ocr-p2p.txtar b/testdata/scripts/node/validate/invalid-ocr-p2p.txtar index 6a09dd06c47..0cdf001eccd 100644 --- a/testdata/scripts/node/validate/invalid-ocr-p2p.txtar +++ b/testdata/scripts/node/validate/invalid-ocr-p2p.txtar @@ -47,6 +47,7 @@ ShutdownGracePeriod = '5s' FeedsManager = true LogPoller = false UICSAKeys = false +CCIP = true [Database] DefaultIdleInTxSessionTimeout = '1h0m0s' diff --git a/testdata/scripts/node/validate/invalid.txtar b/testdata/scripts/node/validate/invalid.txtar index 60c42c7c399..ab6860ec790 100644 --- a/testdata/scripts/node/validate/invalid.txtar +++ b/testdata/scripts/node/validate/invalid.txtar @@ -52,6 +52,7 @@ ShutdownGracePeriod = '5s' FeedsManager = true LogPoller = false UICSAKeys = false +CCIP = true [Database] DefaultIdleInTxSessionTimeout = '1h0m0s' diff --git a/testdata/scripts/node/validate/valid.txtar b/testdata/scripts/node/validate/valid.txtar index 719bb8bcc47..603fdaada66 100644 --- a/testdata/scripts/node/validate/valid.txtar +++ b/testdata/scripts/node/validate/valid.txtar @@ -59,6 +59,7 @@ ShutdownGracePeriod = '5s' FeedsManager = true LogPoller = false UICSAKeys = false +CCIP = true [Database] DefaultIdleInTxSessionTimeout = '1h0m0s' diff --git a/testdata/scripts/node/validate/warnings.txtar b/testdata/scripts/node/validate/warnings.txtar index a652943e26b..dea40ec8da0 100644 --- a/testdata/scripts/node/validate/warnings.txtar +++ b/testdata/scripts/node/validate/warnings.txtar @@ -41,6 +41,7 @@ ShutdownGracePeriod = '5s' FeedsManager = true LogPoller = false UICSAKeys = false +CCIP = true [Database] DefaultIdleInTxSessionTimeout = '1h0m0s' From 1d81278edace2411f0d87b7e111321bd67b6b0a5 Mon Sep 17 00:00:00 2001 From: Bartek Tofel Date: Tue, 6 Aug 2024 13:56:03 +0200 Subject: [PATCH 6/6] update readme's with information about CL node TOML config (#14028) --- integration-tests/README.md | 5 +-- integration-tests/testconfig/README.md | 9 +++-- integration-tests/testconfig/default.toml | 41 +++++++++++++++++++++-- 3 files changed, 48 insertions(+), 7 deletions(-) diff --git 
a/integration-tests/README.md b/integration-tests/README.md
index fcfefe97a73..180021efeef 100644
--- a/integration-tests/README.md
+++ b/integration-tests/README.md
@@ -27,6 +27,8 @@ version = "your tag"

The `./testconfig/overrides.toml` file **should never be committed** and has been added to the [.gitignore](../.gitignore) file as it can often contain secrets like private keys and RPC URLs.

+For more information on how to configure the tests, see the [testconfig README](./testconfig/README.md).
+
## Build

If you'd like to run the tests on a local build of Chainlink, you can point to your own docker image, or build a fresh one with `make`.
@@ -76,8 +78,7 @@ make test_soak_ocr_reorg_2

Run reorg/automation_reorg_test.go with reorg settings:

-1. Use Simulated Geth network and put GethReorgConfig in overrides.toml
-
+1. Use Simulated Geth network and put GethReorgConfig in overrides.toml

```toml
[Network]
diff --git a/integration-tests/testconfig/README.md b/integration-tests/testconfig/README.md
index 7ff6cedd24c..878b36bc756 100644
--- a/integration-tests/testconfig/README.md
+++ b/integration-tests/testconfig/README.md
@@ -137,7 +137,9 @@ DefaultTransactionQueueDepth = 0
"""
```

Note that you cannot override individual values in BaseConfigTOML. You must provide the entire configuration.
+This corresponds to the [Config struct](../../core/services/chainlink/config.go) in the Chainlink Node, excluding all chain-specific configuration. The chain-specific part is built from selected_networks and either the Chainlink Node's defaults for each network, `ChainConfigTOMLByChainID` (if an entry with a matching chain id is defined), or `CommonChainConfigTOML` (if no such entry is defined).
+If BaseConfigTOML is empty, the default base config provided by the Chainlink Node is used. If tracing is enabled, a unique id will be generated and shared between all Chainlink nodes in the same test.

To set base config for EVM chains use `NodeConfig.CommonChainConfigTOML`. Example:
```toml
@@ -153,12 +155,12 @@ FeeCapDefault = '200 gwei'
"""
```

-This is the default configuration used for all EVM chains unless ChainConfigTOMLByChainID is specified.
+This is the default configuration used for all EVM chains unless `ChainConfigTOMLByChainID` is specified. Do remember that if either `ChainConfigTOMLByChainID` or `CommonChainConfigTOML` is defined, it will override any defaults that Chainlink Node might have for the given network. Part of the configuration that defines blockchain node URLs is always dynamically generated based on the EVMNetwork configuration.

To set custom per-chain config use `[NodeConfig.ChainConfigTOMLByChainID]`. Example:
```toml
[NodeConfig.ChainConfigTOMLByChainID]
-# applicable for arbitrum-goerli chain
+# applicable only to arbitrum-goerli chain
421613 = """
[GasEstimator]
PriceMax = '400 gwei'
@@ -170,7 +172,8 @@ BumpMin = '100 gwei'
"""
```

-For more examples see `example.toml` in product TOML configs like `testconfig/automation/example.toml`.
+For more examples see `example.toml` in product TOML configs like `testconfig/automation/example.toml`. The same precedence rules apply: any defined `ChainConfigTOMLByChainID` or `CommonChainConfigTOML` entry overrides the Chainlink Node's defaults for the given network, and blockchain node URLs are always generated dynamically from the EVMNetwork configuration.
+Currently, all networks are treated as EVM networks. There's no way to provide Solana, Starknet, Cosmos or Aptos configuration yet.
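+As an illustration only (the keys below already appear elsewhere in this document; the values are placeholders, not recommendations), the two levels can be combined in a single override file: chains without an entry under `ChainConfigTOMLByChainID` fall back to `CommonChainConfigTOML`, while chains with an entry use it instead.
+
+```toml
+[NodeConfig]
+# fallback applied to every selected EVM chain without a chain-specific entry below
+CommonChainConfigTOML = """
+AutoCreateKey = true
+
+[GasEstimator]
+PriceMax = '200 gwei'
+BumpMin = '100 gwei'
+"""
+
+[NodeConfig.ChainConfigTOMLByChainID]
+# chain 1337 uses this config instead of CommonChainConfigTOML and the node's own defaults
+1337 = """
+AutoCreateKey = true
+FinalityDepth = 1
+
+[GasEstimator]
+PriceMax = '400 gwei'
+"""
+```
+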
### Setting env vars for Chainlink Node
diff --git a/integration-tests/testconfig/default.toml b/integration-tests/testconfig/default.toml
index e4e216cf4a8..0d0bb14da95 100644
--- a/integration-tests/testconfig/default.toml
+++ b/integration-tests/testconfig/default.toml
@@ -1,37 +1,72 @@
[Logging]
+# set to true to flush logs to selected target regardless of test result; otherwise logs are only flushed if test failed
test_log_collect = false

[Logging.LogStream]
+# supported targets: file, loki, in-memory. if empty no logs will be persisted
log_targets = ["file"]
+# context timeout for starting log producer and also time-frame for requesting logs
log_producer_timeout = "10s"
+# number of retries before log producer gives up and stops listening to logs
log_producer_retry_limit = 10

[ChainlinkImage]
+# postgres version to use
postgres_version = "15.6"
+# chainlink image to use
image = "public.ecr.aws/chainlink/chainlink"
+# chainlink image tag to use
version = "2.12.0"

[Common]
+# chainlink node funding in native token
chainlink_node_funding = 0.5

[Network]
+# slice of networks to use; at least one network must be selected; each selected network must either be already defined in the CTF as a known network, or be defined in
+# TOML test files as a new network
selected_networks = ["simulated"]

[PrivateEthereumNetwork]
+# ethereum version to use; eth1 or eth2 (post-merge)
ethereum_version = "eth1"
+# execution layer to use; geth, besu, nethermind, erigon or reth
execution_layer = "geth"

[PrivateEthereumNetwork.EthereumChainConfig]
+# duration of single slot, lower => faster block production, must be >= 3
seconds_per_slot = 3
+# number of slots in epoch, lower => faster epoch finalisation, must be >= 2
slots_per_epoch = 2
+# extra genesis delay, no need to modify, but it should be after all validators/beacon chain starts
genesis_delay = 15
+# number of validators in the network
validator_count = 4
+# chain id to use
chain_id = 1337
+# slice of addresses that will be funded with native token in genesis
addresses_to_fund = ["0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266"]

+# map of hard fork epochs for each network; key is fork name, value is hard fork epoch
+# keep in mind that this depends on the specific version of eth2 client you are using
+# this configuration is fault-tolerant and incorrect forks will be ignored
[PrivateEthereumNetwork.EthereumChainConfig.HardForkEpochs]
Deneb = 500

+# General config of the Chainlink node corresponding to core/services/chainlink/config.go (Config struct) that excludes
+# all chain-specific configuration, which is built based on selected_networks and either Chainlink Node's defaults for
+# each network, or ChainConfigTOMLByChainID (if an entry with matching chain id is defined) or CommonChainConfigTOML (if no
+# entry with matching chain id is defined).
+#
+# Please remember that if either ChainConfigTOMLByChainID or CommonChainConfigTOML is defined, it will override any defaults
+# that Chainlink Node might have for the given network. Part of the configuration that defines blockchain node URLs is always
+# dynamically generated based on the EVMNetwork configuration.
+#
+# Last, but not least, currently all selected networks are treated as EVM networks. There's no way to provide Solana, Starknet,
+# Cosmos or Aptos configuration yet.
+#
+# If BaseConfigTOML is empty, then the default base config provided by the Chainlink Node is used.
+# Also, if tracing is enabled, a unique id will be generated and shared between all Chainlink nodes in the same test.
[NodeConfig]
BaseConfigTOML = """
[Feature]
@@ -78,12 +113,14 @@ DeltaDial = '500ms'
DeltaReconcile = '5s'
"""

-# override config toml related to EVMNode configs for chainlink nodes; applicable to all EVM node configs in chainlink toml
+# Overrides default config TOML related to EVMNode configs for chainlink nodes; applicable to all EVM node configs in chainlink TOML.
+# Do not use it if you want the default values to be used. Passing blockchain node URLs here will have no effect.
CommonChainConfigTOML = """
"""

[NodeConfig.ChainConfigTOMLByChainID]
-# applicable for simulated chain
+# Chain-specific EVMNode config TOML for chainlink nodes; applicable to all EVM node configs in chainlink TOML. It takes precedence
+# over CommonChainConfigTOML and Chainlink Node's defaults. Passing blockchain node URLs here will have no effect.
1337 = """
AutoCreateKey = true
FinalityDepth = 1