From 93953229fdd6006129c55db1b32bb6becee0af08 Mon Sep 17 00:00:00 2001 From: Alonso Rodriguez Date: Thu, 29 Aug 2024 16:26:25 +0200 Subject: [PATCH] Feature/multi net support (#670) * multi autoclaim support + api * Check reorg fix * scripts + mt extra test * e2e test * AutoClaim Compressor working with multi rollup * refactor + unit tests + compressor e2e using multi-single-bridge * linter * fix e2e * fix unit tests * Fix db * fix tests * fix e2e * fix benchmark test * remove TODO * comment --- Makefile | 32 +- bridgectrl/merkletree_test.go | 100 +++++ claimtxman/claimtxman.go | 90 +++-- claimtxman/claimtxman_test.go | 26 +- claimtxman/interfaces.go | 4 +- claimtxman/mocks/bridge_service_interface.go | 79 ++-- claimtxman/mocks/etherman_i.go | 160 ++++++++ .../mocks/storage_compressed_interface.go | 29 +- claimtxman/mocks/storage_interface.go | 58 +-- claimtxman/monitor_compressed_txs.go | 9 +- claimtxman/monitortxs.go | 25 +- claimtxman/types/monitoredtx.go | 4 +- cmd/run.go | 88 +++-- config/config.debug.toml | 1 - config/config.local.toml | 1 - config/network.go | 5 - db/pgstorage/migrations/0012.sql | 12 + db/pgstorage/migrations/0012_test.go | 144 +++++++ db/pgstorage/pgstorage.go | 74 ++-- db/pgstorage/pgstorage_test.go | 30 -- db/storage_test.go | 17 +- docker-compose.yml | 41 +- etherman/etherman.go | 224 ++++------- etherman/simulated.go | 9 +- etherman/types.go | 2 + server/interfaces.go | 2 +- server/service.go | 52 +-- synchronizer/interfaces.go | 4 +- synchronizer/mock_etherman.go | 78 +--- synchronizer/mock_storage.go | 57 --- synchronizer/synchronizer.go | 138 ++++--- synchronizer/synchronizer_test.go | 350 +++++++----------- test/benchmark/api_test.go | 1 + test/e2e/bridge_test.go | 86 ++--- test/e2e/compress_test.go | 19 +- test/e2e/l2_l2_test.go | 5 +- test/e2e/multiplerollups_test.go | 4 +- test/operations/interfaces.go | 4 +- test/operations/manager.go | 89 ++--- test/operations/mockserver.go | 2 +- test/scripts/claim/main.go | 2 +- test/scripts/readLatestLER/main.go | 36 ++ utils/client.go | 16 + 43 files changed, 1234 insertions(+), 975 deletions(-) create mode 100644 claimtxman/mocks/etherman_i.go create mode 100644 db/pgstorage/migrations/0012.sql create mode 100644 db/pgstorage/migrations/0012_test.go create mode 100644 test/scripts/readLatestLER/main.go diff --git a/Makefile b/Makefile index af2a9ce6..ef0966b0 100644 --- a/Makefile +++ b/Makefile @@ -23,6 +23,7 @@ DOCKER_COMPOSE_BRIDGE := zkevm-bridge-service DOCKER_COMPOSE_BRIDGE_V1TOV2 := zkevm-bridge-service-v1tov2 DOCKER_COMPOSE_BRIDGE_1 := zkevm-bridge-service-1 DOCKER_COMPOSE_BRIDGE_2 := zkevm-bridge-service-2 +DOCKER_COMPOSE_BRIDGE_3 := zkevm-bridge-service-3 RUN_STATE_DB := $(DOCKER_COMPOSE) up -d $(DOCKER_COMPOSE_STATE_DB) RUN_POOL_DB := $(DOCKER_COMPOSE) up -d $(DOCKER_COMPOSE_POOL_DB) @@ -47,6 +48,7 @@ RUN_ZKPROVER_V1TOV2 := $(DOCKER_COMPOSE) up -d $(DOCKER_COMPOSE_ZKPROVER_V1TOV2) RUN_BRIDGE := $(DOCKER_COMPOSE) up -d $(DOCKER_COMPOSE_BRIDGE) RUN_BRIDGE_1 := $(DOCKER_COMPOSE) up -d $(DOCKER_COMPOSE_BRIDGE_1) RUN_BRIDGE_2 := $(DOCKER_COMPOSE) up -d $(DOCKER_COMPOSE_BRIDGE_2) +RUN_BRIDGE_3 := $(DOCKER_COMPOSE) up -d $(DOCKER_COMPOSE_BRIDGE_3) RUN_BRIDGE_V1TOV2 := $(DOCKER_COMPOSE) up -d $(DOCKER_COMPOSE_BRIDGE_V1TOV2) STOP_NODE_DB := $(DOCKER_COMPOSE) stop $(DOCKER_COMPOSE_NODE_DB) && $(DOCKER_COMPOSE) rm -f $(DOCKER_COMPOSE_NODE_DB) @@ -67,6 +69,7 @@ STOP_ZKPROVER_V1TOV2 := $(DOCKER_COMPOSE) stop $(DOCKER_COMPOSE_ZKPROVER_V1TOV2) STOP_BRIDGE := $(DOCKER_COMPOSE) stop $(DOCKER_COMPOSE_BRIDGE) && 
$(DOCKER_COMPOSE) rm -f $(DOCKER_COMPOSE_BRIDGE) STOP_BRIDGE_1 := $(DOCKER_COMPOSE) stop $(DOCKER_COMPOSE_BRIDGE_1) && $(DOCKER_COMPOSE) rm -f $(DOCKER_COMPOSE_BRIDGE_1) STOP_BRIDGE_2 := $(DOCKER_COMPOSE) stop $(DOCKER_COMPOSE_BRIDGE_2) && $(DOCKER_COMPOSE) rm -f $(DOCKER_COMPOSE_BRIDGE_2) +STOP_BRIDGE_3 := $(DOCKER_COMPOSE) stop $(DOCKER_COMPOSE_BRIDGE_3) && $(DOCKER_COMPOSE) rm -f $(DOCKER_COMPOSE_BRIDGE_3) STOP_BRIDGE_V1TOV2 := $(DOCKER_COMPOSE) stop $(DOCKER_COMPOSE_BRIDGE_V1TOV2) && $(DOCKER_COMPOSE) rm -f $(DOCKER_COMPOSE_BRIDGE_V1TOV2) STOP := $(DOCKER_COMPOSE) down --remove-orphans @@ -260,6 +263,14 @@ run-bridge-2: ## Runs the bridge service stop-bridge-2: ## Stops the bridge service $(STOP_BRIDGE_2) +.PHONY: run-bridge-3 +run-bridge-3: ## Runs the bridge service + $(RUN_BRIDGE_3) + +.PHONY: stop-bridge-3 +stop-bridge-3: ## Stops the bridge service + $(STOP_BRIDGE_3) + .PHONY: run-bridge-v1tov2 run-bridge-v1tov2: ## Runs the bridge service $(RUN_BRIDGE_V1TOV2) @@ -323,6 +334,21 @@ run-multi: ## runs all services $(RUN_BRIDGE_1) $(RUN_BRIDGE_2) +.PHONY: run-multi-single-bridge +run-multi-single-bridge: ## runs all services + $(RUN_DBS) + ${RUN_STATE_DB_2} + ${RUN_POOL_DB_2} + $(RUN_L1_NETWORK_MULTI_ROLLUP) + sleep 5 + $(RUN_ZKPROVER_1) + $(RUN_ZKPROVER_2) + sleep 3 + $(RUN_NODE_1) + $(RUN_NODE_2) + sleep 7 + $(RUN_BRIDGE_3) + .PHONY: run-bridge-dependencies run-bridge-dependencies: stop ## runs all services $(RUN_DBS) @@ -382,17 +408,17 @@ test-edge: build-docker stop run ## Runs all tests checking race conditions trap '$(STOP)' EXIT; MallocNanoZone=0 go test -v -failfast -race -p 1 -timeout 2400s ./test/e2e/... -count 1 -tags='edge' .PHONY: test-multiplerollups -test-multiplerollups: build-docker stop run-multi ## Runs all tests checking race conditions +test-multiplerollups: build-docker stop run-multi-single-bridge ## Runs all tests checking race conditions sleep 3 trap '$(STOP)' EXIT; MallocNanoZone=0 go test -v -failfast -race -p 1 -timeout 2400s ./test/e2e/... -count 1 -tags='multiplerollups' .PHONY: test-l2l2 -test-l2l2: build-docker stop run-multi ## Runs all tests checking race conditions +test-l2l2: build-docker stop run-multi-single-bridge ## Runs all tests checking race conditions sleep 3 trap '$(STOP)' EXIT; MallocNanoZone=0 go test -v -failfast -race -p 1 -timeout 2400s ./test/e2e/... -count 1 -tags='l2l2' .PHONY: test-e2ecompress -test-e2ecompress: build-docker stop run ## Runs all tests checking race conditions +test-e2ecompress: build-docker stop run-multi-single-bridge ## Runs all tests checking race conditions sleep 3 trap '$(STOP)' EXIT; MallocNanoZone=0 go test -v -failfast -race -p 1 -timeout 2400s ./test/e2e/... 
-count 1 -tags='e2ecompress' diff --git a/bridgectrl/merkletree_test.go b/bridgectrl/merkletree_test.go index 42fd12f7..30124bdd 100644 --- a/bridgectrl/merkletree_test.go +++ b/bridgectrl/merkletree_test.go @@ -434,6 +434,106 @@ func TestCheckMerkleProof(t *testing.T) { assert.Equal(t, expectedRoot, root) } +func TestCheckMerkleProof2(t *testing.T) { + expectedLeafHash := common.HexToHash("0x697a56b92100081c2637a9d162509380ab75ec163c69e2ce0ebe1c444977c5e0") + expectedRollup1Root := common.HexToHash("0x7c942bb17191a0c14d50dc079873bc7d84922be5c9a5307aa58b8b34d70cd67b") + expectedRollupsTreeRoot := common.HexToHash("0x8e00abaf690edf420ba30016e4573a805300e89d634714007ae98519294cc58d") + + var index uint + var height uint8 = 32 + amount, _ := big.NewInt(0).SetString("90000000000000000", 0) + deposit := ðerman.Deposit{ + OriginalNetwork: 0, + OriginalAddress: common.Address{}, + Amount: amount, + DestinationNetwork: 2, + DestinationAddress: common.HexToAddress("0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266"), + BlockNumber: 3673, + DepositCount: 0, + Metadata: []byte{}, + } + leafBytes := hashDeposit(deposit) + leafHash := common.BytesToHash(leafBytes[:]) + t.Log("leafHash: ", leafHash) + assert.Equal(t, expectedLeafHash, leafHash) + smtProof := [][KeyLen]byte{ + common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"), + common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5"), + common.HexToHash("0xb4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30"), + common.HexToHash("0x21ddb9a356815c3fac1026b6dec5df3124afbadb485c9ba5a3e3398a04b7ba85"), + common.HexToHash("0xe58769b32a1beaf1ea27375a44095a0d1fb664ce2dd358e7fcbfb78c26a19344"), + common.HexToHash("0x0eb01ebfc9ed27500cd4dfc979272d1f0913cc9f66540d7e8005811109e1cf2d"), + common.HexToHash("0x887c22bd8750d34016ac3c66b5ff102dacdd73f6b014e710b51e8022af9a1968"), + common.HexToHash("0xffd70157e48063fc33c97a050f7f640233bf646cc98d9524c6b92bcf3ab56f83"), + common.HexToHash("0x9867cc5f7f196b93bae1e27e6320742445d290f2263827498b54fec539f756af"), + common.HexToHash("0xcefad4e508c098b9a7e1d8feb19955fb02ba9675585078710969d3440f5054e0"), + common.HexToHash("0xf9dc3e7fe016e050eff260334f18a5d4fe391d82092319f5964f2e2eb7c1c3a5"), + common.HexToHash("0xf8b13a49e282f609c317a833fb8d976d11517c571d1221a265d25af778ecf892"), + common.HexToHash("0x3490c6ceeb450aecdc82e28293031d10c7d73bf85e57bf041a97360aa2c5d99c"), + common.HexToHash("0xc1df82d9c4b87413eae2ef048f94b4d3554cea73d92b0f7af96e0271c691e2bb"), + common.HexToHash("0x5c67add7c6caf302256adedf7ab114da0acfe870d449a3a489f781d659e8becc"), + common.HexToHash("0xda7bce9f4e8618b6bd2f4132ce798cdc7a60e7e1460a7299e3c6342a579626d2"), + common.HexToHash("0x2733e50f526ec2fa19a22b31e8ed50f23cd1fdf94c9154ed3a7609a2f1ff981f"), + common.HexToHash("0xe1d3b5c807b281e4683cc6d6315cf95b9ade8641defcb32372f1c126e398ef7a"), + common.HexToHash("0x5a2dce0a8a7f68bb74560f8f71837c2c2ebbcbf7fffb42ae1896f13f7c7479a0"), + common.HexToHash("0xb46a28b6f55540f89444f63de0378e3d121be09e06cc9ded1c20e65876d36aa0"), + common.HexToHash("0xc65e9645644786b620e2dd2ad648ddfcbf4a7e5b1a3a4ecfe7f64667a3f0b7e2"), + common.HexToHash("0xf4418588ed35a2458cffeb39b93d26f18d2ab13bdce6aee58e7b99359ec2dfd9"), + common.HexToHash("0x5a9c16dc00d6ef18b7933a6f8dc65ccb55667138776f7dea101070dc8796e377"), + common.HexToHash("0x4df84f40ae0c8229d0d6069e5c8f39a7c299677a09d367fc7b05e3bc380ee652"), + common.HexToHash("0xcdc72595f74c7b1043d0e1ffbab734648c838dfb0527d971b602bc216c9619ef"), + 
common.HexToHash("0x0abf5ac974a1ed57f4050aa510dd9c74f508277b39d7973bb2dfccc5eeb0618d"), + common.HexToHash("0xb8cd74046ff337f0a7bf2c8e03e10f642c1886798d71806ab1e888d9e5ee87d0"), + common.HexToHash("0x838c5655cb21c6cb83313b5a631175dff4963772cce9108188b34ac87c81c41e"), + common.HexToHash("0x662ee4dd2dd7b2bc707961b1e646c4047669dcb6584f0d8d770daf5d7e7deb2e"), + common.HexToHash("0x388ab20e2573d171a88108e79d820e98f26c0b84aa8b2f4aa4968dbb818ea322"), + common.HexToHash("0x93237c50ba75ee485f4c22adf2f741400bdf8d6a9cc7df7ecae576221665d735"), + common.HexToHash("0x8448818bb4ae4562849e949e17ac16e0be16688e156b5cf15e098c627c0056a9"), + } + root := calculateRoot(leafHash, smtProof, index, height) + t.Log("root: ", root) + assert.Equal(t, expectedRollup1Root, root) + + leafHash2 := expectedRollup1Root + smtProof2 := [][KeyLen]byte{ + common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"), + common.HexToHash("0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5"), + common.HexToHash("0xb4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30"), + common.HexToHash("0x21ddb9a356815c3fac1026b6dec5df3124afbadb485c9ba5a3e3398a04b7ba85"), + common.HexToHash("0xe58769b32a1beaf1ea27375a44095a0d1fb664ce2dd358e7fcbfb78c26a19344"), + common.HexToHash("0x0eb01ebfc9ed27500cd4dfc979272d1f0913cc9f66540d7e8005811109e1cf2d"), + common.HexToHash("0x887c22bd8750d34016ac3c66b5ff102dacdd73f6b014e710b51e8022af9a1968"), + common.HexToHash("0xffd70157e48063fc33c97a050f7f640233bf646cc98d9524c6b92bcf3ab56f83"), + common.HexToHash("0x9867cc5f7f196b93bae1e27e6320742445d290f2263827498b54fec539f756af"), + common.HexToHash("0xcefad4e508c098b9a7e1d8feb19955fb02ba9675585078710969d3440f5054e0"), + common.HexToHash("0xf9dc3e7fe016e050eff260334f18a5d4fe391d82092319f5964f2e2eb7c1c3a5"), + common.HexToHash("0xf8b13a49e282f609c317a833fb8d976d11517c571d1221a265d25af778ecf892"), + common.HexToHash("0x3490c6ceeb450aecdc82e28293031d10c7d73bf85e57bf041a97360aa2c5d99c"), + common.HexToHash("0xc1df82d9c4b87413eae2ef048f94b4d3554cea73d92b0f7af96e0271c691e2bb"), + common.HexToHash("0x5c67add7c6caf302256adedf7ab114da0acfe870d449a3a489f781d659e8becc"), + common.HexToHash("0xda7bce9f4e8618b6bd2f4132ce798cdc7a60e7e1460a7299e3c6342a579626d2"), + common.HexToHash("0x2733e50f526ec2fa19a22b31e8ed50f23cd1fdf94c9154ed3a7609a2f1ff981f"), + common.HexToHash("0xe1d3b5c807b281e4683cc6d6315cf95b9ade8641defcb32372f1c126e398ef7a"), + common.HexToHash("0x5a2dce0a8a7f68bb74560f8f71837c2c2ebbcbf7fffb42ae1896f13f7c7479a0"), + common.HexToHash("0xb46a28b6f55540f89444f63de0378e3d121be09e06cc9ded1c20e65876d36aa0"), + common.HexToHash("0xc65e9645644786b620e2dd2ad648ddfcbf4a7e5b1a3a4ecfe7f64667a3f0b7e2"), + common.HexToHash("0xf4418588ed35a2458cffeb39b93d26f18d2ab13bdce6aee58e7b99359ec2dfd9"), + common.HexToHash("0x5a9c16dc00d6ef18b7933a6f8dc65ccb55667138776f7dea101070dc8796e377"), + common.HexToHash("0x4df84f40ae0c8229d0d6069e5c8f39a7c299677a09d367fc7b05e3bc380ee652"), + common.HexToHash("0xcdc72595f74c7b1043d0e1ffbab734648c838dfb0527d971b602bc216c9619ef"), + common.HexToHash("0x0abf5ac974a1ed57f4050aa510dd9c74f508277b39d7973bb2dfccc5eeb0618d"), + common.HexToHash("0xb8cd74046ff337f0a7bf2c8e03e10f642c1886798d71806ab1e888d9e5ee87d0"), + common.HexToHash("0x838c5655cb21c6cb83313b5a631175dff4963772cce9108188b34ac87c81c41e"), + common.HexToHash("0x662ee4dd2dd7b2bc707961b1e646c4047669dcb6584f0d8d770daf5d7e7deb2e"), + common.HexToHash("0x388ab20e2573d171a88108e79d820e98f26c0b84aa8b2f4aa4968dbb818ea322"), + 
common.HexToHash("0x93237c50ba75ee485f4c22adf2f741400bdf8d6a9cc7df7ecae576221665d735"), + common.HexToHash("0x8448818bb4ae4562849e949e17ac16e0be16688e156b5cf15e098c627c0056a9"), + } + root2 := calculateRoot(leafHash2, smtProof2, index, height) + t.Log("rollupsExitRoot: ", root2) + assert.Equal(t, expectedRollupsTreeRoot, root2) +} + func TestPerformanceComputeRoot(t *testing.T) { ctx := context.Background() dbCfg := pgstorage.NewConfigFromEnv() diff --git a/claimtxman/claimtxman.go b/claimtxman/claimtxman.go index 2c0959fa..5648090f 100644 --- a/claimtxman/claimtxman.go +++ b/claimtxman/claimtxman.go @@ -66,10 +66,10 @@ func NewClaimTxManager(ctx context.Context, cfg Config, chExitRootEvent chan *et var monitorTx ctmtypes.TxMonitorer if cfg.GroupingClaims.Enabled { log.Info("ClaimTxManager working in compressor mode to group claim txs") - monitorTx = NewMonitorCompressedTxs(ctx, storage.(StorageCompressedInterface), client, cfg, nonceCache, auth, etherMan, utils.NewTimeProviderSystemLocalTime(), cfg.GroupingClaims.GasOffset) + monitorTx = NewMonitorCompressedTxs(ctx, storage.(StorageCompressedInterface), client, cfg, nonceCache, auth, etherMan, utils.NewTimeProviderSystemLocalTime(), cfg.GroupingClaims.GasOffset, rollupID) } else { log.Info("ClaimTxManager working in regular mode to send claim txs individually") - monitorTx = NewMonitorTxs(ctx, storage.(StorageInterface), client, cfg, nonceCache, auth) + monitorTx = NewMonitorTxs(ctx, storage.(StorageInterface), client, cfg, nonceCache, rollupID, auth) } return &ClaimTxManager{ ctx: ctx, @@ -108,15 +108,15 @@ func (tm *ClaimTxManager) Start() { } case ger = <-tm.chExitRootEvent: if tm.synced { - log.Debug("UpdateDepositsStatus for ger: ", ger.GlobalExitRoot) + log.Debugf("RollupID: %d UpdateDepositsStatus for ger: %s", tm.rollupID, ger.GlobalExitRoot.String()) if tm.cfg.GroupingClaims.Enabled { - log.Debug("Ger value updated and ready to be processed...") + log.Debugf("rollupID: %d, Ger value updated and ready to be processed...", tm.rollupID) continue } go func() { err := tm.updateDepositsStatus(ger) if err != nil { - log.Errorf("failed to update deposits status: %v", err) + log.Errorf("rollupID: %d, failed to update deposits status: %v", tm.rollupID, err) } }() } else { @@ -124,11 +124,11 @@ func (tm *ClaimTxManager) Start() { } case <-compressorTicker.C: if tm.synced && tm.cfg.GroupingClaims.Enabled && ger.GlobalExitRoot != latestProcessedGer { - log.Info("Processing deposits for ger: ", ger.GlobalExitRoot) + log.Infof("RollupID: %d,Processing deposits for ger: %s", tm.rollupID, ger.GlobalExitRoot.String()) go func() { err := tm.updateDepositsStatus(ger) if err != nil { - log.Errorf("failed to update deposits status: %v", err) + log.Errorf("rollupID: %d, failed to update deposits status: %v", tm.rollupID, err) } }() latestProcessedGer = ger.GlobalExitRoot @@ -136,7 +136,7 @@ func (tm *ClaimTxManager) Start() { case <-ticker.C: err := tm.monitorTxs.MonitorTxs(tm.ctx) if err != nil { - log.Errorf("failed to monitor txs: %v", err) + log.Errorf("rollupID: %d, failed to monitor txs: %v", tm.rollupID, err) } } } @@ -149,60 +149,61 @@ func (tm *ClaimTxManager) updateDepositsStatus(ger *etherman.GlobalExitRoot) err } err = tm.processDepositStatus(ger, dbTx) if err != nil { - log.Errorf("error processing ger. Error: %v", err) + log.Errorf("rollupID: %d, error processing ger. Error: %v", tm.rollupID, err) rollbackErr := tm.storage.Rollback(tm.ctx, dbTx) if rollbackErr != nil { - log.Errorf("claimtxman error rolling back state. 
RollbackErr: %v, err: %s", rollbackErr, err.Error()) + log.Errorf("rollupID: %d, claimtxman error rolling back state. RollbackErr: %v, err: %s", tm.rollupID, rollbackErr, err.Error()) return rollbackErr } return err } err = tm.storage.Commit(tm.ctx, dbTx) if err != nil { - log.Errorf("AddClaimTx committing dbTx. Err: %v", err) + log.Errorf("rollupID: %d, AddClaimTx committing dbTx. Err: %v", tm.rollupID, err) rollbackErr := tm.storage.Rollback(tm.ctx, dbTx) if rollbackErr != nil { - log.Fatalf("claimtxman error rolling back state. RollbackErr: %s, err: %s", rollbackErr.Error(), err.Error()) + log.Errorf("rollupID: %d, claimtxman error rolling back state. RollbackErr: %s, err: %s", tm.rollupID, rollbackErr.Error(), err.Error()) + return rollbackErr } - log.Fatalf("AddClaimTx committing dbTx, err: %s", err.Error()) + return err } return nil } func (tm *ClaimTxManager) processDepositStatus(ger *etherman.GlobalExitRoot, dbTx pgx.Tx) error { if ger.BlockID != 0 { // L2 exit root is updated - log.Infof("Rollup exitroot %v is updated", ger.ExitRoots[1]) + log.Infof("RollupID: %d, Rollup exitroot %v is updated", tm.rollupID, ger.ExitRoots[1]) if err := tm.storage.UpdateL2DepositsStatus(tm.ctx, ger.ExitRoots[1][:], tm.rollupID, tm.l2NetworkID, dbTx); err != nil { - log.Errorf("error updating L2DepositsStatus. Error: %v", err) + log.Errorf("rollupID: %d, error updating L2DepositsStatus. Error: %v", tm.rollupID, err) return err } } else { // L1 exit root is updated in the trusted state - log.Infof("Mainnet exitroot %v is updated", ger.ExitRoots[0]) - deposits, err := tm.storage.UpdateL1DepositsStatus(tm.ctx, ger.ExitRoots[0][:], dbTx) + log.Infof("RollupID: %d, Mainnet exitroot %v is updated", tm.rollupID, ger.ExitRoots[0]) + deposits, err := tm.storage.UpdateL1DepositsStatus(tm.ctx, ger.ExitRoots[0][:], tm.l2NetworkID, dbTx) if err != nil { - log.Errorf("error getting and updating L1DepositsStatus. Error: %v", err) + log.Errorf("rollupID: %d, error getting and updating L1DepositsStatus. Error: %v", tm.rollupID, err) return err } for _, deposit := range deposits { if tm.l2NetworkID != deposit.DestinationNetwork { - log.Infof("Ignoring deposit: %d: dest_net: %d, we are:%d", deposit.DepositCount, deposit.DestinationNetwork, tm.l2NetworkID) + log.Infof("Ignoring deposit id: %d deposit count:%d dest_net: %d, we are:%d", deposit.Id, deposit.DepositCount, deposit.DestinationNetwork, tm.l2NetworkID) continue } - claimHash, err := tm.bridgeService.GetDepositStatus(tm.ctx, deposit.DepositCount, deposit.OriginalNetwork, deposit.DestinationNetwork) + claimHash, err := tm.bridgeService.GetDepositStatus(tm.ctx, deposit.DepositCount, deposit.NetworkID, deposit.DestinationNetwork) if err != nil { - log.Errorf("error getting deposit status for deposit %d. Error: %v", deposit.DepositCount, err) + log.Errorf("rollupID: %d, error getting deposit status for deposit id %d. 
Error: %v", tm.rollupID, deposit.Id, err) return err } if len(claimHash) > 0 || deposit.LeafType == LeafTypeMessage && !tm.isDepositMessageAllowed(deposit) { - log.Infof("Ignoring deposit: %d, leafType: %d, claimHash: %s, deposit.OriginalAddress: %s", deposit.DepositCount, deposit.LeafType, claimHash, deposit.OriginalAddress.String()) + log.Infof("RollupID: %d, Ignoring deposit Id: %d, leafType: %d, claimHash: %s, deposit.OriginalAddress: %s", tm.rollupID, deposit.Id, deposit.LeafType, claimHash, deposit.OriginalAddress.String()) continue } - log.Infof("create the claim tx for the deposit %d", deposit.DepositCount) + log.Infof("RollupID: %d, create the claim tx for the deposit count %d. Deposit Id: %d", tm.rollupID, deposit.DepositCount, deposit.Id) ger, proof, rollupProof, err := tm.bridgeService.GetClaimProofForCompressed(ger.GlobalExitRoot, deposit.DepositCount, deposit.NetworkID, dbTx) if err != nil { - log.Errorf("error getting Claim Proof for deposit %d. Error: %v", deposit.DepositCount, err) + log.Errorf("rollupID: %d, error getting Claim Proof for deposit Id %d. Error: %v", tm.rollupID, deposit.Id, err) return err } var ( @@ -221,11 +222,11 @@ func (tm *ClaimTxManager) processDepositStatus(ger *etherman.GlobalExitRoot, dbT }}, 1, 1, 1, tm.rollupID, tm.auth) if err != nil { - log.Errorf("error BuildSendClaim tx for deposit %d. Error: %v", deposit.DepositCount, err) + log.Errorf("rollupID: %d, error BuildSendClaim tx for deposit Id: %d. Error: %v", tm.rollupID, deposit.Id, err) return err } - if err = tm.addClaimTx(deposit.DepositCount, tm.auth.From, tx.To(), nil, tx.Data(), ger.GlobalExitRoot, dbTx); err != nil { - log.Errorf("error adding claim tx for deposit %d. Error: %v", deposit.DepositCount, err) + if err = tm.addClaimTx(deposit.Id, tm.auth.From, tx.To(), nil, tx.Data(), ger.GlobalExitRoot, dbTx); err != nil { + log.Errorf("rollupID: %d, error adding claim tx for deposit Id: %d Error: %v", tm.rollupID, deposit.Id, err) return err } } @@ -236,15 +237,15 @@ func (tm *ClaimTxManager) processDepositStatus(ger *etherman.GlobalExitRoot, dbT func (tm *ClaimTxManager) isDepositMessageAllowed(deposit *etherman.Deposit) bool { for _, addr := range tm.cfg.AuthorizedClaimMessageAddresses { if deposit.OriginalAddress == addr { - log.Infof("MessageBridge from authorized account detected: %+v, account: %s", deposit, addr.String()) + log.Infof("RollupID: %d, MessageBridge from authorized account detected: %+v, account: %s", tm.rollupID, deposit, addr.String()) return true } } - log.Infof("MessageBridge Not authorized. DepositCount: %d", deposit.DepositCount) + log.Infof("RollupID: %d, MessageBridge Not authorized. DepositCount: %d. DepositID: %d", tm.rollupID, deposit.DepositCount, deposit.Id) return false } -func (tm *ClaimTxManager) addClaimTx(depositCount uint, from common.Address, to *common.Address, value *big.Int, data []byte, ger common.Hash, dbTx pgx.Tx) error { +func (tm *ClaimTxManager) addClaimTx(depositID uint64, from common.Address, to *common.Address, value *big.Int, data []byte, ger common.Hash, dbTx pgx.Tx) error { // get gas tx := ethereum.CallMsg{ From: from, @@ -254,25 +255,42 @@ func (tm *ClaimTxManager) addClaimTx(depositCount uint, from common.Address, to } gas, err := tm.l2Node.EstimateGas(tm.ctx, tx) for i := 1; err != nil && err.Error() != runtime.ErrExecutionReverted.Error() && i < tm.cfg.RetryNumber; i++ { - log.Warnf("error while doing gas estimation. Retrying... 
Error: %v, Data: %s", err, common.Bytes2Hex(data)) + log.Warnf("rollupID: %d, error while doing gas estimation. Retrying... Error: %v, Data: %s", tm.rollupID, err, common.Bytes2Hex(data)) time.Sleep(tm.cfg.RetryInterval.Duration) gas, err = tm.l2Node.EstimateGas(tm.ctx, tx) } if err != nil { - log.Errorf("failed to estimate gas. Ignoring tx... Error: %v, data: %s", err, common.Bytes2Hex(data)) + var b string + block, err2 := tm.l2Node.Client.BlockByNumber(tm.ctx, nil) + if err2 != nil { + log.Error("error getting blockNumber. Error: ", err2) + b = "latest" + } else { + b = fmt.Sprintf("%x", block.Number()) + } + log.Warnf(`Use the next command to debug it manually. + curl --location --request POST 'http://localhost:8545' \ + --header 'Content-Type: application/json' \ + --data-raw '{ + "jsonrpc": "2.0", + "method": "eth_call", + "params": [{"from": "%s","to":"%s","data":"0x%s"},"0x%s"], + "id": 1 + }'`, from, to, common.Bytes2Hex(data), b) + log.Errorf("rollupID: %d, failed to estimate gas. Ignoring tx... Error: %v, data: %s, GER: %s", tm.rollupID, err, common.Bytes2Hex(data), ger.String()) return nil } // get next nonce nonce, err := tm.nonceCache.GetNextNonce(from) if err != nil { - err := fmt.Errorf("failed to get current nonce: %v", err) + err := fmt.Errorf("rollupID: %d, failed to get current nonce: %v", tm.rollupID, err) log.Errorf("error getting next nonce. Error: %s", err.Error()) return err } // create monitored tx mTx := ctmtypes.MonitoredTx{ - DepositID: depositCount, From: from, To: to, + DepositID: depositID, From: from, To: to, Nonce: nonce, Value: value, Data: data, Gas: gas, Status: ctmtypes.MonitoredTxStatusCreated, GlobalExitRoot: ger, @@ -281,7 +299,7 @@ func (tm *ClaimTxManager) addClaimTx(depositCount uint, from common.Address, to // add to storage err = tm.storage.AddClaimTx(tm.ctx, mTx, dbTx) if err != nil { - err := fmt.Errorf("failed to add tx to get monitored: %v", err) + err := fmt.Errorf("rollupID: %d, failed to add tx to get monitored: %v", tm.rollupID, err) log.Errorf("error adding claim tx to db. 
Error: %s", err.Error()) return err } @@ -293,7 +311,7 @@ func (tm *ClaimTxManager) addClaimTx(depositCount uint, from common.Address, to // accordingly to the current information stored and the current // state of the blockchain func (tm *ClaimTxManager) ReviewMonitoredTx(ctx context.Context, mTx *ctmtypes.MonitoredTx, reviewNonce bool) error { - mTxLog := log.WithFields("monitoredTx", mTx.DepositID) + mTxLog := log.WithFields("monitoredTx", mTx.DepositID, "rollupID", tm.rollupID) mTxLog.Debug("reviewing") // get gas tx := ethereum.CallMsg{ diff --git a/claimtxman/claimtxman_test.go b/claimtxman/claimtxman_test.go index c6dfc194..dd150f0f 100644 --- a/claimtxman/claimtxman_test.go +++ b/claimtxman/claimtxman_test.go @@ -27,7 +27,7 @@ func TestMonitoredTxStorage(t *testing.T) { tx, err := pg.BeginDBTransaction(ctx) require.NoError(t, err) - deposit := ðerman.Deposit{ + deposit1 := ðerman.Deposit{ NetworkID: 0, OriginalNetwork: 0, OriginalAddress: common.HexToAddress("0x6B175474E89094C44Da98b954EedeAC495271d0F"), @@ -38,7 +38,21 @@ func TestMonitoredTxStorage(t *testing.T) { DepositCount: 1, Metadata: common.FromHex("0x0"), } - _, err = pg.AddDeposit(ctx, deposit, tx) + _, err = pg.AddDeposit(ctx, deposit1, tx) + require.NoError(t, err) + + deposit2 := ðerman.Deposit{ + NetworkID: 0, + OriginalNetwork: 0, + OriginalAddress: common.HexToAddress("0x6B175474E89094C44Da98b954EedeAC495271d0F"), + Amount: big.NewInt(1000000), + DestinationNetwork: 1, + DestinationAddress: common.HexToAddress("0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266"), + BlockNumber: 1, + DepositCount: 2, + Metadata: common.FromHex("0x0"), + } + _, err = pg.AddDeposit(ctx, deposit2, tx) require.NoError(t, err) toAdr := common.HexToAddress("0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266") @@ -70,7 +84,7 @@ func TestMonitoredTxStorage(t *testing.T) { DepositID: 2, From: common.HexToAddress("0x6B175474E89094C44Da98b954EedeAC495271d0F"), To: &toAdr, - Nonce: 1, + Nonce: 2, Value: big.NewInt(1000000), Data: common.FromHex("0x0"), Gas: 1000000, @@ -80,11 +94,11 @@ func TestMonitoredTxStorage(t *testing.T) { err = pg.AddClaimTx(ctx, mTx, tx) require.NoError(t, err) - mTxs, err := pg.GetClaimTxsByStatus(ctx, []ctmtypes.MonitoredTxStatus{ctmtypes.MonitoredTxStatusCreated}, tx) + mTxs, err := pg.GetClaimTxsByStatus(ctx, []ctmtypes.MonitoredTxStatus{ctmtypes.MonitoredTxStatusCreated}, 1, tx) require.NoError(t, err) require.Len(t, mTxs, 1) - mTxs, err = pg.GetClaimTxsByStatus(ctx, []ctmtypes.MonitoredTxStatus{ctmtypes.MonitoredTxStatusCreated, ctmtypes.MonitoredTxStatusConfirmed}, tx) + mTxs, err = pg.GetClaimTxsByStatus(ctx, []ctmtypes.MonitoredTxStatus{ctmtypes.MonitoredTxStatusCreated, ctmtypes.MonitoredTxStatusConfirmed}, 1, tx) require.NoError(t, err) require.Len(t, mTxs, 2) @@ -174,7 +188,7 @@ func TestUpdateDepositStatus(t *testing.T) { l2Root1 := common.FromHex("0xda7bce9f4e8618b6bd2f4132ce798cdc7a60e7e1460a7299e3c6342a579626d2") require.NoError(t, pg.SetRoot(ctx, l2Root1, depositID, deposit.NetworkID, nil)) - deposits, err := pg.UpdateL1DepositsStatus(ctx, l1Root, nil) + deposits, err := pg.UpdateL1DepositsStatus(ctx, l1Root, deposit.DestinationNetwork, nil) require.NoError(t, err) require.Len(t, deposits, 1) require.True(t, deposits[0].ReadyForClaim) diff --git a/claimtxman/interfaces.go b/claimtxman/interfaces.go index f63507b0..c72c5afa 100644 --- a/claimtxman/interfaces.go +++ b/claimtxman/interfaces.go @@ -12,11 +12,11 @@ import ( type StorageInterface interface { AddBlock(ctx context.Context, block *etherman.Block, dbTx pgx.Tx) 
(uint64, error) - UpdateL1DepositsStatus(ctx context.Context, exitRoot []byte, dbTx pgx.Tx) ([]*etherman.Deposit, error) + UpdateL1DepositsStatus(ctx context.Context, exitRoot []byte, destinationNetwork uint, dbTx pgx.Tx) ([]*etherman.Deposit, error) UpdateL2DepositsStatus(ctx context.Context, exitRoot []byte, rollupID, networkID uint, dbTx pgx.Tx) error AddClaimTx(ctx context.Context, mTx types.MonitoredTx, dbTx pgx.Tx) error UpdateClaimTx(ctx context.Context, mTx types.MonitoredTx, dbTx pgx.Tx) error - GetClaimTxsByStatus(ctx context.Context, statuses []types.MonitoredTxStatus, dbTx pgx.Tx) ([]types.MonitoredTx, error) + GetClaimTxsByStatus(ctx context.Context, statuses []types.MonitoredTxStatus, rollupID uint, dbTx pgx.Tx) ([]types.MonitoredTx, error) // atomic Rollback(ctx context.Context, dbTx pgx.Tx) error BeginDBTransaction(ctx context.Context) (pgx.Tx, error) diff --git a/claimtxman/mocks/bridge_service_interface.go b/claimtxman/mocks/bridge_service_interface.go index eb131b87..2ddd355d 100644 --- a/claimtxman/mocks/bridge_service_interface.go +++ b/claimtxman/mocks/bridge_service_interface.go @@ -5,7 +5,10 @@ package mock_txcompressor import ( context "context" + common "github.com/ethereum/go-ethereum/common" + etherman "github.com/0xPolygonHermez/zkevm-bridge-service/etherman" + mock "github.com/stretchr/testify/mock" pgx "github.com/jackc/pgx/v4" @@ -24,47 +27,47 @@ func (_m *bridgeServiceInterface) EXPECT() *bridgeServiceInterface_Expecter { return &bridgeServiceInterface_Expecter{mock: &_m.Mock} } -// GetClaimProof provides a mock function with given fields: depositCnt, networkID, dbTx -func (_m *bridgeServiceInterface) GetClaimProof(depositCnt uint, networkID uint, dbTx pgx.Tx) (*etherman.GlobalExitRoot, [][32]byte, [][32]byte, error) { - ret := _m.Called(depositCnt, networkID, dbTx) +// GetClaimProofForCompressed provides a mock function with given fields: ger, depositCnt, networkID, dbTx +func (_m *bridgeServiceInterface) GetClaimProofForCompressed(ger common.Hash, depositCnt uint, networkID uint, dbTx pgx.Tx) (*etherman.GlobalExitRoot, [][32]byte, [][32]byte, error) { + ret := _m.Called(ger, depositCnt, networkID, dbTx) if len(ret) == 0 { - panic("no return value specified for GetClaimProof") + panic("no return value specified for GetClaimProofForCompressed") } var r0 *etherman.GlobalExitRoot var r1 [][32]byte var r2 [][32]byte var r3 error - if rf, ok := ret.Get(0).(func(uint, uint, pgx.Tx) (*etherman.GlobalExitRoot, [][32]byte, [][32]byte, error)); ok { - return rf(depositCnt, networkID, dbTx) + if rf, ok := ret.Get(0).(func(common.Hash, uint, uint, pgx.Tx) (*etherman.GlobalExitRoot, [][32]byte, [][32]byte, error)); ok { + return rf(ger, depositCnt, networkID, dbTx) } - if rf, ok := ret.Get(0).(func(uint, uint, pgx.Tx) *etherman.GlobalExitRoot); ok { - r0 = rf(depositCnt, networkID, dbTx) + if rf, ok := ret.Get(0).(func(common.Hash, uint, uint, pgx.Tx) *etherman.GlobalExitRoot); ok { + r0 = rf(ger, depositCnt, networkID, dbTx) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*etherman.GlobalExitRoot) } } - if rf, ok := ret.Get(1).(func(uint, uint, pgx.Tx) [][32]byte); ok { - r1 = rf(depositCnt, networkID, dbTx) + if rf, ok := ret.Get(1).(func(common.Hash, uint, uint, pgx.Tx) [][32]byte); ok { + r1 = rf(ger, depositCnt, networkID, dbTx) } else { if ret.Get(1) != nil { r1 = ret.Get(1).([][32]byte) } } - if rf, ok := ret.Get(2).(func(uint, uint, pgx.Tx) [][32]byte); ok { - r2 = rf(depositCnt, networkID, dbTx) + if rf, ok := ret.Get(2).(func(common.Hash, uint, uint, pgx.Tx) 
[][32]byte); ok { + r2 = rf(ger, depositCnt, networkID, dbTx) } else { if ret.Get(2) != nil { r2 = ret.Get(2).([][32]byte) } } - if rf, ok := ret.Get(3).(func(uint, uint, pgx.Tx) error); ok { - r3 = rf(depositCnt, networkID, dbTx) + if rf, ok := ret.Get(3).(func(common.Hash, uint, uint, pgx.Tx) error); ok { + r3 = rf(ger, depositCnt, networkID, dbTx) } else { r3 = ret.Error(3) } @@ -72,39 +75,40 @@ func (_m *bridgeServiceInterface) GetClaimProof(depositCnt uint, networkID uint, return r0, r1, r2, r3 } -// bridgeServiceInterface_GetClaimProof_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetClaimProof' -type bridgeServiceInterface_GetClaimProof_Call struct { +// bridgeServiceInterface_GetClaimProofForCompressed_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetClaimProofForCompressed' +type bridgeServiceInterface_GetClaimProofForCompressed_Call struct { *mock.Call } -// GetClaimProof is a helper method to define mock.On call +// GetClaimProofForCompressed is a helper method to define mock.On call +// - ger common.Hash // - depositCnt uint // - networkID uint // - dbTx pgx.Tx -func (_e *bridgeServiceInterface_Expecter) GetClaimProof(depositCnt interface{}, networkID interface{}, dbTx interface{}) *bridgeServiceInterface_GetClaimProof_Call { - return &bridgeServiceInterface_GetClaimProof_Call{Call: _e.mock.On("GetClaimProof", depositCnt, networkID, dbTx)} +func (_e *bridgeServiceInterface_Expecter) GetClaimProofForCompressed(ger interface{}, depositCnt interface{}, networkID interface{}, dbTx interface{}) *bridgeServiceInterface_GetClaimProofForCompressed_Call { + return &bridgeServiceInterface_GetClaimProofForCompressed_Call{Call: _e.mock.On("GetClaimProofForCompressed", ger, depositCnt, networkID, dbTx)} } -func (_c *bridgeServiceInterface_GetClaimProof_Call) Run(run func(depositCnt uint, networkID uint, dbTx pgx.Tx)) *bridgeServiceInterface_GetClaimProof_Call { +func (_c *bridgeServiceInterface_GetClaimProofForCompressed_Call) Run(run func(ger common.Hash, depositCnt uint, networkID uint, dbTx pgx.Tx)) *bridgeServiceInterface_GetClaimProofForCompressed_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(uint), args[1].(uint), args[2].(pgx.Tx)) + run(args[0].(common.Hash), args[1].(uint), args[2].(uint), args[3].(pgx.Tx)) }) return _c } -func (_c *bridgeServiceInterface_GetClaimProof_Call) Return(_a0 *etherman.GlobalExitRoot, _a1 [][32]byte, _a2 [][32]byte, _a3 error) *bridgeServiceInterface_GetClaimProof_Call { +func (_c *bridgeServiceInterface_GetClaimProofForCompressed_Call) Return(_a0 *etherman.GlobalExitRoot, _a1 [][32]byte, _a2 [][32]byte, _a3 error) *bridgeServiceInterface_GetClaimProofForCompressed_Call { _c.Call.Return(_a0, _a1, _a2, _a3) return _c } -func (_c *bridgeServiceInterface_GetClaimProof_Call) RunAndReturn(run func(uint, uint, pgx.Tx) (*etherman.GlobalExitRoot, [][32]byte, [][32]byte, error)) *bridgeServiceInterface_GetClaimProof_Call { +func (_c *bridgeServiceInterface_GetClaimProofForCompressed_Call) RunAndReturn(run func(common.Hash, uint, uint, pgx.Tx) (*etherman.GlobalExitRoot, [][32]byte, [][32]byte, error)) *bridgeServiceInterface_GetClaimProofForCompressed_Call { _c.Call.Return(run) return _c } -// GetDepositStatus provides a mock function with given fields: ctx, depositCount, destNetworkID -func (_m *bridgeServiceInterface) GetDepositStatus(ctx context.Context, depositCount uint, destNetworkID uint) (string, error) { - ret := _m.Called(ctx, depositCount, 
destNetworkID) +// GetDepositStatus provides a mock function with given fields: ctx, depositCount, networkID, destNetworkID +func (_m *bridgeServiceInterface) GetDepositStatus(ctx context.Context, depositCount uint, networkID uint, destNetworkID uint) (string, error) { + ret := _m.Called(ctx, depositCount, networkID, destNetworkID) if len(ret) == 0 { panic("no return value specified for GetDepositStatus") @@ -112,17 +116,17 @@ func (_m *bridgeServiceInterface) GetDepositStatus(ctx context.Context, depositC var r0 string var r1 error - if rf, ok := ret.Get(0).(func(context.Context, uint, uint) (string, error)); ok { - return rf(ctx, depositCount, destNetworkID) + if rf, ok := ret.Get(0).(func(context.Context, uint, uint, uint) (string, error)); ok { + return rf(ctx, depositCount, networkID, destNetworkID) } - if rf, ok := ret.Get(0).(func(context.Context, uint, uint) string); ok { - r0 = rf(ctx, depositCount, destNetworkID) + if rf, ok := ret.Get(0).(func(context.Context, uint, uint, uint) string); ok { + r0 = rf(ctx, depositCount, networkID, destNetworkID) } else { r0 = ret.Get(0).(string) } - if rf, ok := ret.Get(1).(func(context.Context, uint, uint) error); ok { - r1 = rf(ctx, depositCount, destNetworkID) + if rf, ok := ret.Get(1).(func(context.Context, uint, uint, uint) error); ok { + r1 = rf(ctx, depositCount, networkID, destNetworkID) } else { r1 = ret.Error(1) } @@ -138,14 +142,15 @@ type bridgeServiceInterface_GetDepositStatus_Call struct { // GetDepositStatus is a helper method to define mock.On call // - ctx context.Context // - depositCount uint +// - networkID uint // - destNetworkID uint -func (_e *bridgeServiceInterface_Expecter) GetDepositStatus(ctx interface{}, depositCount interface{}, destNetworkID interface{}) *bridgeServiceInterface_GetDepositStatus_Call { - return &bridgeServiceInterface_GetDepositStatus_Call{Call: _e.mock.On("GetDepositStatus", ctx, depositCount, destNetworkID)} +func (_e *bridgeServiceInterface_Expecter) GetDepositStatus(ctx interface{}, depositCount interface{}, networkID interface{}, destNetworkID interface{}) *bridgeServiceInterface_GetDepositStatus_Call { + return &bridgeServiceInterface_GetDepositStatus_Call{Call: _e.mock.On("GetDepositStatus", ctx, depositCount, networkID, destNetworkID)} } -func (_c *bridgeServiceInterface_GetDepositStatus_Call) Run(run func(ctx context.Context, depositCount uint, destNetworkID uint)) *bridgeServiceInterface_GetDepositStatus_Call { +func (_c *bridgeServiceInterface_GetDepositStatus_Call) Run(run func(ctx context.Context, depositCount uint, networkID uint, destNetworkID uint)) *bridgeServiceInterface_GetDepositStatus_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(uint), args[2].(uint)) + run(args[0].(context.Context), args[1].(uint), args[2].(uint), args[3].(uint)) }) return _c } @@ -155,7 +160,7 @@ func (_c *bridgeServiceInterface_GetDepositStatus_Call) Return(_a0 string, _a1 e return _c } -func (_c *bridgeServiceInterface_GetDepositStatus_Call) RunAndReturn(run func(context.Context, uint, uint) (string, error)) *bridgeServiceInterface_GetDepositStatus_Call { +func (_c *bridgeServiceInterface_GetDepositStatus_Call) RunAndReturn(run func(context.Context, uint, uint, uint) (string, error)) *bridgeServiceInterface_GetDepositStatus_Call { _c.Call.Return(run) return _c } diff --git a/claimtxman/mocks/etherman_i.go b/claimtxman/mocks/etherman_i.go new file mode 100644 index 00000000..88daf8eb --- /dev/null +++ b/claimtxman/mocks/etherman_i.go @@ -0,0 +1,160 @@ +// Code 
generated by mockery. DO NOT EDIT. + +package mock_txcompressor + +import ( + claimcompressor "github.com/0xPolygonHermez/zkevm-bridge-service/etherman/smartcontracts/claimcompressor" + bind "github.com/ethereum/go-ethereum/accounts/abi/bind" + + common "github.com/ethereum/go-ethereum/common" + + mock "github.com/stretchr/testify/mock" + + types "github.com/ethereum/go-ethereum/core/types" +) + +// EthermanI is an autogenerated mock type for the EthermanI type +type EthermanI struct { + mock.Mock +} + +type EthermanI_Expecter struct { + mock *mock.Mock +} + +func (_m *EthermanI) EXPECT() *EthermanI_Expecter { + return &EthermanI_Expecter{mock: &_m.Mock} +} + +// CompressClaimCall provides a mock function with given fields: mainnetExitRoot, rollupExitRoot, claimData +func (_m *EthermanI) CompressClaimCall(mainnetExitRoot common.Hash, rollupExitRoot common.Hash, claimData []claimcompressor.ClaimCompressorCompressClaimCallData) ([]byte, error) { + ret := _m.Called(mainnetExitRoot, rollupExitRoot, claimData) + + if len(ret) == 0 { + panic("no return value specified for CompressClaimCall") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(common.Hash, common.Hash, []claimcompressor.ClaimCompressorCompressClaimCallData) ([]byte, error)); ok { + return rf(mainnetExitRoot, rollupExitRoot, claimData) + } + if rf, ok := ret.Get(0).(func(common.Hash, common.Hash, []claimcompressor.ClaimCompressorCompressClaimCallData) []byte); ok { + r0 = rf(mainnetExitRoot, rollupExitRoot, claimData) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(common.Hash, common.Hash, []claimcompressor.ClaimCompressorCompressClaimCallData) error); ok { + r1 = rf(mainnetExitRoot, rollupExitRoot, claimData) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthermanI_CompressClaimCall_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CompressClaimCall' +type EthermanI_CompressClaimCall_Call struct { + *mock.Call +} + +// CompressClaimCall is a helper method to define mock.On call +// - mainnetExitRoot common.Hash +// - rollupExitRoot common.Hash +// - claimData []claimcompressor.ClaimCompressorCompressClaimCallData +func (_e *EthermanI_Expecter) CompressClaimCall(mainnetExitRoot interface{}, rollupExitRoot interface{}, claimData interface{}) *EthermanI_CompressClaimCall_Call { + return &EthermanI_CompressClaimCall_Call{Call: _e.mock.On("CompressClaimCall", mainnetExitRoot, rollupExitRoot, claimData)} +} + +func (_c *EthermanI_CompressClaimCall_Call) Run(run func(mainnetExitRoot common.Hash, rollupExitRoot common.Hash, claimData []claimcompressor.ClaimCompressorCompressClaimCallData)) *EthermanI_CompressClaimCall_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(common.Hash), args[1].(common.Hash), args[2].([]claimcompressor.ClaimCompressorCompressClaimCallData)) + }) + return _c +} + +func (_c *EthermanI_CompressClaimCall_Call) Return(_a0 []byte, _a1 error) *EthermanI_CompressClaimCall_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthermanI_CompressClaimCall_Call) RunAndReturn(run func(common.Hash, common.Hash, []claimcompressor.ClaimCompressorCompressClaimCallData) ([]byte, error)) *EthermanI_CompressClaimCall_Call { + _c.Call.Return(run) + return _c +} + +// SendCompressedClaims provides a mock function with given fields: auth, compressedTxData +func (_m *EthermanI) SendCompressedClaims(auth *bind.TransactOpts, compressedTxData []byte) (*types.Transaction, error) { + ret 
:= _m.Called(auth, compressedTxData) + + if len(ret) == 0 { + panic("no return value specified for SendCompressedClaims") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, []byte) (*types.Transaction, error)); ok { + return rf(auth, compressedTxData) + } + if rf, ok := ret.Get(0).(func(*bind.TransactOpts, []byte) *types.Transaction); ok { + r0 = rf(auth, compressedTxData) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(*bind.TransactOpts, []byte) error); ok { + r1 = rf(auth, compressedTxData) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthermanI_SendCompressedClaims_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SendCompressedClaims' +type EthermanI_SendCompressedClaims_Call struct { + *mock.Call +} + +// SendCompressedClaims is a helper method to define mock.On call +// - auth *bind.TransactOpts +// - compressedTxData []byte +func (_e *EthermanI_Expecter) SendCompressedClaims(auth interface{}, compressedTxData interface{}) *EthermanI_SendCompressedClaims_Call { + return &EthermanI_SendCompressedClaims_Call{Call: _e.mock.On("SendCompressedClaims", auth, compressedTxData)} +} + +func (_c *EthermanI_SendCompressedClaims_Call) Run(run func(auth *bind.TransactOpts, compressedTxData []byte)) *EthermanI_SendCompressedClaims_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(*bind.TransactOpts), args[1].([]byte)) + }) + return _c +} + +func (_c *EthermanI_SendCompressedClaims_Call) Return(_a0 *types.Transaction, _a1 error) *EthermanI_SendCompressedClaims_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthermanI_SendCompressedClaims_Call) RunAndReturn(run func(*bind.TransactOpts, []byte) (*types.Transaction, error)) *EthermanI_SendCompressedClaims_Call { + _c.Call.Return(run) + return _c +} + +// NewEthermanI creates a new instance of EthermanI. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewEthermanI(t interface { + mock.TestingT + Cleanup(func()) +}) *EthermanI { + mock := &EthermanI{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/claimtxman/mocks/storage_compressed_interface.go b/claimtxman/mocks/storage_compressed_interface.go index 1ff32049..ff706a23 100644 --- a/claimtxman/mocks/storage_compressed_interface.go +++ b/claimtxman/mocks/storage_compressed_interface.go @@ -177,9 +177,9 @@ func (_c *StorageCompressedInterface_Commit_Call) RunAndReturn(run func(context. 
return _c } -// GetClaimTxsByStatus provides a mock function with given fields: ctx, statuses, dbTx -func (_m *StorageCompressedInterface) GetClaimTxsByStatus(ctx context.Context, statuses []types.MonitoredTxStatus, dbTx pgx.Tx) ([]types.MonitoredTx, error) { - ret := _m.Called(ctx, statuses, dbTx) +// GetClaimTxsByStatus provides a mock function with given fields: ctx, statuses, rollupID, dbTx +func (_m *StorageCompressedInterface) GetClaimTxsByStatus(ctx context.Context, statuses []types.MonitoredTxStatus, rollupID uint, dbTx pgx.Tx) ([]types.MonitoredTx, error) { + ret := _m.Called(ctx, statuses, rollupID, dbTx) if len(ret) == 0 { panic("no return value specified for GetClaimTxsByStatus") @@ -187,19 +187,19 @@ func (_m *StorageCompressedInterface) GetClaimTxsByStatus(ctx context.Context, s var r0 []types.MonitoredTx var r1 error - if rf, ok := ret.Get(0).(func(context.Context, []types.MonitoredTxStatus, pgx.Tx) ([]types.MonitoredTx, error)); ok { - return rf(ctx, statuses, dbTx) + if rf, ok := ret.Get(0).(func(context.Context, []types.MonitoredTxStatus, uint, pgx.Tx) ([]types.MonitoredTx, error)); ok { + return rf(ctx, statuses, rollupID, dbTx) } - if rf, ok := ret.Get(0).(func(context.Context, []types.MonitoredTxStatus, pgx.Tx) []types.MonitoredTx); ok { - r0 = rf(ctx, statuses, dbTx) + if rf, ok := ret.Get(0).(func(context.Context, []types.MonitoredTxStatus, uint, pgx.Tx) []types.MonitoredTx); ok { + r0 = rf(ctx, statuses, rollupID, dbTx) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]types.MonitoredTx) } } - if rf, ok := ret.Get(1).(func(context.Context, []types.MonitoredTxStatus, pgx.Tx) error); ok { - r1 = rf(ctx, statuses, dbTx) + if rf, ok := ret.Get(1).(func(context.Context, []types.MonitoredTxStatus, uint, pgx.Tx) error); ok { + r1 = rf(ctx, statuses, rollupID, dbTx) } else { r1 = ret.Error(1) } @@ -215,14 +215,15 @@ type StorageCompressedInterface_GetClaimTxsByStatus_Call struct { // GetClaimTxsByStatus is a helper method to define mock.On call // - ctx context.Context // - statuses []types.MonitoredTxStatus +// - rollupID uint // - dbTx pgx.Tx -func (_e *StorageCompressedInterface_Expecter) GetClaimTxsByStatus(ctx interface{}, statuses interface{}, dbTx interface{}) *StorageCompressedInterface_GetClaimTxsByStatus_Call { - return &StorageCompressedInterface_GetClaimTxsByStatus_Call{Call: _e.mock.On("GetClaimTxsByStatus", ctx, statuses, dbTx)} +func (_e *StorageCompressedInterface_Expecter) GetClaimTxsByStatus(ctx interface{}, statuses interface{}, rollupID interface{}, dbTx interface{}) *StorageCompressedInterface_GetClaimTxsByStatus_Call { + return &StorageCompressedInterface_GetClaimTxsByStatus_Call{Call: _e.mock.On("GetClaimTxsByStatus", ctx, statuses, rollupID, dbTx)} } -func (_c *StorageCompressedInterface_GetClaimTxsByStatus_Call) Run(run func(ctx context.Context, statuses []types.MonitoredTxStatus, dbTx pgx.Tx)) *StorageCompressedInterface_GetClaimTxsByStatus_Call { +func (_c *StorageCompressedInterface_GetClaimTxsByStatus_Call) Run(run func(ctx context.Context, statuses []types.MonitoredTxStatus, rollupID uint, dbTx pgx.Tx)) *StorageCompressedInterface_GetClaimTxsByStatus_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].([]types.MonitoredTxStatus), args[2].(pgx.Tx)) + run(args[0].(context.Context), args[1].([]types.MonitoredTxStatus), args[2].(uint), args[3].(pgx.Tx)) }) return _c } @@ -232,7 +233,7 @@ func (_c *StorageCompressedInterface_GetClaimTxsByStatus_Call) Return(_a0 []type return _c } -func (_c 
*StorageCompressedInterface_GetClaimTxsByStatus_Call) RunAndReturn(run func(context.Context, []types.MonitoredTxStatus, pgx.Tx) ([]types.MonitoredTx, error)) *StorageCompressedInterface_GetClaimTxsByStatus_Call { +func (_c *StorageCompressedInterface_GetClaimTxsByStatus_Call) RunAndReturn(run func(context.Context, []types.MonitoredTxStatus, uint, pgx.Tx) ([]types.MonitoredTx, error)) *StorageCompressedInterface_GetClaimTxsByStatus_Call { _c.Call.Return(run) return _c } diff --git a/claimtxman/mocks/storage_interface.go b/claimtxman/mocks/storage_interface.go index 6e129d08..c7b2ee95 100644 --- a/claimtxman/mocks/storage_interface.go +++ b/claimtxman/mocks/storage_interface.go @@ -237,9 +237,9 @@ func (_c *StorageInterface_Commit_Call) RunAndReturn(run func(context.Context, p return _c } -// GetClaimTxsByStatus provides a mock function with given fields: ctx, statuses, dbTx -func (_m *StorageInterface) GetClaimTxsByStatus(ctx context.Context, statuses []types.MonitoredTxStatus, dbTx pgx.Tx) ([]types.MonitoredTx, error) { - ret := _m.Called(ctx, statuses, dbTx) +// GetClaimTxsByStatus provides a mock function with given fields: ctx, statuses, rollupID, dbTx +func (_m *StorageInterface) GetClaimTxsByStatus(ctx context.Context, statuses []types.MonitoredTxStatus, rollupID uint, dbTx pgx.Tx) ([]types.MonitoredTx, error) { + ret := _m.Called(ctx, statuses, rollupID, dbTx) if len(ret) == 0 { panic("no return value specified for GetClaimTxsByStatus") @@ -247,19 +247,19 @@ func (_m *StorageInterface) GetClaimTxsByStatus(ctx context.Context, statuses [] var r0 []types.MonitoredTx var r1 error - if rf, ok := ret.Get(0).(func(context.Context, []types.MonitoredTxStatus, pgx.Tx) ([]types.MonitoredTx, error)); ok { - return rf(ctx, statuses, dbTx) + if rf, ok := ret.Get(0).(func(context.Context, []types.MonitoredTxStatus, uint, pgx.Tx) ([]types.MonitoredTx, error)); ok { + return rf(ctx, statuses, rollupID, dbTx) } - if rf, ok := ret.Get(0).(func(context.Context, []types.MonitoredTxStatus, pgx.Tx) []types.MonitoredTx); ok { - r0 = rf(ctx, statuses, dbTx) + if rf, ok := ret.Get(0).(func(context.Context, []types.MonitoredTxStatus, uint, pgx.Tx) []types.MonitoredTx); ok { + r0 = rf(ctx, statuses, rollupID, dbTx) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]types.MonitoredTx) } } - if rf, ok := ret.Get(1).(func(context.Context, []types.MonitoredTxStatus, pgx.Tx) error); ok { - r1 = rf(ctx, statuses, dbTx) + if rf, ok := ret.Get(1).(func(context.Context, []types.MonitoredTxStatus, uint, pgx.Tx) error); ok { + r1 = rf(ctx, statuses, rollupID, dbTx) } else { r1 = ret.Error(1) } @@ -275,14 +275,15 @@ type StorageInterface_GetClaimTxsByStatus_Call struct { // GetClaimTxsByStatus is a helper method to define mock.On call // - ctx context.Context // - statuses []types.MonitoredTxStatus +// - rollupID uint // - dbTx pgx.Tx -func (_e *StorageInterface_Expecter) GetClaimTxsByStatus(ctx interface{}, statuses interface{}, dbTx interface{}) *StorageInterface_GetClaimTxsByStatus_Call { - return &StorageInterface_GetClaimTxsByStatus_Call{Call: _e.mock.On("GetClaimTxsByStatus", ctx, statuses, dbTx)} +func (_e *StorageInterface_Expecter) GetClaimTxsByStatus(ctx interface{}, statuses interface{}, rollupID interface{}, dbTx interface{}) *StorageInterface_GetClaimTxsByStatus_Call { + return &StorageInterface_GetClaimTxsByStatus_Call{Call: _e.mock.On("GetClaimTxsByStatus", ctx, statuses, rollupID, dbTx)} } -func (_c *StorageInterface_GetClaimTxsByStatus_Call) Run(run func(ctx context.Context, statuses 
[]types.MonitoredTxStatus, dbTx pgx.Tx)) *StorageInterface_GetClaimTxsByStatus_Call { +func (_c *StorageInterface_GetClaimTxsByStatus_Call) Run(run func(ctx context.Context, statuses []types.MonitoredTxStatus, rollupID uint, dbTx pgx.Tx)) *StorageInterface_GetClaimTxsByStatus_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].([]types.MonitoredTxStatus), args[2].(pgx.Tx)) + run(args[0].(context.Context), args[1].([]types.MonitoredTxStatus), args[2].(uint), args[3].(pgx.Tx)) }) return _c } @@ -292,7 +293,7 @@ func (_c *StorageInterface_GetClaimTxsByStatus_Call) Return(_a0 []types.Monitore return _c } -func (_c *StorageInterface_GetClaimTxsByStatus_Call) RunAndReturn(run func(context.Context, []types.MonitoredTxStatus, pgx.Tx) ([]types.MonitoredTx, error)) *StorageInterface_GetClaimTxsByStatus_Call { +func (_c *StorageInterface_GetClaimTxsByStatus_Call) RunAndReturn(run func(context.Context, []types.MonitoredTxStatus, uint, pgx.Tx) ([]types.MonitoredTx, error)) *StorageInterface_GetClaimTxsByStatus_Call { _c.Call.Return(run) return _c } @@ -392,9 +393,9 @@ func (_c *StorageInterface_UpdateClaimTx_Call) RunAndReturn(run func(context.Con return _c } -// UpdateL1DepositsStatus provides a mock function with given fields: ctx, exitRoot, dbTx -func (_m *StorageInterface) UpdateL1DepositsStatus(ctx context.Context, exitRoot []byte, dbTx pgx.Tx) ([]*etherman.Deposit, error) { - ret := _m.Called(ctx, exitRoot, dbTx) +// UpdateL1DepositsStatus provides a mock function with given fields: ctx, exitRoot, destinationNetwork, dbTx +func (_m *StorageInterface) UpdateL1DepositsStatus(ctx context.Context, exitRoot []byte, destinationNetwork uint, dbTx pgx.Tx) ([]*etherman.Deposit, error) { + ret := _m.Called(ctx, exitRoot, destinationNetwork, dbTx) if len(ret) == 0 { panic("no return value specified for UpdateL1DepositsStatus") @@ -402,19 +403,19 @@ func (_m *StorageInterface) UpdateL1DepositsStatus(ctx context.Context, exitRoot var r0 []*etherman.Deposit var r1 error - if rf, ok := ret.Get(0).(func(context.Context, []byte, pgx.Tx) ([]*etherman.Deposit, error)); ok { - return rf(ctx, exitRoot, dbTx) + if rf, ok := ret.Get(0).(func(context.Context, []byte, uint, pgx.Tx) ([]*etherman.Deposit, error)); ok { + return rf(ctx, exitRoot, destinationNetwork, dbTx) } - if rf, ok := ret.Get(0).(func(context.Context, []byte, pgx.Tx) []*etherman.Deposit); ok { - r0 = rf(ctx, exitRoot, dbTx) + if rf, ok := ret.Get(0).(func(context.Context, []byte, uint, pgx.Tx) []*etherman.Deposit); ok { + r0 = rf(ctx, exitRoot, destinationNetwork, dbTx) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]*etherman.Deposit) } } - if rf, ok := ret.Get(1).(func(context.Context, []byte, pgx.Tx) error); ok { - r1 = rf(ctx, exitRoot, dbTx) + if rf, ok := ret.Get(1).(func(context.Context, []byte, uint, pgx.Tx) error); ok { + r1 = rf(ctx, exitRoot, destinationNetwork, dbTx) } else { r1 = ret.Error(1) } @@ -430,14 +431,15 @@ type StorageInterface_UpdateL1DepositsStatus_Call struct { // UpdateL1DepositsStatus is a helper method to define mock.On call // - ctx context.Context // - exitRoot []byte +// - destinationNetwork uint // - dbTx pgx.Tx -func (_e *StorageInterface_Expecter) UpdateL1DepositsStatus(ctx interface{}, exitRoot interface{}, dbTx interface{}) *StorageInterface_UpdateL1DepositsStatus_Call { - return &StorageInterface_UpdateL1DepositsStatus_Call{Call: _e.mock.On("UpdateL1DepositsStatus", ctx, exitRoot, dbTx)} +func (_e *StorageInterface_Expecter) UpdateL1DepositsStatus(ctx interface{}, exitRoot 
interface{}, destinationNetwork interface{}, dbTx interface{}) *StorageInterface_UpdateL1DepositsStatus_Call { + return &StorageInterface_UpdateL1DepositsStatus_Call{Call: _e.mock.On("UpdateL1DepositsStatus", ctx, exitRoot, destinationNetwork, dbTx)} } -func (_c *StorageInterface_UpdateL1DepositsStatus_Call) Run(run func(ctx context.Context, exitRoot []byte, dbTx pgx.Tx)) *StorageInterface_UpdateL1DepositsStatus_Call { +func (_c *StorageInterface_UpdateL1DepositsStatus_Call) Run(run func(ctx context.Context, exitRoot []byte, destinationNetwork uint, dbTx pgx.Tx)) *StorageInterface_UpdateL1DepositsStatus_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].([]byte), args[2].(pgx.Tx)) + run(args[0].(context.Context), args[1].([]byte), args[2].(uint), args[3].(pgx.Tx)) }) return _c } @@ -447,7 +449,7 @@ func (_c *StorageInterface_UpdateL1DepositsStatus_Call) Return(_a0 []*etherman.D return _c } -func (_c *StorageInterface_UpdateL1DepositsStatus_Call) RunAndReturn(run func(context.Context, []byte, pgx.Tx) ([]*etherman.Deposit, error)) *StorageInterface_UpdateL1DepositsStatus_Call { +func (_c *StorageInterface_UpdateL1DepositsStatus_Call) RunAndReturn(run func(context.Context, []byte, uint, pgx.Tx) ([]*etherman.Deposit, error)) *StorageInterface_UpdateL1DepositsStatus_Call { _c.Call.Return(run) return _c } diff --git a/claimtxman/monitor_compressed_txs.go b/claimtxman/monitor_compressed_txs.go index 686b9f14..f3830479 100644 --- a/claimtxman/monitor_compressed_txs.go +++ b/claimtxman/monitor_compressed_txs.go @@ -19,7 +19,7 @@ const ( ) type StorageCompressedInterface interface { - GetClaimTxsByStatus(ctx context.Context, statuses []ctmtypes.MonitoredTxStatus, dbTx pgx.Tx) ([]ctmtypes.MonitoredTx, error) + GetClaimTxsByStatus(ctx context.Context, statuses []ctmtypes.MonitoredTxStatus, rollupID uint, dbTx pgx.Tx) ([]ctmtypes.MonitoredTx, error) GetMonitoredTxsGroups(ctx context.Context, groupIds []uint64, dbTx pgx.Tx) (map[uint64]ctmtypes.MonitoredTxGroupDBEntry, error) AddMonitoredTxsGroup(ctx context.Context, mTxGroup *ctmtypes.MonitoredTxGroupDBEntry, dbTx pgx.Tx) error @@ -49,6 +49,7 @@ type MonitorCompressedTxs struct { timeProvider utils.TimeProvider triggerGroups *GroupsTrigger gasOffset uint64 + rollupID uint } func NewMonitorCompressedTxs(ctx context.Context, @@ -59,7 +60,8 @@ func NewMonitorCompressedTxs(ctx context.Context, auth *bind.TransactOpts, etherMan EthermanI, timeProvider utils.TimeProvider, - gasOffset uint64) *MonitorCompressedTxs { + gasOffset uint64, + rollupID uint) *MonitorCompressedTxs { composer, err := NewComposeCompressClaim() if err != nil { log.Fatal("failed to create ComposeCompressClaim: %v", err) @@ -76,6 +78,7 @@ func NewMonitorCompressedTxs(ctx context.Context, timeProvider: timeProvider, triggerGroups: NewGroupsTrigger(cfg.GroupingClaims), gasOffset: gasOffset, + rollupID: rollupID, } } @@ -99,7 +102,7 @@ func (tm *MonitorCompressedTxs) getPendingTxs(ctx context.Context, dbTx pgx.Tx) statusesFilter := []ctmtypes.MonitoredTxStatus{ctmtypes.MonitoredTxStatusCreated, ctmtypes.MonitoredTxStatusCompressing, ctmtypes.MonitoredTxStatusClaiming} - mTxs, err := tm.storage.GetClaimTxsByStatus(ctx, statusesFilter, dbTx) + mTxs, err := tm.storage.GetClaimTxsByStatus(ctx, statusesFilter, tm.rollupID, dbTx) if err != nil { return PendingTxs{}, fmt.Errorf("failed to get get monitored txs: %v", err) } diff --git a/claimtxman/monitortxs.go b/claimtxman/monitortxs.go index af9ee204..15a494ff 100644 --- a/claimtxman/monitortxs.go +++ 
b/claimtxman/monitortxs.go @@ -25,6 +25,7 @@ type MonitorTxs struct { // client is the ethereum client l2Node *utils.Client cfg Config + rollupID uint nonceCache *NonceCache auth *bind.TransactOpts } @@ -34,8 +35,10 @@ func NewMonitorTxs(ctx context.Context, l2Node *utils.Client, cfg Config, nonceCache *NonceCache, + rollupID uint, auth *bind.TransactOpts) *MonitorTxs { return &MonitorTxs{ + rollupID: rollupID, storage: storage, ctx: ctx, l2Node: l2Node, @@ -53,22 +56,22 @@ func (tm *MonitorTxs) MonitorTxs(ctx context.Context) error { } statusesFilter := []ctmtypes.MonitoredTxStatus{ctmtypes.MonitoredTxStatusCreated} - mTxs, err := tm.storage.GetClaimTxsByStatus(ctx, statusesFilter, dbTx) + mTxs, err := tm.storage.GetClaimTxsByStatus(ctx, statusesFilter, tm.rollupID, dbTx) if err != nil { - log.Errorf("failed to get created monitored txs: %v", err) + log.Errorf("rollupID: %d, failed to get created monitored txs: %v", tm.rollupID, err) rollbackErr := tm.storage.Rollback(tm.ctx, dbTx) if rollbackErr != nil { - log.Errorf("claimtxman error rolling back state. RollbackErr: %s, err: %v", rollbackErr.Error(), err) + log.Errorf("rollupID: %d, claimtxman error rolling back state. RollbackErr: %s, err: %v", tm.rollupID, rollbackErr.Error(), err) return rollbackErr } - return fmt.Errorf("failed to get created monitored txs: %v", err) + return fmt.Errorf("rollupID: %d, failed to get created monitored txs: %v", tm.rollupID, err) } isResetNonce := false // it will reset the nonce in one cycle - log.Infof("found %v monitored tx to process", len(mTxs)) + log.Infof("rollupID: %d, found %v monitored tx to process", tm.rollupID, len(mTxs)) for _, mTx := range mTxs { mTx := mTx // force variable shadowing to avoid pointer conflicts - mTxLog := log.WithFields("monitoredTx", mTx.DepositID) + mTxLog := log.WithFields("monitoredTx", mTx.DepositID, "rollupID", tm.rollupID) mTxLog.Infof("processing tx with nonce %d", mTx.Nonce) // if the tx is not mined yet, check that not all the tx were mined and go to the next @@ -131,7 +134,7 @@ func (tm *MonitorTxs) MonitorTxs(ctx context.Context) error { } //Multiply gasPrice by 10 to increase the efficiency of the tx in the sequence mTx.GasPrice = big.NewInt(0).Mul(gasPrice, big.NewInt(10)) //nolint:gomnd - log.Infof("Using gasPrice: %s. The gasPrice suggested by the network is %s", mTx.GasPrice.String(), gasPrice.String()) + mTxLog.Infof("Using gasPrice: %s. The gasPrice suggested by the network is %s", mTx.GasPrice.String(), gasPrice.String()) // rebuild transaction tx := mTx.Tx() @@ -196,10 +199,10 @@ func (tm *MonitorTxs) MonitorTxs(ctx context.Context) error { err = tm.storage.Commit(tm.ctx, dbTx) if err != nil { - log.Errorf("UpdateClaimTx committing dbTx, err: %v", err) + log.Errorf("rollupID: %d, UpdateClaimTx committing dbTx, err: %v", tm.rollupID, err) rollbackErr := tm.storage.Rollback(tm.ctx, dbTx) if rollbackErr != nil { - log.Errorf("claimtxman error rolling back state. RollbackErr: %s, err: %v", rollbackErr.Error(), err) + log.Errorf("rollupID: %d, claimtxman error rolling back state. 
RollbackErr: %s, err: %v", tm.rollupID, rollbackErr.Error(), err) return rollbackErr } return err @@ -242,7 +245,7 @@ func (tm *MonitorTxs) checkTxHistory(ctx context.Context, mTx ctmtypes.Monitored continue } } - log.Infof("tx: %s not mined yet", txHash.String()) + mTxLog.Infof("tx: %s not mined yet", txHash.String()) allHistoryTxMined = false continue @@ -263,7 +266,7 @@ func (tm *MonitorTxs) checkTxHistory(ctx context.Context, mTx ctmtypes.Monitored // accordingly to the current information stored and the current // state of the blockchain func (tm *MonitorTxs) ReviewMonitoredTx(ctx context.Context, mTx *ctmtypes.MonitoredTx, reviewNonce bool) error { - mTxLog := log.WithFields("monitoredTx", mTx.DepositID) + mTxLog := log.WithFields("monitoredTx", mTx.DepositID, "rollupID", tm.rollupID) mTxLog.Debug("reviewing") // get gas tx := ethereum.CallMsg{ diff --git a/claimtxman/types/monitoredtx.go b/claimtxman/types/monitoredtx.go index 18ea3a32..a2265cda 100644 --- a/claimtxman/types/monitoredtx.go +++ b/claimtxman/types/monitoredtx.go @@ -51,7 +51,7 @@ func (s MonitoredTxStatus) String() string { // plus information to monitor if the transactions was sent successfully type MonitoredTx struct { // DepositID is the tx identifier controller by the caller - DepositID uint + DepositID uint64 // From is a sender of the tx, used to identify which private key should be used to sing the tx From common.Address @@ -171,7 +171,7 @@ func NewMonitoredTxGroup(entry MonitoredTxGroupDBEntry, txs []MonitoredTx) Monit return res } -func (m MonitoredTxGroup) GetTxByDepositID(depositID uint) *MonitoredTx { +func (m MonitoredTxGroup) GetTxByDepositID(depositID uint64) *MonitoredTx { for idx := range m.Txs { if m.Txs[idx].DepositID == depositID { return &m.Txs[idx] diff --git a/cmd/run.go b/cmd/run.go index 508d3199..a3126d83 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -54,20 +54,12 @@ func start(ctx *cli.Context) error { return err } - networkID, err := l1Etherman.GetNetworkID(ctx.Context) + networkID := l1Etherman.GetNetworkID() log.Infof("main network id: %d", networkID) - if err != nil { - log.Error(err) - return err - } var networkIDs = []uint{networkID} for _, client := range l2Ethermans { - networkID, err := client.GetNetworkID(ctx.Context) - if err != nil { - log.Error(err) - return err - } + networkID := client.GetNetworkID() log.Infof("l2 network id: %d", networkID) networkIDs = append(networkIDs, networkID) } @@ -96,24 +88,38 @@ func start(ctx *cli.Context) error { log.Error(err) return err } - rollupID := l1Etherman.GetRollupID() - bridgeService := server.NewBridgeService(c.BridgeServer, c.BridgeController.Height, networkIDs, apiStorage, rollupID) + bridgeService := server.NewBridgeService(c.BridgeServer, c.BridgeController.Height, networkIDs, apiStorage) err = server.RunServer(c.BridgeServer, bridgeService) if err != nil { log.Error(err) return err } - log.Debug("trusted sequencer URL ", c.Etherman.L2URLs[0]) - zkEVMClient := client.NewClient(c.Etherman.L2URLs[0]) - chExitRootEvent := make(chan *etherman.GlobalExitRoot) - chSynced := make(chan uint) + var chsExitRootEvent []chan *etherman.GlobalExitRoot + var chsSyncedL2 []chan uint logVersion() - go runSynchronizer(ctx.Context, c.NetworkConfig.GenBlockNumber, bridgeController, l1Etherman, c.Synchronizer, storage, zkEVMClient, chExitRootEvent, chSynced) - for _, client := range l2Ethermans { - go runSynchronizer(ctx.Context, 0, bridgeController, client, c.Synchronizer, storage, zkEVMClient, chExitRootEvent, chSynced) + for i, l2EthermanClient := 
range l2Ethermans { + log.Debug("trusted sequencer URL ", c.Etherman.L2URLs[i]) + zkEVMClient := client.NewClient(c.Etherman.L2URLs[i]) + chExitRootEventL2 := make(chan *etherman.GlobalExitRoot) + chSyncedL2 := make(chan uint) + chsExitRootEvent = append(chsExitRootEvent, chExitRootEventL2) + chsSyncedL2 = append(chsSyncedL2, chSyncedL2) + go runSynchronizer(ctx.Context, 0, bridgeController, l2EthermanClient, c.Synchronizer, storage, zkEVMClient, chExitRootEventL2, nil, chSyncedL2) } - + chSynced := make(chan uint) + go runSynchronizer(ctx.Context, c.NetworkConfig.GenBlockNumber, bridgeController, l1Etherman, c.Synchronizer, storage, nil, nil, chsExitRootEvent, chSynced) + go func() { + for { + select { + case netID := <-chSynced: + log.Debug("NetworkID synced: ", netID) + case <-ctx.Done(): + log.Debug("Stopping goroutine that listens for new GER updates") + return + } + } + }() if c.ClaimTxManager.Enabled { for i := 0; i < len(c.Etherman.L2URLs); i++ { // we should match the orders of L2URLs between etherman and claimtxman @@ -131,8 +137,8 @@ func start(ctx *cli.Context) error { if err != nil { log.Fatalf("error creating signer for L2 %s. Error: %v", c.Etherman.L2URLs[i], err) } - - claimTxManager, err := claimtxman.NewClaimTxManager(ctx, c.ClaimTxManager, chExitRootEvent, chSynced, + rollupID := l2Ethermans[i].GetNetworkID() // RollupID == networkID + claimTxManager, err := claimtxman.NewClaimTxManager(ctx, c.ClaimTxManager, chsExitRootEvent[i], chsSyncedL2[i], c.Etherman.L2URLs[i], networkIDs[i+1], c.NetworkConfig.L2PolygonBridgeAddresses[i], bridgeService, storage, rollupID, l2Ethermans[i], nonceCache, auth) if err != nil { log.Fatalf("error creating claim tx manager for L2 %s. Error: %v", c.Etherman.L2URLs[i], err) @@ -141,19 +147,9 @@ func start(ctx *cli.Context) error { } } else { log.Warn("ClaimTxManager not configured") - go func() { - for { - select { - case <-chExitRootEvent: - log.Debug("New GER received") - case netID := <-chSynced: - log.Debug("NetworkID synced: ", netID) - case <-ctx.Context.Done(): - log.Debug("Stopping goroutine that listen new GER updates") - return - } - } - }() + for i := range chsExitRootEvent { + monitorChannel(ctx.Context, chsExitRootEvent[i], chsSyncedL2[i]) + } } // Wait for an in interrupt. 
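The cmd/run.go hunks above replace the single shared GER/synced channel pair with one pair per L2 network: each L2 synchronizer and claim tx manager owns its chsExitRootEvent[i]/chsSyncedL2[i], while the L1 synchronizer receives the whole chsExitRootEvent slice so that it can forward every new GlobalExitRoot to all rollups. The standalone sketch below illustrates that fan-out pattern only; broadcastGER and the GlobalExitRoot stub are simplified stand-ins, not code from this patch.

package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// GlobalExitRoot is a minimal stand-in for etherman.GlobalExitRoot.
type GlobalExitRoot struct {
	NetworkID uint
}

// broadcastGER mimics the L1 side: every GER update is pushed to all
// per-L2 channels so each rollup can mark its own deposits ready to claim.
func broadcastGER(ctx context.Context, chs []chan *GlobalExitRoot, ger *GlobalExitRoot) {
	for _, ch := range chs {
		select {
		case ch <- ger:
		case <-ctx.Done():
			return
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	const numL2 = 2
	chsExitRootEvent := make([]chan *GlobalExitRoot, numL2)
	var wg sync.WaitGroup
	for i := range chsExitRootEvent {
		chsExitRootEvent[i] = make(chan *GlobalExitRoot)
		wg.Add(1)
		// One consumer per rollup, mirroring the per-L2 claim tx managers.
		go func(rollupID uint, ch <-chan *GlobalExitRoot) {
			defer wg.Done()
			for {
				select {
				case ger := <-ch:
					fmt.Printf("rollupID: %d, new GER received: %+v\n", rollupID, ger)
				case <-ctx.Done():
					return
				}
			}
		}(uint(i+1), chsExitRootEvent[i])
	}

	// The L1 synchronizer would call this whenever it sees a GER update.
	broadcastGER(ctx, chsExitRootEvent, &GlobalExitRoot{NetworkID: 0})
	time.Sleep(100 * time.Millisecond) // let consumers log before shutdown
	cancel()
	wg.Wait()
}

Because the channels are unbuffered, the broadcast is lossless but blocking: a stalled consumer would stall the sender too, which is why every send and receive here, like the patch's monitorChannel fallback, is paired with a ctx.Done() case.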
@@ -168,12 +164,26 @@ func setupLog(c log.Config) { log.Init(c) } +func monitorChannel(ctx context.Context, chExitRootEvent chan *etherman.GlobalExitRoot, chSynced chan uint) { + go func() { + for { + select { + case <-chExitRootEvent: + log.Debug("New GER received") + case netID := <-chSynced: + log.Debug("NetworkID synced: ", netID) + case <-ctx.Done(): + log.Debug("Stopping goroutine that listens for new GER updates") + return + } + } + }() +} func newEthermans(c *config.Config) (*etherman.Client, []*etherman.Client, error) { l1Etherman, err := etherman.NewClient(c.Etherman, c.NetworkConfig.PolygonBridgeAddress, c.NetworkConfig.PolygonZkEVMGlobalExitRootAddress, - c.NetworkConfig.PolygonRollupManagerAddress, - c.NetworkConfig.PolygonZkEvmAddress) + c.NetworkConfig.PolygonRollupManagerAddress) if err != nil { log.Error("L1 etherman error: ", err) return nil, nil, err @@ -193,8 +203,8 @@ func newEthermans(c *config.Config) (*etherman.Client, []*etherman.Client, error return l1Etherman, l2Ethermans, nil } -func runSynchronizer(ctx context.Context, genBlockNumber uint64, brdigeCtrl *bridgectrl.BridgeController, etherman *etherman.Client, cfg synchronizer.Config, storage db.Storage, zkEVMClient *client.Client, chExitRootEvent chan *etherman.GlobalExitRoot, chSynced chan uint) { - sy, err := synchronizer.NewSynchronizer(ctx, storage, brdigeCtrl, etherman, zkEVMClient, genBlockNumber, chExitRootEvent, chSynced, cfg) +func runSynchronizer(ctx context.Context, genBlockNumber uint64, brdigeCtrl *bridgectrl.BridgeController, etherman *etherman.Client, cfg synchronizer.Config, storage db.Storage, zkEVMClient *client.Client, chExitRootEventL2 chan *etherman.GlobalExitRoot, chsExitRootEvent []chan *etherman.GlobalExitRoot, chSynced chan uint) { + sy, err := synchronizer.NewSynchronizer(ctx, storage, brdigeCtrl, etherman, zkEVMClient, genBlockNumber, chExitRootEventL2, chsExitRootEvent, chSynced, cfg) if err != nil { log.Fatal(err) } diff --git a/config/config.debug.toml b/config/config.debug.toml index 8a4c4208..acea5610 100644 --- a/config/config.debug.toml +++ b/config/config.debug.toml @@ -62,6 +62,5 @@ GenBlockNumber = 0 PolygonBridgeAddress = "0xFe12ABaa190Ef0c8638Ee0ba9F828BF41368Ca0E" PolygonZkEVMGlobalExitRootAddress = "0x8A791620dd6260079BF849Dc5567aDC3F2FdC318" PolygonRollupManagerAddress = "0xB7f8BC63BbcaD18155201308C8f3540b07f84F5e" -PolygonZkEvmAddress = "0x8dAF17A20c9DBA35f005b6324F493785D239719d" L2ClaimCompressorAddress = "0x2279B7A0a67DB372996a5FaB50D91eAA73d2eBe6" L2PolygonBridgeAddresses = ["0xFe12ABaa190Ef0c8638Ee0ba9F828BF41368Ca0E"] diff --git a/config/config.local.toml b/config/config.local.toml index cafb8a1f..274191c0 100644 --- a/config/config.local.toml +++ b/config/config.local.toml @@ -62,6 +62,5 @@ GenBlockNumber = 0 PolygonBridgeAddress = "0xFe12ABaa190Ef0c8638Ee0ba9F828BF41368Ca0E" PolygonZkEVMGlobalExitRootAddress = "0x8A791620dd6260079BF849Dc5567aDC3F2FdC318" PolygonRollupManagerAddress = "0xB7f8BC63BbcaD18155201308C8f3540b07f84F5e" -PolygonZkEvmAddress = "0x8dAF17A20c9DBA35f005b6324F493785D239719d" L2ClaimCompressorAddress = "0x2279B7A0a67DB372996a5FaB50D91eAA73d2eBe6" L2PolygonBridgeAddresses = ["0xFe12ABaa190Ef0c8638Ee0ba9F828BF41368Ca0E"] diff --git a/config/network.go b/config/network.go index 58e629bf..958b4909 100644 --- a/config/network.go +++ b/config/network.go @@ -11,7 +11,6 @@ type NetworkConfig struct { PolygonBridgeAddress common.Address PolygonZkEVMGlobalExitRootAddress common.Address PolygonRollupManagerAddress common.Address - PolygonZkEvmAddress 
common.Address L2ClaimCompressorAddress common.Address L2PolygonBridgeAddresses []common.Address } @@ -28,7 +27,6 @@ var ( PolygonBridgeAddress: common.HexToAddress("0x2a3DD3EB832aF982ec71669E178424b10Dca2EDe"), PolygonZkEVMGlobalExitRootAddress: common.HexToAddress("0x580bda1e7A0CFAe92Fa7F6c20A3794F169CE3CFb"), PolygonRollupManagerAddress: common.HexToAddress("0x0000000000000000000000000000000000000000"), - PolygonZkEvmAddress: common.HexToAddress("0x0000000000000000000000000000000000000000"), L2ClaimCompressorAddress: common.HexToAddress("0x0000000000000000000000000000000000000000"), L2PolygonBridgeAddresses: []common.Address{common.HexToAddress("0x2a3DD3EB832aF982ec71669E178424b10Dca2EDe")}, }, @@ -38,7 +36,6 @@ var ( PolygonBridgeAddress: common.HexToAddress("0xF6BEEeBB578e214CA9E23B0e9683454Ff88Ed2A7"), PolygonZkEVMGlobalExitRootAddress: common.HexToAddress("0x4d9427DCA0406358445bC0a8F88C26b704004f74"), PolygonRollupManagerAddress: common.HexToAddress("0x0000000000000000000000000000000000000000"), - PolygonZkEvmAddress: common.HexToAddress("0x0000000000000000000000000000000000000000"), L2ClaimCompressorAddress: common.HexToAddress("0x0000000000000000000000000000000000000000"), L2PolygonBridgeAddresses: []common.Address{common.HexToAddress("0xF6BEEeBB578e214CA9E23B0e9683454Ff88Ed2A7")}, }, @@ -47,7 +44,6 @@ var ( PolygonBridgeAddress: common.HexToAddress("0x47c1090bc966280000Fe4356a501f1D0887Ce840"), PolygonZkEVMGlobalExitRootAddress: common.HexToAddress("0xA379Dd55Eb12e8FCdb467A814A15DE2b29677066"), PolygonRollupManagerAddress: common.HexToAddress("0x0000000000000000000000000000000000000000"), - PolygonZkEvmAddress: common.HexToAddress("0x0000000000000000000000000000000000000000"), L2ClaimCompressorAddress: common.HexToAddress("0x0000000000000000000000000000000000000000"), L2PolygonBridgeAddresses: []common.Address{common.HexToAddress("0xfC5b0c5F677a3f3E29DB2e98c9eD455c7ACfCf03")}, }, @@ -56,7 +52,6 @@ var ( PolygonBridgeAddress: common.HexToAddress("0xFe12ABaa190Ef0c8638Ee0ba9F828BF41368Ca0E"), PolygonZkEVMGlobalExitRootAddress: common.HexToAddress("0x8A791620dd6260079BF849Dc5567aDC3F2FdC318"), PolygonRollupManagerAddress: common.HexToAddress("0xB7f8BC63BbcaD18155201308C8f3540b07f84F5e"), - PolygonZkEvmAddress: common.HexToAddress("0x8dAF17A20c9DBA35f005b6324F493785D239719d"), L2ClaimCompressorAddress: common.HexToAddress("0x2279B7A0a67DB372996a5FaB50D91eAA73d2eBe6"), L2PolygonBridgeAddresses: []common.Address{common.HexToAddress("0xFe12ABaa190Ef0c8638Ee0ba9F828BF41368Ca0E")}, }, diff --git a/db/pgstorage/migrations/0012.sql b/db/pgstorage/migrations/0012.sql new file mode 100644 index 00000000..803ff2bb --- /dev/null +++ b/db/pgstorage/migrations/0012.sql @@ -0,0 +1,12 @@ +-- +migrate Up +DELETE FROM sync.exit_root WHERE block_id = 0; -- This will clean up old and unnecessary values +ALTER TABLE sync.exit_root ADD COLUMN network_id INTEGER NOT NULL DEFAULT 0; + +ALTER TABLE IF EXISTS sync.exit_root DROP CONSTRAINT IF EXISTS UC; +ALTER TABLE IF EXISTS sync.exit_root ADD CONSTRAINT UC UNIQUE(block_id, global_exit_root, network_id); + +-- +migrate Down +ALTER TABLE sync.exit_root DROP COLUMN network_id; + +ALTER TABLE IF EXISTS sync.exit_root DROP CONSTRAINT IF EXISTS UC; +ALTER TABLE IF EXISTS sync.exit_root ADD CONSTRAINT UC UNIQUE(block_id, global_exit_root); diff --git a/db/pgstorage/migrations/0012_test.go b/db/pgstorage/migrations/0012_test.go new file mode 100644 index 00000000..439e7156 --- /dev/null +++ b/db/pgstorage/migrations/0012_test.go @@ -0,0 +1,144 @@ +package 
migrations_test + +import ( + "database/sql" + "testing" + + "github.com/stretchr/testify/assert" +) + +type migrationTest0012 struct{} + +const ( + initialInsertSQL = ` + INSERT INTO sync.exit_root + (block_id, global_exit_root, exit_roots) + VALUES(1, decode('717E05DE47A87A7D1679E183F1C224150675F6302B7DA4EAAB526B2B91AE0761','hex'), '{decode(''5C7830303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030'',''hex''),decode(''5C7832376165356261303864373239316339366338636264646363313438626634386136643638633739373462393433353666353337353465663631373164373537'',''hex'')}'); + INSERT INTO sync.exit_root + (block_id, global_exit_root, exit_roots) + VALUES(5, decode('3FB985977CDE5D64E1579F17E55F1C5C346335969053E9D879E59035A48DAE0F','hex'), '{decode(''5C7838356636613362316238383961333263393739333334363962306438613636643634376439353864623463326661623532663638636361383139346633363133'',''hex''),decode(''5C7832376165356261303864373239316339366338636264646363313438626634386136643638633739373462393433353666353337353465663631373164373537'',''hex'')}'); + INSERT INTO sync.exit_root + (block_id, global_exit_root, exit_roots) + VALUES(6, decode('739666C7E4A5A2A3F96A0FAD10D2FE6BF92E332C8D6F82501D0075D8072CAF61','hex'), '{decode(''5C7864633663663334353439343739366564613863376439633335363164626164383033656539343466363337386130333361373266353231653831393063346131'',''hex''),decode(''5C7832376165356261303864373239316339366338636264646363313438626634386136643638633739373462393433353666353337353465663631373164373537'',''hex'')}'); + INSERT INTO sync.exit_root + (block_id, global_exit_root, exit_roots) + VALUES(7, decode('B60DF164428EF67624205078CDB74DD55308871CA1B8EED09F0372B87061CDE5','hex'), '{decode(''5C7833333638393438613133383866623564303662303230613333376262643732663238313662633533376338663230373632336639663161363132333337366361'',''hex''),decode(''5C7832376165356261303864373239316339366338636264646363313438626634386136643638633739373462393433353666353337353465663631373164373537'',''hex'')}'); + INSERT INTO sync.exit_root + (block_id, global_exit_root, exit_roots) + VALUES(8, decode('A31D7BCB797E2EFA7172F1B068E8CA29DC48EAD6091E57B3BF877EECA21A7A59','hex'), '{decode(''5C7832636438363562623761356635356465373537643236623630643434333965336465626265373962333061396436633936393238613839353639373163366432'',''hex''),decode(''5C7832376165356261303864373239316339366338636264646363313438626634386136643638633739373462393433353666353337353465663631373164373537'',''hex'')}'); + INSERT INTO sync.exit_root + (block_id, global_exit_root, exit_roots) + VALUES(0, decode('3FB985977CDE5D64E1579F17E55F1C5C346335969053E9D879E59035A48DAE0F','hex'), '{decode(''5C7838356636613362316238383961333263393739333334363962306438613636643634376439353864623463326661623532663638636361383139346633363133'',''hex''),decode(''5C7832376165356261303864373239316339366338636264646363313438626634386136643638633739373462393433353666353337353465663631373164373537'',''hex'')}'); + ` + insertBlocks = `INSERT INTO sync.block (id, block_num, block_hash, parent_hash, network_id, received_at) VALUES(1, 2803824, decode('27474F16174BBE50C294FE13C190B92E42B2368A6D4AEB8A4A015F52816296C2','hex'), decode('C9B5033799ADF3739383A0489EFBE8A0D4D5E4478778A4F4304562FD51AE4C07','hex'), 1, '0001-01-01 01:00:00.000'); + INSERT INTO sync.block (id, block_num, block_hash, parent_hash, network_id, received_at) VALUES(5, 2803824, decode('27474F16174BBE50C294FE13C190B92E42B2368A6D4AEB8A4A015F52816296C3','hex'), 
decode('C9B5033799ADF3739383A0489EFBE8A0D4D5E4478778A4F4304562FD51AE4C07','hex'), 1, '0001-01-01 01:00:00.000'); + INSERT INTO sync.block (id, block_num, block_hash, parent_hash, network_id, received_at) VALUES(6, 2803824, decode('27474F16174BBE50C294FE13C190B92E42B2368A6D4AEB8A4A015F52816296C4','hex'), decode('C9B5033799ADF3739383A0489EFBE8A0D4D5E4478778A4F4304562FD51AE4C07','hex'), 1, '0001-01-01 01:00:00.000'); + INSERT INTO sync.block (id, block_num, block_hash, parent_hash, network_id, received_at) VALUES(7, 2803824, decode('27474F16174BBE50C294FE13C190B92E42B2368A6D4AEB8A4A015F52816296C5','hex'), decode('C9B5033799ADF3739383A0489EFBE8A0D4D5E4478778A4F4304562FD51AE4C07','hex'), 1, '0001-01-01 01:00:00.000'); + INSERT INTO sync.block (id, block_num, block_hash, parent_hash, network_id, received_at) VALUES(8, 2803824, decode('27474F16174BBE50C294FE13C190B92E42B2368A6D4AEB8A4A015F52816296C6','hex'), decode('C9B5033799ADF3739383A0489EFBE8A0D4D5E4478778A4F4304562FD51AE4C07','hex'), 1, '0001-01-01 01:00:00.000'); + ` +) + +func (m migrationTest0012) InsertData(db *sql.DB) error { + if _, err := db.Exec(insertBlocks); err != nil { + return err + } + if _, err := db.Exec(initialInsertSQL); err != nil { + return err + } + return nil +} + +func (m migrationTest0012) RunAssertsAfterMigrationUp(t *testing.T, db *sql.DB) { + selectBlockID := `SELECT count(*) FROM sync.exit_root where block_id = 0;` + var count int + err := db.QueryRow(selectBlockID).Scan(&count) + assert.NoError(t, err) + assert.Equal(t, 0, count) + + selectNetworkID := `SELECT network_id FROM sync.exit_root where block_id = 1;` + err = db.QueryRow(selectNetworkID).Scan(&count) + assert.NoError(t, err) + assert.Equal(t, 0, count) + + insertGER := `INSERT INTO sync.exit_root + (block_id, global_exit_root, exit_roots, network_id) + VALUES(5, decode('3FB985977CDE5D64E1579F17E55F1C5C346335969053E9D879E59035A48DAE0A','hex'), '{decode(''5C7838356636613362316238383961333263393739333334363962306438613636643634376439353864623463326661623532663638636361383139346633363133'',''hex''),decode(''5C7832376165356261303864373239316339366338636264646363313438626634386136643638633739373462393433353666353337353465663631373164373537'',''hex'')}', 1); + ` + _, err = db.Exec(insertGER) + assert.NoError(t, err) + + selectNetworkID = `SELECT network_id FROM sync.exit_root where block_id = 5 AND network_id = 1;` + err = db.QueryRow(selectNetworkID).Scan(&count) + assert.NoError(t, err) + assert.Equal(t, 1, count) + + insertTrustedGER := `INSERT INTO sync.exit_root + (block_id, global_exit_root, exit_roots, network_id) + VALUES(0, decode('BA95503878F0B150C40DE1F47536598315A020398A1F4FD61831E740FED25E3A','hex'), '{decode(''5C7834376463646439656133303936633866366335326363363961616566663830343361616431353032643232383639656664646664666435313635613233313638'',''hex''),decode(''5C7861316336343137643833393536396530613738663636636435363836656433623436343464613466613964373664396665636662653061376534646465383630'',''hex'')}', 2); + ` + _, err = db.Exec(insertTrustedGER) + assert.NoError(t, err) + insertTrustedGER2 := `INSERT INTO sync.exit_root + (block_id, global_exit_root, exit_roots, network_id) + VALUES(0, decode('BA95503878F0B150C40DE1F47536598315A020398A1F4FD61831E740FED25E3A','hex'), 
'{decode(''5C7834376463646439656133303936633866366335326363363961616566663830343361616431353032643232383639656664646664666435313635613233313638'',''hex''),decode(''5C7861316336343137643833393536396530613738663636636435363836656433623436343464613466613964373664396665636662653061376534646465383630'',''hex'')}', 2); + ` + _, err = db.Exec(insertTrustedGER2) + assert.Error(t, err) + insertTrustedGER3 := `INSERT INTO sync.exit_root + (block_id, global_exit_root, exit_roots, network_id) + VALUES(0, decode('BA95503878F0B150C40DE1F47536598315A020398A1F4FD61831E740FED25E3A','hex'), '{decode(''5C7834376463646439656133303936633866366335326363363961616566663830343361616431353032643232383639656664646664666435313635613233313638'',''hex''),decode(''5C7861316336343137643833393536396530613738663636636435363836656433623436343464613466613964373664396665636662653061376534646465383630'',''hex'')}', 3); + ` + _, err = db.Exec(insertTrustedGER3) + assert.NoError(t, err) + emptyGERTable := `DELETE FROM sync.exit_root;` + _, err = db.Exec(emptyGERTable) + assert.NoError(t, err) +} + +func (m migrationTest0012) RunAssertsAfterMigrationDown(t *testing.T, db *sql.DB) { + selectBlockID := `SELECT count(*) FROM sync.exit_root where block_id = 0;` + var count int + err := db.QueryRow(selectBlockID).Scan(&count) + assert.NoError(t, err) + assert.Equal(t, 0, count) + + selectNetworkID := `SELECT network_id FROM sync.exit_root where block_id = 1;` + err = db.QueryRow(selectNetworkID).Scan(&count) + assert.Error(t, err) + + insertGER := `INSERT INTO sync.exit_root + (id, block_id, global_exit_root, exit_roots, network_id) + VALUES(11, 5, decode('3FB985977CDE5D64E1579F17E55F1C5C346335969053E9D879E59035A48DAE0B','hex'), '{decode(''5C7838356636613362316238383961333263393739333334363962306438613636643634376439353864623463326661623532663638636361383139346633363133'',''hex''),decode(''5C7832376165356261303864373239316339366338636264646363313438626634386136643638633739373462393433353666353337353465663631373164373537'',''hex'')}', 1); + ` + _, err = db.Exec(insertGER) + assert.Error(t, err) + + insertTrustedGER := `INSERT INTO sync.exit_root + (block_id, global_exit_root, exit_roots, network_id) + VALUES(0, decode('BA95503878F0B150C40DE1F47536598315A020398A1F4FD61831E740FED25E3A','hex'), '{decode(''5C7834376463646439656133303936633866366335326363363961616566663830343361616431353032643232383639656664646664666435313635613233313638'',''hex''),decode(''5C7861316336343137643833393536396530613738663636636435363836656433623436343464613466613964373664396665636662653061376534646465383630'',''hex'')}', 2); + ` + _, err = db.Exec(insertTrustedGER) + assert.Error(t, err) + insertTrustedGER2 := `INSERT INTO sync.exit_root + (block_id, global_exit_root, exit_roots) + VALUES(0, decode('BA95503878F0B150C40DE1F47536598315A020398A1F4FD61831E740FED25E3A','hex'), '{decode(''5C7834376463646439656133303936633866366335326363363961616566663830343361616431353032643232383639656664646664666435313635613233313638'',''hex''),decode(''5C7861316336343137643833393536396530613738663636636435363836656433623436343464613466613964373664396665636662653061376534646465383630'',''hex'')}'); + ` + _, err = db.Exec(insertTrustedGER2) + assert.NoError(t, err) + insertTrustedGER3 := `INSERT INTO sync.exit_root + (block_id, global_exit_root, exit_roots) + VALUES(0, decode('BA95503878F0B150C40DE1F47536598315A020398A1F4FD61831E740FED25E3A','hex'), 
'{decode(''5C7834376463646439656133303936633866366335326363363961616566663830343361616431353032643232383639656664646664666435313635613233313638'',''hex''),decode(''5C7861316336343137643833393536396530613738663636636435363836656433623436343464613466613964373664396665636662653061376534646465383630'',''hex'')}'); + ` + _, err = db.Exec(insertTrustedGER3) + assert.Error(t, err) + insertTrustedGER4 := `INSERT INTO sync.exit_root + (block_id, global_exit_root, exit_roots) + VALUES(0, decode('BA95503878F0B150C40DE1F47536598315A020398A1F4FD61831E740FED25E3B','hex'), '{decode(''5C7834376463646439656133303936633866366335326363363961616566663830343361616431353032643232383639656664646664666435313635613233313638'',''hex''),decode(''5C7861316336343137643833393536396530613738663636636435363836656433623436343464613466613964373664396665636662653061376534646465383630'',''hex'')}'); + ` + _, err = db.Exec(insertTrustedGER4) + assert.NoError(t, err) +} + +func TestMigration0012(t *testing.T) { + runMigrationTest(t, 12, migrationTest0012{}) +} diff --git a/db/pgstorage/pgstorage.go b/db/pgstorage/pgstorage.go index 4c456c65..f40879b5 100644 --- a/db/pgstorage/pgstorage.go +++ b/db/pgstorage/pgstorage.go @@ -190,10 +190,10 @@ func (p *PostgresStorage) GetNumberDeposits(ctx context.Context, networkID uint, // AddTrustedGlobalExitRoot adds new global exit root which comes from the trusted sequencer. func (p *PostgresStorage) AddTrustedGlobalExitRoot(ctx context.Context, trustedExitRoot *etherman.GlobalExitRoot, dbTx pgx.Tx) (bool, error) { const addTrustedGerSQL = ` - INSERT INTO sync.exit_root (block_id, global_exit_root, exit_roots) - VALUES (0, $1, $2) + INSERT INTO sync.exit_root (block_id, global_exit_root, exit_roots, network_id) + VALUES (0, $1, $2, $3) ON CONFLICT ON CONSTRAINT UC DO NOTHING;` - res, err := p.getExecQuerier(dbTx).Exec(ctx, addTrustedGerSQL, trustedExitRoot.GlobalExitRoot, pq.Array([][]byte{trustedExitRoot.ExitRoots[0][:], trustedExitRoot.ExitRoots[1][:]})) + res, err := p.getExecQuerier(dbTx).Exec(ctx, addTrustedGerSQL, trustedExitRoot.GlobalExitRoot, pq.Array([][]byte{trustedExitRoot.ExitRoots[0][:], trustedExitRoot.ExitRoots[1][:]}), trustedExitRoot.NetworkID) return res.RowsAffected() > 0, err } @@ -243,8 +243,8 @@ func (p *PostgresStorage) GetDeposit(ctx context.Context, depositCounterUser uin deposit etherman.Deposit amount string ) - const getDepositSQL = "SELECT leaf_type, orig_net, orig_addr, amount, dest_net, dest_addr, deposit_cnt, block_id, b.block_num, d.network_id, tx_hash, metadata, ready_for_claim FROM sync.deposit as d INNER JOIN sync.block as b ON d.network_id = b.network_id AND d.block_id = b.id WHERE d.network_id = $1 AND deposit_cnt = $2" - err := p.getExecQuerier(dbTx).QueryRow(ctx, getDepositSQL, networkID, depositCounterUser).Scan(&deposit.LeafType, &deposit.OriginalNetwork, &deposit.OriginalAddress, &amount, &deposit.DestinationNetwork, &deposit.DestinationAddress, &deposit.DepositCount, &deposit.BlockID, &deposit.BlockNumber, &deposit.NetworkID, &deposit.TxHash, &deposit.Metadata, &deposit.ReadyForClaim) + const getDepositSQL = "SELECT d.id, leaf_type, orig_net, orig_addr, amount, dest_net, dest_addr, deposit_cnt, block_id, b.block_num, d.network_id, tx_hash, metadata, ready_for_claim FROM sync.deposit as d INNER JOIN sync.block as b ON d.network_id = b.network_id AND d.block_id = b.id WHERE d.network_id = $1 AND deposit_cnt = $2" + err := p.getExecQuerier(dbTx).QueryRow(ctx, getDepositSQL, networkID, depositCounterUser).Scan(&deposit.Id, &deposit.LeafType, 
&deposit.OriginalNetwork, &deposit.OriginalAddress, &amount, &deposit.DestinationNetwork, &deposit.DestinationAddress, &deposit.DepositCount, &deposit.BlockID, &deposit.BlockNumber, &deposit.NetworkID, &deposit.TxHash, &deposit.Metadata, &deposit.ReadyForClaim) if errors.Is(err, pgx.ErrNoRows) { return nil, gerror.ErrStorageNotFound } @@ -254,9 +254,9 @@ func (p *PostgresStorage) GetDeposit(ctx context.Context, depositCounterUser uin } // GetLatestExitRoot gets the latest global exit root. -func (p *PostgresStorage) GetLatestExitRoot(ctx context.Context, isRollup bool, dbTx pgx.Tx) (*etherman.GlobalExitRoot, error) { - if !isRollup { - return p.GetLatestTrustedExitRoot(ctx, dbTx) +func (p *PostgresStorage) GetLatestExitRoot(ctx context.Context, networkID, destNetwork uint, dbTx pgx.Tx) (*etherman.GlobalExitRoot, error) { + if networkID == 0 { + return p.GetLatestTrustedExitRoot(ctx, destNetwork, dbTx) } return p.GetLatestL1SyncedExitRoot(ctx, dbTx) @@ -268,7 +268,7 @@ func (p *PostgresStorage) GetLatestL1SyncedExitRoot(ctx context.Context, dbTx pg ger etherman.GlobalExitRoot exitRoots [][]byte ) - const getLatestL1SyncedExitRootSQL = "SELECT block_id, global_exit_root, exit_roots FROM sync.exit_root WHERE block_id > 0 ORDER BY id DESC LIMIT 1" + const getLatestL1SyncedExitRootSQL = "SELECT block_id, global_exit_root, exit_roots FROM sync.exit_root WHERE block_id > 0 AND network_id = 0 ORDER BY id DESC LIMIT 1" err := p.getExecQuerier(dbTx).QueryRow(ctx, getLatestL1SyncedExitRootSQL).Scan(&ger.BlockID, &ger.GlobalExitRoot, pq.Array(&exitRoots)) if err != nil { if errors.Is(err, pgx.ErrNoRows) { @@ -299,13 +299,13 @@ func (p *PostgresStorage) GetExitRootByGER(ctx context.Context, ger common.Hash, } // GetLatestTrustedExitRoot gets the latest trusted global exit root. 
-func (p *PostgresStorage) GetLatestTrustedExitRoot(ctx context.Context, dbTx pgx.Tx) (*etherman.GlobalExitRoot, error) { +func (p *PostgresStorage) GetLatestTrustedExitRoot(ctx context.Context, networkID uint, dbTx pgx.Tx) (*etherman.GlobalExitRoot, error) { var ( ger etherman.GlobalExitRoot exitRoots [][]byte ) - const getLatestTrustedExitRootSQL = "SELECT global_exit_root, exit_roots FROM sync.exit_root WHERE block_id = 0 ORDER BY id DESC LIMIT 1" - err := p.getExecQuerier(dbTx).QueryRow(ctx, getLatestTrustedExitRootSQL).Scan(&ger.GlobalExitRoot, pq.Array(&exitRoots)) + const getLatestTrustedExitRootSQL = "SELECT global_exit_root, exit_roots FROM sync.exit_root WHERE block_id = 0 AND network_id = $1 ORDER BY id DESC LIMIT 1" + err := p.getExecQuerier(dbTx).QueryRow(ctx, getLatestTrustedExitRootSQL, networkID).Scan(&ger.GlobalExitRoot, pq.Array(&exitRoots)) if err != nil { if errors.Is(err, pgx.ErrNoRows) { return nil, gerror.ErrStorageNotFound @@ -464,22 +464,6 @@ func (p *PostgresStorage) IsRollupExitRoot(ctx context.Context, root common.Hash return false, nil } -// IsLxLyActivated checks in db if LxLy is activated -func (p *PostgresStorage) IsLxLyActivated(ctx context.Context, dbTx pgx.Tx) (bool, error) { - const getLeavesSQL = "SELECT count(*) FROM mt.rollup_exit" - var count int - err := p.getExecQuerier(dbTx).QueryRow(ctx, getLeavesSQL).Scan(&count) - if errors.Is(err, pgx.ErrNoRows) { - return false, gerror.ErrStorageNotFound - } else if err != nil { - return false, err - } - if count > 0 { - return true, nil - } - return false, nil -} - // GetLatestRollupExitLeaves gets the latest leaves of the rollupExitTree func (p *PostgresStorage) GetLatestRollupExitLeaves(ctx context.Context, dbTx pgx.Tx) ([]etherman.RollupExitLeaf, error) { const getLeavesSQL = `SELECT distinct re.id, re.leaf, re.rollup_id, re.root, re.block_id @@ -559,7 +543,7 @@ func (p *PostgresStorage) GetClaims(ctx context.Context, destAddr string, limit // GetDeposits gets the deposit list which be smaller than depositCount. 
func (p *PostgresStorage) GetDeposits(ctx context.Context, destAddr string, limit uint, offset uint, dbTx pgx.Tx) ([]*etherman.Deposit, error) { - const getDepositsSQL = "SELECT leaf_type, orig_net, orig_addr, amount, dest_net, dest_addr, deposit_cnt, block_id, b.block_num, d.network_id, tx_hash, metadata, ready_for_claim FROM sync.deposit as d INNER JOIN sync.block as b ON d.network_id = b.network_id AND d.block_id = b.id WHERE dest_addr = $1 ORDER BY d.block_id DESC, d.deposit_cnt DESC LIMIT $2 OFFSET $3" + const getDepositsSQL = "SELECT d.id, leaf_type, orig_net, orig_addr, amount, dest_net, dest_addr, deposit_cnt, block_id, b.block_num, d.network_id, tx_hash, metadata, ready_for_claim FROM sync.deposit as d INNER JOIN sync.block as b ON d.network_id = b.network_id AND d.block_id = b.id WHERE dest_addr = $1 ORDER BY d.block_id DESC, d.deposit_cnt DESC LIMIT $2 OFFSET $3" rows, err := p.getExecQuerier(dbTx).Query(ctx, getDepositsSQL, common.FromHex(destAddr), limit, offset) if err != nil { return nil, err @@ -572,7 +556,7 @@ func (p *PostgresStorage) GetDeposits(ctx context.Context, destAddr string, limi deposit etherman.Deposit amount string ) - err = rows.Scan(&deposit.LeafType, &deposit.OriginalNetwork, &deposit.OriginalAddress, &amount, &deposit.DestinationNetwork, &deposit.DestinationAddress, &deposit.DepositCount, &deposit.BlockID, &deposit.BlockNumber, &deposit.NetworkID, &deposit.TxHash, &deposit.Metadata, &deposit.ReadyForClaim) + err = rows.Scan(&deposit.Id, &deposit.LeafType, &deposit.OriginalNetwork, &deposit.OriginalAddress, &amount, &deposit.DestinationNetwork, &deposit.DestinationAddress, &deposit.DepositCount, &deposit.BlockID, &deposit.BlockNumber, &deposit.NetworkID, &deposit.TxHash, &deposit.Metadata, &deposit.ReadyForClaim) if err != nil { return nil, err } @@ -591,21 +575,14 @@ func (p *PostgresStorage) GetDepositCount(ctx context.Context, destAddr string, return depositCount, err } -// UpdateBlocksForTesting updates the hash of blocks. -func (p *PostgresStorage) UpdateBlocksForTesting(ctx context.Context, networkID uint, blockNum uint64, dbTx pgx.Tx) error { - const updateBlocksSQL = "UPDATE sync.block SET block_hash = SUBSTRING(block_hash FROM 1 FOR LENGTH(block_hash)-1) || '\x61' WHERE network_id = $1 AND block_num >= $2" - _, err := p.getExecQuerier(dbTx).Exec(ctx, updateBlocksSQL, networkID, blockNum) - return err -} - // UpdateL1DepositsStatus updates the ready_for_claim status of L1 deposits. 
-func (p *PostgresStorage) UpdateL1DepositsStatus(ctx context.Context, exitRoot []byte, dbTx pgx.Tx) ([]*etherman.Deposit, error) { +func (p *PostgresStorage) UpdateL1DepositsStatus(ctx context.Context, exitRoot []byte, destinationNetwork uint, dbTx pgx.Tx) ([]*etherman.Deposit, error) { const updateDepositsStatusSQL = `UPDATE sync.deposit SET ready_for_claim = true WHERE deposit_cnt <= (SELECT sync.deposit.deposit_cnt FROM mt.root INNER JOIN sync.deposit ON sync.deposit.id = mt.root.deposit_id WHERE mt.root.root = $1 AND mt.root.network = 0) - AND network_id = 0 AND ready_for_claim = false - RETURNING leaf_type, orig_net, orig_addr, amount, dest_net, dest_addr, deposit_cnt, block_id, network_id, tx_hash, metadata, ready_for_claim;` - rows, err := p.getExecQuerier(dbTx).Query(ctx, updateDepositsStatusSQL, exitRoot) + AND network_id = 0 AND ready_for_claim = false AND dest_net = $2 + RETURNING id, leaf_type, orig_net, orig_addr, amount, dest_net, dest_addr, deposit_cnt, block_id, network_id, tx_hash, metadata, ready_for_claim;` + rows, err := p.getExecQuerier(dbTx).Query(ctx, updateDepositsStatusSQL, exitRoot, destinationNetwork) if err != nil { return nil, err } @@ -616,7 +593,7 @@ func (p *PostgresStorage) UpdateL1DepositsStatus(ctx context.Context, exitRoot [ deposit etherman.Deposit amount string ) - err = rows.Scan(&deposit.LeafType, &deposit.OriginalNetwork, &deposit.OriginalAddress, &amount, &deposit.DestinationNetwork, &deposit.DestinationAddress, &deposit.DepositCount, &deposit.BlockID, &deposit.NetworkID, &deposit.TxHash, &deposit.Metadata, &deposit.ReadyForClaim) + err = rows.Scan(&deposit.Id, &deposit.LeafType, &deposit.OriginalNetwork, &deposit.OriginalAddress, &amount, &deposit.DestinationNetwork, &deposit.DestinationAddress, &deposit.DepositCount, &deposit.BlockID, &deposit.NetworkID, &deposit.TxHash, &deposit.Metadata, &deposit.ReadyForClaim) if err != nil { return nil, err } @@ -666,9 +643,9 @@ func (p *PostgresStorage) UpdateClaimTx(ctx context.Context, mTx ctmtypes.Monito } // GetClaimTxsByStatus gets the monitored transactions by status. -func (p *PostgresStorage) GetClaimTxsByStatus(ctx context.Context, statuses []ctmtypes.MonitoredTxStatus, dbTx pgx.Tx) ([]ctmtypes.MonitoredTx, error) { - const getMonitoredTxsSQL = "SELECT deposit_id, from_addr, to_addr, nonce, value, data, gas, status, history, created_at, updated_at, group_id, global_exit_root FROM sync.monitored_txs WHERE status = ANY($1) ORDER BY created_at ASC" - rows, err := p.getExecQuerier(dbTx).Query(ctx, getMonitoredTxsSQL, pq.Array(statuses)) +func (p *PostgresStorage) GetClaimTxsByStatus(ctx context.Context, statuses []ctmtypes.MonitoredTxStatus, rollupID uint, dbTx pgx.Tx) ([]ctmtypes.MonitoredTx, error) { + const getMonitoredTxsSQL = "SELECT deposit_id, from_addr, to_addr, nonce, value, data, gas, status, history, created_at, updated_at, group_id, global_exit_root FROM sync.monitored_txs INNER JOIN sync.deposit ON sync.deposit.id = sync.monitored_txs.deposit_id WHERE status = ANY($1) AND sync.deposit.dest_net = $2 ORDER BY created_at ASC" + rows, err := p.getExecQuerier(dbTx).Query(ctx, getMonitoredTxsSQL, pq.Array(statuses), rollupID) if errors.Is(err, pgx.ErrNoRows) { return []ctmtypes.MonitoredTx{}, nil } else if err != nil { @@ -703,3 +680,10 @@ func (p *PostgresStorage) UpdateDepositsStatusForTesting(ctx context.Context, db _, err := p.getExecQuerier(dbTx).Exec(ctx, updateDepositsStatusSQL) return err } + +// UpdateBlocksForTesting updates the hash of blocks. 
+func (p *PostgresStorage) UpdateBlocksForTesting(ctx context.Context, networkID uint, blockNum uint64, dbTx pgx.Tx) error { + const updateBlocksSQL = "UPDATE sync.block SET block_hash = SUBSTRING(block_hash FROM 1 FOR LENGTH(block_hash)-1) || '\x61' WHERE network_id = $1 AND block_num >= $2" + _, err := p.getExecQuerier(dbTx).Exec(ctx, updateBlocksSQL, networkID, blockNum) + return err +} diff --git a/db/pgstorage/pgstorage_test.go b/db/pgstorage/pgstorage_test.go index 064592a3..a15f85f1 100644 --- a/db/pgstorage/pgstorage_test.go +++ b/db/pgstorage/pgstorage_test.go @@ -87,36 +87,6 @@ func TestGetLeaves(t *testing.T) { assert.Equal(t, "0x42d3339fe8eb57770953423f20a029e778a707e8d58aaf110b40d5eb4dd25722", leaves[3].Root.String()) } -func TestIsLxLyActivated(t *testing.T) { - data := `INSERT INTO sync.block - (id, block_num, block_hash, parent_hash, network_id, received_at) - VALUES(1, 1, decode('5C7831','hex'), decode('5C7830','hex'), 0, '1970-01-01 01:00:00.000'); - - INSERT INTO mt.rollup_exit - (leaf, rollup_id, root, block_id) - VALUES(decode('A4BFA0908DC7B06D98DA4309F859023D6947561BC19BC00D77F763DEA1A0B9F5','hex'), 1, decode('42D3339FE8EB57770953423F20A029E778A707E8D58AAF110B40D5EB4DD25721','hex'), 1); - ` - dbCfg := NewConfigFromEnv() - ctx := context.Background() - err := InitOrReset(dbCfg) - require.NoError(t, err) - - store, err := NewPostgresStorage(dbCfg) - require.NoError(t, err) - - isActivated, err := store.IsLxLyActivated(ctx, nil) - require.NoError(t, err) - assert.Equal(t, false, isActivated) - - _, err = store.Exec(ctx, data) - require.NoError(t, err) - - isActivated, err = store.IsLxLyActivated(ctx, nil) - require.NoError(t, err) - - assert.Equal(t, true, isActivated) -} - func TestIsRollupExitRoot(t *testing.T) { data := `INSERT INTO sync.block (id, block_num, block_hash, parent_hash, network_id, received_at) diff --git a/db/storage_test.go b/db/storage_test.go index 29e79a8b..343c1065 100644 --- a/db/storage_test.go +++ b/db/storage_test.go @@ -84,7 +84,7 @@ func TestL1GlobalExitRoot(t *testing.T) { require.Equal(t, ger.BlockID, l1GER.BlockID) require.Equal(t, ger.GlobalExitRoot, l1GER.GlobalExitRoot) - latestGER, err := pg.GetLatestExitRoot(ctx, true, tx) + latestGER, err := pg.GetLatestExitRoot(ctx, 1, 0, tx) require.NoError(t, err) require.Equal(t, latestGER.GlobalExitRoot, l1GER.GlobalExitRoot) require.Equal(t, latestGER.BlockNumber, l1GER.BlockNumber) @@ -106,6 +106,7 @@ func TestAddTrustedGERDuplicated(t *testing.T) { require.NoError(t, err) ger := ðerman.GlobalExitRoot{ + NetworkID: 1, ExitRoots: []common.Hash{common.HexToHash("0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f1"), common.HexToHash("0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f1")}, GlobalExitRoot: common.HexToHash("0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f1"), } @@ -129,6 +130,7 @@ func TestAddTrustedGERDuplicated(t *testing.T) { require.NoError(t, err) ger1 := ðerman.GlobalExitRoot{ + NetworkID: 1, ExitRoots: []common.Hash{common.HexToHash("0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f2"), common.HexToHash("0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f2")}, GlobalExitRoot: common.HexToHash("0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f2"), } @@ -146,11 +148,20 @@ func TestAddTrustedGERDuplicated(t *testing.T) { require.NoError(t, err) assert.Equal(t, 2, result) - tGER, err := pg.GetLatestTrustedExitRoot(ctx, tx) + _, err = pg.AddBlock(ctx, ðerman.Block{ + BlockNumber: 1, + }, 
tx) + require.NoError(t, err) + ger2 := ger1 + ger2.BlockID = 1 + err = pg.AddGlobalExitRoot(ctx, ger2, tx) + require.NoError(t, err) + + tGER, err := pg.GetLatestTrustedExitRoot(ctx, 1, tx) require.NoError(t, err) require.Equal(t, tGER.GlobalExitRoot, ger1.GlobalExitRoot) - latestGER, err := pg.GetLatestExitRoot(ctx, false, tx) + latestGER, err := pg.GetLatestExitRoot(ctx, 0, 1, tx) require.NoError(t, err) require.Equal(t, latestGER.GlobalExitRoot, ger1.GlobalExitRoot) require.Equal(t, latestGER.BlockNumber, ger1.BlockNumber) diff --git a/docker-compose.yml b/docker-compose.yml index e0b4a70a..0e4c7a52 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -230,9 +230,9 @@ services: - ZKEVM_BRIDGE_BRIDGESERVER_DB_PORT=5432 - ZKEVM_BRIDGE_ETHERMAN_L1URL=http://zkevm-v1tov2-l1-network:8545 - ZKEVM_BRIDGE_ETHERMAN_L2URLS=http://zkevm-node-v1tov2:8123 - - ZKEVM_BRIDGE_NETWORKCONFIG_POLYGONZKEVMADDRESS=0x3Aa5ebB10DC797CAC828524e59A333d0A371443c - ZKEVM_BRIDGE_NETWORKCONFIG_POLYGONBRIDGEADDRESS=0x80a540502706aa690476D5534e26939894559c05 - ZKEVM_BRIDGE_NETWORKCONFIG_L2POLYGONBRIDGEADDRESSES=0x80a540502706aa690476D5534e26939894559c05 + - ZKEVM_BRIDGE_CLAIMTXMANAGER_GROUPINGCLAIMS_ENABLED=${ZKEVM_BRIDGE_CLAIMTXMANAGER_GROUPINGCLAIMS_ENABLED} volumes: - ./test/test.keystore.claimtx:/pk/keystore.claimtxmanager - ./config/config.local.toml:/app/config.toml @@ -409,9 +409,9 @@ services: - ZKEVM_BRIDGE_ETHERMAN_L2URLS=http://zkevm-node-1:8123 - ZKEVM_BRIDGE_NETWORKCONFIG_POLYGONZKEVMGLOBALEXITROOTADDRESS=0x610178dA211FEF7D417bC0e6FeD39F05609AD788 - ZKEVM_BRIDGE_NETWORKCONFIG_POLYGONROLLUPMANAGERADDRESS=0xA51c1fc2f0D1a1b8494Ed1FE312d7C3a78Ed91C0 - - ZKEVM_BRIDGE_NETWORKCONFIG_POLYGONZKEVMADDRESS=0x1F708C24a0D3A740cD47cC0444E9480899f3dA7D - ZKEVM_BRIDGE_NETWORKCONFIG_POLYGONBRIDGEADDRESS=0xFe12ABaa190Ef0c8638Ee0ba9F828BF41368Ca0E - ZKEVM_BRIDGE_NETWORKCONFIG_L2POLYGONBRIDGEADDRESSES=0xFe12ABaa190Ef0c8638Ee0ba9F828BF41368Ca0E + - ZKEVM_BRIDGE_CLAIMTXMANAGER_GROUPINGCLAIMS_ENABLED=${ZKEVM_BRIDGE_CLAIMTXMANAGER_GROUPINGCLAIMS_ENABLED} volumes: - ./test/test.keystore.claimtx:/pk/keystore.claimtxmanager - ./config/config.local.toml:/app/config.toml @@ -441,9 +441,9 @@ services: - ZKEVM_BRIDGE_ETHERMAN_L2URLS=http://zkevm-node-2:8124 - ZKEVM_BRIDGE_NETWORKCONFIG_POLYGONZKEVMGLOBALEXITROOTADDRESS=0x610178dA211FEF7D417bC0e6FeD39F05609AD788 - ZKEVM_BRIDGE_NETWORKCONFIG_POLYGONROLLUPMANAGERADDRESS=0xA51c1fc2f0D1a1b8494Ed1FE312d7C3a78Ed91C0 - - ZKEVM_BRIDGE_NETWORKCONFIG_POLYGONZKEVMADDRESS=0xf41B47c54dEFF12f8fE830A411a09D865eBb120E - ZKEVM_BRIDGE_NETWORKCONFIG_POLYGONBRIDGEADDRESS=0xFe12ABaa190Ef0c8638Ee0ba9F828BF41368Ca0E - ZKEVM_BRIDGE_NETWORKCONFIG_L2POLYGONBRIDGEADDRESSES=0xFe12ABaa190Ef0c8638Ee0ba9F828BF41368Ca0E + - ZKEVM_BRIDGE_CLAIMTXMANAGER_GROUPINGCLAIMS_ENABLED=${ZKEVM_BRIDGE_CLAIMTXMANAGER_GROUPINGCLAIMS_ENABLED} volumes: - ./test/test.keystore.claimtx:/pk/keystore.claimtxmanager - ./config/config.local.toml:/app/config.toml @@ -451,4 +451,37 @@ services: - "/bin/sh" - "-c" - "/app/zkevm-bridge run --cfg /app/config.toml" - \ No newline at end of file + + zkevm-bridge-service-3: + container_name: zkevm-bridge-service-3 + image: zkevm-bridge-service + ports: + - 8080:8080 + - 9090:9090 + environment: + - ZKEVM_BRIDGE_SYNCDB_USER=test_user + - ZKEVM_BRIDGE_SYNCDB_PASSWORD=test_password + - ZKEVM_BRIDGE_SYNCDB_NAME=test_db + - ZKEVM_BRIDGE_SYNCDB_HOST=zkevm-bridge-db + - ZKEVM_BRIDGE_SYNCDB_PORT=5432 + - ZKEVM_BRIDGE_BRIDGESERVER_DB_USER=test_user + - 
ZKEVM_BRIDGE_BRIDGESERVER_DB_PASSWORD=test_password + - ZKEVM_BRIDGE_BRIDGESERVER_DB_NAME=test_db + - ZKEVM_BRIDGE_BRIDGESERVER_DB_HOST=zkevm-bridge-db + - ZKEVM_BRIDGE_BRIDGESERVER_DB_PORT=5432 + - ZKEVM_BRIDGE_ETHERMAN_L1URL=http://zkevm-mock-l1-network-multi-rollup:8545 + - ZKEVM_BRIDGE_ETHERMAN_L2URLS=http://zkevm-node-1:8123,http://zkevm-node-2:8124 + - ZKEVM_BRIDGE_NETWORKCONFIG_POLYGONZKEVMGLOBALEXITROOTADDRESS=0x610178dA211FEF7D417bC0e6FeD39F05609AD788 + - ZKEVM_BRIDGE_NETWORKCONFIG_POLYGONROLLUPMANAGERADDRESS=0xA51c1fc2f0D1a1b8494Ed1FE312d7C3a78Ed91C0 + - ZKEVM_BRIDGE_NETWORKCONFIG_POLYGONBRIDGEADDRESS=0xFe12ABaa190Ef0c8638Ee0ba9F828BF41368Ca0E + - ZKEVM_BRIDGE_NETWORKCONFIG_L2POLYGONBRIDGEADDRESSES=0xFe12ABaa190Ef0c8638Ee0ba9F828BF41368Ca0E,0xFe12ABaa190Ef0c8638Ee0ba9F828BF41368Ca0E + - ZKEVM_BRIDGE_CLAIMTXMANAGER_GROUPINGCLAIMS_ENABLED=${ZKEVM_BRIDGE_CLAIMTXMANAGER_GROUPINGCLAIMS_ENABLED} + volumes: + - ./test/test.keystore.claimtx:/pk/keystore.claimtxmanager + - ./config/config.local.toml:/app/config.toml + command: + - "/bin/sh" + - "-c" + - "/app/test-deploy-claimcompressor deploy --url \"http://zkevm-node-1:8123\" --bridgeAddress \"0xFe12ABaa190Ef0c8638Ee0ba9F828BF41368Ca0E\" --walletFile /pk/keystore.claimtxmanager --password \"testonly\" && + /app/test-deploy-claimcompressor deploy --url \"http://zkevm-node-2:8124\" --bridgeAddress \"0xFe12ABaa190Ef0c8638Ee0ba9F828BF41368Ca0E\" --walletFile /pk/keystore.claimtxmanager --password \"testonly\" && + /app/zkevm-bridge run --cfg /app/config.toml" \ No newline at end of file diff --git a/etherman/etherman.go b/etherman/etherman.go index b0b2ba61..98d4f0e5 100644 --- a/etherman/etherman.go +++ b/etherman/etherman.go @@ -104,8 +104,6 @@ const ( TokensOrder EventOrder = "TokenWrapped" // VerifyBatchOrder identifies a VerifyBatch event VerifyBatchOrder EventOrder = "VerifyBatch" - // ActivateEtrogOrder identifies the event to activate etrog - ActivateEtrogOrder EventOrder = "etrog" ) type ethClienter interface { @@ -122,16 +120,18 @@ type Client struct { PolygonZkEVMGlobalExitRoot *polygonzkevmglobalexitroot.Polygonzkevmglobalexitroot PolygonRollupManager *polygonrollupmanager.Polygonrollupmanager ClaimCompressor *claimcompressor.Claimcompressor - RollupID uint32 + NetworkID uint32 SCAddresses []common.Address + logger *log.Logger } // NewClient creates a new etherman. 
-func NewClient(cfg Config, polygonBridgeAddr, polygonZkEVMGlobalExitRootAddress, polygonRollupManagerAddress, polygonZkEvmAddress common.Address) (*Client, error) { +func NewClient(cfg Config, polygonBridgeAddr, polygonZkEVMGlobalExitRootAddress, polygonRollupManagerAddress common.Address) (*Client, error) { + logger := log.WithFields("networkID", 0) // Connect to ethereum node ethClient, err := ethclient.Dial(cfg.L1URL) if err != nil { - log.Errorf("error connecting to %s: %+v", cfg.L1URL, err) + logger.Errorf("error connecting to %s: %+v", cfg.L1URL, err) return nil, err } // Create smc clients @@ -152,22 +152,16 @@ func NewClient(cfg Config, polygonBridgeAddr, polygonZkEVMGlobalExitRootAddress, return nil, err } - // Get RollupID - rollupID, err := polygonRollupManager.RollupAddressToID(&bind.CallOpts{Pending: false}, polygonZkEvmAddress) - if err != nil { - return nil, err - } - log.Debug("rollupID: ", rollupID) var scAddresses []common.Address scAddresses = append(scAddresses, polygonZkEVMGlobalExitRootAddress, polygonBridgeAddr, polygonRollupManagerAddress) return &Client{ + logger: logger, EtherClient: ethClient, PolygonBridge: polygonBridge, OldPolygonBridge: oldpolygonBridge, PolygonZkEVMGlobalExitRoot: polygonZkEVMGlobalExitRoot, PolygonRollupManager: polygonRollupManager, - RollupID: rollupID, SCAddresses: scAddresses}, nil } @@ -199,15 +193,21 @@ func NewL2Client(url string, polygonBridgeAddr, claimCompressorAddress common.Ad return nil, err } } - + networkID, err := bridge.NetworkID(&bind.CallOpts{Pending: false}) + if err != nil { + return nil, err + } scAddresses := []common.Address{polygonBridgeAddr} + logger := log.WithFields("networkID", networkID) return &Client{ + logger: logger, EtherClient: ethClient, PolygonBridge: bridge, OldPolygonBridge: oldpolygonBridge, SCAddresses: scAddresses, ClaimCompressor: claimCompressor, + NetworkID: networkID, }, nil } @@ -218,7 +218,7 @@ func (etherMan *Client) GetRollupInfoByBlockRange(ctx context.Context, fromBlock query := ethereum.FilterQuery{ FromBlock: new(big.Int).SetUint64(fromBlock), Addresses: etherMan.SCAddresses, - Topics: [][]common.Hash{{updateGlobalExitRootSignatureHash, updateL1InfoTreeSignatureHash, depositEventSignatureHash, claimEventSignatureHash, oldClaimEventSignatureHash, newWrappedTokenEventSignatureHash, verifyBatchesTrustedAggregatorSignatureHash, rollupManagerVerifyBatchesSignatureHash, addExistingRollupSignatureHash, createNewRollupSignatureHash}}, + Topics: [][]common.Hash{{updateGlobalExitRootSignatureHash, updateL1InfoTreeSignatureHash, depositEventSignatureHash, claimEventSignatureHash, oldClaimEventSignatureHash, newWrappedTokenEventSignatureHash, verifyBatchesTrustedAggregatorSignatureHash, rollupManagerVerifyBatchesSignatureHash}}, } if toBlock != nil { query.ToBlock = new(big.Int).SetUint64(*toBlock) @@ -246,7 +246,7 @@ func (etherMan *Client) readEvents(ctx context.Context, query ethereum.FilterQue for _, vLog := range logs { err := etherMan.processEvent(ctx, vLog, &blocks, &blocksOrder) if err != nil { - log.Warnf("error processing event. Retrying... Error: %s. vLog: %+v", err.Error(), vLog) + etherMan.logger.Warnf("error processing event. Retrying... Error: %s. 
vLog: %+v", err.Error(), vLog) return nil, nil, err } } @@ -268,139 +268,141 @@ func (etherMan *Client) processEvent(ctx context.Context, vLog types.Log, blocks case newWrappedTokenEventSignatureHash: return etherMan.tokenWrappedEvent(ctx, vLog, blocks, blocksOrder) case initializedProxySignatureHash: - log.Debug("Initialized proxy event detected. Ignoring...") + etherMan.logger.Debugf("Initialized proxy event detected. Ignoring...") return nil case adminChangedSignatureHash: - log.Debug("AdminChanged event detected. Ignoring...") + etherMan.logger.Debug("AdminChanged event detected. Ignoring...") return nil case beaconUpgradedSignatureHash: - log.Debug("BeaconUpgraded event detected. Ignoring...") + etherMan.logger.Debug("BeaconUpgraded event detected. Ignoring...") return nil case upgradedSignatureHash: - log.Debug("Upgraded event detected. Ignoring...") + etherMan.logger.Debug("Upgraded event detected. Ignoring...") return nil case transferOwnershipSignatureHash: - log.Debug("TransferOwnership event detected. Ignoring...") + etherMan.logger.Debug("TransferOwnership event detected. Ignoring...") return nil case setBatchFeeSignatureHash: - log.Debug("SetBatchFee event detected. Ignoring...") + etherMan.logger.Debug("SetBatchFee event detected. Ignoring...") return nil case setTrustedAggregatorSignatureHash: - log.Debug("SetTrustedAggregator event detected. Ignoring...") + etherMan.logger.Debug("SetTrustedAggregator event detected. Ignoring...") return nil case setVerifyBatchTimeTargetSignatureHash: - log.Debug("SetVerifyBatchTimeTarget event detected. Ignoring...") + etherMan.logger.Debug("SetVerifyBatchTimeTarget event detected. Ignoring...") return nil case setMultiplierBatchFeeSignatureHash: - log.Debug("SetMultiplierBatchFee event detected. Ignoring...") + etherMan.logger.Debug("SetMultiplierBatchFee event detected. Ignoring...") return nil case setPendingStateTimeoutSignatureHash: - log.Debug("SetPendingStateTimeout event detected. Ignoring...") + etherMan.logger.Debug("SetPendingStateTimeout event detected. Ignoring...") return nil case setTrustedAggregatorTimeoutSignatureHash: - log.Debug("SetTrustedAggregatorTimeout event detected. Ignoring...") + etherMan.logger.Debug("SetTrustedAggregatorTimeout event detected. Ignoring...") return nil case overridePendingStateSignatureHash: - log.Debug("OverridePendingState event detected. Ignoring...") + etherMan.logger.Debug("OverridePendingState event detected. Ignoring...") return nil case proveNonDeterministicPendingStateSignatureHash: - log.Debug("ProveNonDeterministicPendingState event detected. Ignoring...") + etherMan.logger.Debug("ProveNonDeterministicPendingState event detected. Ignoring...") return nil case consolidatePendingStateSignatureHash: - log.Debug("ConsolidatePendingState event detected. Ignoring...") + etherMan.logger.Debug("ConsolidatePendingState event detected. Ignoring...") return nil case verifyBatchesTrustedAggregatorSignatureHash: return etherMan.verifyBatchesTrustedAggregatorEvent(ctx, vLog, blocks, blocksOrder) case rollupManagerVerifyBatchesSignatureHash: return etherMan.verifyBatchesEvent(ctx, vLog, blocks, blocksOrder) case onSequenceBatchesSignatureHash: - log.Debug("OnSequenceBatches event detected. Ignoring...") + etherMan.logger.Debug("OnSequenceBatches event detected. Ignoring...") return nil case updateRollupSignatureHash: - log.Debug("UpdateRollup event detected. Ignoring...") + etherMan.logger.Debug("UpdateRollup event detected. 
Ignoring...") return nil case addExistingRollupSignatureHash: - return etherMan.AddExistingRollupEvent(ctx, vLog, blocks, blocksOrder) + etherMan.logger.Debug("AddExistingRollup event detected. Ignoring...") + return nil case createNewRollupSignatureHash: - return etherMan.createNewRollupEvent(ctx, vLog, blocks, blocksOrder) + etherMan.logger.Debug("CreateNewRollup event detected. Ignoring...") + return nil case obsoleteRollupTypeSignatureHash: - log.Debug("ObsoleteRollupType event detected. Ignoring...") + etherMan.logger.Debug("ObsoleteRollupType event detected. Ignoring...") return nil case addNewRollupTypeSignatureHash: - log.Debug("AddNewRollupType event detected. Ignoring...") + etherMan.logger.Debug("AddNewRollupType event detected. Ignoring...") return nil case initializedSignatureHash: - log.Debug("Initialized event detected. Ignoring...") + etherMan.logger.Debug("Initialized event detected. Ignoring...") return nil case roleAdminChangedSignatureHash: - log.Debug("RoleAdminChanged event detected. Ignoring...") + etherMan.logger.Debug("RoleAdminChanged event detected. Ignoring...") return nil case roleGrantedSignatureHash: - log.Debug("RoleGranted event detected. Ignoring...") + etherMan.logger.Debug("RoleGranted event detected. Ignoring...") return nil case roleRevokedSignatureHash: - log.Debug("RoleRevoked event detected. Ignoring...") + etherMan.logger.Debug("RoleRevoked event detected. Ignoring...") return nil case emergencyStateActivatedSignatureHash: - log.Debug("EmergencyStateActivated event detected. Ignoring...") + etherMan.logger.Debug("EmergencyStateActivated event detected. Ignoring...") return nil case emergencyStateDeactivatedSignatureHash: - log.Debug("EmergencyStateDeactivated event detected. Ignoring...") + etherMan.logger.Debug("EmergencyStateDeactivated event detected. Ignoring...") return nil case oldVerifyBatchesTrustedAggregatorSignatureHash: - log.Debug("OldVerifyBatchesTrustedAggregator event detected. Ignoring...") + etherMan.logger.Debug("OldVerifyBatchesTrustedAggregator event detected. Ignoring...") return nil case updateZkEVMVersionSignatureHash: - log.Debug("UpdateZkEVMVersion event detected. Ignoring...") + etherMan.logger.Debug("UpdateZkEVMVersion event detected. Ignoring...") return nil case oldConsolidatePendingStateSignatureHash: - log.Debug("OldConsolidatePendingState event detected. Ignoring...") + etherMan.logger.Debug("OldConsolidatePendingState event detected. Ignoring...") return nil case oldOverridePendingStateSignatureHash: - log.Debug("OldOverridePendingState event detected. Ignoring...") + etherMan.logger.Debug("OldOverridePendingState event detected. Ignoring...") return nil case sequenceBatchesPreEtrogSignatureHash: - log.Debug("SequenceBatchesPreEtrog event detected. Ignoring...") + etherMan.logger.Debug("SequenceBatchesPreEtrog event detected. Ignoring...") return nil case setForceBatchTimeoutSignatureHash: - log.Debug("SetForceBatchTimeout event detected. Ignoring...") + etherMan.logger.Debug("SetForceBatchTimeout event detected. Ignoring...") return nil case setTrustedSequencerURLSignatureHash: - log.Debug("SetTrustedSequencerURL event detected. Ignoring...") + etherMan.logger.Debug("SetTrustedSequencerURL event detected. Ignoring...") return nil case setTrustedSequencerSignatureHash: - log.Debug("SetTrustedSequencer event detected. Ignoring...") + etherMan.logger.Debug("SetTrustedSequencer event detected. Ignoring...") return nil case verifyBatchesSignatureHash: - log.Debug("VerifyBatches event detected. 
Ignoring...") + etherMan.logger.Debug("VerifyBatches event detected. Ignoring...") return nil case sequenceForceBatchesSignatureHash: - log.Debug("SequenceForceBatches event detected. Ignoring...") + etherMan.logger.Debug("SequenceForceBatches event detected. Ignoring...") return nil case forceBatchSignatureHash: - log.Debug("ForceBatch event detected. Ignoring...") + etherMan.logger.Debug("ForceBatch event detected. Ignoring...") return nil case sequenceBatchesSignatureHash: - log.Debug("SequenceBatches event detected. Ignoring...") + etherMan.logger.Debug("SequenceBatches event detected. Ignoring...") return nil case acceptAdminRoleSignatureHash: - log.Debug("AcceptAdminRole event detected. Ignoring...") + etherMan.logger.Debug("AcceptAdminRole event detected. Ignoring...") return nil case transferAdminRoleSignatureHash: - log.Debug("TransferAdminRole event detected. Ignoring...") + etherMan.logger.Debug("TransferAdminRole event detected. Ignoring...") return nil } - log.Warnf("Event not registered: %+v", vLog) + etherMan.logger.Warnf("Event not registered: %+v", vLog) return nil } func (etherMan *Client) updateGlobalExitRootEvent(ctx context.Context, vLog types.Log, blocks *[]Block, blocksOrder *map[common.Hash][]Order) error { - log.Debug("UpdateGlobalExitRoot event detected. Processing...") + etherMan.logger.Debug("UpdateGlobalExitRoot event detected. Processing...") return etherMan.processUpdateGlobalExitRootEvent(ctx, vLog.Topics[1], vLog.Topics[2], vLog, blocks, blocksOrder) } func (etherMan *Client) updateL1InfoTreeEvent(ctx context.Context, vLog types.Log, blocks *[]Block, blocksOrder *map[common.Hash][]Order) error { - log.Debug("UpdateL1InfoTree event detected") + etherMan.logger.Debug("UpdateL1InfoTree event detected") globalExitRoot, err := etherMan.PolygonZkEVMGlobalExitRoot.ParseUpdateL1InfoTree(vLog) if err != nil { return err @@ -428,7 +430,7 @@ func (etherMan *Client) processUpdateGlobalExitRootEvent(ctx context.Context, ma } else if (*blocks)[len(*blocks)-1].BlockHash == vLog.BlockHash && (*blocks)[len(*blocks)-1].BlockNumber == vLog.BlockNumber { (*blocks)[len(*blocks)-1].GlobalExitRoots = append((*blocks)[len(*blocks)-1].GlobalExitRoots, gExitRoot) } else { - log.Error("Error processing UpdateGlobalExitRoot event. BlockHash:", vLog.BlockHash, ". BlockNumber: ", vLog.BlockNumber) + etherMan.logger.Error("Error processing UpdateGlobalExitRoot event. BlockHash:", vLog.BlockHash, ". BlockNumber: ", vLog.BlockNumber) return fmt.Errorf("error processing UpdateGlobalExitRoot event") } or := Order{ @@ -440,7 +442,7 @@ func (etherMan *Client) processUpdateGlobalExitRootEvent(ctx context.Context, ma } func (etherMan *Client) depositEvent(ctx context.Context, vLog types.Log, blocks *[]Block, blocksOrder *map[common.Hash][]Order) error { - log.Debug("Deposit event detected. Processing...") + etherMan.logger.Debug("Deposit event detected. Processing...") d, err := etherMan.PolygonBridge.ParseBridgeEvent(vLog) if err != nil { return err @@ -468,7 +470,7 @@ func (etherMan *Client) depositEvent(ctx context.Context, vLog types.Log, blocks } else if (*blocks)[len(*blocks)-1].BlockHash == vLog.BlockHash && (*blocks)[len(*blocks)-1].BlockNumber == vLog.BlockNumber { (*blocks)[len(*blocks)-1].Deposits = append((*blocks)[len(*blocks)-1].Deposits, deposit) } else { - log.Error("Error processing deposit event. BlockHash:", vLog.BlockHash, ". BlockNumber: ", vLog.BlockNumber) + etherMan.logger.Error("Error processing deposit event. BlockHash:", vLog.BlockHash, ". 
BlockNumber: ", vLog.BlockNumber) return fmt.Errorf("error processing Deposit event") } or := Order{ @@ -480,7 +482,7 @@ func (etherMan *Client) depositEvent(ctx context.Context, vLog types.Log, blocks } func (etherMan *Client) oldClaimEvent(ctx context.Context, vLog types.Log, blocks *[]Block, blocksOrder *map[common.Hash][]Order) error { - log.Debug("Old claim event detected. Processing...") + etherMan.logger.Debug("Old claim event detected. Processing...") c, err := etherMan.OldPolygonBridge.ParseClaimEvent(vLog) if err != nil { return err @@ -489,7 +491,7 @@ func (etherMan *Client) oldClaimEvent(ctx context.Context, vLog types.Log, block } func (etherMan *Client) newClaimEvent(ctx context.Context, vLog types.Log, blocks *[]Block, blocksOrder *map[common.Hash][]Order) error { - log.Debug("New claim event detected. Processing...") + etherMan.logger.Debug("New claim event detected. Processing...") c, err := etherMan.PolygonBridge.ParseClaimEvent(vLog) if err != nil { return err @@ -524,7 +526,7 @@ func (etherMan *Client) claimEvent(ctx context.Context, vLog types.Log, blocks * } else if (*blocks)[len(*blocks)-1].BlockHash == vLog.BlockHash && (*blocks)[len(*blocks)-1].BlockNumber == vLog.BlockNumber { (*blocks)[len(*blocks)-1].Claims = append((*blocks)[len(*blocks)-1].Claims, claim) } else { - log.Error("Error processing claim event. BlockHash:", vLog.BlockHash, ". BlockNumber: ", vLog.BlockNumber) + etherMan.logger.Error("Error processing claim event. BlockHash:", vLog.BlockHash, ". BlockNumber: ", vLog.BlockNumber) return fmt.Errorf("error processing claim event") } or := Order{ @@ -536,7 +538,7 @@ func (etherMan *Client) claimEvent(ctx context.Context, vLog types.Log, blocks * } func (etherMan *Client) tokenWrappedEvent(ctx context.Context, vLog types.Log, blocks *[]Block, blocksOrder *map[common.Hash][]Order) error { - log.Debug("TokenWrapped event detected. Processing...") + etherMan.logger.Debug("TokenWrapped event detected. Processing...") tw, err := etherMan.PolygonBridge.ParseNewWrappedToken(vLog) if err != nil { return err @@ -558,7 +560,7 @@ func (etherMan *Client) tokenWrappedEvent(ctx context.Context, vLog types.Log, b } else if (*blocks)[len(*blocks)-1].BlockHash == vLog.BlockHash && (*blocks)[len(*blocks)-1].BlockNumber == vLog.BlockNumber { (*blocks)[len(*blocks)-1].Tokens = append((*blocks)[len(*blocks)-1].Tokens, tokenWrapped) } else { - log.Error("Error processing TokenWrapped event. BlockHash:", vLog.BlockHash, ". BlockNumber: ", vLog.BlockNumber) + etherMan.logger.Error("Error processing TokenWrapped event. BlockHash:", vLog.BlockHash, ". BlockNumber: ", vLog.BlockNumber) return fmt.Errorf("error processing TokenWrapped event") } or := Order{ @@ -607,29 +609,25 @@ func (etherMan *Client) EthBlockByNumber(ctx context.Context, blockNumber uint64 } // GetNetworkID gets the network ID of the dedicated chain. -func (etherMan *Client) GetNetworkID(ctx context.Context) (uint, error) { - networkID, err := etherMan.PolygonBridge.NetworkID(&bind.CallOpts{Pending: false}) - if err != nil { - return 0, err - } - return uint(networkID), nil +func (etherMan *Client) GetNetworkID() uint { + return uint(etherMan.NetworkID) } func (etherMan *Client) verifyBatchesTrustedAggregatorEvent(ctx context.Context, vLog types.Log, blocks *[]Block, blocksOrder *map[common.Hash][]Order) error { - log.Debug("VerifyBatchesTrustedAggregator event detected. Processing...") + etherMan.logger.Debug("VerifyBatchesTrustedAggregator event detected. 
Processing...") vb, err := etherMan.PolygonRollupManager.ParseVerifyBatchesTrustedAggregator(vLog) if err != nil { - log.Error("error parsing verifyBatchesTrustedAggregator event. Error: ", err) + etherMan.logger.Error("error parsing verifyBatchesTrustedAggregator event. Error: ", err) return err } return etherMan.verifyBatches(ctx, vLog, blocks, blocksOrder, uint(vb.RollupID), vb.NumBatch, vb.StateRoot, vb.ExitRoot, vb.Aggregator) } func (etherMan *Client) verifyBatchesEvent(ctx context.Context, vLog types.Log, blocks *[]Block, blocksOrder *map[common.Hash][]Order) error { - log.Debug("RollupManagerVerifyBatches event detected. Processing...") + etherMan.logger.Debug("RollupManagerVerifyBatches event detected. Processing...") vb, err := etherMan.PolygonRollupManager.ParseVerifyBatches(vLog) if err != nil { - log.Error("error parsing VerifyBatches event. Error: ", err) + etherMan.logger.Error("error parsing VerifyBatches event. Error: ", err) return err } return etherMan.verifyBatches(ctx, vLog, blocks, blocksOrder, uint(vb.RollupID), vb.NumBatch, vb.StateRoot, vb.ExitRoot, vb.Aggregator) @@ -656,7 +654,7 @@ func (etherMan *Client) verifyBatches(ctx context.Context, vLog types.Log, block } else if (*blocks)[len(*blocks)-1].BlockHash == vLog.BlockHash && (*blocks)[len(*blocks)-1].BlockNumber == vLog.BlockNumber { (*blocks)[len(*blocks)-1].VerifiedBatches = append((*blocks)[len(*blocks)-1].VerifiedBatches, verifyBatch) } else { - log.Error("Error processing verifyBatch event. BlockHash:", vLog.BlockHash, ". BlockNumber: ", vLog.BlockNumber) + etherMan.logger.Error("Error processing verifyBatch event. BlockHash:", vLog.BlockHash, ". BlockNumber: ", vLog.BlockNumber) return fmt.Errorf("error processing verifyBatch event") } or := Order{ @@ -667,10 +665,6 @@ func (etherMan *Client) verifyBatches(ctx context.Context, vLog types.Log, block return nil } -func (etherMan *Client) GetRollupID() uint { - return uint(etherMan.RollupID) -} - func DecodeGlobalIndex(globalIndex *big.Int) (bool, uint64, uint64, error) { const lengthGlobalIndexInBytes = 32 var buf [32]byte @@ -702,74 +696,10 @@ func GenerateGlobalIndex(mainnetFlag bool, rollupIndex uint, localExitRootIndex return big.NewInt(0).SetBytes(globalIndexBytes) } -func (etherMan *Client) createNewRollupEvent(ctx context.Context, vLog types.Log, blocks *[]Block, blocksOrder *map[common.Hash][]Order) error { - log.Debug("CreateNewRollup event detected. Processing...") - rollup, err := etherMan.PolygonRollupManager.ParseCreateNewRollup(vLog) - if err != nil { - return err - } - if rollup.RollupID != etherMan.RollupID { - return nil - } - - if len(*blocks) == 0 || ((*blocks)[len(*blocks)-1].BlockHash != vLog.BlockHash || (*blocks)[len(*blocks)-1].BlockNumber != vLog.BlockNumber) { - fullBlock, err := etherMan.EtherClient.HeaderByHash(ctx, vLog.BlockHash) - if err != nil { - return fmt.Errorf("error getting hashParent. BlockNumber: %d. Error: %v", vLog.BlockNumber, err) - } - block := prepareBlock(vLog, time.Unix(int64(fullBlock.Time), 0), fullBlock) - block.ActivateEtrog = append(block.ActivateEtrog, true) - *blocks = append(*blocks, block) - } else if (*blocks)[len(*blocks)-1].BlockHash == vLog.BlockHash && (*blocks)[len(*blocks)-1].BlockNumber == vLog.BlockNumber { - (*blocks)[len(*blocks)-1].ActivateEtrog = append((*blocks)[len(*blocks)-1].ActivateEtrog, true) - } else { - log.Error("Error processing TokenWrapped event. BlockHash:", vLog.BlockHash, ". 
BlockNumber: ", vLog.BlockNumber) - return fmt.Errorf("error processing TokenWrapped event") - } - or := Order{ - Name: ActivateEtrogOrder, - Pos: len((*blocks)[len(*blocks)-1].ActivateEtrog) - 1, - } - (*blocksOrder)[(*blocks)[len(*blocks)-1].BlockHash] = append((*blocksOrder)[(*blocks)[len(*blocks)-1].BlockHash], or) - return nil -} - -func (etherMan *Client) AddExistingRollupEvent(ctx context.Context, vLog types.Log, blocks *[]Block, blocksOrder *map[common.Hash][]Order) error { - log.Debug("AddExistingRollup event detected. Processing...") - rollup, err := etherMan.PolygonRollupManager.ParseAddExistingRollup(vLog) - if err != nil { - return err - } - if rollup.RollupID != etherMan.RollupID { - return nil - } - - if len(*blocks) == 0 || ((*blocks)[len(*blocks)-1].BlockHash != vLog.BlockHash || (*blocks)[len(*blocks)-1].BlockNumber != vLog.BlockNumber) { - fullBlock, err := etherMan.EtherClient.HeaderByHash(ctx, vLog.BlockHash) - if err != nil { - return fmt.Errorf("error getting hashParent. BlockNumber: %d. Error: %v", vLog.BlockNumber, err) - } - block := prepareBlock(vLog, time.Unix(int64(fullBlock.Time), 0), fullBlock) - block.ActivateEtrog = append(block.ActivateEtrog, true) - *blocks = append(*blocks, block) - } else if (*blocks)[len(*blocks)-1].BlockHash == vLog.BlockHash && (*blocks)[len(*blocks)-1].BlockNumber == vLog.BlockNumber { - (*blocks)[len(*blocks)-1].ActivateEtrog = append((*blocks)[len(*blocks)-1].ActivateEtrog, true) - } else { - log.Error("Error processing TokenWrapped event. BlockHash:", vLog.BlockHash, ". BlockNumber: ", vLog.BlockNumber) - return fmt.Errorf("error processing TokenWrapped event") - } - or := Order{ - Name: ActivateEtrogOrder, - Pos: len((*blocks)[len(*blocks)-1].ActivateEtrog) - 1, - } - (*blocksOrder)[(*blocks)[len(*blocks)-1].BlockHash] = append((*blocksOrder)[(*blocks)[len(*blocks)-1].BlockHash], or) - return nil -} - func (etherMan *Client) SendCompressedClaims(auth *bind.TransactOpts, compressedTxData []byte) (*types.Transaction, error) { claimTx, err := etherMan.ClaimCompressor.SendCompressedClaims(auth, compressedTxData) if err != nil { - log.Error("failed to call SMC SendCompressedClaims: %v", err) + etherMan.logger.Error("failed to call SMC SendCompressedClaims: %v", err) return nil, err } return claimTx, err @@ -778,7 +708,7 @@ func (etherMan *Client) SendCompressedClaims(auth *bind.TransactOpts, compressed func (etherMan *Client) CompressClaimCall(mainnetExitRoot, rollupExitRoot common.Hash, claimData []claimcompressor.ClaimCompressorCompressClaimCallData) ([]byte, error) { compressedData, err := etherMan.ClaimCompressor.CompressClaimCall(&bind.CallOpts{Pending: false}, mainnetExitRoot, rollupExitRoot, claimData) if err != nil { - log.Errorf("fails call to claimCompressorSMC. Error: %v", err) + etherMan.logger.Errorf("fails call to claimCompressorSMC. 
Error: %v", err) return []byte{}, nil } return compressedData, nil diff --git a/etherman/simulated.go b/etherman/simulated.go index e1246463..7940d728 100644 --- a/etherman/simulated.go +++ b/etherman/simulated.go @@ -186,5 +186,12 @@ func NewSimulatedEtherman(cfg Config, auth *bind.TransactOpts) (*Client, *backen } client.Commit() - return &Client{EtherClient: client, PolygonBridge: br, PolygonZkEVMGlobalExitRoot: globalExitRoot, PolygonRollupManager: rollupManager, SCAddresses: []common.Address{exitManagerAddr, bridgeAddr, mockRollupManagerAddr}}, client, polAddr, br, trueZkevm, nil + networkID, err := br.NetworkID(&bind.CallOpts{Pending: false}) + if err != nil { + log.Error("error: ", err) + return nil, nil, common.Address{}, nil, nil, err + } + logger := log.WithFields("networkID", networkID) + + return &Client{EtherClient: client, PolygonBridge: br, PolygonZkEVMGlobalExitRoot: globalExitRoot, PolygonRollupManager: rollupManager, SCAddresses: []common.Address{exitManagerAddr, bridgeAddr, mockRollupManagerAddr}, logger: logger}, client, polAddr, br, trueZkevm, nil } diff --git a/etherman/types.go b/etherman/types.go index 57d146ce..800f8c9b 100644 --- a/etherman/types.go +++ b/etherman/types.go @@ -29,10 +29,12 @@ type GlobalExitRoot struct { BlockNumber uint64 ExitRoots []common.Hash GlobalExitRoot common.Hash + NetworkID uint } // Deposit struct type Deposit struct { + Id uint64 LeafType uint8 OriginalNetwork uint OriginalAddress common.Address diff --git a/server/interfaces.go b/server/interfaces.go index 1a489cdc..7fc0400d 100644 --- a/server/interfaces.go +++ b/server/interfaces.go @@ -12,7 +12,7 @@ type bridgeServiceStorage interface { Get(ctx context.Context, key []byte, dbTx pgx.Tx) ([][]byte, error) GetRoot(ctx context.Context, depositCnt uint, network uint, dbTx pgx.Tx) ([]byte, error) GetDepositCountByRoot(ctx context.Context, root []byte, network uint8, dbTx pgx.Tx) (uint, error) - GetLatestExitRoot(ctx context.Context, isRollup bool, dbTx pgx.Tx) (*etherman.GlobalExitRoot, error) + GetLatestExitRoot(ctx context.Context, networkID, destNetwork uint, dbTx pgx.Tx) (*etherman.GlobalExitRoot, error) GetExitRootByGER(ctx context.Context, ger common.Hash, dbTx pgx.Tx) (*etherman.GlobalExitRoot, error) GetClaim(ctx context.Context, index uint, originNetworkID, networkID uint, dbTx pgx.Tx) (*etherman.Claim, error) GetClaims(ctx context.Context, destAddr string, limit uint, offset uint, dbTx pgx.Tx) ([]*etherman.Claim, error) diff --git a/server/service.go b/server/service.go index 53edf6ce..e42acf68 100644 --- a/server/service.go +++ b/server/service.go @@ -16,7 +16,6 @@ import ( ) type bridgeService struct { - rollupID uint storage bridgeServiceStorage networkIDs map[uint]uint8 height uint8 @@ -28,7 +27,7 @@ type bridgeService struct { } // NewBridgeService creates new bridge service. 
-func NewBridgeService(cfg Config, height uint8, networks []uint, storage interface{}, rollupID uint) *bridgeService { +func NewBridgeService(cfg Config, height uint8, networks []uint, storage interface{}) *bridgeService { var networkIDs = make(map[uint]uint8) for i, network := range networks { networkIDs[network] = uint8(i) @@ -38,7 +37,6 @@ func NewBridgeService(cfg Config, height uint8, networks []uint, storage interfa panic(err) } return &bridgeService{ - rollupID: rollupID, storage: storage.(bridgeServiceStorage), height: height, networkIDs: networkIDs, @@ -49,14 +47,6 @@ func NewBridgeService(cfg Config, height uint8, networks []uint, storage interfa } } -func (s *bridgeService) getNetworkID(networkID uint) (uint8, error) { - tID, found := s.networkIDs[networkID] - if !found { - return 0, gerror.ErrNetworkNotRegister - } - return tID, nil -} - // getNode returns the children hash pairs for a given parent hash. func (s *bridgeService) getNode(ctx context.Context, parentHash [bridgectrl.KeyLen]byte, dbTx pgx.Tx) (left, right [bridgectrl.KeyLen]byte, err error) { value, ok := s.cache.Get(string(parentHash[:])) @@ -146,6 +136,9 @@ func (s *bridgeService) getRollupExitProof(rollupIndex uint, root common.Hash, d } else if root != r { return nil, common.Hash{}, fmt.Errorf("error checking calculated root: %s, %s", root.String(), r.String()) } + if len(ls) <= int(rollupIndex) { + return siblings, common.Hash{}, fmt.Errorf("error getting rollupLeaf. Not synced yet") + } return siblings, ls[rollupIndex], nil } @@ -153,23 +146,16 @@ func (s *bridgeService) getRollupExitProof(rollupIndex uint, root common.Hash, d func (s *bridgeService) GetClaimProof(depositCnt, networkID uint, dbTx pgx.Tx) (*etherman.GlobalExitRoot, [][bridgectrl.KeyLen]byte, [][bridgectrl.KeyLen]byte, error) { ctx := context.Background() - if dbTx == nil { // if the call comes from the rest API - deposit, err := s.storage.GetDeposit(ctx, depositCnt, networkID, nil) - if err != nil { - return nil, nil, nil, err - } - - if !deposit.ReadyForClaim { - return nil, nil, nil, gerror.ErrDepositNotSynced - } - } - - tID, err := s.getNetworkID(networkID) + deposit, err := s.storage.GetDeposit(ctx, depositCnt, networkID, dbTx) if err != nil { return nil, nil, nil, err } - globalExitRoot, err := s.storage.GetLatestExitRoot(ctx, tID != 0, dbTx) + if !deposit.ReadyForClaim { + return nil, nil, nil, gerror.ErrDepositNotSynced + } + + globalExitRoot, err := s.storage.GetLatestExitRoot(ctx, networkID, deposit.DestinationNetwork, dbTx) if err != nil { return nil, nil, nil, err } @@ -180,14 +166,14 @@ func (s *bridgeService) GetClaimProof(depositCnt, networkID uint, dbTx pgx.Tx) ( rollupLeaf common.Hash ) if networkID == 0 { // Mainnet - merkleProof, err = s.getProof(depositCnt, globalExitRoot.ExitRoots[tID], dbTx) + merkleProof, err = s.getProof(depositCnt, globalExitRoot.ExitRoots[0], dbTx) if err != nil { log.Error("error getting merkleProof. Error: ", err) return nil, nil, nil, fmt.Errorf("getting the proof failed, error: %v, network: %d", err, networkID) } rollupMerkleProof = emptyProof() } else { // Rollup - rollupMerkleProof, rollupLeaf, err = s.getRollupExitProof(s.rollupID-1, globalExitRoot.ExitRoots[tID], dbTx) + rollupMerkleProof, rollupLeaf, err = s.getRollupExitProof(networkID-1, globalExitRoot.ExitRoots[1], dbTx) if err != nil { log.Error("error getting rollupProof. 
Error: ", err) return nil, nil, nil, fmt.Errorf("getting the rollup proof failed, error: %v, network: %d", err, networkID) @@ -217,11 +203,6 @@ func (s *bridgeService) GetClaimProofForCompressed(ger common.Hash, depositCnt, } } - tID, err := s.getNetworkID(networkID) - if err != nil { - return nil, nil, nil, err - } - globalExitRoot, err := s.storage.GetExitRootByGER(ctx, ger, dbTx) if err != nil { return nil, nil, nil, err @@ -233,14 +214,14 @@ func (s *bridgeService) GetClaimProofForCompressed(ger common.Hash, depositCnt, rollupLeaf common.Hash ) if networkID == 0 { // Mainnet - merkleProof, err = s.getProof(depositCnt, globalExitRoot.ExitRoots[tID], dbTx) + merkleProof, err = s.getProof(depositCnt, globalExitRoot.ExitRoots[0], dbTx) if err != nil { log.Error("error getting merkleProof. Error: ", err) return nil, nil, nil, fmt.Errorf("getting the proof failed, error: %v, network: %d", err, networkID) } rollupMerkleProof = emptyProof() } else { // Rollup - rollupMerkleProof, rollupLeaf, err = s.getRollupExitProof(s.rollupID-1, globalExitRoot.ExitRoots[tID], dbTx) + rollupMerkleProof, rollupLeaf, err = s.getRollupExitProof(networkID-1, globalExitRoot.ExitRoots[1], dbTx) if err != nil { log.Error("error getting rollupProof. Error: ", err) return nil, nil, nil, fmt.Errorf("getting the rollup proof failed, error: %v, network: %d", err, networkID) @@ -314,7 +295,10 @@ func (s *bridgeService) GetBridges(ctx context.Context, req *pb.GetBridgesReques return nil, err } mainnetFlag := deposit.NetworkID == 0 - rollupIndex := s.rollupID - 1 + var rollupIndex uint + if !mainnetFlag { + rollupIndex = deposit.NetworkID - 1 + } localExitRootIndex := deposit.DepositCount pbDeposits = append( pbDeposits, &pb.Deposit{ diff --git a/synchronizer/interfaces.go b/synchronizer/interfaces.go index 77bf7e51..488311d4 100644 --- a/synchronizer/interfaces.go +++ b/synchronizer/interfaces.go @@ -16,8 +16,7 @@ type ethermanInterface interface { HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) GetRollupInfoByBlockRange(ctx context.Context, fromBlock uint64, toBlock *uint64) ([]etherman.Block, map[common.Hash][]etherman.Order, error) EthBlockByNumber(ctx context.Context, blockNumber uint64) (*types.Block, error) - GetNetworkID(ctx context.Context) (uint, error) - GetRollupID() uint + GetNetworkID() uint } type storageInterface interface { @@ -36,7 +35,6 @@ type storageInterface interface { AddTrustedGlobalExitRoot(ctx context.Context, trustedExitRoot *etherman.GlobalExitRoot, dbTx pgx.Tx) (bool, error) GetLatestL1SyncedExitRoot(ctx context.Context, dbTx pgx.Tx) (*etherman.GlobalExitRoot, error) CheckIfRootExists(ctx context.Context, root []byte, network uint8, dbTx pgx.Tx) (bool, error) - IsLxLyActivated(ctx context.Context, dbTx pgx.Tx) (bool, error) } type bridgectrlInterface interface { diff --git a/synchronizer/mock_etherman.go b/synchronizer/mock_etherman.go index a6aec43e..6b13f61d 100644 --- a/synchronizer/mock_etherman.go +++ b/synchronizer/mock_etherman.go @@ -87,68 +87,12 @@ func (_c *ethermanMock_EthBlockByNumber_Call) RunAndReturn(run func(context.Cont return _c } -// GetNetworkID provides a mock function with given fields: ctx -func (_m *ethermanMock) GetNetworkID(ctx context.Context) (uint, error) { - ret := _m.Called(ctx) - - if len(ret) == 0 { - panic("no return value specified for GetNetworkID") - } - - var r0 uint - var r1 error - if rf, ok := ret.Get(0).(func(context.Context) (uint, error)); ok { - return rf(ctx) - } - if rf, ok := ret.Get(0).(func(context.Context) uint); ok 
{ - r0 = rf(ctx) - } else { - r0 = ret.Get(0).(uint) - } - - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(ctx) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ethermanMock_GetNetworkID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetNetworkID' -type ethermanMock_GetNetworkID_Call struct { - *mock.Call -} - -// GetNetworkID is a helper method to define mock.On call -// - ctx context.Context -func (_e *ethermanMock_Expecter) GetNetworkID(ctx interface{}) *ethermanMock_GetNetworkID_Call { - return &ethermanMock_GetNetworkID_Call{Call: _e.mock.On("GetNetworkID", ctx)} -} - -func (_c *ethermanMock_GetNetworkID_Call) Run(run func(ctx context.Context)) *ethermanMock_GetNetworkID_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context)) - }) - return _c -} - -func (_c *ethermanMock_GetNetworkID_Call) Return(_a0 uint, _a1 error) *ethermanMock_GetNetworkID_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *ethermanMock_GetNetworkID_Call) RunAndReturn(run func(context.Context) (uint, error)) *ethermanMock_GetNetworkID_Call { - _c.Call.Return(run) - return _c -} - -// GetRollupID provides a mock function with given fields: -func (_m *ethermanMock) GetRollupID() uint { +// GetNetworkID provides a mock function with given fields: +func (_m *ethermanMock) GetNetworkID() uint { ret := _m.Called() if len(ret) == 0 { - panic("no return value specified for GetRollupID") + panic("no return value specified for GetNetworkID") } var r0 uint @@ -161,29 +105,29 @@ func (_m *ethermanMock) GetRollupID() uint { return r0 } -// ethermanMock_GetRollupID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetRollupID' -type ethermanMock_GetRollupID_Call struct { +// ethermanMock_GetNetworkID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetNetworkID' +type ethermanMock_GetNetworkID_Call struct { *mock.Call } -// GetRollupID is a helper method to define mock.On call -func (_e *ethermanMock_Expecter) GetRollupID() *ethermanMock_GetRollupID_Call { - return &ethermanMock_GetRollupID_Call{Call: _e.mock.On("GetRollupID")} +// GetNetworkID is a helper method to define mock.On call +func (_e *ethermanMock_Expecter) GetNetworkID() *ethermanMock_GetNetworkID_Call { + return &ethermanMock_GetNetworkID_Call{Call: _e.mock.On("GetNetworkID")} } -func (_c *ethermanMock_GetRollupID_Call) Run(run func()) *ethermanMock_GetRollupID_Call { +func (_c *ethermanMock_GetNetworkID_Call) Run(run func()) *ethermanMock_GetNetworkID_Call { _c.Call.Run(func(args mock.Arguments) { run() }) return _c } -func (_c *ethermanMock_GetRollupID_Call) Return(_a0 uint) *ethermanMock_GetRollupID_Call { +func (_c *ethermanMock_GetNetworkID_Call) Return(_a0 uint) *ethermanMock_GetNetworkID_Call { _c.Call.Return(_a0) return _c } -func (_c *ethermanMock_GetRollupID_Call) RunAndReturn(run func() uint) *ethermanMock_GetRollupID_Call { +func (_c *ethermanMock_GetNetworkID_Call) RunAndReturn(run func() uint) *ethermanMock_GetNetworkID_Call { _c.Call.Return(run) return _c } diff --git a/synchronizer/mock_storage.go b/synchronizer/mock_storage.go index 3ae03f33..89468d74 100644 --- a/synchronizer/mock_storage.go +++ b/synchronizer/mock_storage.go @@ -745,63 +745,6 @@ func (_c *storageMock_GetPreviousBlock_Call) RunAndReturn(run func(context.Conte return _c } -// IsLxLyActivated provides a mock function with given fields: ctx, dbTx -func (_m *storageMock) IsLxLyActivated(ctx 
context.Context, dbTx pgx.Tx) (bool, error) { - ret := _m.Called(ctx, dbTx) - - if len(ret) == 0 { - panic("no return value specified for IsLxLyActivated") - } - - var r0 bool - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, pgx.Tx) (bool, error)); ok { - return rf(ctx, dbTx) - } - if rf, ok := ret.Get(0).(func(context.Context, pgx.Tx) bool); ok { - r0 = rf(ctx, dbTx) - } else { - r0 = ret.Get(0).(bool) - } - - if rf, ok := ret.Get(1).(func(context.Context, pgx.Tx) error); ok { - r1 = rf(ctx, dbTx) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// storageMock_IsLxLyActivated_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'IsLxLyActivated' -type storageMock_IsLxLyActivated_Call struct { - *mock.Call -} - -// IsLxLyActivated is a helper method to define mock.On call -// - ctx context.Context -// - dbTx pgx.Tx -func (_e *storageMock_Expecter) IsLxLyActivated(ctx interface{}, dbTx interface{}) *storageMock_IsLxLyActivated_Call { - return &storageMock_IsLxLyActivated_Call{Call: _e.mock.On("IsLxLyActivated", ctx, dbTx)} -} - -func (_c *storageMock_IsLxLyActivated_Call) Run(run func(ctx context.Context, dbTx pgx.Tx)) *storageMock_IsLxLyActivated_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(pgx.Tx)) - }) - return _c -} - -func (_c *storageMock_IsLxLyActivated_Call) Return(_a0 bool, _a1 error) *storageMock_IsLxLyActivated_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *storageMock_IsLxLyActivated_Call) RunAndReturn(run func(context.Context, pgx.Tx) (bool, error)) *storageMock_IsLxLyActivated_Call { - _c.Call.Return(run) - return _c -} - // Reset provides a mock function with given fields: ctx, blockNumber, networkID, dbTx func (_m *storageMock) Reset(ctx context.Context, blockNumber uint64, networkID uint, dbTx pgx.Tx) error { ret := _m.Called(ctx, blockNumber, networkID, dbTx) diff --git a/synchronizer/synchronizer.go b/synchronizer/synchronizer.go index 6acf343f..4e0823ad 100644 --- a/synchronizer/synchronizer.go +++ b/synchronizer/synchronizer.go @@ -22,19 +22,20 @@ type Synchronizer interface { // ClientSynchronizer connects L1 and L2 type ClientSynchronizer struct { - etherMan ethermanInterface - bridgeCtrl bridgectrlInterface - storage storageInterface - ctx context.Context - cancelCtx context.CancelFunc - genBlockNumber uint64 - cfg Config - networkID uint - chExitRootEvent chan *etherman.GlobalExitRoot - chSynced chan uint - zkEVMClient zkEVMClientInterface - synced bool - l1RollupExitRoot common.Hash + etherMan ethermanInterface + bridgeCtrl bridgectrlInterface + storage storageInterface + ctx context.Context + cancelCtx context.CancelFunc + genBlockNumber uint64 + cfg Config + networkID uint + chExitRootEventL2 chan *etherman.GlobalExitRoot + chsExitRootEvent []chan *etherman.GlobalExitRoot + chSynced chan uint + zkEVMClient zkEVMClientInterface + synced bool + l1RollupExitRoot common.Hash } // NewSynchronizer creates and initializes an instance of Synchronizer @@ -45,14 +46,12 @@ func NewSynchronizer( ethMan ethermanInterface, zkEVMClient zkEVMClientInterface, genBlockNumber uint64, - chExitRootEvent chan *etherman.GlobalExitRoot, + chExitRootEventL2 chan *etherman.GlobalExitRoot, + chsExitRootEvent []chan *etherman.GlobalExitRoot, chSynced chan uint, cfg Config) (Synchronizer, error) { ctx, cancel := context.WithCancel(parentCtx) - networkID, err := ethMan.GetNetworkID(ctx) - if err != nil { - log.Fatal("error getting networkID. 
Error: ", err) - } + networkID := ethMan.GetNetworkID() ger, err := storage.(storageInterface).GetLatestL1SyncedExitRoot(ctx, nil) if err != nil { if err == gerror.ErrStorageNotFound { @@ -62,14 +61,6 @@ func NewSynchronizer( } } - // Read db to see if the LxLy is already activated - isActivated, err := storage.(storageInterface).IsLxLyActivated(ctx, nil) - if err != nil { - log.Fatal("error checking if LxLyEtrog is activated. Error: ", err) - } - if isActivated { - log.Info("LxLyEtrog already activated") - } if networkID == 0 { return &ClientSynchronizer{ bridgeCtrl: bridge, @@ -80,22 +71,23 @@ func NewSynchronizer( genBlockNumber: genBlockNumber, cfg: cfg, networkID: networkID, - chExitRootEvent: chExitRootEvent, chSynced: chSynced, - zkEVMClient: zkEVMClient, + chsExitRootEvent: chsExitRootEvent, l1RollupExitRoot: ger.ExitRoots[1], }, nil } return &ClientSynchronizer{ - bridgeCtrl: bridge, - storage: storage.(storageInterface), - etherMan: ethMan, - ctx: ctx, - cancelCtx: cancel, - genBlockNumber: genBlockNumber, - cfg: cfg, - chSynced: chSynced, - networkID: networkID, + bridgeCtrl: bridge, + storage: storage.(storageInterface), + etherMan: ethMan, + ctx: ctx, + cancelCtx: cancel, + genBlockNumber: genBlockNumber, + cfg: cfg, + chSynced: chSynced, + zkEVMClient: zkEVMClient, + chExitRootEventL2: chExitRootEventL2, + networkID: networkID, }, nil } @@ -166,8 +158,8 @@ func (s *ClientSynchronizer) Sync() error { } } } - } else { // Sync Trusted GlobalExitRoots if L1 is synced - if s.networkID != 0 { + } else { // Sync Trusted GlobalExitRoots if L2 network is synced + if s.networkID == 0 { continue } log.Infof("networkID: %d, Virtual state is synced, getting trusted state", s.networkID) @@ -206,6 +198,7 @@ func (s *ClientSynchronizer) syncTrustedState() error { return nil } ger := ðerman.GlobalExitRoot{ + NetworkID: s.networkID, GlobalExitRoot: lastGER, ExitRoots: []common.Hash{ exitRoots.MainnetExitRoot, @@ -218,7 +211,8 @@ func (s *ClientSynchronizer) syncTrustedState() error { return err } if isUpdated { - s.chExitRootEvent <- ger + log.Debug("adding trusted ger to the channels. GER: ", lastGER) + s.chExitRootEventL2 <- ger } return nil } @@ -258,6 +252,7 @@ func (s *ClientSynchronizer) syncBlocks(lastBlockSynced *etherman.Block) (*ether log.Debugf("NetworkID: %d, Setting toBlock to the lastKnownBlock: %s", s.networkID, lastKnownBlock.String()) toBlock = lastKnownBlock.Uint64() if !s.synced { + fromBlock = lastBlockSynced.BlockNumber log.Infof("NetworkID %d Synced!", s.networkID) waitDuration = s.cfg.SyncInterval.Duration s.synced = true @@ -348,7 +343,7 @@ func (s *ClientSynchronizer) syncBlocks(lastBlockSynced *etherman.Block) (*ether } } - if lastKnownBlock.Cmp(new(big.Int).SetUint64(toBlock)) < 1 { + if lastKnownBlock.Cmp(new(big.Int).SetUint64(toBlock)) < 1 { // lastKnownBlock <= toBlock if !s.synced { log.Infof("NetworkID %d Synced!", s.networkID) waitDuration = s.cfg.SyncInterval.Duration @@ -429,9 +424,6 @@ func (s *ClientSynchronizer) processBlockRange(blocks []etherman.Block, order ma if err != nil { return err } - case etherman.ActivateEtrogOrder: - // this is activated when the bridge detects the CreateNewRollup or the AddExistingRollup event from the rollupManager - log.Infof("NetworkID: %d, Event received. 
Activating LxLyEtrog...", s.networkID) } } err = s.storage.Commit(s.ctx, dbTx) @@ -457,7 +449,9 @@ func (s *ClientSynchronizer) processBlockRange(blocks []etherman.Block, order ma if s.l1RollupExitRoot != ger.ExitRoots[1] { log.Debugf("Updating ger: %+v", ger) s.l1RollupExitRoot = ger.ExitRoots[1] - s.chExitRootEvent <- ger + for _, ch := range s.chsExitRootEvent { + ch <- ger + } } } return nil @@ -553,11 +547,7 @@ func (s *ClientSynchronizer) checkReorg(latestStoredBlock, syncedBlock *etherman log.Error("error: ", err) return nil, err } - } else { - log.Infof("NetworkID: %d, [checkReorg function] Using block %d from GetRollupInfoByBlockRange", s.networkID, block.BlockNumber) } - log.Infof("NetworkID: %d, [checkReorg function] BlockNumber: %d BlockHash got from L1 provider: %s", s.networkID, block.BlockNumber, block.BlockHash.String()) - log.Infof("NetworkID: %d, [checkReorg function] reorgedBlockNumber: %d reorgedBlockHash already synced: %s", s.networkID, reorgedBlock.BlockNumber, reorgedBlock.BlockHash.String()) // Compare hashes if (block.BlockHash != reorgedBlock.BlockHash || block.ParentHash != reorgedBlock.ParentHash) && reorgedBlock.BlockNumber > s.genBlockNumber { @@ -599,33 +589,31 @@ func (s *ClientSynchronizer) checkReorg(latestStoredBlock, syncedBlock *etherman } func (s *ClientSynchronizer) processVerifyBatch(verifyBatch etherman.VerifiedBatch, blockID uint64, dbTx pgx.Tx) error { - if verifyBatch.RollupID == s.etherMan.GetRollupID() { - if verifyBatch.LocalExitRoot == (common.Hash{}) { - log.Debugf("networkID: %d, skipping empty local exit root in verifyBatch event. VerifyBatch: %+v", s.networkID, verifyBatch) - return nil - } - // Just check that the calculated RollupExitRoot is fine - ok, err := s.storage.CheckIfRootExists(s.ctx, verifyBatch.LocalExitRoot.Bytes(), uint8(verifyBatch.RollupID), dbTx) - if err != nil { - log.Errorf("networkID: %d, error Checking if root exists. Error: %v", s.networkID, err) - rollbackErr := s.storage.Rollback(s.ctx, dbTx) - if rollbackErr != nil { - log.Errorf("networkID: %d, error rolling back state. BlockNumber: %d, rollbackErr: %v, error : %s", - s.networkID, verifyBatch.BlockNumber, rollbackErr, err.Error()) - return rollbackErr - } - return err + if verifyBatch.LocalExitRoot == (common.Hash{}) { + log.Debugf("networkID: %d, skipping empty local exit root in verifyBatch event. VerifyBatch: %+v", s.networkID, verifyBatch) + return nil + } + // Just check that the calculated RollupExitRoot is fine + ok, err := s.storage.CheckIfRootExists(s.ctx, verifyBatch.LocalExitRoot.Bytes(), uint8(verifyBatch.RollupID), dbTx) + if err != nil { + log.Errorf("networkID: %d, error Checking if root exists. Error: %v", s.networkID, err) + rollbackErr := s.storage.Rollback(s.ctx, dbTx) + if rollbackErr != nil { + log.Errorf("networkID: %d, error rolling back state. BlockNumber: %d, rollbackErr: %v, error : %s", + s.networkID, verifyBatch.BlockNumber, rollbackErr, err.Error()) + return rollbackErr } - if !ok { - log.Errorf("networkID: %d, Root: %s doesn't exist!", s.networkID, verifyBatch.LocalExitRoot.String()) - rollbackErr := s.storage.Rollback(s.ctx, dbTx) - if rollbackErr != nil { - log.Errorf("networkID: %d, error rolling back state. 
BlockNumber: %d, rollbackErr: %v, error : %s", - s.networkID, verifyBatch.BlockNumber, rollbackErr, err.Error()) - return rollbackErr - } - return fmt.Errorf("networkID: %d, Root: %s doesn't exist!", s.networkID, verifyBatch.LocalExitRoot.String()) + return err + } + if !ok { + log.Errorf("networkID: %d, Root: %s doesn't exist!", s.networkID, verifyBatch.LocalExitRoot.String()) + rollbackErr := s.storage.Rollback(s.ctx, dbTx) + if rollbackErr != nil { + log.Errorf("networkID: %d, error rolling back state. BlockNumber: %d, rollbackErr: %v, error : %s", + s.networkID, verifyBatch.BlockNumber, rollbackErr, err.Error()) + return rollbackErr } + return fmt.Errorf("networkID: %d, Root: %s doesn't exist!", s.networkID, verifyBatch.LocalExitRoot.String()) } rollupLeaf := etherman.RollupExitLeaf{ BlockID: blockID, @@ -633,7 +621,7 @@ func (s *ClientSynchronizer) processVerifyBatch(verifyBatch etherman.VerifiedBat RollupId: verifyBatch.RollupID, } // Update rollupExitRoot - err := s.bridgeCtrl.AddRollupExitLeaf(s.ctx, rollupLeaf, dbTx) + err = s.bridgeCtrl.AddRollupExitLeaf(s.ctx, rollupLeaf, dbTx) if err != nil { log.Errorf("networkID: %d, error adding rollup exit leaf. Error: %v", s.networkID, err) rollbackErr := s.storage.Rollback(s.ctx, dbTx) diff --git a/synchronizer/synchronizer_test.go b/synchronizer/synchronizer_test.go index c9a36228..9710ae28 100644 --- a/synchronizer/synchronizer_test.go +++ b/synchronizer/synchronizer_test.go @@ -32,14 +32,12 @@ func NewSynchronizerTest( ethMan ethermanInterface, zkEVMClient zkEVMClientInterface, genBlockNumber uint64, - chExitRootEvent chan *etherman.GlobalExitRoot, + chExitRootEventL2 chan *etherman.GlobalExitRoot, + chsExitRootEvent []chan *etherman.GlobalExitRoot, chSynced chan uint, cfg Config) (Synchronizer, error) { ctx, cancel := context.WithCancel(parentCtx) - networkID, err := ethMan.GetNetworkID(ctx) - if err != nil { - log.Fatal("error getting networkID. Error: ", err) - } + networkID := ethMan.GetNetworkID() ger, err := storage.(storageInterface).GetLatestL1SyncedExitRoot(ctx, nil) if err != nil { if err == gerror.ErrStorageNotFound { @@ -49,14 +47,6 @@ func NewSynchronizerTest( } } - // Read db to see if the LxLy is already activated - isActivated, err := storage.(storageInterface).IsLxLyActivated(ctx, nil) - if err != nil { - log.Fatal("error checking if LxLyEtrog is activated. 
Error: ", err) - } - if isActivated { - log.Info("LxLyEtrog already activated") - } if networkID == 0 { return &ClientSynchronizer{ bridgeCtrl: bridge, @@ -67,24 +57,25 @@ func NewSynchronizerTest( genBlockNumber: genBlockNumber, cfg: cfg, networkID: networkID, - chExitRootEvent: chExitRootEvent, + chsExitRootEvent: chsExitRootEvent, chSynced: chSynced, - zkEVMClient: zkEVMClient, l1RollupExitRoot: ger.ExitRoots[1], synced: true, }, nil } return &ClientSynchronizer{ - bridgeCtrl: bridge, - storage: storage.(storageInterface), - etherMan: ethMan, - ctx: ctx, - cancelCtx: cancel, - genBlockNumber: genBlockNumber, - cfg: cfg, - chSynced: chSynced, - networkID: networkID, - synced: true, + bridgeCtrl: bridge, + storage: storage.(storageInterface), + etherMan: ethMan, + ctx: ctx, + cancelCtx: cancel, + genBlockNumber: genBlockNumber, + cfg: cfg, + chSynced: chSynced, + chExitRootEventL2: chExitRootEventL2, + zkEVMClient: zkEVMClient, + networkID: networkID, + synced: true, }, nil } @@ -96,13 +87,12 @@ func TestSyncGer(t *testing.T) { SyncChunkSize: 10, } ctx := mock.MatchedBy(func(ctx context.Context) bool { return ctx != nil }) - m.Etherman.On("GetNetworkID", ctx).Return(uint(0), nil) + m.Etherman.On("GetNetworkID").Return(uint(0)) m.Storage.On("GetLatestL1SyncedExitRoot", ctx, nil).Return(ðerman.GlobalExitRoot{}, gerror.ErrStorageNotFound).Once() - m.Storage.On("IsLxLyActivated", ctx, nil).Return(true, nil).Once() chEvent := make(chan *etherman.GlobalExitRoot) chSynced := make(chan uint) parentCtx := context.Background() - sync, err := NewSynchronizerTest(parentCtx, m.Storage, m.BridgeCtrl, m.Etherman, m.ZkEVMClient, genBlockNumber, chEvent, chSynced, cfg) + sync, err := NewSynchronizerTest(parentCtx, m.Storage, m.BridgeCtrl, m.Etherman, m.ZkEVMClient, genBlockNumber, chEvent, []chan *etherman.GlobalExitRoot{chEvent}, chSynced, cfg) require.NoError(t, err) go func() { @@ -206,6 +196,118 @@ func TestSyncGer(t *testing.T) { Return(&blocks[1].GlobalExitRoots[0], nil). 
Once() + return sync + } + + m := mocks{ + Etherman: newEthermanMock(t), + BridgeCtrl: newBridgectrlMock(t), + Storage: newStorageMock(t), + DbTx: newDbTxMock(t), + ZkEVMClient: newZkEVMClientMock(t), + } + + // start synchronizing + t.Run("Sync Ger test", func(t *testing.T) { + sync := setupMocks(&m) + err := sync.Sync() + require.NoError(t, err) + }) +} + +func TestSyncTrustedGer(t *testing.T) { + setupMocks := func(m *mocks) Synchronizer { + genBlockNumber := uint64(0) + cfg := Config{ + SyncInterval: cfgTypes.Duration{Duration: 1 * time.Second}, + SyncChunkSize: 10, + } + ctx := mock.MatchedBy(func(ctx context.Context) bool { return ctx != nil }) + m.Etherman.On("GetNetworkID").Return(uint(1)) + m.Storage.On("GetLatestL1SyncedExitRoot", ctx, nil).Return(&etherman.GlobalExitRoot{}, gerror.ErrStorageNotFound).Once() + chEvent := make(chan *etherman.GlobalExitRoot) + chSynced := make(chan uint) + parentCtx := context.Background() + sync, err := NewSynchronizerTest(parentCtx, m.Storage, m.BridgeCtrl, m.Etherman, m.ZkEVMClient, genBlockNumber, chEvent, []chan *etherman.GlobalExitRoot{chEvent}, chSynced, cfg) + require.NoError(t, err) + + go func() { + for { + select { + case <-chEvent: + t.Log("New GER received") + case netID := <-chSynced: + t.Log("Synced networkID: ", netID) + case <-parentCtx.Done(): + t.Log("Stopping parentCtx...") + return + } + } + }() + + parentHash := common.HexToHash("0x111") + ethHeader0 := &types.Header{Number: big.NewInt(0), ParentHash: parentHash} + ethHeader1 := &types.Header{Number: big.NewInt(1), ParentHash: ethHeader0.Hash()} + ethBlock0 := types.NewBlockWithHeader(ethHeader0) + ethBlock1 := types.NewBlockWithHeader(ethHeader1) + lastBlock := &etherman.Block{BlockHash: ethBlock0.Hash(), BlockNumber: ethBlock0.Number().Uint64()} + var networkID uint = 1 + + m.Storage. + On("GetLastBlock", ctx, networkID, nil). + Return(lastBlock, nil) + + m.Etherman. + On("EthBlockByNumber", ctx, lastBlock.BlockNumber). + Return(ethBlock0, nil). + Once() + + var n *big.Int + m.Etherman. + On("HeaderByNumber", ctx, n). + Return(ethHeader1, nil). + Once() + + ethermanBlock0 := etherman.Block{ + BlockHash: ethBlock0.Hash(), + NetworkID: 1, + } + ethermanBlock1 := etherman.Block{ + BlockNumber: ethBlock0.NumberU64(), + BlockHash: ethBlock1.Hash(), + NetworkID: 1, + } + blocks := []etherman.Block{ethermanBlock0, ethermanBlock1} + order := map[common.Hash][]etherman.Order{ + ethBlock1.Hash(): {}, + } + + fromBlock := ethBlock0.NumberU64() + toBlock := fromBlock + cfg.SyncChunkSize + if toBlock > ethBlock1.NumberU64() { + toBlock = ethBlock1.NumberU64() + } + m.Etherman. + On("GetRollupInfoByBlockRange", ctx, fromBlock, &toBlock). + Return(blocks, order, nil). + Once() + + m.Storage. + On("BeginDBTransaction", ctx). + Return(m.DbTx, nil). + Once() + + m.Storage. + On("AddBlock", ctx, &blocks[1], m.DbTx). + Return(uint64(1), nil). + Once() + + m.Storage. + On("Commit", ctx, m.DbTx). + Run(func(args mock.Arguments) { sync.Stop() }). + Return(nil). + Once() + g := common.HexToHash("0xb14c74e4dddf25627a745f46cae6ac98782e2783c3ccc28107c8210e60d58861") m.ZkEVMClient. 
@@ -223,6 +325,7 @@ func TestSyncGer(t *testing.T) { Once() ger := &etherman.GlobalExitRoot{ + NetworkID: 1, GlobalExitRoot: g, ExitRoots: []common.Hash{ exitRootResponse.MainnetExitRoot, @@ -253,7 +356,6 @@ func TestSyncGer(t *testing.T) { require.NoError(t, err) }) } - func TestReorg(t *testing.T) { setupMocks := func(m *mocks) Synchronizer { genBlockNumber := uint64(0) @@ -263,12 +365,11 @@ func TestReorg(t *testing.T) { } ctx := mock.MatchedBy(func(ctx context.Context) bool { return ctx != nil }) parentContext := context.Background() - m.Etherman.On("GetNetworkID", ctx).Return(uint(0), nil) + m.Etherman.On("GetNetworkID").Return(uint(0)) m.Storage.On("GetLatestL1SyncedExitRoot", ctx, nil).Return(&etherman.GlobalExitRoot{}, gerror.ErrStorageNotFound).Once() - m.Storage.On("IsLxLyActivated", ctx, nil).Return(true, nil).Once() chEvent := make(chan *etherman.GlobalExitRoot) chSynced := make(chan uint) - sync, err := NewSynchronizerTest(parentContext, m.Storage, m.BridgeCtrl, m.Etherman, m.ZkEVMClient, genBlockNumber, chEvent, chSynced, cfg) + sync, err := NewSynchronizerTest(parentContext, m.Storage, m.BridgeCtrl, m.Etherman, m.ZkEVMClient, genBlockNumber, chEvent, []chan *etherman.GlobalExitRoot{chEvent}, chSynced, cfg) require.NoError(t, err) go func() { @@ -390,12 +491,6 @@ func TestReorg(t *testing.T) { Return(nil). Once() - ger := common.Hash{} - m.ZkEVMClient. - On("GetLatestGlobalExitRoot", ctx). - Return(ger, nil). - Once() - m.Etherman. On("HeaderByNumber", ctx, n). Return(ethHeader3bis, nil). @@ -491,33 +586,6 @@ func TestReorg(t *testing.T) { }). Once() - ger = common.HexToHash("0x01") - m.ZkEVMClient. - On("GetLatestGlobalExitRoot", ctx). - Return(ger, nil). - Once() - - exitRoots := &rpcTypes.ExitRoots{ - MainnetExitRoot: common.Hash{}, - RollupExitRoot: common.Hash{}, - } - m.ZkEVMClient. - On("ExitRootsByGER", ctx, ger). - Return(exitRoots, nil). - Once() - - fullGer := &etherman.GlobalExitRoot{ - GlobalExitRoot: ger, - ExitRoots: []common.Hash{ - exitRoots.MainnetExitRoot, - exitRoots.RollupExitRoot, - }, - } - m.Storage. - On("AddTrustedGlobalExitRoot", ctx, fullGer, nil). - Return(true, nil). - Once() - return sync } m := mocks{ @@ -545,12 +613,11 @@ func TestLatestSyncedBlockEmpty(t *testing.T) { } ctx := mock.MatchedBy(func(ctx context.Context) bool { return ctx != nil }) parentContext := context.Background() - m.Etherman.On("GetNetworkID", ctx).Return(uint(0), nil) + m.Etherman.On("GetNetworkID").Return(uint(0)) m.Storage.On("GetLatestL1SyncedExitRoot", ctx, nil).Return(&etherman.GlobalExitRoot{}, gerror.ErrStorageNotFound).Once() - m.Storage.On("IsLxLyActivated", ctx, nil).Return(true, nil).Once() chEvent := make(chan *etherman.GlobalExitRoot) chSynced := make(chan uint) - sync, err := NewSynchronizerTest(parentContext, m.Storage, m.BridgeCtrl, m.Etherman, m.ZkEVMClient, genBlockNumber, chEvent, chSynced, cfg) + sync, err := NewSynchronizerTest(parentContext, m.Storage, m.BridgeCtrl, m.Etherman, m.ZkEVMClient, genBlockNumber, chEvent, []chan *etherman.GlobalExitRoot{chEvent}, chSynced, cfg) require.NoError(t, err) go func() { @@ -653,12 +720,6 @@ func TestLatestSyncedBlockEmpty(t *testing.T) { Return(nil). Once() - ger := common.Hash{} - m.ZkEVMClient. - On("GetLatestGlobalExitRoot", ctx). - Return(ger, nil). - Once() - m.Etherman. On("HeaderByNumber", ctx, n). Return(ethHeader3, nil). @@ -680,33 +741,6 @@ func TestLatestSyncedBlockEmpty(t *testing.T) { m.Etherman. On("GetRollupInfoByBlockRange", ctx, fromBlock, &toBlock). Return(blocks, order, nil). 
- Once() - - ger = common.HexToHash("0x01") - m.ZkEVMClient. - On("GetLatestGlobalExitRoot", ctx). - Return(ger, nil). - Once() - - exitRoots := &rpcTypes.ExitRoots{ - MainnetExitRoot: common.Hash{}, - RollupExitRoot: common.Hash{}, - } - m.ZkEVMClient. - On("ExitRootsByGER", ctx, ger). - Return(exitRoots, nil). - Once() - - fullGer := &etherman.GlobalExitRoot{ - GlobalExitRoot: ger, - ExitRoots: []common.Hash{ - exitRoots.MainnetExitRoot, - exitRoots.RollupExitRoot, - }, - } - m.Storage. - On("AddTrustedGlobalExitRoot", ctx, fullGer, nil). - Return(true, nil). Run(func(args mock.Arguments) { sync.Stop() }). @@ -739,12 +773,11 @@ func TestRegularReorg(t *testing.T) { } ctx := mock.MatchedBy(func(ctx context.Context) bool { return ctx != nil }) parentContext := context.Background() - m.Etherman.On("GetNetworkID", ctx).Return(uint(0), nil) + m.Etherman.On("GetNetworkID").Return(uint(0)) m.Storage.On("GetLatestL1SyncedExitRoot", ctx, nil).Return(&etherman.GlobalExitRoot{}, gerror.ErrStorageNotFound).Once() - m.Storage.On("IsLxLyActivated", ctx, nil).Return(true, nil).Once() chEvent := make(chan *etherman.GlobalExitRoot) chSynced := make(chan uint) - sync, err := NewSynchronizerTest(parentContext, m.Storage, m.BridgeCtrl, m.Etherman, m.ZkEVMClient, genBlockNumber, chEvent, chSynced, cfg) + sync, err := NewSynchronizerTest(parentContext, m.Storage, m.BridgeCtrl, m.Etherman, m.ZkEVMClient, genBlockNumber, chEvent, []chan *etherman.GlobalExitRoot{chEvent}, chSynced, cfg) require.NoError(t, err) go func() { @@ -837,12 +870,6 @@ func TestRegularReorg(t *testing.T) { Return(nil). Once() - ger := common.Hash{} - m.ZkEVMClient. - On("GetLatestGlobalExitRoot", ctx). - Return(ger, nil). - Once() - m.Etherman. On("HeaderByNumber", ctx, n). Return(ethHeader2bis, nil). @@ -924,33 +951,6 @@ func TestRegularReorg(t *testing.T) { m.Storage. On("Commit", ctx, m.DbTx). Return(nil). - Once() - - ger = common.HexToHash("0x01") - m.ZkEVMClient. - On("GetLatestGlobalExitRoot", ctx). - Return(ger, nil). - Once() - - exitRoots := &rpcTypes.ExitRoots{ - MainnetExitRoot: common.Hash{}, - RollupExitRoot: common.Hash{}, - } - m.ZkEVMClient. - On("ExitRootsByGER", ctx, ger). - Return(exitRoots, nil). - Once() - - fullGer := &etherman.GlobalExitRoot{ - GlobalExitRoot: ger, - ExitRoots: []common.Hash{ - exitRoots.MainnetExitRoot, - exitRoots.RollupExitRoot, - }, - } - m.Storage. - On("AddTrustedGlobalExitRoot", ctx, fullGer, nil). - Return(true, nil). Run(func(args mock.Arguments) { sync.Stop() }). @@ -983,12 +983,11 @@ func TestLatestSyncedBlockEmptyWithExtraReorg(t *testing.T) { } ctx := mock.MatchedBy(func(ctx context.Context) bool { return ctx != nil }) parentContext := context.Background() - m.Etherman.On("GetNetworkID", ctx).Return(uint(0), nil) + m.Etherman.On("GetNetworkID").Return(uint(0)) m.Storage.On("GetLatestL1SyncedExitRoot", ctx, nil).Return(&etherman.GlobalExitRoot{}, gerror.ErrStorageNotFound).Once() - m.Storage.On("IsLxLyActivated", ctx, nil).Return(true, nil).Once() chEvent := make(chan *etherman.GlobalExitRoot) chSynced := make(chan uint) - sync, err := NewSynchronizerTest(parentContext, m.Storage, m.BridgeCtrl, m.Etherman, m.ZkEVMClient, genBlockNumber, chEvent, chSynced, cfg) + sync, err := NewSynchronizerTest(parentContext, m.Storage, m.BridgeCtrl, m.Etherman, m.ZkEVMClient, genBlockNumber, chEvent, []chan *etherman.GlobalExitRoot{chEvent}, chSynced, cfg) require.NoError(t, err) go func() { @@ -1110,12 -1109,6 @@ func TestLatestSyncedBlockEmptyWithExtraReorg(t *testing.T) { Return(nil). 
Once() - ger := common.Hash{} - m.ZkEVMClient. - On("GetLatestGlobalExitRoot", ctx). - Return(ger, nil). - Once() - m.Etherman. On("HeaderByNumber", ctx, n). Return(ethHeader3, nil). @@ -1164,33 +1157,6 @@ func TestLatestSyncedBlockEmptyWithExtraReorg(t *testing.T) { m.Storage. On("Commit", ctx, m.DbTx). Return(nil). - Once() - - ger = common.HexToHash("0x01") - m.ZkEVMClient. - On("GetLatestGlobalExitRoot", ctx). - Return(ger, nil). - Once() - - exitRoots := &rpcTypes.ExitRoots{ - MainnetExitRoot: common.Hash{}, - RollupExitRoot: common.Hash{}, - } - m.ZkEVMClient. - On("ExitRootsByGER", ctx, ger). - Return(exitRoots, nil). - Once() - - fullGer := &etherman.GlobalExitRoot{ - GlobalExitRoot: ger, - ExitRoots: []common.Hash{ - exitRoots.MainnetExitRoot, - exitRoots.RollupExitRoot, - }, - } - m.Storage. - On("AddTrustedGlobalExitRoot", ctx, fullGer, nil). - Return(true, nil). Run(func(args mock.Arguments) { sync.Stop() }). @@ -1223,12 +1189,11 @@ func TestCallFromEmptyBlockAndReorg(t *testing.T) { } ctx := mock.MatchedBy(func(ctx context.Context) bool { return ctx != nil }) parentContext := context.Background() - m.Etherman.On("GetNetworkID", ctx).Return(uint(0), nil) + m.Etherman.On("GetNetworkID").Return(uint(0)) m.Storage.On("GetLatestL1SyncedExitRoot", ctx, nil).Return(&etherman.GlobalExitRoot{}, gerror.ErrStorageNotFound).Once() - m.Storage.On("IsLxLyActivated", ctx, nil).Return(true, nil).Once() chEvent := make(chan *etherman.GlobalExitRoot) chSynced := make(chan uint) - sync, err := NewSynchronizerTest(parentContext, m.Storage, m.BridgeCtrl, m.Etherman, m.ZkEVMClient, genBlockNumber, chEvent, chSynced, cfg) + sync, err := NewSynchronizerTest(parentContext, m.Storage, m.BridgeCtrl, m.Etherman, m.ZkEVMClient, genBlockNumber, chEvent, []chan *etherman.GlobalExitRoot{chEvent}, chSynced, cfg) require.NoError(t, err) go func() { @@ -1346,12 +1311,6 @@ func TestCallFromEmptyBlockAndReorg(t *testing.T) { Return(nil). Once() - ger := common.Hash{} - m.ZkEVMClient. - On("GetLatestGlobalExitRoot", ctx). - Return(ger, nil). - Once() - m.Etherman. On("HeaderByNumber", mock.Anything, n). Return(ethHeader2bis, nil). @@ -1392,33 +1351,6 @@ func TestCallFromEmptyBlockAndReorg(t *testing.T) { m.Storage. On("Commit", ctx, m.DbTx). Return(nil). - Once() - - ger = common.HexToHash("0x01") - m.ZkEVMClient. - On("GetLatestGlobalExitRoot", ctx). - Return(ger, nil). - Once() - - exitRoots := &rpcTypes.ExitRoots{ - MainnetExitRoot: common.Hash{}, - RollupExitRoot: common.Hash{}, - } - m.ZkEVMClient. - On("ExitRootsByGER", ctx, ger). - Return(exitRoots, nil). - Once() - - fullGer := &etherman.GlobalExitRoot{ - GlobalExitRoot: ger, - ExitRoots: []common.Hash{ - exitRoots.MainnetExitRoot, - exitRoots.RollupExitRoot, - }, - } - m.Storage. - On("AddTrustedGlobalExitRoot", ctx, fullGer, nil). - Return(true, nil). Run(func(args mock.Arguments) { sync.Stop() }). 
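The synchronizer refactor above is the core of the multi-network support: the single chExitRootEvent channel is replaced by chsExitRootEvent, a slice holding one channel per claim-transaction-manager consumer, so the L1 synchronizer can fan each new rollup exit root out to every tracked network, while each L2 synchronizer pushes trusted GERs through its own chExitRootEventL2 channel. Below is a minimal, self-contained sketch of that fan-out wiring, with a trimmed stand-in for etherman.GlobalExitRoot; the buffered channels are only so the sketch runs in one goroutine (the tests above create unbuffered channels, so in the real service a stalled consumer back-pressures the L1 sync loop).

package main

import "fmt"

// globalExitRoot is a trimmed stand-in for etherman.GlobalExitRoot.
type globalExitRoot struct {
	NetworkID      uint
	GlobalExitRoot string
}

func main() {
	// One channel per L2 network, mirroring chsExitRootEvent in ClientSynchronizer.
	l2NetworkIDs := []uint{1, 2}
	chsExitRootEvent := make([]chan *globalExitRoot, len(l2NetworkIDs))
	for i := range chsExitRootEvent {
		chsExitRootEvent[i] = make(chan *globalExitRoot, 1)
	}

	// L1 side: when the rollup exit root changes, the same GER is fanned out
	// to every registered channel, as processBlockRange now does.
	ger := &globalExitRoot{NetworkID: 0, GlobalExitRoot: "0x01"}
	for _, ch := range chsExitRootEvent {
		ch <- ger
	}

	// Consumer side: each claim manager drains only its own channel.
	for i, ch := range chsExitRootEvent {
		fmt.Printf("consumer for network %d received GER %s\n", l2NetworkIDs[i], (<-ch).GlobalExitRoot)
	}
}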
diff --git a/test/benchmark/api_test.go b/test/benchmark/api_test.go index 18d5f546..02590308 100644 --- a/test/benchmark/api_test.go +++ b/test/benchmark/api_test.go @@ -108,6 +108,7 @@ func initServer(b *testing.B, bench benchmark) *bridgectrl.BridgeController { } else { var isUpdated bool isUpdated, err = store.AddTrustedGlobalExitRoot(context.TODO(), &etherman.GlobalExitRoot{ + NetworkID: 1, GlobalExitRoot: bridgectrl.Hash(common.BytesToHash(roots[0]), common.BytesToHash(roots[1])), ExitRoots: []common.Hash{common.BytesToHash(roots[0]), common.BytesToHash(roots[1])}, }, dbTx) diff --git a/test/e2e/bridge_test.go b/test/e2e/bridge_test.go index 48a49f30..e624be01 100644 --- a/test/e2e/bridge_test.go +++ b/test/e2e/bridge_test.go @@ -11,17 +11,13 @@ import ( "github.com/0xPolygonHermez/zkevm-bridge-service/bridgectrl" "github.com/0xPolygonHermez/zkevm-bridge-service/db" + "github.com/0xPolygonHermez/zkevm-bridge-service/log" "github.com/0xPolygonHermez/zkevm-bridge-service/server" "github.com/0xPolygonHermez/zkevm-bridge-service/test/operations" "github.com/ethereum/go-ethereum/common" "github.com/stretchr/testify/require" ) -var ( - l1BridgeAddr = common.HexToAddress("0xFe12ABaa190Ef0c8638Ee0ba9F828BF41368Ca0E") - l2BridgeAddr = common.HexToAddress("0xFe12ABaa190Ef0c8638Ee0ba9F828BF41368Ca0E") -) - // TestE2E tests the flow of deposit and withdraw funds using the vector func TestE2E(t *testing.T) { if testing.Short() { @@ -78,17 +74,16 @@ func TestE2E(t *testing.T) { tokenAddr := common.Address{} // This means is eth destAddr := common.HexToAddress("0xc949254d682d8c9ad5682521675b8f43b102aec4") - l1Balance, err := opsman.CheckAccountBalance(ctx, operations.L1, &l1BridgeAddr) + // Check L2 funds + balance, err := opsman.CheckAccountBalance(ctx, operations.L2, &destAddr) require.NoError(t, err) - t.Logf("L1 Bridge Balance: %v", l1Balance) + initL2Balance := big.NewInt(0) + require.Equal(t, 0, balance.Cmp(initL2Balance)) err = opsman.SendL1Deposit(ctx, tokenAddr, amount, destNetwork, &destAddr) require.NoError(t, err) - l1Balance, err = opsman.CheckAccountBalance(ctx, operations.L1, &l1BridgeAddr) - require.NoError(t, err) - t.Logf("L1 Bridge Balance: %v", l1Balance) // Check globalExitRoot - globalExitRoot2, err := opsman.GetTrustedGlobalExitRootSynced(ctx) + globalExitRoot2, err := opsman.GetTrustedGlobalExitRootSynced(ctx, uint(destNetwork)) require.NoError(t, err) t.Logf("Before deposit global exit root: %v", globalExitRootSMC) t.Logf("After deposit global exit root: %v", globalExitRoot2) @@ -97,11 +92,6 @@ func TestE2E(t *testing.T) { // Get Bridge Info By DestAddr deposits, err := opsman.GetBridgeInfoByDestAddr(ctx, &destAddr) require.NoError(t, err) - // Check L2 funds - balance, err := opsman.CheckAccountBalance(ctx, operations.L2, &destAddr) - require.NoError(t, err) - initL2Balance := big.NewInt(0) - require.Equal(t, 0, balance.Cmp(initL2Balance)) t.Log("Deposit: ", deposits[0]) // Check the claim tx err = opsman.CheckClaim(ctx, deposits[0]) @@ -118,14 +108,8 @@ func TestE2E(t *testing.T) { // Send L2 Deposit to withdraw the some funds destNetwork = 0 amount = new(big.Int).SetUint64(1000000000000000000) - l2Balance, err := opsman.CheckAccountBalance(ctx, operations.L2, &l2BridgeAddr) - require.NoError(t, err) - t.Logf("L2 Bridge Balance: %v", l2Balance) err = opsman.SendL2Deposit(ctx, tokenAddr, amount, destNetwork, &destAddr, operations.L2) require.NoError(t, err) - l2Balance, err = opsman.CheckAccountBalance(ctx, operations.L2, &l2BridgeAddr) - require.NoError(t, err) - t.Logf("L2 
Bridge Balance: %v", l2Balance) // Get Bridge Info By DestAddr deposits, err = opsman.GetBridgeInfoByDestAddr(ctx, &destAddr) @@ -157,6 +141,7 @@ func TestE2E(t *testing.T) { balance, err = opsman.CheckAccountBalance(ctx, operations.L2, &destAddr) require.NoError(t, err) require.True(t, big.NewInt(9000000000000000000).Cmp(balance) > 0) + log.Debug("L1-L2 eth bridge end") }) t.Run("L1-L2 token bridge", func(t *testing.T) { @@ -196,15 +181,12 @@ func TestE2E(t *testing.T) { time.Sleep(3 * time.Second) // wait for sync token_wrapped event tokenWrapped, err := opsman.GetTokenWrapped(ctx, 0, tokenAddr, false) require.NoError(t, err) - balance2, err := opsman.CheckAccountTokenBalance(ctx, "l2", tokenWrapped.WrappedTokenAddress, &destAddr) - require.NoError(t, err) - t.Log("Init account balance l2: ", balance2) // Second deposit err = opsman.SendL1Deposit(ctx, tokenAddr, amount1, destNetwork, &destAddr) require.NoError(t, err) // Check globalExitRoot - globalExitRoot2, err := opsman.GetTrustedGlobalExitRootSynced(ctx) + globalExitRoot2, err := opsman.GetTrustedGlobalExitRootSynced(ctx, uint(destNetwork)) require.NoError(t, err) t.Logf("Before deposits global exit root: %v", globalExitRootSMC) t.Logf("After deposits global exit root: %v", globalExitRoot2) @@ -240,6 +222,7 @@ func TestE2E(t *testing.T) { require.NoError(t, err) t.Log("Balance tokenWrapped: ", balance) require.Equal(t, new(big.Int).SetUint64(1500000000000000000), balance) + log.Debug("L1-L2 token bridge end") }) t.Run("Reversal ERC20", func(t *testing.T) { @@ -300,32 +283,28 @@ func TestE2E(t *testing.T) { require.Equal(t, amount, balance2) // Check globalExitRoot - globalExitRoot3, err := opsman.GetTrustedGlobalExitRootSynced(ctx) + globalExitRoot3, err := opsman.GetLatestGlobalExitRootFromL1(ctx) require.NoError(t, err) - // Send L2 Deposit to withdraw the some funds + // Check L2 funds + balance, err = opsman.CheckAccountTokenBalance(ctx, operations.L2, tokenAddr, &destAddr) + require.NoError(t, err) + t.Log("balance: ", balance) + require.Equal(t, 0, big.NewInt(0).Cmp(balance)) + // Send L1 Deposit to withdraw the some funds destNetwork = 1 amount = new(big.Int).SetUint64(600000000000000000) err = opsman.SendL1Deposit(ctx, tokenWrapped.WrappedTokenAddress, amount, destNetwork, &destAddr) require.NoError(t, err) - // Get Bridge Info By DestAddr - deposits, err = opsman.GetBridgeInfoByDestAddr(ctx, &destAddr) + // Check the claim tx + err = opsman.CheckClaim(ctx, deposits[0]) require.NoError(t, err) - t.Log("Deposit 2: ", deposits[0]) + time.Sleep(2 * time.Second) // Check globalExitRoot - globalExitRoot4, err := opsman.GetTrustedGlobalExitRootSynced(ctx) + globalExitRoot4, err := opsman.GetTrustedGlobalExitRootSynced(ctx, uint(destNetwork)) require.NoError(t, err) t.Logf("Global3 %+v: ", globalExitRoot3) t.Logf("Global4 %+v: ", globalExitRoot4) require.NotEqual(t, globalExitRoot3.ExitRoots[0], globalExitRoot4.ExitRoots[0]) - // Check L2 funds - balance, err = opsman.CheckAccountTokenBalance(ctx, operations.L2, tokenAddr, &destAddr) - require.NoError(t, err) - t.Log("balance: ", balance) - require.Equal(t, 0, big.NewInt(0).Cmp(balance)) - t.Log("deposits[0]: ", deposits[0]) - // Check the claim tx - err = opsman.CheckClaim(ctx, deposits[0]) - require.NoError(t, err) // Check L2 funds to see if the amount has been increased balance, err = opsman.CheckAccountTokenBalance(ctx, operations.L2, tokenAddr, &destAddr) require.NoError(t, err) @@ -334,6 +313,7 @@ func TestE2E(t *testing.T) { balance, err = 
diff --git a/test/e2e/compress_test.go b/test/e2e/compress_test.go
index 168afbcc..89f6365c 100644
--- a/test/e2e/compress_test.go
+++ b/test/e2e/compress_test.go
@@ -12,12 +12,13 @@ import (
 	"github.com/0xPolygonHermez/zkevm-bridge-service/bridgectrl"
 	"github.com/0xPolygonHermez/zkevm-bridge-service/db"
+	"github.com/0xPolygonHermez/zkevm-bridge-service/log"
 	"github.com/0xPolygonHermez/zkevm-bridge-service/server"
 	"github.com/0xPolygonHermez/zkevm-bridge-service/test/operations"
-	"github.com/0xPolygonHermez/zkevm-bridge-service/log"
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/stretchr/testify/require"
 )
+
 const (
 	defaultInterval = 10 * time.Second
 	defaultDeadline = 600 * time.Second
@@ -27,10 +28,15 @@ func multiDepositFromL1(ctx context.Context, opsman *operations.Manager, destAdd
 	amount := new(big.Int).SetUint64(250000000000000000)
 	tokenAddr := common.Address{} // This means is eth
 	var destNetwork uint32 = 1
-	// L1 Deposit
+	// L1 Deposit to R1
 	err := opsman.SendMultipleL1Deposit(ctx, tokenAddr, amount, destNetwork, &destAddr, 30)
 	require.NoError(t, err)
+	// L1 Deposit to R2
+	destNetwork = 2
+	err = opsman.SendMultipleL1Deposit(ctx, tokenAddr, amount, destNetwork, &destAddr, 30)
+	require.NoError(t, err)
+
 	deposits, err := opsman.GetBridgeInfoByDestAddr(ctx, &destAddr)
 	require.NoError(t, err)
 	time.Sleep(5 * time.Second) // Delay to give time to the synchronizer to read all events
@@ -81,8 +87,9 @@ func TestClaimCompressor(t *testing.T) {
 		},
 	}
 
-	os.Setenv("ZKEVM_BRIDGE_CLAIMTXMANAGER_GROUPINGCLAIMS_ENABLED", "true")
-	require.NoError(t, operations.StartBridge())
+	err := os.Setenv("ZKEVM_BRIDGE_CLAIMTXMANAGER_GROUPINGCLAIMS_ENABLED", "true")
+	require.NoError(t, err)
+	require.NoError(t, operations.StartBridge3())
 	opsman, err := operations.NewManager(ctx, opsCfg)
 	require.NoError(t, err)
 	const st time.Duration = 20 // wait until the syncing is finished
@@ -95,13 +102,13 @@ func TestClaimCompressor(t *testing.T) {
 		// Check number claim events
 		numberClaims, err := opsman.GetNumberClaims(ctx, destAddr.String())
 		require.NoError(t, err)
-		require.Equal(t, 30, numberClaims)
+		require.Equal(t, 60, numberClaims)
 		// Check L2 balance
 		balance, err := opsman.CheckAccountBalance(ctx, "l2", &destAddr)
 		require.NoError(t, err)
 		require.Equal(t, "7500000000000000435", balance.String())
 		maxGroupID, err := opsman.GetLatestMonitoredTxGroupID(ctx)
 		require.NoError(t, err)
-		require.Equal(t, uint64(2), maxGroupID)
+		require.Equal(t, uint64(4), maxGroupID)
 	})
 }
diff --git a/test/e2e/l2_l2_test.go b/test/e2e/l2_l2_test.go
index b3812430..a8b806de 100644
--- a/test/e2e/l2_l2_test.go
+++ b/test/e2e/l2_l2_test.go
@@ -23,7 +23,7 @@ func TestL2L2(t *testing.T) {
 	ctx := context.Background()
 	opsman1, err := operations.GetOpsman(ctx, "http://localhost:8123", "test_db", "8080", "9090", "5435", 1)
 	require.NoError(t, err)
-	opsman2, err := operations.GetOpsman(ctx, "http://localhost:8124", "test_db", "8081", "9091", "5438", 2)
+	opsman2, err := operations.GetOpsman(ctx, "http://localhost:8124", "test_db", "8080", "9090", "5435", 2)
 	require.NoError(t, err)
 
 	t.Run("L2-L2 eth bridge", func(t *testing.T) {
@@ -60,7 +60,8 @@ func TestL2L2(t *testing.T) {
 		// Check L2 destination funds
 		balance, err := opsman2.CheckAccountBalance(ctx, operations.L2, &address)
 		require.NoError(t, err)
-		v, _ := big.NewInt(0).SetString("100000000000000000000000", 10)
+		v, _ := big.NewInt(0).SetString("99999999209233000000000", 10)
+		t.Log("balance: ", balance)
 		require.Equal(t, 0, v.Cmp(balance))
 		// Get the claim data
 		smtProof, smtRollupProof, globaExitRoot, err := opsman1.GetClaimData(ctx, uint(deposits[0].NetworkId), uint(deposits[0].DepositCnt))
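The expected balance in the hunk above is no longer the untouched genesis allocation: the pre-funded account on rollup 2 has by now paid some gas, which appears to be what the new constant accounts for. A quick check of the delta between the two values (the constants come from the hunk; reading the difference as fees is an assumption):

    package main

    import (
    	"fmt"
    	"math/big"
    )

    func main() {
    	genesis, _ := new(big.Int).SetString("100000000000000000000000", 10) // old expectation
    	observed, _ := new(big.Int).SetString("99999999209233000000000", 10) // new expectation
    	// Prints 790767000000000 wei (~0.00079 ETH), presumably spent on gas.
    	fmt.Println(new(big.Int).Sub(genesis, observed))
    }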
"http://localhost:8124", "test_db", "8080", "9090", "5435", 2) require.NoError(t, err) t.Run("L2-L2 eth bridge", func(t *testing.T) { @@ -60,7 +60,8 @@ func TestL2L2(t *testing.T) { // Check L2 destination funds balance, err := opsman2.CheckAccountBalance(ctx, operations.L2, &address) require.NoError(t, err) - v, _ := big.NewInt(0).SetString("100000000000000000000000", 10) + v, _ := big.NewInt(0).SetString("99999999209233000000000", 10) + t.Log("balance: ", balance) require.Equal(t, 0, v.Cmp(balance)) // Get the claim data smtProof, smtRollupProof, globaExitRoot, err := opsman1.GetClaimData(ctx, uint(deposits[0].NetworkId), uint(deposits[0].DepositCnt)) diff --git a/test/e2e/multiplerollups_test.go b/test/e2e/multiplerollups_test.go index e03f8fa0..cc9c781b 100644 --- a/test/e2e/multiplerollups_test.go +++ b/test/e2e/multiplerollups_test.go @@ -27,7 +27,7 @@ func TestMultipleRollups(t *testing.T) { ctx := context.Background() opsman1, err := operations.GetOpsman(ctx, "http://localhost:8123", "test_db", "8080", "9090", "5435", 1) require.NoError(t, err) - opsman2, err := operations.GetOpsman(ctx, "http://localhost:8124", "test_db", "8081", "9091", "5438", 2) + opsman2, err := operations.GetOpsman(ctx, "http://localhost:8124", "test_db", "8080", "9090", "5435", 2) require.NoError(t, err) // Fund L2 sequencer for rollup 2. This is super dirty, but have no better way to do this at the moment @@ -200,7 +200,7 @@ func bridge( } else { tokenAddr, err := opsman.GetTokenAddress(ctx, operations.L2, bd.originTokenNet, bd.originTokenAddr) require.NoError(t, err) - log.Debugf("depositing %d tokens of addr %s on Rollup %d to Network %d", bd.amount.Uint64(), tokenAddr, bd.destNet) + log.Debugf("depositing %d tokens of addr %s to Network %d", bd.amount.Uint64(), tokenAddr, bd.destNet) err = opsman.SendL2Deposit(ctx, tokenAddr, bd.amount, uint32(bd.destNet), &destAddr, operations.L2) require.NoError(t, err) } diff --git a/test/operations/interfaces.go b/test/operations/interfaces.go index 89f50d73..94b42516 100644 --- a/test/operations/interfaces.go +++ b/test/operations/interfaces.go @@ -12,9 +12,9 @@ import ( // StorageInterface is a storage interface. 
diff --git a/test/operations/manager.go b/test/operations/manager.go
index 70207674..aa5a7361 100644
--- a/test/operations/manager.go
+++ b/test/operations/manager.go
@@ -122,7 +122,7 @@ func NewManager(ctx context.Context, cfg *Config) (*Manager, error) {
 	if err != nil {
 		return nil, err
 	}
-	bService := server.NewBridgeService(cfg.BS, cfg.BT.Height, []uint{0, cfg.L2NetworkID}, pgst, cfg.L2NetworkID)
+	bService := server.NewBridgeService(cfg.BS, cfg.BT.Height, []uint{0, cfg.L2NetworkID}, pgst)
 	opsman.storage = st.(StorageInterface)
 	opsman.bridgetree = bt
 	opsman.bridgeService = bService
@@ -218,16 +218,14 @@ func (m *Manager) GetNumberClaims(ctx context.Context, destAddr string) (int, er
 }
 
 // SendL1Deposit sends a deposit from l1 to l2.
-func (m *Manager) SendL1Deposit(ctx context.Context, tokenAddr common.Address, amount *big.Int,
-	destNetwork uint32, destAddr *common.Address,
-) error {
+func (m *Manager) SendL1Deposit(ctx context.Context, tokenAddr common.Address, amount *big.Int, destNetwork uint32, destAddr *common.Address) error {
 	client := m.clients[L1]
 	auth, err := client.GetSigner(ctx, accHexPrivateKeys[L1])
 	if err != nil {
 		return err
 	}
 
-	orgExitRoot, err := m.storage.GetLatestExitRoot(ctx, false, nil)
+	orgExitRoot, err := m.storage.GetLatestExitRoot(ctx, 0, uint(destNetwork), nil)
 	if err != nil && err != gerror.ErrStorageNotFound {
 		return err
 	}
@@ -238,7 +236,7 @@ func (m *Manager) SendL1Deposit(ctx context.Context, tokenAddr common.Address, a
 	}
 
 	// sync for new exit root
-	return m.WaitExitRootToBeSynced(ctx, orgExitRoot, false)
+	return m.WaitExitRootToBeSynced(ctx, orgExitRoot, 0, uint(destNetwork))
 }
 
 // SendMultipleL1Deposit sends a deposit from l1 to l2.
@@ -272,8 +270,12 @@ func (m *Manager) SendL2Deposit(ctx context.Context, tokenAddr common.Address, a
 	if err != nil {
 		return err
 	}
-
-	orgExitRoot, err := m.storage.GetLatestExitRoot(ctx, true, nil)
+	networkID, err := client.Bridge.NetworkID(&bind.CallOpts{Pending: false})
+	if err != nil {
+		log.Error("error getting networkID. Error: ", err)
+		return err
+	}
+	orgExitRoot, err := m.storage.GetLatestExitRoot(ctx, uint(networkID), uint(destNetwork), nil)
 	if err != nil && err != gerror.ErrStorageNotFound {
 		return err
 	}
@@ -284,7 +286,7 @@ func (m *Manager) SendL2Deposit(ctx context.Context, tokenAddr common.Address, a
 	}
 
 	// sync for new exit root
-	return m.WaitExitRootToBeSynced(ctx, orgExitRoot, true)
+	return m.WaitExitRootToBeSynced(ctx, orgExitRoot, uint(networkID), uint(destNetwork))
 }
 
 // SendL1BridgeMessage bridges a message from l1 to l2.
@@ -301,7 +303,7 @@ func (m *Manager) SendL1BridgeMessage(ctx context.Context, destAddr common.Addre
 		}
 	}
 
-	orgExitRoot, err := m.storage.GetLatestExitRoot(ctx, true, nil)
+	orgExitRoot, err := m.storage.GetLatestExitRoot(ctx, 0, uint(destNetwork), nil)
 	if err != nil && err != gerror.ErrStorageNotFound {
 		return err
 	}
@@ -313,7 +315,7 @@ func (m *Manager) SendL1BridgeMessage(ctx context.Context, destAddr common.Addre
 	}
 
 	// sync for new exit root
-	return m.WaitExitRootToBeSynced(ctx, orgExitRoot, false)
+	return m.WaitExitRootToBeSynced(ctx, orgExitRoot, 0, uint(destNetwork))
 }
 
 // SendL2BridgeMessage bridges a message from l2 to l1.
@@ -324,7 +326,13 @@ func (m *Manager) SendL2BridgeMessage(ctx context.Context, destAddr common.Addre
 		return err
 	}
 
-	orgExitRoot, err := m.storage.GetLatestExitRoot(ctx, true, nil)
+	networkID, err := client.Bridge.NetworkID(&bind.CallOpts{Pending: false})
+	if err != nil {
+		log.Error("error getting networkID. Error: ", err)
+		return err
+	}
+
+	orgExitRoot, err := m.storage.GetLatestExitRoot(ctx, uint(networkID), uint(destNetwork), nil)
 	if err != nil && err != gerror.ErrStorageNotFound {
 		return err
 	}
@@ -336,7 +344,7 @@ func (m *Manager) SendL2BridgeMessage(ctx context.Context, destAddr common.Addre
 	}
 
 	// sync for new exit root
-	return m.WaitExitRootToBeSynced(ctx, orgExitRoot, uint(networkID), uint(destNetwork))
+	return m.WaitExitRootToBeSynced(ctx, orgExitRoot, uint(networkID), uint(destNetwork))
 }
 
 // Setup creates all the required components and initializes them according to
@@ -455,31 +463,6 @@ func (m *Manager) AddFunds(ctx context.Context) error {
 	return WaitTxToBeMined(ctx, client.Client, tx, txPolTransferTimeout)
 }
 
-// Teardown stops all the components.
-func Teardown() error {
-	err := StopBridge()
-	if err != nil {
-		return err
-	}
-
-	err = stopZKEVMNode()
-	if err != nil {
-		return err
-	}
-
-	err = stopProver()
-	if err != nil {
-		return err
-	}
-
-	err = stopNetwork()
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
 func (m *Manager) startNetwork() error {
 	if err := stopNetwork(); err != nil {
 		return err
 	}
@@ -541,6 +524,26 @@ func runCmd(c *exec.Cmd) error {
 	return c.Run()
 }
 
+// StartBridge3 restarts the bridge service.
+func StartBridge3() error {
+	if err := StopBridge3(); err != nil {
+		return err
+	}
+	cmd := exec.Command(makeCmd, "run-bridge-3")
+	err := runCmd(cmd)
+	if err != nil {
+		return err
+	}
+	// Wait for the bridge to be ready
+	return poll(defaultInterval, defaultDeadline, bridgeUpCondition)
+}
+
+// StopBridge3 stops the bridge service.
+func StopBridge3() error {
+	cmd := exec.Command(makeCmd, "stop-bridge-3")
+	return runCmd(cmd)
+}
+
 // StartBridge restarts the bridge service.
 func StartBridge() error {
 	if err := StopBridge(); err != nil {
@@ -671,8 +674,8 @@ func (m *Manager) SendL2Claim(ctx context.Context, deposit *pb.Deposit, smtProof
 }
 
 // GetTrustedGlobalExitRootSynced reads the latest globalexitroot of a batch proposal from db
-func (m *Manager) GetTrustedGlobalExitRootSynced(ctx context.Context) (*etherman.GlobalExitRoot, error) {
-	return m.storage.GetLatestTrustedExitRoot(ctx, nil)
+func (m *Manager) GetTrustedGlobalExitRootSynced(ctx context.Context, networkID uint) (*etherman.GlobalExitRoot, error) {
+	return m.storage.GetLatestTrustedExitRoot(ctx, networkID, nil)
 }
 
 // GetLatestGlobalExitRootFromL1 reads the latest synced globalexitroot in l1 from db
@@ -784,7 +787,7 @@ func (m *Manager) UpdateBlocksForTesting(ctx context.Context, networkID uint, bl
 }
 
 // WaitExitRootToBeSynced waits until new exit root is synced.
-func (m *Manager) WaitExitRootToBeSynced(ctx context.Context, orgExitRoot *etherman.GlobalExitRoot, isRollup bool) error {
+func (m *Manager) WaitExitRootToBeSynced(ctx context.Context, orgExitRoot *etherman.GlobalExitRoot, networkID, destNetwork uint) error {
 	log.Debugf("WaitExitRootToBeSynced: %+v", orgExitRoot)
 	if orgExitRoot == nil {
 		orgExitRoot = &etherman.GlobalExitRoot{
@@ -792,7 +795,7 @@ func (m *Manager) WaitExitRootToBeSynced(ctx context.Context, orgExitRoot *ether
 		}
 	}
 	return operations.Poll(defaultInterval, waitRootSyncDeadline, func() (bool, error) {
-		exitRoot, err := m.storage.GetLatestExitRoot(ctx, isRollup, nil)
+		exitRoot, err := m.storage.GetLatestExitRoot(ctx, networkID, destNetwork, nil)
 		if err != nil {
 			if err == gerror.ErrStorageNotFound {
 				return false, nil
@@ -800,7 +803,7 @@ func (m *Manager) WaitExitRootToBeSynced(ctx context.Context, orgExitRoot *ether
 			return false, err
 		}
 		tID := 0
-		if isRollup {
+		if networkID != 0 {
 			tID = 1
 		}
 		return exitRoot.ExitRoots[tID] != orgExitRoot.ExitRoots[tID], nil
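Instead of hardcoding an isRollup flag, the L2 senders above now ask the bridge contract which network it is on and key all exit-root lookups by that. A standalone sketch of the same lookup, assuming the rollup 2 node on :8124 from this PR's docker-compose, the local test bridge address that bridge_test.go used to hardcode, and this repo's polygonzkevmbridge bindings (import path assumed):

    package main

    import (
    	"github.com/0xPolygonHermez/zkevm-bridge-service/etherman/smartcontracts/polygonzkevmbridge"
    	"github.com/0xPolygonHermez/zkevm-bridge-service/log"
    	"github.com/ethereum/go-ethereum/accounts/abi/bind"
    	"github.com/ethereum/go-ethereum/common"
    	"github.com/ethereum/go-ethereum/ethclient"
    )

    func main() {
    	client, err := ethclient.Dial("http://localhost:8124")
    	if err != nil {
    		log.Fatal("error connecting to the node. Error: ", err)
    	}
    	// Bridge address used by the local test deployments (assumption).
    	bridgeAddr := common.HexToAddress("0xFe12ABaa190Ef0c8638Ee0ba9F828BF41368Ca0E")
    	bridge, err := polygonzkevmbridge.NewPolygonzkevmbridge(bridgeAddr, client)
    	if err != nil {
    		log.Fatal("Error: ", err)
    	}
    	// The same call the Manager now performs before querying exit roots.
    	networkID, err := bridge.NetworkID(&bind.CallOpts{Pending: false})
    	if err != nil {
    		log.Fatal("Error: ", err)
    	}
    	log.Info("networkID: ", networkID)
    }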
diff --git a/test/operations/mockserver.go b/test/operations/mockserver.go
index 192300ba..2152b8f2 100644
--- a/test/operations/mockserver.go
+++ b/test/operations/mockserver.go
@@ -44,6 +44,6 @@ func RunMockServer(dbType string, height uint8, networks []uint) (*bridgectrl.Br
 		MaxPageLimit:  100, //nolint:gomnd
 		BridgeVersion: "v1",
 	}
-	bridgeService := server.NewBridgeService(cfg, btCfg.Height, networks, store, rollupID)
+	bridgeService := server.NewBridgeService(cfg, btCfg.Height, networks, store)
 	return bt, store, server.RunServer(cfg, bridgeService)
 }
diff --git a/test/scripts/claim/main.go b/test/scripts/claim/main.go
index e97161c8..0005eed3 100644
--- a/test/scripts/claim/main.go
+++ b/test/scripts/claim/main.go
@@ -60,7 +60,7 @@ func main() {
 	for i := 0; i < len(proof.MerkleProof); i++ {
 		log.Debug("smtProof: ", proof.MerkleProof[i])
 		smtProof[i] = common.HexToHash(proof.MerkleProof[i])
-		log.Debug("smtRollupProof: ", proof.MerkleProof[i])
+		log.Debug("smtRollupProof: ", proof.RollupMerkleProof[i])
 		smtRollupProof[i] = common.HexToHash(proof.RollupMerkleProof[i])
 	}
 	globalExitRoot := &etherman.GlobalExitRoot{
diff --git a/test/scripts/readLatestLER/main.go b/test/scripts/readLatestLER/main.go
new file mode 100644
index 00000000..b2caf773
--- /dev/null
+++ b/test/scripts/readLatestLER/main.go
@@ -0,0 +1,36 @@
+package main
+
+import (
+	"github.com/0xPolygonHermez/zkevm-bridge-service/log"
+	"github.com/0xPolygonHermez/zkevm-node/etherman/smartcontracts/polygonzkevmglobalexitroot"
+	"github.com/ethereum/go-ethereum/accounts/abi/bind"
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/ethclient"
+)
+
+const (
+	gerManAddr = "0xa40d5f56745a118d0906a34e69aec8c0db1cb8fa"
+
+	nodeURL = "http://localhost:8124"
+)
+
+func main() {
+	client, err := ethclient.Dial(nodeURL)
+	if err != nil {
+		log.Fatal("error connecting to the node. Error: ", err)
+	}
+	g, err := polygonzkevmglobalexitroot.NewPolygonzkevmglobalexitroot(common.HexToAddress(gerManAddr), client)
+	if err != nil {
+		log.Fatal("Error: ", err)
+	}
+	rollupExitRoot, err := g.LastRollupExitRoot(&bind.CallOpts{})
+	if err != nil {
+		log.Fatal("Error: ", err)
+	}
+	// ger, err := g.GlobalExitRootMap(&bind.CallOpts{})
+	// if err != nil {
+	// 	log.Fatal("Error: ", err)
+	// }
+	// log.Info("ger! ", common.BytesToAddress(ger[:]))
+	log.Info("rollupExitRoot! ", common.BytesToHash(rollupExitRoot[:]))
+}
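With the multi-single-bridge stack up (make run-multi-single-bridge), the script can be run directly with go run ./test/scripts/readLatestLER. If the matching global exit root is also wanted, the GER contract derives it as keccak256(mainnetExitRoot || rollupExitRoot); a sketch extending the script, under the assumption that the bindings also expose the lastMainnetExitRoot getter:

    package main

    import (
    	"github.com/0xPolygonHermez/zkevm-bridge-service/log"
    	"github.com/0xPolygonHermez/zkevm-node/etherman/smartcontracts/polygonzkevmglobalexitroot"
    	"github.com/ethereum/go-ethereum/accounts/abi/bind"
    	"github.com/ethereum/go-ethereum/common"
    	"github.com/ethereum/go-ethereum/crypto"
    	"github.com/ethereum/go-ethereum/ethclient"
    )

    func main() {
    	client, err := ethclient.Dial("http://localhost:8124")
    	if err != nil {
    		log.Fatal("error connecting to the node. Error: ", err)
    	}
    	g, err := polygonzkevmglobalexitroot.NewPolygonzkevmglobalexitroot(
    		common.HexToAddress("0xa40d5f56745a118d0906a34e69aec8c0db1cb8fa"), client)
    	if err != nil {
    		log.Fatal("Error: ", err)
    	}
    	mainnetRoot, err := g.LastMainnetExitRoot(&bind.CallOpts{}) // assumed getter
    	if err != nil {
    		log.Fatal("Error: ", err)
    	}
    	rollupRoot, err := g.LastRollupExitRoot(&bind.CallOpts{})
    	if err != nil {
    		log.Fatal("Error: ", err)
    	}
    	// GER = keccak256(abi.encodePacked(mainnetExitRoot, rollupExitRoot)).
    	ger := crypto.Keccak256Hash(mainnetRoot[:], rollupRoot[:])
    	log.Info("globalExitRoot: ", ger)
    }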
", common.BytesToHash(rollupExitRoot[:])) +} diff --git a/utils/client.go b/utils/client.go index cffc5092..c9cadcae 100644 --- a/utils/client.go +++ b/utils/client.go @@ -244,6 +244,22 @@ func (c *Client) SendClaim(ctx context.Context, deposit *pb.Deposit, smtProof [m globalIndex, _ := big.NewInt(0).SetString(deposit.GlobalIndex, 0) if deposit.LeafType == LeafTypeAsset { tx, err = c.Bridge.ClaimAsset(auth, smtProof, smtRollupProof, globalIndex, globalExitRoot.ExitRoots[0], globalExitRoot.ExitRoots[1], deposit.OrigNet, common.HexToAddress(deposit.OrigAddr), deposit.DestNet, common.HexToAddress(deposit.DestAddr), amount, common.FromHex(deposit.Metadata)) + if err != nil { + a, _ := polygonzkevmbridge.PolygonzkevmbridgeMetaData.GetAbi() + input, err3 := a.Pack("claimAsset", smtProof, smtRollupProof, globalIndex, globalExitRoot.ExitRoots[0], globalExitRoot.ExitRoots[1], deposit.OrigNet, common.HexToAddress(deposit.OrigAddr), deposit.DestNet, common.HexToAddress(deposit.DestAddr), amount, common.FromHex(deposit.Metadata)) + if err3 != nil { + log.Error("error packing call. Error: ", err3) + } + log.Warnf(`Use the next command to debug it manually. + curl --location --request POST 'http://localhost:8123' \ + --header 'Content-Type: application/json' \ + --data-raw '{ + "jsonrpc": "2.0", + "method": "eth_call", + "params": [{"from": "%s","to":"%s","data":"0x%s"},"latest"], + "id": 1 + }'`, auth.From, "", common.Bytes2Hex(input)) + } } else if deposit.LeafType == LeafTypeMessage { tx, err = c.Bridge.ClaimMessage(auth, smtProof, smtRollupProof, globalIndex, globalExitRoot.ExitRoots[0], globalExitRoot.ExitRoots[1], deposit.OrigNet, common.HexToAddress(deposit.OrigAddr), deposit.DestNet, common.HexToAddress(deposit.DestAddr), amount, common.FromHex(deposit.Metadata)) }