From b9d1b0e270c3c862364a0d6e57a4d08aa45ba45c Mon Sep 17 00:00:00 2001 From: pingke Date: Sat, 11 Nov 2023 15:33:16 +0800 Subject: [PATCH 01/68] use fallback ips if AdvertiseIP no be set --- ethstorage/p2p/config.go | 3 ++- ethstorage/p2p/discovery.go | 15 ++++++++++++++- ethstorage/p2p/node.go | 2 +- 3 files changed, 17 insertions(+), 3 deletions(-) diff --git a/ethstorage/p2p/config.go b/ethstorage/p2p/config.go index c373c84a..4f2a9aa0 100644 --- a/ethstorage/p2p/config.go +++ b/ethstorage/p2p/config.go @@ -22,6 +22,7 @@ import ( "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/p2p/net/conngater" cmgr "github.com/libp2p/go-libp2p/p2p/net/connmgr" + ma "github.com/multiformats/go-multiaddr" ) var DefaultBootnodes = []*enode.Node{ @@ -47,7 +48,7 @@ type SetupP2P interface { // Host creates a libp2p host service. Returns nil, nil if p2p is disabled. Host(log log.Logger, reporter metrics.Reporter) (host.Host, error) // Discovery creates a disc-v5 service. Returns nil, nil, nil if discovery is disabled. - Discovery(log log.Logger, l1ChainID uint64, tcpPort uint16) (*enode.LocalNode, *discover.UDPv5, error) + Discovery(log log.Logger, l1ChainID uint64, tcpPort uint16, ips []ma.Multiaddr) (*enode.LocalNode, *discover.UDPv5, error) TargetPeers() uint SyncerParams() *protocol.SyncerParams GossipSetupConfigurables diff --git a/ethstorage/p2p/discovery.go b/ethstorage/p2p/discovery.go index 58be88fc..2ba25729 100644 --- a/ethstorage/p2p/discovery.go +++ b/ethstorage/p2p/discovery.go @@ -24,6 +24,7 @@ import ( "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" "github.com/multiformats/go-multiaddr" + ma "github.com/multiformats/go-multiaddr" ) const ( @@ -40,7 +41,7 @@ const ( p2pVersion = 0 ) -func (conf *Config) Discovery(log log.Logger, l1ChainID uint64, tcpPort uint16) (*enode.LocalNode, *discover.UDPv5, error) { +func (conf *Config) Discovery(log log.Logger, l1ChainID uint64, tcpPort uint16, addrs []ma.Multiaddr) (*enode.LocalNode, *discover.UDPv5, error) { if conf.NoDiscovery { return nil, nil, nil } @@ -50,6 +51,18 @@ func (conf *Config) Discovery(log log.Logger, l1ChainID uint64, tcpPort uint16) localNode := enode.NewLocalNode(conf.DiscoveryDB, priv) if conf.AdvertiseIP != nil { localNode.SetStaticIP(conf.AdvertiseIP) + } else { + for _, addr := range addrs { + ipStr, err := addr.ValueForProtocol(4) + if err != nil { + continue + } + ip := net.ParseIP(ipStr) + if ip.IsPrivate() || ip.IsLoopback() { + continue + } + localNode.SetStaticIP(ip) + } } if conf.AdvertiseUDPPort != 0 { // explicitly advertised port gets priority localNode.SetFallbackUDP(int(conf.AdvertiseUDPPort)) diff --git a/ethstorage/p2p/node.go b/ethstorage/p2p/node.go index c997de98..720e0dfb 100644 --- a/ethstorage/p2p/node.go +++ b/ethstorage/p2p/node.go @@ -178,7 +178,7 @@ func (n *NodeP2P) init(resourcesCtx context.Context, rollupCfg *rollup.EsConfig, } // All nil if disabled. 
- n.dv5Local, n.dv5Udp, err = setup.Discovery(log.New("p2p", "discv5"), l1ChainID, tcpPort) + n.dv5Local, n.dv5Udp, err = setup.Discovery(log.New("p2p", "discv5"), l1ChainID, tcpPort, n.host.Addrs()) if err != nil { return fmt.Errorf("failed to start discv5: %w", err) } From 472adbbfb9ecb34e33730c76f9bf79b7fea06722 Mon Sep 17 00:00:00 2001 From: pingke Date: Sat, 11 Nov 2023 15:45:09 +0800 Subject: [PATCH 02/68] fix --- ethstorage/p2p/discovery.go | 1 + 1 file changed, 1 insertion(+) diff --git a/ethstorage/p2p/discovery.go b/ethstorage/p2p/discovery.go index 2ba25729..22d35459 100644 --- a/ethstorage/p2p/discovery.go +++ b/ethstorage/p2p/discovery.go @@ -62,6 +62,7 @@ func (conf *Config) Discovery(log log.Logger, l1ChainID uint64, tcpPort uint16, continue } localNode.SetStaticIP(ip) + break } } if conf.AdvertiseUDPPort != 0 { // explicitly advertised port gets priority From e2462164a24bb543ecc62bdac03c8e31c381c689 Mon Sep 17 00:00:00 2001 From: syntrust Date: Wed, 22 Nov 2023 16:40:54 +0800 Subject: [PATCH 03/68] release with build --- .github/workflows/publish.yml | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) create mode 100644 .github/workflows/publish.yml diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml new file mode 100644 index 00000000..760a3f48 --- /dev/null +++ b/.github/workflows/publish.yml @@ -0,0 +1,31 @@ +# This workflow will publish an es-node release with a pre-build executable + +name: GitHub Actions ES Publish +run-name: ${{ github.actor }} is push code to main 🚀 +on: + push: + branches: [ "main", "build" ] + tags: + - 'v*' +jobs: + build: + runs-on: ubuntu-latest + steps: + - name: Checkout code + - uses: actions/checkout@v3 + + - name: Set up Go + uses: actions/setup-go@v4 + with: + go-version: '1.20' + + - name: Build + run: go build ./cmd/es-node/... + + - name: Create Release + id: create_release + uses: softprops/action-gh-release@v1 + with: + files: es-node + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} \ No newline at end of file From 27fe842f1fecd791ee88bd231693c2f0872c2165 Mon Sep 17 00:00:00 2001 From: syntrust Date: Wed, 22 Nov 2023 16:48:27 +0800 Subject: [PATCH 04/68] test --- .github/workflows/publish.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 760a3f48..75c3d43f 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -20,12 +20,12 @@ jobs: go-version: '1.20' - name: Build - run: go build ./cmd/es-node/... + run: go build -o build/es-node ./cmd/es-node/... 
- name: Create Release id: create_release uses: softprops/action-gh-release@v1 with: - files: es-node + files: [build/es-node] env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} \ No newline at end of file From 21b8e96a4444a84bc249341c15dcabfb990ab744 Mon Sep 17 00:00:00 2001 From: syntrust Date: Wed, 22 Nov 2023 16:51:05 +0800 Subject: [PATCH 05/68] fix --- .github/workflows/publish.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 75c3d43f..916e255d 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -12,7 +12,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout code - - uses: actions/checkout@v3 + uses: actions/checkout@v3 - name: Set up Go uses: actions/setup-go@v4 From 79a18aa1e6e945dc39c8aa6764924e308b3cae17 Mon Sep 17 00:00:00 2001 From: syntrust Date: Wed, 22 Nov 2023 16:52:36 +0800 Subject: [PATCH 06/68] test --- .github/workflows/publish.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 916e255d..9fa92af7 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -26,6 +26,6 @@ jobs: id: create_release uses: softprops/action-gh-release@v1 with: - files: [build/es-node] + files: build/es-node env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} \ No newline at end of file From a2c200953b1b070f9cc7288cf20f16c66d696044 Mon Sep 17 00:00:00 2001 From: syntrust Date: Wed, 22 Nov 2023 17:08:21 +0800 Subject: [PATCH 07/68] fix --- .github/workflows/publish.yml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 9fa92af7..7769d4f3 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -1,10 +1,9 @@ # This workflow will publish an es-node release with a pre-build executable name: GitHub Actions ES Publish -run-name: ${{ github.actor }} is push code to main 🚀 +run-name: ${{ github.actor }} is publish a release 🚀 on: push: - branches: [ "main", "build" ] tags: - 'v*' jobs: @@ -23,7 +22,6 @@ jobs: run: go build -o build/es-node ./cmd/es-node/... - name: Create Release - id: create_release uses: softprops/action-gh-release@v1 with: files: build/es-node From a6dba7630daba1f93d734f7ee7e51df752cebb8d Mon Sep 17 00:00:00 2001 From: syntrust Date: Wed, 22 Nov 2023 17:14:05 +0800 Subject: [PATCH 08/68] fix --- .github/workflows/publish.yml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 7769d4f3..69170f68 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -4,6 +4,7 @@ name: GitHub Actions ES Publish run-name: ${{ github.actor }} is publish a release 🚀 on: push: + branches: [ "main", "build" ] tags: - 'v*' jobs: @@ -19,7 +20,9 @@ jobs: go-version: '1.20' - name: Build - run: go build -o build/es-node ./cmd/es-node/... + run: | + mkdir build + go build -o build/es-node ./cmd/es-node/... 
- name: Create Release uses: softprops/action-gh-release@v1 From 1e40492508f0ac1109591edb641889dadc5ca0e0 Mon Sep 17 00:00:00 2001 From: syntrust Date: Wed, 22 Nov 2023 17:22:30 +0800 Subject: [PATCH 09/68] add tag --- .github/workflows/publish.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 69170f68..9568ceec 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -4,7 +4,6 @@ name: GitHub Actions ES Publish run-name: ${{ github.actor }} is publish a release 🚀 on: push: - branches: [ "main", "build" ] tags: - 'v*' jobs: @@ -27,6 +26,7 @@ jobs: - name: Create Release uses: softprops/action-gh-release@v1 with: + tag_name: ${{ github.ref }} files: build/es-node env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} \ No newline at end of file From 1401a33ec4cd2d9429055e7cad04bbb037fb7706 Mon Sep 17 00:00:00 2001 From: syntrust Date: Wed, 22 Nov 2023 17:55:57 +0800 Subject: [PATCH 10/68] fix --- .github/workflows/publish.yml | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 9568ceec..73d2d491 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -1,7 +1,7 @@ # This workflow will publish an es-node release with a pre-build executable name: GitHub Actions ES Publish -run-name: ${{ github.actor }} is publish a release 🚀 +run-name: ${{ github.actor }} is publishing a release 🚀 on: push: tags: @@ -23,10 +23,13 @@ jobs: mkdir build go build -o build/es-node ./cmd/es-node/... + - name: Generate Changelog + run: echo "# Good things have arrived" > ${{ github.workspace }}-CHANGELOG.txt + - name: Create Release uses: softprops/action-gh-release@v1 with: tag_name: ${{ github.ref }} - files: build/es-node - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} \ No newline at end of file + release_name: Release ${{ github.ref }} + body_path: ${{ github.workspace }}-CHANGELOG.txt + files: build/es-node \ No newline at end of file From 98fa598b0a59b02fa20f946d3babd601b33480e1 Mon Sep 17 00:00:00 2001 From: syntrust Date: Thu, 23 Nov 2023 10:55:19 +0800 Subject: [PATCH 11/68] fix --- .github/workflows/publish.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 73d2d491..dfba585e 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -30,6 +30,6 @@ jobs: uses: softprops/action-gh-release@v1 with: tag_name: ${{ github.ref }} - release_name: Release ${{ github.ref }} + name: Release ${{ github.ref }} body_path: ${{ github.workspace }}-CHANGELOG.txt files: build/es-node \ No newline at end of file From 04d6f9f81191fe759afe5f8cf184731ed0fa2ea0 Mon Sep 17 00:00:00 2001 From: Qiang Zhu Date: Thu, 23 Nov 2023 15:15:44 +0800 Subject: [PATCH 12/68] local cache for metas and lastKvIdx --- ethstorage/downloader/downloader.go | 67 ++++++------ ethstorage/node/node.go | 6 +- ethstorage/node/node_mine_test.go | 5 +- ethstorage/p2p/node.go | 4 +- ethstorage/p2p/protocol/syncclient.go | 30 +++--- ethstorage/storage_manager.go | 150 +++++++++++++++++++++----- ethstorage/storage_manager_test.go | 6 +- 7 files changed, 182 insertions(+), 86 deletions(-) diff --git a/ethstorage/downloader/downloader.go b/ethstorage/downloader/downloader.go index 968c9c7a..4158b477 100644 --- a/ethstorage/downloader/downloader.go +++ b/ethstorage/downloader/downloader.go @@ -30,11 +30,11 @@ const ( TrackSafe // 
1 TrackFinalized // 2 - downloadBatchSize = 64 // 2 epoch + downloadBatchSize = 64 // 2 epoch ) var ( - downloaderPrefix = []byte("dl-") + downloaderPrefix = []byte("dl-") lastDownloadKey = []byte("last-download-block") ) @@ -53,10 +53,10 @@ type Downloader struct { latestHead int64 dumpDir string minDurationForBlobsRequest uint64 - + // Request to download new blobs dlLatestReq chan struct{} - dlFinalizedReq chan struct{} + dlFinalizedReq chan struct{} log log.Logger done chan struct{} @@ -80,11 +80,11 @@ type blockBlobs struct { func NewDownloader( - l1Source *eth.PollingClient, + l1Source *eth.PollingClient, l1Beacon *eth.BeaconClient, db ethdb.Database, - sm *ethstorage.StorageManager, - downloadStart int64, + sm *ethstorage.StorageManager, + downloadStart int64, downloadDump string, minDurationForBlobsRequest uint64, downloadThreadNum int, @@ -92,7 +92,7 @@ func NewDownloader( ) *Downloader{ sm.DownloadThreadNum = downloadThreadNum return &Downloader{ - Cache: NewBlobCache(), + Cache: NewBlobCache(), l1Source: l1Source, l1Beacon: l1Beacon, db: db, @@ -108,7 +108,7 @@ func NewDownloader( } // Start starts up the state loop. -func (s *Downloader) Start() error { +func (s *Downloader) Start() error { // user does NOT specify a download start in the flag if s.lastDownloadBlock == 0 { bs, err := s.db.Get(append(downloaderPrefix, lastDownloadKey...)) @@ -138,7 +138,10 @@ func (s *Downloader) Start() error { } } - s.sm.Reset(s.lastDownloadBlock) + err := s.sm.Reset(s.lastDownloadBlock) + if err != nil { + return err + } s.wg.Add(1) go s.eventLoop() @@ -158,7 +161,7 @@ func (s *Downloader) OnL1Finalized(finalized uint64) { } s.finalizedHead = int64(finalized) s.mu.Unlock() - + select { case s.dlFinalizedReq <- struct{}{}: return @@ -169,7 +172,7 @@ func (s *Downloader) OnL1Finalized(finalized uint64) { } func (s *Downloader) OnNewL1Head(head eth.L1BlockRef) { - s.mu.Lock() + s.mu.Lock() if s.latestHead > int64(head.Number) { s.log.Info("The tracking head is greater than new one, a reorg may happen", "tracking", s.latestHead, "new", head) } @@ -209,7 +212,7 @@ func (s *Downloader) downloadToCache() { return } end := s.latestHead - start := s.lastCacheBlock + start := s.lastCacheBlock if start == 0 { start = s.finalizedHead } @@ -218,7 +221,7 @@ func (s *Downloader) downloadToCache() { // @Qiang devnet-4 have issues to get blob event for the latest block, so if we need roll back to devnet-4 // we may need to change it to s.downloadRange(start, end, true) _, err := s.downloadRange(start + 1, end, true) - + if err == nil { s.lastCacheBlock = end } else { @@ -233,7 +236,7 @@ func (s *Downloader) download() { if (s.lastDownloadBlock > 0) && (trackHead - s.lastDownloadBlock > int64(s.minDurationForBlobsRequest)) { // TODO: @Qiang we can also enter into an recovery mode (e.g., scan local blobs to obtain a heal list, more complicated, will do later) - prompt := "Ethereum only keep blobs for one month, but it has been over one month since last blob download." + + prompt := "Ethereum only keep blobs for one month, but it has been over one month since last blob download." 
+ "You may need to restart this node with full re-sync" s.log.Error(prompt) return @@ -270,14 +273,14 @@ func (s *Downloader) download() { // save lastDownloadedBlock into database bs := make([]byte, 8) binary.LittleEndian.PutUint64(bs, uint64(end)) - + err = s.db.Put(append(downloaderPrefix, lastDownloadKey...), bs) if err != nil { s.log.Error("Save lastDownloadedBlock into db error", "err", err) return } s.log.Info("LastDownloadedBlock saved into db", "lastDownloadedBlock", end) - + s.dumpBlobsIfNeeded(blobs) s.lastDownloadBlock = end @@ -288,17 +291,17 @@ func (s *Downloader) download() { s.Cache.Cleanup(uint64(trackHead)) } -// The entire downloading process consists of two phases: +// The entire downloading process consists of two phases: // 1. Downloading the blobs into the cache when they are not finalized, with the option toCache set to true. // 2. Writing the blobs into the shard file when they are finalized, with the option toCache set to false. // we will attempt to read the blobs from the cache initially. If they don't exist in the cache, we will download them instead. func (s *Downloader) downloadRange(start int64, end int64, toCache bool) ([]blob, error) { ts := time.Now() - + if end < start { end = start } - + events, err := s.l1Source.FilterLogsByBlockRange(big.NewInt(int64(start)), big.NewInt(int64(end))) if err != nil { return nil, err @@ -306,7 +309,7 @@ func (s *Downloader) downloadRange(start int64, end int64, toCache bool) ([]blob elBlocks, err := s.eventsToBlocks(events) if err != nil { return nil, err - } + } blobs := []blob{} for _, elBlock := range elBlocks { // attempt to read the blobs from the cache first @@ -317,22 +320,22 @@ func (s *Downloader) downloadRange(start int64, end int64, toCache bool) ([]blob continue } else { s.log.Info( - "Don't find blob in the cache, will try to download directly", - "blockNumber", elBlock.number, - "start", start, + "Don't find blob in the cache, will try to download directly", + "blockNumber", elBlock.number, + "start", start, "end", end, "toCache", toCache, ) } - + clBlobs, err := s.l1Beacon.DownloadBlobs(s.l1Beacon.Timestamp2Slot(elBlock.timestamp)) if err != nil { s.log.Error("L1 beacon download blob error", "err", err) return nil, err } - + for _, elBlob := range elBlock.blobs { - clBlob, exists := clBlobs[elBlob.hash]; + clBlob, exists := clBlobs[elBlob.hash]; if !exists { s.log.Error("Did not find the event specified blob in the CL") @@ -344,7 +347,7 @@ func (s *Downloader) downloadRange(start int64, end int64, toCache bool) ([]blob s.Cache.SetBlockBlobs(elBlock) } } - + s.log.Info("Download range", "cache", toCache, "start", start, "end", end, "blobNumber", len(blobs), "duration(ms)", time.Since(ts).Milliseconds()) return blobs, nil @@ -352,7 +355,7 @@ func (s *Downloader) downloadRange(start int64, end int64, toCache bool) ([]blob func (s *Downloader) dumpBlobsIfNeeded(blobs []blob) { if s.dumpDir != "" { - for _, blob := range blobs { + for _, blob := range blobs { fileName := filepath.Join(s.dumpDir, fmt.Sprintf("%s.dat", hex.EncodeToString(blob.data[:5]))) f, err := os.Create(fileName) if err != nil { @@ -360,7 +363,7 @@ func (s *Downloader) dumpBlobsIfNeeded(blobs []blob) { return } defer f.Close() - + writer := bufio.NewWriter(f) writer.WriteString(string(blob.data)) writer.Flush() @@ -384,13 +387,13 @@ func (s *Downloader) eventsToBlocks(events []types.Log) ([]*blockBlobs, error) { number: event.BlockNumber, hash: event.BlockHash, blobs: []*blob{}, - }) + }) } block := blocks[len(blocks) - 1] hash := 
common.Hash{} copy(hash[:], event.Topics[3][:]) - + blob := blob{ kvIndex: big.NewInt(0).SetBytes(event.Topics[1][:]), kvSize: big.NewInt(0).SetBytes(event.Topics[2][:]), diff --git a/ethstorage/node/node.go b/ethstorage/node/node.go index 113552c8..0ad1df3c 100644 --- a/ethstorage/node/node.go +++ b/ethstorage/node/node.go @@ -252,7 +252,11 @@ func (n *EsNode) Start(ctx context.Context, cfg *Config) error { return err } - n.p2pNode.Start() + if err := n.p2pNode.Start(); err != nil { + n.log.Error("Could not start a p2pNode", "err", err) + return err + } + return nil } diff --git a/ethstorage/node/node_mine_test.go b/ethstorage/node/node_mine_test.go index f2f8bb99..76be5f1d 100644 --- a/ethstorage/node/node_mine_test.go +++ b/ethstorage/node/node_mine_test.go @@ -187,10 +187,7 @@ func fillEmpty(t *testing.T, n *EsNode, contract common.Address) { t.Fatalf("Failed to get block number %v", err) } n.storageManager.Reset(int64(block)) - lastBlobIdx, err := n.storageManager.LastKvIndex() - if err != nil { - t.Fatalf("get lastBlobIdx for FillEmptyKV fail, err: %s", err.Error()) - } + lastBlobIdx := n.storageManager.LastKvIndex() limit := n.storageManager.KvEntries() * uint64(len(shardIds)) for idx := lastBlobIdx; idx < limit; idx++ { err = n.storageManager.CommitBlob(idx, empty, common.Hash{}) diff --git a/ethstorage/p2p/node.go b/ethstorage/p2p/node.go index c997de98..0e799836 100644 --- a/ethstorage/p2p/node.go +++ b/ethstorage/p2p/node.go @@ -234,8 +234,8 @@ func (n *NodeP2P) ConnectionManager() connmgr.ConnManager { return n.connMgr } -func (n *NodeP2P) Start() { - n.syncCl.Start() +func (n *NodeP2P) Start() error { + return n.syncCl.Start() } func (n *NodeP2P) Close() error { diff --git a/ethstorage/p2p/protocol/syncclient.go b/ethstorage/p2p/protocol/syncclient.go index c02bfb9c..8cbe7498 100644 --- a/ethstorage/p2p/protocol/syncclient.go +++ b/ethstorage/p2p/protocol/syncclient.go @@ -129,9 +129,11 @@ type StorageManager interface { StorageManagerWriter - LastKvIndex() (uint64, error) + LastKvIndex() uint64 DecodeKV(kvIdx uint64, b []byte, hash common.Hash, providerAddr common.Address, encodeType uint64) ([]byte, bool, error) + + DownloadAllMetas() error } type SyncClient struct { @@ -277,12 +279,7 @@ func (s *SyncClient) loadSyncStatus() { } // create tasks - lastKvIndex, err := s.storageManager.LastKvIndex() - if err != nil { - // TODO: panic? 
- log.Info("LoadSyncStatus failed: get lastKvIdx") - lastKvIndex = 0 - } + lastKvIndex := s.storageManager.LastKvIndex() for _, sid := range s.storageManager.Shards() { exist := false for _, task := range progress.Tasks { @@ -448,7 +445,7 @@ func (s *SyncClient) cleanTasks() { } } -func (s *SyncClient) Start() { +func (s *SyncClient) Start() error { if s.startTime == (time.Time{}) { s.startTime = time.Now() s.logTime = time.Now() @@ -456,9 +453,18 @@ func (s *SyncClient) Start() { // Retrieve the previous sync status from LevelDB and abort if already synced s.loadSyncStatus() + s.cleanTasks() + if !s.syncDone { + err := s.storageManager.DownloadAllMetas() + if err != nil { + return err + } + } s.wg.Add(1) go s.mainLoop() + + return nil } func (s *SyncClient) AddPeer(id peer.ID, shards map[common.Address][]uint64) bool { @@ -999,14 +1005,12 @@ func (s *SyncClient) FillFileWithEmptyBlob(start, limit uint64) (uint64, error) next = start ) defer s.metrics.ClientFillEmptyBlobsEvent(inserted, time.Since(st)) - lastBlobIdx, err := s.storageManager.LastKvIndex() - if err != nil { - return start, fmt.Errorf("get lastBlobIdx for FillEmptyKV fail, err: %s", err.Error()) - } + lastBlobIdx := s.storageManager.LastKvIndex() + if start < lastBlobIdx { start = lastBlobIdx } - inserted, next, err = s.storageManager.CommitEmptyBlobs(start, limit) + inserted, next, err := s.storageManager.CommitEmptyBlobs(start, limit) return next, err } diff --git a/ethstorage/storage_manager.go b/ethstorage/storage_manager.go index 9a96c6f3..15c80701 100644 --- a/ethstorage/storage_manager.go +++ b/ethstorage/storage_manager.go @@ -5,12 +5,16 @@ package ethstorage import ( "bytes" + "context" "errors" "math/big" "sync" + "time" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/rpc" ) const ( @@ -26,22 +30,26 @@ type Il1Source interface { GetKvMetas(kvIndices []uint64, blockNumber int64) ([][32]byte, error) GetStorageLastBlobIdx(blockNumber int64) (uint64, error) + + HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) } // StorageManager is a higher-level abstract of ShardManager which provides multi-thread safety to storage file read/write // and a consistent view of most-recent-finalized L1 block. type StorageManager struct { shardManager *ShardManager - mu sync.Mutex // protect localL1 and underlying blob read/write - localL1 int64 // local view of most-recent-finalized L1 block + mu sync.Mutex // protect lastKvIdx, shardManager and blobMeta read/write state + lastKvIdx uint64 // lastKvIndex in the most-recent-finalized L1 block l1Source Il1Source DownloadThreadNum int + blobMetas map[uint64][32]byte } func NewStorageManager(sm *ShardManager, l1Source Il1Source) *StorageManager { return &StorageManager{ shardManager: sm, l1Source: l1Source, + blobMetas: map[uint64][32]byte{}, } } @@ -55,12 +63,13 @@ func (s *StorageManager) DownloadFinished(newL1 int64, kvIndices []uint64, blobs s.mu.Lock() defer s.mu.Unlock() - // in most case, newL1 should be equal to s.localL1 + 32 - // but it is possible that the node was shutdown for some time, and when it restart and DownloadFinished for the first time - // the new finalized L1 will be larger than that, so we just do the simple compare check here. 
- if newL1 <= s.localL1 { - return errors.New("new L1 is older than local L1") + lastKvIdx, err := s.l1Source.GetStorageLastBlobIdx(newL1) + if err != nil { + return err } + s.lastKvIdx = lastKvIdx + + s.updateLocalMetas(kvIndices, commits) taskNum := s.DownloadThreadNum var wg sync.WaitGroup @@ -108,8 +117,6 @@ func (s *StorageManager) DownloadFinished(newL1 int64, kvIndices []uint64, blobs } } - s.localL1 = newL1 - return nil } @@ -125,11 +132,16 @@ func prepareCommit(commit common.Hash) common.Hash { } // Reset This function must be called before calling any other funcs, it will setup a local L1 view for the node. -func (s *StorageManager) Reset(newL1 int64) { +func (s *StorageManager) Reset(newL1 int64) error { s.mu.Lock() defer s.mu.Unlock() - s.localL1 = newL1 + lastKvIdx, err := s.l1Source.GetStorageLastBlobIdx(newL1) + if err != nil { + return err + } + s.lastKvIdx = lastKvIdx + return nil } // CommitBlobs This function will be called when p2p sync received blobs. It will commit the blobs @@ -156,18 +168,16 @@ func (s *StorageManager) CommitBlobs(kvIndices []uint64, blobs [][]byte, commits s.mu.Lock() defer s.mu.Unlock() - metas, err := s.l1Source.GetKvMetas(kvIndices, s.localL1) - if err != nil { - return nil, err - } + metas := s.getKvMetas(kvIndices) + inserted := []uint64{} for i, contractMeta := range metas { if !encoded[i] { continue } - err = s.commitEncodedBlob(kvIndices[i], encodedBlobs[i], commits[i], contractMeta) + err := s.commitEncodedBlob(kvIndices[i], encodedBlobs[i], commits[i], contractMeta) if err != nil { - log.Info("commit blobs fail", "kvIndex", kvIndices[i], "err", err.Error()) + log.Info("Commit blobs fail", "kvIndex", kvIndices[i], "err", err.Error()) continue } inserted = append(inserted, kvIndices[i]) @@ -198,16 +208,14 @@ func (s *StorageManager) CommitEmptyBlobs(start, limit uint64) (uint64, uint64, s.mu.Lock() defer s.mu.Unlock() - metas, err := s.l1Source.GetKvMetas(kvIndices, s.localL1) - if err != nil { - return inserted, next, err - } + metas:= s.getKvMetas(kvIndices) + for i, index := range kvIndices { - err = s.commitEncodedBlob(index, encodedBlobs[i], hash, metas[i]) + err := s.commitEncodedBlob(index, encodedBlobs[i], hash, metas[i]) if err == nil { inserted++ } else if err != errCommitMismatch { - log.Info("commit blobs fail", "kvIndex", kvIndices[i], "err", err.Error()) + log.Info("Commit blobs fail", "kvIndex", kvIndices[i], "err", err.Error()) break } // if meta is not equal to empty hash, that mean the blob is not empty, @@ -226,10 +234,8 @@ func (s *StorageManager) CommitBlob(kvIndex uint64, blob []byte, commit common.H } s.mu.Lock() defer s.mu.Unlock() - metas, err := s.l1Source.GetKvMetas([]uint64{kvIndex}, s.localL1) - if err != nil { - return err - } + metas:= s.getKvMetas([]uint64{kvIndex}) + if len(metas) != 1 { return errors.New("invalid params lens") } @@ -293,6 +299,92 @@ func (s *StorageManager) syncCheck(kvIdx uint64) error { return nil } +// DownloadAllMetas This function download the blob hashes of all the local storage shards from the smart contract +func (s *StorageManager) DownloadAllMetas() error { + s.mu.Lock() + defer s.mu.Unlock() + + header, err := s.l1Source.HeaderByNumber(context.Background(), big.NewInt(rpc.FinalizedBlockNumber.Int64())) + if err != nil { + return err + } + l1 := header.Number.Int64() + + lastKvIdx, err := s.l1Source.GetStorageLastBlobIdx(l1) + if err != nil { + return err + } + + for _, sid := range s.Shards() { + first, limit := s.KvEntries()*sid, s.KvEntries()*(sid+1) + + // batch request metas 
until the lastKvIdx + end := limit + if end > lastKvIdx { + end = lastKvIdx + } + log.Info("Begin to download metas", "shard", sid, "first", first, "end", end, "limit", limit, "lastKvIdx", lastKvIdx) + ts := time.Now() + + for first < end { + batchLimit := first + 10000 + if batchLimit > end { + batchLimit = end + } + kvIndices := []uint64{} + for i := first; i < batchLimit; i++ { + kvIndices = append(kvIndices, i) + } + + metas, err := s.l1Source.GetKvMetas(kvIndices, l1) + if err != nil { + return err + } + + for i, meta := range(metas) { + s.blobMetas[kvIndices[i]] = meta + } + + log.Info("One batch metas has been downloaded", "first", first, "batchLimit", batchLimit) + + first = batchLimit + } + + log.Info("All the metas has been downloaded", "first", first, "end", end, "time", time.Since(ts).Seconds()) + ts = time.Now() + + // empty blobs + for i := end; i < limit; i++ { + meta := [32]byte{} + new(big.Int).SetInt64(int64(i)).FillBytes(meta[0:5]) + + s.blobMetas[i] = meta + } + + log.Info("Empty metas has been filled", "first", end, "limit", limit, "time", time.Since(ts).Seconds()) + } + + return nil +} + +func (s *StorageManager) updateLocalMetas(kvIndices []uint64, commits []common.Hash) { + for i, idx := range kvIndices { + meta := [32]byte{} + new(big.Int).SetInt64(int64(idx)).FillBytes(meta[0:5]) + copy(meta[32-HashSizeInContract:32], commits[i][0:HashSizeInContract]) + + s.blobMetas[idx] = meta + } +} + +func (s *StorageManager) getKvMetas(kvIndices []uint64) [][32]byte { + metas := [][32]byte{} + for _, i := range kvIndices { + metas = append(metas, s.blobMetas[i]) + } + return metas +} + // TryReadEncoded This function will read the encoded data from the local storage file. It also check whether the blob is empty or not synced, // if they are these two cases, it will return err. 
func (s *StorageManager) TryReadEncoded(kvIdx uint64, readLen int) ([]byte, bool, error) { @@ -320,10 +412,10 @@ func (s *StorageManager) TryReadMeta(kvIdx uint64) ([]byte, bool, error) { return s.shardManager.TryReadMeta(kvIdx) } -func (s *StorageManager) LastKvIndex() (uint64, error) { +func (s *StorageManager) LastKvIndex() uint64 { s.mu.Lock() defer s.mu.Unlock() - return s.l1Source.GetStorageLastBlobIdx(s.localL1) + return s.lastKvIdx } func (s *StorageManager) DecodeKV(kvIdx uint64, b []byte, hash common.Hash, providerAddr common.Address, encodeType uint64) ([]byte, bool, error) { diff --git a/ethstorage/storage_manager_test.go b/ethstorage/storage_manager_test.go index 0c899909..372228ff 100644 --- a/ethstorage/storage_manager_test.go +++ b/ethstorage/storage_manager_test.go @@ -57,11 +57,7 @@ func setup(t *testing.T) { func TestStorageManager_LastKvIndex(t *testing.T) { setup(t) - idx, err := storageManager.LastKvIndex() - if err != nil { - t.Fatal("failed to get lastKvIndex", err) - } - + idx := storageManager.LastKvIndex() t.Log("lastKvIndex", idx) } From bfbe9a2c8f93d0d5adcf4270a0040f59f93741c9 Mon Sep 17 00:00:00 2001 From: Qiang Zhu Date: Thu, 23 Nov 2023 18:28:10 +0800 Subject: [PATCH 13/68] download in parallel --- ethstorage/storage_manager.go | 98 +++++++++++++++++++++++++++-------- 1 file changed, 76 insertions(+), 22 deletions(-) diff --git a/ethstorage/storage_manager.go b/ethstorage/storage_manager.go index 15c80701..67842a71 100644 --- a/ethstorage/storage_manager.go +++ b/ethstorage/storage_manager.go @@ -20,6 +20,8 @@ import ( const ( blobFillingMask = byte(0b10000000) HashSizeInContract = 24 + MetaBatchSize = 8000 + MetaDownloadThread = 32 ) var ( @@ -42,6 +44,7 @@ type StorageManager struct { lastKvIdx uint64 // lastKvIndex in the most-recent-finalized L1 block l1Source Il1Source DownloadThreadNum int + metaMapMu sync.Mutex // this is only for blobMetas writing thread safe in downloadMetaInParallel blobMetas map[uint64][32]byte } @@ -326,28 +329,9 @@ func (s *StorageManager) DownloadAllMetas() error { log.Info("Begin to download metas", "shard", sid, "first", first, "end", end, "limit", limit, "lastKvIdx", lastKvIdx) ts := time.Now() - for first < end { - batchLimit := first + 10000 - if batchLimit > end { - batchLimit = end - } - kvIndices := []uint64{} - for i := first; i < batchLimit; i++ { - kvIndices = append(kvIndices, i) - } - - metas, err := s.l1Source.GetKvMetas(kvIndices, l1) - if err != nil { - return err - } - - for i, meta := range(metas) { - s.blobMetas[kvIndices[i]] = meta - } - - log.Info("One batch metas has been downloaded", "first", first, "batchLimit", batchLimit) - - first = batchLimit + err := s.downloadMetaInParallel(first, end, l1) + if err != nil { + return err } log.Info("All the metas has been downloaded", "first", first, "end", end, "time", time.Since(ts).Seconds()) @@ -367,6 +351,76 @@ func (s *StorageManager) DownloadAllMetas() error { return nil } +func (s *StorageManager) downloadMetaInParallel(from, to uint64, l1 int64) error { + var wg sync.WaitGroup + taskNum := uint64(MetaDownloadThread) + + // We don't need to download in parallel if the meta amount is small + if to - from < uint64(taskNum) * MetaBatchSize { + return s.downloadMetaInRange(from, to, l1, 0) + } + + chanRes := make(chan error, taskNum) + defer close(chanRes) + + rangeSize := (to - from) / uint64(taskNum) + for taskIdx := uint64(0); taskIdx < taskNum; taskIdx++ { + rangeStart := taskIdx * rangeSize + rangeEnd := (taskIdx + 1) * rangeSize + if taskIdx == taskNum - 1 
{ + rangeEnd = to + } + wg.Add(1) + + go func(start, end, taskId uint64, out chan<- error) { + defer wg.Done() + err := s.downloadMetaInRange(start, end, l1, taskId) + + chanRes <- err + }(rangeStart, rangeEnd, taskIdx, chanRes) + } + + wg.Wait() + + for i := uint64(0); i < taskNum; i++ { + res := <- chanRes + if (res != nil) { + return res + } + } + + return nil +} + +func (s *StorageManager) downloadMetaInRange(from, to uint64, l1 int64, taskId uint64) error { + for from < to { + batchLimit := from + MetaBatchSize + if batchLimit > to { + batchLimit = to + } + kvIndices := []uint64{} + for i := from; i < batchLimit; i++ { + kvIndices = append(kvIndices, i) + } + + metas, err := s.l1Source.GetKvMetas(kvIndices, l1) + if err != nil { + return err + } + + s.metaMapMu.Lock() + for i, meta := range(metas) { + s.blobMetas[kvIndices[i]] = meta + } + s.metaMapMu.Unlock() + + log.Info("One batch metas has been downloaded", "first", from, "batchLimit", batchLimit, "to", to, "taskId", taskId) + + from = batchLimit + } + return nil +} + func (s *StorageManager) updateLocalMetas(kvIndices []uint64, commits []common.Hash) { for i, idx := range kvIndices { meta := [32]byte{} From ee4d0226ef457b88f4b0cd91da67a6e6ae34afaa Mon Sep 17 00:00:00 2001 From: syntrust Date: Thu, 23 Nov 2023 19:45:14 +0800 Subject: [PATCH 14/68] gen notes --- .github/workflows/publish.yml | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index dfba585e..f9f3a6cc 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -19,17 +19,13 @@ jobs: go-version: '1.20' - name: Build - run: | - mkdir build - go build -o build/es-node ./cmd/es-node/... - - - name: Generate Changelog - run: echo "# Good things have arrived" > ${{ github.workspace }}-CHANGELOG.txt + run: go build -o es-node-${{ github.ref }} ./cmd/es-node/... - name: Create Release uses: softprops/action-gh-release@v1 with: tag_name: ${{ github.ref }} name: Release ${{ github.ref }} - body_path: ${{ github.workspace }}-CHANGELOG.txt - files: build/es-node \ No newline at end of file + generate_release_notes: true + body: "# What's Changed" + files: es-node-${{ github.ref }} \ No newline at end of file From 5eff578547943ca25da46b83bdda30534fd34c4a Mon Sep 17 00:00:00 2001 From: syntrust Date: Thu, 23 Nov 2023 19:59:28 +0800 Subject: [PATCH 15/68] gen notes --- .github/workflows/publish.yml | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index f9f3a6cc..072efbc2 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -19,13 +19,12 @@ jobs: go-version: '1.20' - name: Build - run: go build -o es-node-${{ github.ref }} ./cmd/es-node/... + run: go build -o es-node-${{ github.event.release.tag_name }} ./cmd/es-node/... 
- name: Create Release uses: softprops/action-gh-release@v1 with: tag_name: ${{ github.ref }} - name: Release ${{ github.ref }} + name: Release ${{ github.event.release.tag_name }} generate_release_notes: true - body: "# What's Changed" - files: es-node-${{ github.ref }} \ No newline at end of file + files: es-node-${{ github.event.release.tag_name }} \ No newline at end of file From 45be31c58f713938dd09137bf0a0ad0d828c8eef Mon Sep 17 00:00:00 2001 From: syntrust Date: Thu, 23 Nov 2023 20:07:55 +0800 Subject: [PATCH 16/68] use ref_name --- .github/workflows/publish.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 072efbc2..cc75175c 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -19,12 +19,12 @@ jobs: go-version: '1.20' - name: Build - run: go build -o es-node-${{ github.event.release.tag_name }} ./cmd/es-node/... + run: go build -o es-node-${{github.ref_name}} ./cmd/es-node/... - name: Create Release uses: softprops/action-gh-release@v1 with: tag_name: ${{ github.ref }} - name: Release ${{ github.event.release.tag_name }} - generate_release_notes: true - files: es-node-${{ github.event.release.tag_name }} \ No newline at end of file + name: Release ${{github.ref_name}} + files: es-node-${{github.ref_name}} + generate_release_notes: true \ No newline at end of file From 1d599771d60f21bd9a7626129dc6e9813cf81f4e Mon Sep 17 00:00:00 2001 From: syntrust Date: Fri, 24 Nov 2023 09:42:59 +0800 Subject: [PATCH 17/68] add os to file name --- .github/workflows/publish.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index cc75175c..7abc19b3 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -19,12 +19,12 @@ jobs: go-version: '1.20' - name: Build - run: go build -o es-node-${{github.ref_name}} ./cmd/es-node/... + run: go build -o es-node.${{github.ref_name}}.ubuntu ./cmd/es-node/... 
- name: Create Release uses: softprops/action-gh-release@v1 with: tag_name: ${{ github.ref }} name: Release ${{github.ref_name}} - files: es-node-${{github.ref_name}} + files: es-node.${{github.ref_name}}.ubuntu generate_release_notes: true \ No newline at end of file From 1a4d368f332984af02610d2a1b5d2f91887f0dbe Mon Sep 17 00:00:00 2001 From: syntrust Date: Fri, 24 Nov 2023 10:54:11 +0800 Subject: [PATCH 18/68] sum nonce --- ethstorage/miner/worker.go | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/ethstorage/miner/worker.go b/ethstorage/miner/worker.go index 6fb70c15..46c64937 100644 --- a/ethstorage/miner/worker.go +++ b/ethstorage/miner/worker.go @@ -222,6 +222,7 @@ func (w *worker) assignTasks(task task, block eth.L1BlockRef, reqDiff *big.Int) w.lg.Debug("Mining task queued", "shard", ti.shardIdx, "thread", ti.thread, "block", ti.blockNumber, "blockTime", block.Time, "now", uint64(time.Now().Unix())) } } + w.lg.Info("Mining tasks assigned", "shard", task.shardIdx, "threads", w.config.ThreadsPerShard, "block", block.Number, "nonces", w.config.NonceLimit) } func (w *worker) updateDifficulty(shardIdx, blockTime uint64) (*big.Int, error) { @@ -352,13 +353,16 @@ func (w *worker) checkTxStatus(txHash common.Hash, miner common.Address) { func (w *worker) mineTask(t *taskItem) (bool, error) { startTime := time.Now() nonce := t.nonceStart - if t.thread == 0 { - w.lg.Info("Mining tasks started", "shard", t.shardIdx, "threads", w.config.ThreadsPerShard, "block", t.blockNumber, "nonces", fmt.Sprintf("%d~%d", 0, w.config.NonceLimit)) - } w.lg.Debug("Mining task started", "shard", t.shardIdx, "thread", t.thread, "block", t.blockNumber, "nonces", fmt.Sprintf("%d~%d", t.nonceStart, t.nonceEnd)) for w.isRunning() { if time.Since(startTime).Seconds() > mineTimeOut { - w.lg.Warn("Mining task timed out", "shard", t.shardIdx, "thread", t.thread, "block", t.blockNumber, "noncesTried", nonce-t.nonceStart) + if t.thread == 0 { + nonceTriedTotal := (nonce - t.nonceStart) * w.config.ThreadsPerShard + w.lg.Warn("Mining tasks timed out", "shard", t.shardIdx, "block", t.blockNumber, + "noncesTried", fmt.Sprintf("%d(%.1f%%)", nonceTriedTotal, float64(nonceTriedTotal)/float64(w.config.NonceLimit)), + ) + } + w.lg.Debug("Mining task timed out", "shard", t.shardIdx, "thread", t.thread, "block", t.blockNumber, "noncesTried", nonce-t.nonceStart) break } if nonce >= t.nonceEnd { From d21a2ba6155529457fb7d893fe3366fbcbd2bd98 Mon Sep 17 00:00:00 2001 From: syntrust Date: Fri, 24 Nov 2023 11:16:35 +0800 Subject: [PATCH 19/68] minor --- ethstorage/miner/worker.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ethstorage/miner/worker.go b/ethstorage/miner/worker.go index 46c64937..96cac51d 100644 --- a/ethstorage/miner/worker.go +++ b/ethstorage/miner/worker.go @@ -372,7 +372,7 @@ func (w *worker) mineTask(t *taskItem) (bool, error) { } w.lg.Debug("The nonces are exhausted in this slot, waiting for the next block", "samplingTime", fmt.Sprintf("%.1fs", time.Since(startTime).Seconds()), - "shard", t.shardIdx, "thread", t.thread, "block", t.blockNumber, "nonce", nonce) + "shard", t.shardIdx, "thread", t.thread, "block", t.blockNumber, "nonceEnd", nonce) break } hash0 := initHash(t.miner, t.blockHash, nonce) From 0959f80eddc0b486f8652aa2006a2459f9d1c3b9 Mon Sep 17 00:00:00 2001 From: syntrust Date: Fri, 24 Nov 2023 11:31:21 +0800 Subject: [PATCH 20/68] fix --- ethstorage/miner/worker.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ethstorage/miner/worker.go 
b/ethstorage/miner/worker.go index 96cac51d..37607a0f 100644 --- a/ethstorage/miner/worker.go +++ b/ethstorage/miner/worker.go @@ -359,7 +359,7 @@ func (w *worker) mineTask(t *taskItem) (bool, error) { if t.thread == 0 { nonceTriedTotal := (nonce - t.nonceStart) * w.config.ThreadsPerShard w.lg.Warn("Mining tasks timed out", "shard", t.shardIdx, "block", t.blockNumber, - "noncesTried", fmt.Sprintf("%d(%.1f%%)", nonceTriedTotal, float64(nonceTriedTotal)/float64(w.config.NonceLimit)), + "noncesTried", fmt.Sprintf("%d(%.1f%%)", nonceTriedTotal, float64(nonceTriedTotal*100)/float64(w.config.NonceLimit)), ) } w.lg.Debug("Mining task timed out", "shard", t.shardIdx, "thread", t.thread, "block", t.blockNumber, "noncesTried", nonce-t.nonceStart) From 62563cb5b091dd09bc8ed27fe064a99318907448 Mon Sep 17 00:00:00 2001 From: syntrust Date: Fri, 24 Nov 2023 11:52:42 +0800 Subject: [PATCH 21/68] double threads --- ethstorage/miner/config.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ethstorage/miner/config.go b/ethstorage/miner/config.go index 4a8e4aa5..28cfdad1 100644 --- a/ethstorage/miner/config.go +++ b/ethstorage/miner/config.go @@ -39,5 +39,5 @@ var DefaultConfig = Config{ PriorityGasPrice: nil, ZKeyFileName: "blob_poseidon.zkey", ZKWorkingDir: filepath.Join("ethstorage", "prover"), - ThreadsPerShard: uint64(runtime.NumCPU()), + ThreadsPerShard: uint64(2 * runtime.NumCPU()), } From dff8c21bde60ce1089f315183220fc01cc09f594 Mon Sep 17 00:00:00 2001 From: Qi Zhou Date: Thu, 23 Nov 2023 22:04:55 -0800 Subject: [PATCH 22/68] run.sh is runnable and accepts extra args --- run.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) mode change 100644 => 100755 run.sh diff --git a/run.sh b/run.sh old mode 100644 new mode 100755 index 5044b5e1..c07e0bd3 --- a/run.sh +++ b/run.sh @@ -40,7 +40,8 @@ storage_file_0="$data_dir/shard-0.dat" common_flags=" --datadir $data_dir \ --l1.rpc http://65.108.236.27:8545 \ --storage.l1contract 0x9f9F5Fd89ad648f2C000C954d8d9C87743243eC5 \ - --storage.miner $ES_NODE_STORAGE_MINER" + --storage.miner $ES_NODE_STORAGE_MINER \ + $@" # init shard 0 es_node_init="init --shard_index 0" From cd3ac5566448598cdb6643442697236f7e9eb2f4 Mon Sep 17 00:00:00 2001 From: Qiang Zhu Date: Fri, 24 Nov 2023 14:53:18 +0800 Subject: [PATCH 23/68] Retry the getKvMetas request again in case it could fail occasionally in poor network connection --- ethstorage/storage_manager.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/ethstorage/storage_manager.go b/ethstorage/storage_manager.go index 67842a71..dd1ad16d 100644 --- a/ethstorage/storage_manager.go +++ b/ethstorage/storage_manager.go @@ -405,7 +405,11 @@ func (s *StorageManager) downloadMetaInRange(from, to uint64, l1 int64, taskId u metas, err := s.l1Source.GetKvMetas(kvIndices, l1) if err != nil { - return err + // Retry the request again in case it could fail occasionally in poor network connection + metas, err = s.l1Source.GetKvMetas(kvIndices, l1) + if err != nil { + return err + } } s.metaMapMu.Lock() From c8bdc87323051c366915730eb1d09bb37e3fc136 Mon Sep 17 00:00:00 2001 From: syntrust Date: Fri, 24 Nov 2023 18:26:20 +0800 Subject: [PATCH 24/68] use make --- .github/workflows/publish.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 7abc19b3..6a606458 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -19,7 +19,9 @@ jobs: go-version: '1.20' - name: Build - run: go build -o 
es-node.${{github.ref_name}}.ubuntu ./cmd/es-node/... + run: | + make + mv es-node es-node.${{github.ref_name}}.ubuntu - name: Create Release uses: softprops/action-gh-release@v1 From a55d6e052e0b20318d2ba585680680892ac939ad Mon Sep 17 00:00:00 2001 From: syntrust Date: Fri, 24 Nov 2023 18:39:41 +0800 Subject: [PATCH 25/68] fix path --- .github/workflows/publish.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 6a606458..ee9fef54 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -21,7 +21,7 @@ jobs: - name: Build run: | make - mv es-node es-node.${{github.ref_name}}.ubuntu + mv cmd/es-node/es-node es-node.${{github.ref_name}}.ubuntu - name: Create Release uses: softprops/action-gh-release@v1 From 5260a5c72bb00b635cb3928c329bd700d1946583 Mon Sep 17 00:00:00 2001 From: Qiang Zhu Date: Fri, 24 Nov 2023 18:54:40 +0800 Subject: [PATCH 26/68] fix comments --- ethstorage/storage_manager.go | 68 +++++++++++++++++++++++++---------- 1 file changed, 49 insertions(+), 19 deletions(-) diff --git a/ethstorage/storage_manager.go b/ethstorage/storage_manager.go index dd1ad16d..cc84767d 100644 --- a/ethstorage/storage_manager.go +++ b/ethstorage/storage_manager.go @@ -40,11 +40,11 @@ type Il1Source interface { // and a consistent view of most-recent-finalized L1 block. type StorageManager struct { shardManager *ShardManager + localL1 int64 // local view of most-recent-finalized L1 block mu sync.Mutex // protect lastKvIdx, shardManager and blobMeta read/write state lastKvIdx uint64 // lastKvIndex in the most-recent-finalized L1 block l1Source Il1Source DownloadThreadNum int - metaMapMu sync.Mutex // this is only for blobMetas writing thread safe in downloadMetaInParallel blobMetas map[uint64][32]byte } @@ -64,16 +64,22 @@ func (s *StorageManager) DownloadFinished(newL1 int64, kvIndices []uint64, blobs } s.mu.Lock() - defer s.mu.Unlock() + + // in most case, newL1 should be equal to s.localL1 + 32 + // but it is possible that the node was shutdown for some time, and when it restart and DownloadFinished for the first time + // the new finalized L1 will be larger than that, so we just do the simple compare check here. 
+ if newL1 <= s.localL1 { + s.mu.Unlock() + return errors.New("new L1 is older than local L1") + } lastKvIdx, err := s.l1Source.GetStorageLastBlobIdx(newL1) if err != nil { + s.mu.Unlock() return err } s.lastKvIdx = lastKvIdx - s.updateLocalMetas(kvIndices, commits) - taskNum := s.DownloadThreadNum var wg sync.WaitGroup chanRes := make(chan error, taskNum) @@ -116,10 +122,17 @@ func (s *StorageManager) DownloadFinished(newL1 int64, kvIndices []uint64, blobs for i := 0; i < taskIdx; i++ { res := <- chanRes if (res != nil) { + s.mu.Unlock() return res } } + s.localL1 = newL1 + + s.mu.Unlock() + + s.updateLocalMetas(kvIndices, commits) + return nil } @@ -144,6 +157,8 @@ func (s *StorageManager) Reset(newL1 int64) error { return err } s.lastKvIdx = lastKvIdx + s.localL1 = newL1 + return nil } @@ -169,9 +184,10 @@ func (s *StorageManager) CommitBlobs(kvIndices []uint64, blobs [][]byte, commits encoded[i] = true } + metas := s.getKvMetas(kvIndices) + s.mu.Lock() defer s.mu.Unlock() - metas := s.getKvMetas(kvIndices) inserted := []uint64{} for i, contractMeta := range metas { @@ -209,9 +225,10 @@ func (s *StorageManager) CommitEmptyBlobs(start, limit uint64) (uint64, uint64, kvIndices = append(kvIndices, i) } + metas:= s.getKvMetas(kvIndices) + s.mu.Lock() defer s.mu.Unlock() - metas:= s.getKvMetas(kvIndices) for i, index := range kvIndices { err := s.commitEncodedBlob(index, encodedBlobs[i], hash, metas[i]) @@ -235,9 +252,11 @@ func (s *StorageManager) CommitBlob(kvIndex uint64, blob []byte, commit common.H if !success || err != nil { return errors.New("blob encode failed") } + + metas:= s.getKvMetas([]uint64{kvIndex}) + s.mu.Lock() defer s.mu.Unlock() - metas:= s.getKvMetas([]uint64{kvIndex}) if len(metas) != 1 { return errors.New("invalid params lens") @@ -304,9 +323,6 @@ func (s *StorageManager) syncCheck(kvIdx uint64) error { // DownloadAllMetas This function download the blob hashes of all the local storage shards from the smart contract func (s *StorageManager) DownloadAllMetas() error { - s.mu.Lock() - defer s.mu.Unlock() - header, err := s.l1Source.HeaderByNumber(context.Background(), big.NewInt(rpc.FinalizedBlockNumber.Int64())) if err != nil { return err @@ -329,7 +345,7 @@ func (s *StorageManager) DownloadAllMetas() error { log.Info("Begin to download metas", "shard", sid, "first", first, "end", end, "limit", limit, "lastKvIdx", lastKvIdx) ts := time.Now() - err := s.downloadMetaInParallel(first, end, l1) + err := s.downloadMetaInParallel(first, end) if err != nil { return err } @@ -351,13 +367,13 @@ func (s *StorageManager) DownloadAllMetas() error { return nil } -func (s *StorageManager) downloadMetaInParallel(from, to uint64, l1 int64) error { +func (s *StorageManager) downloadMetaInParallel(from, to uint64) error { var wg sync.WaitGroup taskNum := uint64(MetaDownloadThread) // We don't need to download in parallel if the meta amount is small if to - from < uint64(taskNum) * MetaBatchSize { - return s.downloadMetaInRange(from, to, l1, 0) + return s.downloadMetaInRange(from, to, 0) } chanRes := make(chan error, taskNum) @@ -374,7 +390,7 @@ func (s *StorageManager) downloadMetaInParallel(from, to uint64, l1 int64) error go func(start, end, taskId uint64, out chan<- error) { defer wg.Done() - err := s.downloadMetaInRange(start, end, l1, taskId) + err := s.downloadMetaInRange(start, end, taskId) chanRes <- err }(rangeStart, rangeEnd, taskIdx, chanRes) @@ -392,7 +408,7 @@ func (s *StorageManager) downloadMetaInParallel(from, to uint64, l1 int64) error return nil } -func (s 
*StorageManager) downloadMetaInRange(from, to uint64, l1 int64, taskId uint64) error { +func (s *StorageManager) downloadMetaInRange(from, to uint64, taskId uint64) error { for from < to { batchLimit := from + MetaBatchSize if batchLimit > to { @@ -403,20 +419,28 @@ func (s *StorageManager) downloadMetaInRange(from, to uint64, l1 int64, taskId u kvIndices = append(kvIndices, i) } - metas, err := s.l1Source.GetKvMetas(kvIndices, l1) + s.mu.Lock() + localL1 := s.localL1 + s.mu.Unlock() + + metas, err := s.l1Source.GetKvMetas(kvIndices, localL1) if err != nil { // Retry the request again in case it could fail occasionally in poor network connection - metas, err = s.l1Source.GetKvMetas(kvIndices, l1) + metas, err = s.l1Source.GetKvMetas(kvIndices, localL1) if err != nil { return err } } - s.metaMapMu.Lock() + s.mu.Lock() + if localL1 != s.localL1 { + s.mu.Unlock() + continue + } for i, meta := range(metas) { s.blobMetas[kvIndices[i]] = meta } - s.metaMapMu.Unlock() + s.mu.Unlock() log.Info("One batch metas has been downloaded", "first", from, "batchLimit", batchLimit, "to", to, "taskId", taskId) @@ -426,6 +450,9 @@ func (s *StorageManager) downloadMetaInRange(from, to uint64, l1 int64, taskId u } func (s *StorageManager) updateLocalMetas(kvIndices []uint64, commits []common.Hash) { + s.mu.Lock() + defer s.mu.Unlock() + for i, idx := range kvIndices { meta := [32]byte{} new(big.Int).SetInt64(int64(idx)).FillBytes(meta[0:5]) @@ -436,6 +463,9 @@ func (s *StorageManager) updateLocalMetas(kvIndices []uint64, commits []common.H } func (s *StorageManager) getKvMetas(kvIndices []uint64) [][32]byte { + s.mu.Lock() + defer s.mu.Unlock() + metas := [][32]byte{} for _, i := range kvIndices { metas = append(metas, s.blobMetas[i]) From 32ababac74805c2b5c23f49817e5e334a90022f7 Mon Sep 17 00:00:00 2001 From: Qiang Zhu Date: Fri, 24 Nov 2023 19:05:28 +0800 Subject: [PATCH 27/68] print download progress --- ethstorage/storage_manager.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/ethstorage/storage_manager.go b/ethstorage/storage_manager.go index cc84767d..d4ec178b 100644 --- a/ethstorage/storage_manager.go +++ b/ethstorage/storage_manager.go @@ -7,6 +7,7 @@ import ( "bytes" "context" "errors" + "fmt" "math/big" "sync" "time" @@ -409,6 +410,7 @@ func (s *StorageManager) downloadMetaInParallel(from, to uint64) error { } func (s *StorageManager) downloadMetaInRange(from, to uint64, taskId uint64) error { + rangeStart := from for from < to { batchLimit := from + MetaBatchSize if batchLimit > to { @@ -442,7 +444,12 @@ func (s *StorageManager) downloadMetaInRange(from, to uint64, taskId uint64) err } s.mu.Unlock() - log.Info("One batch metas has been downloaded", "first", from, "batchLimit", batchLimit, "to", to, "taskId", taskId) + log.Info( + "One batch metas has been downloaded", "first", from, + "batchLimit", batchLimit, + "to", to, + "progress", fmt.Sprintf("%.1f%%", float64((from - rangeStart)*100)/float64(to - rangeStart)), + "taskId", taskId) from = batchLimit } From ef2363c17e384b8e9d2d6332ae73adb47a2426ce Mon Sep 17 00:00:00 2001 From: syntrust Date: Fri, 24 Nov 2023 19:09:09 +0800 Subject: [PATCH 28/68] update doc --- GUIDE.md | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/GUIDE.md b/GUIDE.md index e3e0acae..413d915c 100644 --- a/GUIDE.md +++ b/GUIDE.md @@ -97,10 +97,17 @@ nvm use 20 ```sh npm install -g snarkjs@0.7.0 ``` -#### 4. Build es-node +#### 4. 
Build or download es-node +You have two options for acquiring es-node: + +- Use the following command to build es-node: ```sh make ``` +- Alternatively, you can download the pre-built executable of the specific version from [the releases page](https://github.com/ethstorage/es-node/releases), such as es-node.v0.1.2.ubuntu, and then rename it using the following command: +```sh +mv es-node.${version}.ubuntu ./cmd/es-node/es-node +``` #### 5. Start es-node ```sh chmod +x run.sh && env ES_NODE_STORAGE_MINER= ES_NODE_SIGNER_PRIVATE_KEY= ./run.sh From d68a16340b5f07188cbbae0d1464144231197ef2 Mon Sep 17 00:00:00 2001 From: Qiang Zhu Date: Fri, 24 Nov 2023 19:13:44 +0800 Subject: [PATCH 29/68] commit state at last --- ethstorage/storage_manager.go | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/ethstorage/storage_manager.go b/ethstorage/storage_manager.go index d4ec178b..b1e2f706 100644 --- a/ethstorage/storage_manager.go +++ b/ethstorage/storage_manager.go @@ -74,13 +74,6 @@ func (s *StorageManager) DownloadFinished(newL1 int64, kvIndices []uint64, blobs return errors.New("new L1 is older than local L1") } - lastKvIdx, err := s.l1Source.GetStorageLastBlobIdx(newL1) - if err != nil { - s.mu.Unlock() - return err - } - s.lastKvIdx = lastKvIdx - taskNum := s.DownloadThreadNum var wg sync.WaitGroup chanRes := make(chan error, taskNum) @@ -128,6 +121,12 @@ func (s *StorageManager) DownloadFinished(newL1 int64, kvIndices []uint64, blobs } } + lastKvIdx, err := s.l1Source.GetStorageLastBlobIdx(newL1) + if err != nil { + s.mu.Unlock() + return err + } + s.lastKvIdx = lastKvIdx s.localL1 = newL1 s.mu.Unlock() From bf0c469d2d85630a289f96fdfaf1e1bd1b8c80e7 Mon Sep 17 00:00:00 2001 From: syntrust Date: Mon, 27 Nov 2023 15:02:22 +0800 Subject: [PATCH 30/68] xplatform --- .github/workflows/publish.yml | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index ee9fef54..f9d36cd4 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -20,13 +20,20 @@ jobs: - name: Build run: | - make - mv cmd/es-node/es-node es-node.${{github.ref_name}}.ubuntu + make TARGETOS=linux TARGETARCH=amd64 + mv cmd/es-node/es-node es-node.${{github.ref_name}}.linux-amd64 + make TARGETOS=darwin TARGETARCH=amd64 + mv cmd/es-node/es-node es-node.${{github.ref_name}}.darwin-amd64 + make TARGETOS=windows TARGETARCH=amd64 + mv cmd/es-node/es-node es-node.${{github.ref_name}}.windows-amd64 - name: Create Release uses: softprops/action-gh-release@v1 with: tag_name: ${{ github.ref }} name: Release ${{github.ref_name}} - files: es-node.${{github.ref_name}}.ubuntu + files: | + es-node.${{github.ref_name}}.linux-amd64 + es-node.${{github.ref_name}}.darwin-amd64 + es-node.${{github.ref_name}}.windows-amd64 generate_release_notes: true \ No newline at end of file From 5c82602660c782976b4ec0e1b0b068494878e707 Mon Sep 17 00:00:00 2001 From: syntrust Date: Mon, 27 Nov 2023 15:25:31 +0800 Subject: [PATCH 31/68] arm --- .github/workflows/publish.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index f9d36cd4..1959a259 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -24,6 +24,8 @@ jobs: mv cmd/es-node/es-node es-node.${{github.ref_name}}.linux-amd64 make TARGETOS=darwin TARGETARCH=amd64 mv cmd/es-node/es-node es-node.${{github.ref_name}}.darwin-amd64 + make TARGETOS=darwin TARGETARCH=arm64 + mv 
cmd/es-node/es-node es-node.${{github.ref_name}}.darwin-arm64 make TARGETOS=windows TARGETARCH=amd64 mv cmd/es-node/es-node es-node.${{github.ref_name}}.windows-amd64 @@ -35,5 +37,6 @@ jobs: files: | es-node.${{github.ref_name}}.linux-amd64 es-node.${{github.ref_name}}.darwin-amd64 + es-node.${{github.ref_name}}.darwin-arm64 es-node.${{github.ref_name}}.windows-amd64 generate_release_notes: true \ No newline at end of file From e0ebfa3ae7d87146dafa748c08201700782d7d78 Mon Sep 17 00:00:00 2001 From: syntrust Date: Mon, 27 Nov 2023 15:35:24 +0800 Subject: [PATCH 32/68] revert doc change --- GUIDE.md | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/GUIDE.md b/GUIDE.md index 413d915c..e3e0acae 100644 --- a/GUIDE.md +++ b/GUIDE.md @@ -97,17 +97,10 @@ nvm use 20 ```sh npm install -g snarkjs@0.7.0 ``` -#### 4. Build or download es-node -You have two options for acquiring es-node: - -- Use the following command to build es-node: +#### 4. Build es-node ```sh make ``` -- Alternatively, you can download the pre-built executable of the specific version from [the releases page](https://github.com/ethstorage/es-node/releases), such as es-node.v0.1.2.ubuntu, and then rename it using the following command: -```sh -mv es-node.${version}.ubuntu ./cmd/es-node/es-node -``` #### 5. Start es-node ```sh chmod +x run.sh && env ES_NODE_STORAGE_MINER= ES_NODE_SIGNER_PRIVATE_KEY= ./run.sh From a73546aafd9b434edf20deab885ea9511a928bfa Mon Sep 17 00:00:00 2001 From: Qiang Zhu Date: Tue, 28 Nov 2023 15:13:31 +0800 Subject: [PATCH 33/68] resolve comments --- ethstorage/flags/p2p_flags.go | 8 +++ ethstorage/node/node.go | 2 +- ethstorage/p2p/cli/load_config.go | 3 +- ethstorage/p2p/protocol/syncclient.go | 16 ++--- ethstorage/p2p/protocol/types.go | 5 +- ethstorage/storage_manager.go | 90 ++++++++++++++------------- 6 files changed, 70 insertions(+), 54 deletions(-) diff --git a/ethstorage/flags/p2p_flags.go b/ethstorage/flags/p2p_flags.go index d20b84d2..37b972a2 100644 --- a/ethstorage/flags/p2p_flags.go +++ b/ethstorage/flags/p2p_flags.go @@ -169,6 +169,13 @@ var ( Value: 16, EnvVar: p2pEnv("MAX_CONCURRENCY"), } + MetaDownloadBatchSize = cli.Uint64Flag{ + Name: "p2p.meta.download.batch", + Usage: "Batch size for requesting the blob metadatas stored in the storage contract in one RPC call.", + Required: false, + Value: 8000, // The upper limit of devnet-11 geth node + EnvVar: p2pEnv("META_BATCH_SIZE"), + } PeersLo = cli.UintFlag{ Name: "p2p.peers.lo", Usage: "Low-tide peer count. 
The node actively searches for new peer connections if below this amount.", @@ -332,6 +339,7 @@ var p2pFlags = []cli.Flag{ HostSecurity, MaxRequestSize, MaxConcurrency, + MetaDownloadBatchSize, PeersLo, PeersHi, PeersGrace, diff --git a/ethstorage/node/node.go b/ethstorage/node/node.go index 0ad1df3c..c2399886 100644 --- a/ethstorage/node/node.go +++ b/ethstorage/node/node.go @@ -210,7 +210,7 @@ func (n *EsNode) initStorageManager(ctx context.Context, cfg *Config) error { "chunkSize", shardManager.ChunkSize(), "kvsPerShard", shardManager.KvEntries()) - n.storageManager = ethstorage.NewStorageManager(shardManager, n.l1Source) + n.storageManager = ethstorage.NewStorageManager(shardManager, n.l1Source, cfg.P2P.SyncerParams().MetaDownloadBatchSize) return nil } diff --git a/ethstorage/p2p/cli/load_config.go b/ethstorage/p2p/cli/load_config.go index a3e0b558..1efd92ee 100644 --- a/ethstorage/p2p/cli/load_config.go +++ b/ethstorage/p2p/cli/load_config.go @@ -362,11 +362,12 @@ func loadGossipOptions(conf *p2p.Config, ctx *cli.Context) error { // loadSyncerParams loads [protocol.SyncerParams] from the CLI context. func loadSyncerParams(conf *p2p.Config, ctx *cli.Context) error { + metaDownloadBatchSize := ctx.GlobalUint64(flags.MetaDownloadBatchSize.Name) maxRequestSize := ctx.GlobalUint64(flags.MaxRequestSize.Name) maxConcurrency := ctx.GlobalUint64(flags.MaxConcurrency.Name) if maxConcurrency < 1 { return fmt.Errorf("p2p.max.concurrency param is invalid: the value should larger than 0") } - conf.SyncParams = &protocol.SyncerParams{MaxRequestSize: maxRequestSize, MaxConcurrency: maxConcurrency} + conf.SyncParams = &protocol.SyncerParams{MaxRequestSize: maxRequestSize, MaxConcurrency: maxConcurrency, MetaDownloadBatchSize: metaDownloadBatchSize} return nil } diff --git a/ethstorage/p2p/protocol/syncclient.go b/ethstorage/p2p/protocol/syncclient.go index 8cbe7498..b73a0a52 100644 --- a/ethstorage/p2p/protocol/syncclient.go +++ b/ethstorage/p2p/protocol/syncclient.go @@ -453,13 +453,6 @@ func (s *SyncClient) Start() error { // Retrieve the previous sync status from LevelDB and abort if already synced s.loadSyncStatus() - s.cleanTasks() - if !s.syncDone { - err := s.storageManager.DownloadAllMetas() - if err != nil { - return err - } - } s.wg.Add(1) go s.mainLoop() @@ -568,6 +561,15 @@ func (s *SyncClient) RequestL2List(indexes []uint64) (uint64, error) { func (s *SyncClient) mainLoop() { defer s.wg.Done() + s.cleanTasks() + if !s.syncDone { + err := s.storageManager.DownloadAllMetas() + if err != nil { + log.Error("Download blob metadata failed", "error", err) + return + } + } + for { // Remove all completed tasks and terminate sync if everything's done s.cleanTasks() diff --git a/ethstorage/p2p/protocol/types.go b/ethstorage/p2p/protocol/types.go index a1e09804..7db0eaf4 100644 --- a/ethstorage/p2p/protocol/types.go +++ b/ethstorage/p2p/protocol/types.go @@ -135,6 +135,7 @@ type EthStorageSyncDone struct { } type SyncerParams struct { - MaxRequestSize uint64 - MaxConcurrency uint64 + MaxRequestSize uint64 + MaxConcurrency uint64 + MetaDownloadBatchSize uint64 } diff --git a/ethstorage/storage_manager.go b/ethstorage/storage_manager.go index b1e2f706..b1529872 100644 --- a/ethstorage/storage_manager.go +++ b/ethstorage/storage_manager.go @@ -15,13 +15,11 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/rpc" ) const ( blobFillingMask = byte(0b10000000) HashSizeInContract = 24 - 
MetaBatchSize = 8000 MetaDownloadThread = 32 ) @@ -40,20 +38,22 @@ type Il1Source interface { // StorageManager is a higher-level abstract of ShardManager which provides multi-thread safety to storage file read/write // and a consistent view of most-recent-finalized L1 block. type StorageManager struct { - shardManager *ShardManager - localL1 int64 // local view of most-recent-finalized L1 block - mu sync.Mutex // protect lastKvIdx, shardManager and blobMeta read/write state - lastKvIdx uint64 // lastKvIndex in the most-recent-finalized L1 block - l1Source Il1Source DownloadThreadNum int - blobMetas map[uint64][32]byte + shardManager *ShardManager + localL1 int64 // local view of most-recent-finalized L1 block + mu sync.Mutex // protect lastKvIdx, shardManager and blobMeta read/write state + lastKvIdx uint64 // lastKvIndex in the most-recent-finalized L1 block + l1Source Il1Source + metaDownloadBatchSize uint64 + blobMetas map[uint64][32]byte } -func NewStorageManager(sm *ShardManager, l1Source Il1Source) *StorageManager { +func NewStorageManager(sm *ShardManager, l1Source Il1Source, batchSize uint64) *StorageManager { return &StorageManager{ - shardManager: sm, - l1Source: l1Source, - blobMetas: map[uint64][32]byte{}, + shardManager: sm, + l1Source: l1Source, + metaDownloadBatchSize: batchSize, + blobMetas: map[uint64][32]byte{}, } } @@ -65,12 +65,12 @@ func (s *StorageManager) DownloadFinished(newL1 int64, kvIndices []uint64, blobs } s.mu.Lock() + defer s.mu.Unlock() // in most case, newL1 should be equal to s.localL1 + 32 // but it is possible that the node was shutdown for some time, and when it restart and DownloadFinished for the first time // the new finalized L1 will be larger than that, so we just do the simple compare check here. if newL1 <= s.localL1 { - s.mu.Unlock() return errors.New("new L1 is older than local L1") } @@ -116,21 +116,17 @@ func (s *StorageManager) DownloadFinished(newL1 int64, kvIndices []uint64, blobs for i := 0; i < taskIdx; i++ { res := <- chanRes if (res != nil) { - s.mu.Unlock() return res } } lastKvIdx, err := s.l1Source.GetStorageLastBlobIdx(newL1) if err != nil { - s.mu.Unlock() return err } s.lastKvIdx = lastKvIdx s.localL1 = newL1 - s.mu.Unlock() - s.updateLocalMetas(kvIndices, commits) return nil @@ -184,7 +180,10 @@ func (s *StorageManager) CommitBlobs(kvIndices []uint64, blobs [][]byte, commits encoded[i] = true } - metas := s.getKvMetas(kvIndices) + metas, err := s.getKvMetas(kvIndices) + if err != nil { + return nil, err + } s.mu.Lock() defer s.mu.Unlock() @@ -225,7 +224,10 @@ func (s *StorageManager) CommitEmptyBlobs(start, limit uint64) (uint64, uint64, kvIndices = append(kvIndices, i) } - metas:= s.getKvMetas(kvIndices) + metas, err := s.getKvMetas(kvIndices) + if err != nil { + return inserted, next, err + } s.mu.Lock() defer s.mu.Unlock() @@ -253,7 +255,10 @@ func (s *StorageManager) CommitBlob(kvIndex uint64, blob []byte, commit common.H return errors.New("blob encode failed") } - metas:= s.getKvMetas([]uint64{kvIndex}) + metas, err := s.getKvMetas([]uint64{kvIndex}) + if err != nil { + return err + } s.mu.Lock() defer s.mu.Unlock() @@ -323,16 +328,9 @@ func (s *StorageManager) syncCheck(kvIdx uint64) error { // DownloadAllMetas This function download the blob hashes of all the local storage shards from the smart contract func (s *StorageManager) DownloadAllMetas() error { - header, err := s.l1Source.HeaderByNumber(context.Background(), big.NewInt(rpc.FinalizedBlockNumber.Int64())) - if err != nil { - return err - } - l1 := 
header.Number.Int64() - - lastKvIdx, err := s.l1Source.GetStorageLastBlobIdx(l1) - if err != nil { - return err - } + s.mu.Lock() + lastKvIdx := s.lastKvIdx + s.mu.Unlock() for _, sid := range s.Shards() { first, limit := s.KvEntries()*sid, s.KvEntries()*(sid+1) @@ -372,7 +370,7 @@ func (s *StorageManager) downloadMetaInParallel(from, to uint64) error { taskNum := uint64(MetaDownloadThread) // We don't need to download in parallel if the meta amount is small - if to - from < uint64(taskNum) * MetaBatchSize { + if to - from < uint64(taskNum) * s.metaDownloadBatchSize { return s.downloadMetaInRange(from, to, 0) } @@ -411,7 +409,7 @@ func (s *StorageManager) downloadMetaInParallel(from, to uint64) error { func (s *StorageManager) downloadMetaInRange(from, to uint64, taskId uint64) error { rangeStart := from for from < to { - batchLimit := from + MetaBatchSize + batchLimit := from + s.metaDownloadBatchSize if batchLimit > to { batchLimit = to } @@ -425,12 +423,14 @@ func (s *StorageManager) downloadMetaInRange(from, to uint64, taskId uint64) err s.mu.Unlock() metas, err := s.l1Source.GetKvMetas(kvIndices, localL1) - if err != nil { - // Retry the request again in case it could fail occasionally in poor network connection + for retryTimes := 0; (retryTimes < 10) && (err != nil); retryTimes++ { + // Retry the request for 10 times in case it could fail occasionally in poor network connection + time.Sleep(2 * time.Second) metas, err = s.l1Source.GetKvMetas(kvIndices, localL1) - if err != nil { - return err - } + } + + if err != nil { + return err } s.mu.Lock() @@ -455,10 +455,9 @@ func (s *StorageManager) downloadMetaInRange(from, to uint64, taskId uint64) err return nil } +// This function is only called by DownloadFinished which already uses s.mu to protect the s.blobMetas, so +// we don't need to lock in this function func (s *StorageManager) updateLocalMetas(kvIndices []uint64, commits []common.Hash) { - s.mu.Lock() - defer s.mu.Unlock() - for i, idx := range kvIndices { meta := [32]byte{} new(big.Int).SetInt64(int64(idx)).FillBytes(meta[0:5]) @@ -468,15 +467,20 @@ func (s *StorageManager) updateLocalMetas(kvIndices []uint64, commits []common.H } } -func (s *StorageManager) getKvMetas(kvIndices []uint64) [][32]byte { +func (s *StorageManager) getKvMetas(kvIndices []uint64) ([][32]byte, error) { s.mu.Lock() defer s.mu.Unlock() metas := [][32]byte{} for _, i := range kvIndices { - metas = append(metas, s.blobMetas[i]) + meta, ok := s.blobMetas[i] + if ok { + metas = append(metas, meta) + } else { + return nil, errors.New("meta not found in blobMetas") + } } - return metas + return metas, nil } // TryReadEncoded This function will read the encoded data from the local storage file. 
It also check whether the blob is empty or not synced, From a67b287f64c9b580feeed39ba7758fe888b29359 Mon Sep 17 00:00:00 2001 From: Qiang Zhu Date: Tue, 28 Nov 2023 16:24:43 +0800 Subject: [PATCH 34/68] refactor --- ethstorage/node/node.go | 2 +- ethstorage/p2p/protocol/syncclient.go | 4 ++-- ethstorage/storage_manager.go | 20 +++++++++----------- 3 files changed, 12 insertions(+), 14 deletions(-) diff --git a/ethstorage/node/node.go b/ethstorage/node/node.go index c2399886..0ad1df3c 100644 --- a/ethstorage/node/node.go +++ b/ethstorage/node/node.go @@ -210,7 +210,7 @@ func (n *EsNode) initStorageManager(ctx context.Context, cfg *Config) error { "chunkSize", shardManager.ChunkSize(), "kvsPerShard", shardManager.KvEntries()) - n.storageManager = ethstorage.NewStorageManager(shardManager, n.l1Source, cfg.P2P.SyncerParams().MetaDownloadBatchSize) + n.storageManager = ethstorage.NewStorageManager(shardManager, n.l1Source) return nil } diff --git a/ethstorage/p2p/protocol/syncclient.go b/ethstorage/p2p/protocol/syncclient.go index b73a0a52..c1722448 100644 --- a/ethstorage/p2p/protocol/syncclient.go +++ b/ethstorage/p2p/protocol/syncclient.go @@ -133,7 +133,7 @@ type StorageManager interface { DecodeKV(kvIdx uint64, b []byte, hash common.Hash, providerAddr common.Address, encodeType uint64) ([]byte, bool, error) - DownloadAllMetas() error + DownloadAllMetas(batchSize uint64) error } type SyncClient struct { @@ -563,7 +563,7 @@ func (s *SyncClient) mainLoop() { s.cleanTasks() if !s.syncDone { - err := s.storageManager.DownloadAllMetas() + err := s.storageManager.DownloadAllMetas(s.syncerParams.MetaDownloadBatchSize) if err != nil { log.Error("Download blob metadata failed", "error", err) return diff --git a/ethstorage/storage_manager.go b/ethstorage/storage_manager.go index b1529872..922559ab 100644 --- a/ethstorage/storage_manager.go +++ b/ethstorage/storage_manager.go @@ -44,15 +44,13 @@ type StorageManager struct { mu sync.Mutex // protect lastKvIdx, shardManager and blobMeta read/write state lastKvIdx uint64 // lastKvIndex in the most-recent-finalized L1 block l1Source Il1Source - metaDownloadBatchSize uint64 blobMetas map[uint64][32]byte } -func NewStorageManager(sm *ShardManager, l1Source Il1Source, batchSize uint64) *StorageManager { +func NewStorageManager(sm *ShardManager, l1Source Il1Source) *StorageManager { return &StorageManager{ shardManager: sm, l1Source: l1Source, - metaDownloadBatchSize: batchSize, blobMetas: map[uint64][32]byte{}, } } @@ -327,7 +325,7 @@ func (s *StorageManager) syncCheck(kvIdx uint64) error { } // DownloadAllMetas This function download the blob hashes of all the local storage shards from the smart contract -func (s *StorageManager) DownloadAllMetas() error { +func (s *StorageManager) DownloadAllMetas(batchSize uint64) error { s.mu.Lock() lastKvIdx := s.lastKvIdx s.mu.Unlock() @@ -343,7 +341,7 @@ func (s *StorageManager) DownloadAllMetas() error { log.Info("Begin to download metas", "shard", sid, "first", first, "end", end, "limit", limit, "lastKvIdx", lastKvIdx) ts := time.Now() - err := s.downloadMetaInParallel(first, end) + err := s.downloadMetaInParallel(first, end, batchSize) if err != nil { return err } @@ -365,13 +363,13 @@ func (s *StorageManager) DownloadAllMetas() error { return nil } -func (s *StorageManager) downloadMetaInParallel(from, to uint64) error { +func (s *StorageManager) downloadMetaInParallel(from, to, batchSize uint64) error { var wg sync.WaitGroup taskNum := uint64(MetaDownloadThread) // We don't need to download in parallel if the 
meta amount is small - if to - from < uint64(taskNum) * s.metaDownloadBatchSize { - return s.downloadMetaInRange(from, to, 0) + if to - from < uint64(taskNum) * batchSize { + return s.downloadMetaInRange(from, to, batchSize, 0) } chanRes := make(chan error, taskNum) @@ -388,7 +386,7 @@ func (s *StorageManager) downloadMetaInParallel(from, to uint64) error { go func(start, end, taskId uint64, out chan<- error) { defer wg.Done() - err := s.downloadMetaInRange(start, end, taskId) + err := s.downloadMetaInRange(start, end, batchSize, taskId) chanRes <- err }(rangeStart, rangeEnd, taskIdx, chanRes) @@ -406,10 +404,10 @@ func (s *StorageManager) downloadMetaInParallel(from, to uint64) error { return nil } -func (s *StorageManager) downloadMetaInRange(from, to uint64, taskId uint64) error { +func (s *StorageManager) downloadMetaInRange(from, to, batchSize, taskId uint64) error { rangeStart := from for from < to { - batchLimit := from + s.metaDownloadBatchSize + batchLimit := from + batchSize if batchLimit > to { batchLimit = to } From 84421d021724eb7e5aae45a4dc7fb6183843f929 Mon Sep 17 00:00:00 2001 From: Qiang Zhu Date: Tue, 28 Nov 2023 16:29:16 +0800 Subject: [PATCH 35/68] refactor --- ethstorage/storage_manager.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/ethstorage/storage_manager.go b/ethstorage/storage_manager.go index 922559ab..162e1ff8 100644 --- a/ethstorage/storage_manager.go +++ b/ethstorage/storage_manager.go @@ -5,7 +5,6 @@ package ethstorage import ( "bytes" - "context" "errors" "fmt" "math/big" @@ -13,7 +12,6 @@ import ( "time" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" ) @@ -31,8 +29,6 @@ type Il1Source interface { GetKvMetas(kvIndices []uint64, blockNumber int64) ([][32]byte, error) GetStorageLastBlobIdx(blockNumber int64) (uint64, error) - - HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) } // StorageManager is a higher-level abstract of ShardManager which provides multi-thread safety to storage file read/write From b965cdc8676032b89fb70683751f5815a7c0aba4 Mon Sep 17 00:00:00 2001 From: syntrust Date: Tue, 28 Nov 2023 19:22:12 +0800 Subject: [PATCH 36/68] fix guide --- GUIDE.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/GUIDE.md b/GUIDE.md index e3e0acae..33ebce3a 100644 --- a/GUIDE.md +++ b/GUIDE.md @@ -99,7 +99,7 @@ npm install -g snarkjs@0.7.0 ``` #### 4. Build es-node ```sh -make +cd cmd/es-node && go build && cd ../.. ``` #### 5. 
Start es-node ```sh From 23ae727f02f7382eafa9049db944824aab4d1334 Mon Sep 17 00:00:00 2001 From: Qiang Zhu Date: Wed, 29 Nov 2023 15:22:48 +0800 Subject: [PATCH 37/68] resolve comments --- ethstorage/storage_manager.go | 43 ++++++++++++++++++++++------------- 1 file changed, 27 insertions(+), 16 deletions(-) diff --git a/ethstorage/storage_manager.go b/ethstorage/storage_manager.go index 162e1ff8..0afd7811 100644 --- a/ethstorage/storage_manager.go +++ b/ethstorage/storage_manager.go @@ -174,14 +174,15 @@ func (s *StorageManager) CommitBlobs(kvIndices []uint64, blobs [][]byte, commits encoded[i] = true } + + s.mu.Lock() + defer s.mu.Unlock() + metas, err := s.getKvMetas(kvIndices) if err != nil { return nil, err } - s.mu.Lock() - defer s.mu.Unlock() - inserted := []uint64{} for i, contractMeta := range metas { if !encoded[i] { @@ -218,14 +219,14 @@ func (s *StorageManager) CommitEmptyBlobs(start, limit uint64) (uint64, uint64, kvIndices = append(kvIndices, i) } + s.mu.Lock() + defer s.mu.Unlock() + metas, err := s.getKvMetas(kvIndices) if err != nil { return inserted, next, err } - s.mu.Lock() - defer s.mu.Unlock() - for i, index := range kvIndices { err := s.commitEncodedBlob(index, encodedBlobs[i], hash, metas[i]) if err == nil { @@ -249,14 +250,14 @@ func (s *StorageManager) CommitBlob(kvIndex uint64, blob []byte, commit common.H return errors.New("blob encode failed") } + s.mu.Lock() + defer s.mu.Unlock() + metas, err := s.getKvMetas([]uint64{kvIndex}) if err != nil { return err } - s.mu.Lock() - defer s.mu.Unlock() - if len(metas) != 1 { return errors.New("invalid params lens") } @@ -403,19 +404,25 @@ func (s *StorageManager) downloadMetaInParallel(from, to, batchSize uint64) erro func (s *StorageManager) downloadMetaInRange(from, to, batchSize, taskId uint64) error { rangeStart := from for from < to { + s.mu.Lock() + localL1 := s.localL1 + lastKvIdx := s.lastKvIdx + s.mu.Unlock() + batchLimit := from + batchSize if batchLimit > to { batchLimit = to } + // In case remove is supported and lastKvIndex is decreased + if batchLimit > lastKvIdx { + batchLimit = lastKvIdx + } + kvIndices := []uint64{} for i := from; i < batchLimit; i++ { kvIndices = append(kvIndices, i) } - s.mu.Lock() - localL1 := s.localL1 - s.mu.Unlock() - metas, err := s.l1Source.GetKvMetas(kvIndices, localL1) for retryTimes := 0; (retryTimes < 10) && (err != nil); retryTimes++ { // Retry the request for 10 times in case it could fail occasionally in poor network connection @@ -459,12 +466,16 @@ func (s *StorageManager) updateLocalMetas(kvIndices []uint64, commits []common.H s.blobMetas[idx] = meta } + + // Remove the metas in the map if lastKvIdx is smaller because of removal + lastLocalMetaIdx := len(s.blobMetas) - 1 + for i := lastLocalMetaIdx; i > int(s.lastKvIdx) - 1; i-- { + delete(s.blobMetas, uint64(i)) + } } +// Please note that the caller function must uses s.mu to protect the s.blobMetas reading in this function func (s *StorageManager) getKvMetas(kvIndices []uint64) ([][32]byte, error) { - s.mu.Lock() - defer s.mu.Unlock() - metas := [][32]byte{} for _, i := range kvIndices { meta, ok := s.blobMetas[i] From 2c759e9ea7efe14e1768e0db75e8666c804e1844 Mon Sep 17 00:00:00 2001 From: Qiang Zhu Date: Wed, 29 Nov 2023 17:09:11 +0800 Subject: [PATCH 38/68] reset instead of removal --- ethstorage/storage_manager.go | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/ethstorage/storage_manager.go b/ethstorage/storage_manager.go index 0afd7811..c421f029 100644 --- 
a/ethstorage/storage_manager.go +++ b/ethstorage/storage_manager.go @@ -118,10 +118,11 @@ func (s *StorageManager) DownloadFinished(newL1 int64, kvIndices []uint64, blobs if err != nil { return err } + oldLastKvIdx := s.lastKvIdx s.lastKvIdx = lastKvIdx s.localL1 = newL1 - s.updateLocalMetas(kvIndices, commits) + s.updateLocalMetas(kvIndices, commits, oldLastKvIdx) return nil } @@ -458,7 +459,7 @@ func (s *StorageManager) downloadMetaInRange(from, to, batchSize, taskId uint64) // This function is only called by DownloadFinished which already uses s.mu to protect the s.blobMetas, so // we don't need to lock in this function -func (s *StorageManager) updateLocalMetas(kvIndices []uint64, commits []common.Hash) { +func (s *StorageManager) updateLocalMetas(kvIndices []uint64, commits []common.Hash, oldLastKvIndex uint64) { for i, idx := range kvIndices { meta := [32]byte{} new(big.Int).SetInt64(int64(idx)).FillBytes(meta[0:5]) @@ -467,10 +468,12 @@ func (s *StorageManager) updateLocalMetas(kvIndices []uint64, commits []common.H s.blobMetas[idx] = meta } - // Remove the metas in the map if lastKvIdx is smaller because of removal - lastLocalMetaIdx := len(s.blobMetas) - 1 - for i := lastLocalMetaIdx; i > int(s.lastKvIdx) - 1; i-- { - delete(s.blobMetas, uint64(i)) + // In case the lastKvIdx is smaller than oldLastKvIdx because of removal, we need to reset the removal metas + for i := s.lastKvIdx; i < oldLastKvIndex; i++ { + meta := [32]byte{} + new(big.Int).SetInt64(int64(i)).FillBytes(meta[0:5]) + + s.blobMetas[i] = meta } } From a5f27887d235b2328f9ef249366e2a86fe9ae9cf Mon Sep 17 00:00:00 2001 From: Qiang Zhu Date: Thu, 30 Nov 2023 09:57:44 +0800 Subject: [PATCH 39/68] format and fix comments --- ethstorage/storage_manager.go | 69 +++++++++++++++-------------------- 1 file changed, 30 insertions(+), 39 deletions(-) diff --git a/ethstorage/storage_manager.go b/ethstorage/storage_manager.go index c421f029..570b5727 100644 --- a/ethstorage/storage_manager.go +++ b/ethstorage/storage_manager.go @@ -35,19 +35,19 @@ type Il1Source interface { // and a consistent view of most-recent-finalized L1 block. 
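The storage_manager.go changes in these patches settle on a single locking rule: exported entry points such as CommitBlobs take s.mu once, and helpers like getKvMetas assume the caller already holds it. Below is a minimal sketch of that caller-holds-the-lock pattern, using simplified hypothetical types rather than the real StorageManager:

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

// metaStore is a simplified stand-in for the blobMetas map guarded by a mutex.
type metaStore struct {
	mu    sync.Mutex
	metas map[uint64][32]byte
}

// getLocked mirrors getKvMetas: it must only be called with s.mu already held.
func (s *metaStore) getLocked(indices []uint64) ([][32]byte, error) {
	out := make([][32]byte, 0, len(indices))
	for _, i := range indices {
		m, ok := s.metas[i]
		if !ok {
			return nil, errors.New("meta not found")
		}
		out = append(out, m)
	}
	return out, nil
}

// Commit mirrors CommitBlobs: the public entry point locks once, then reads the
// metas and updates state inside the same critical section.
func (s *metaStore) Commit(indices []uint64) error {
	s.mu.Lock()
	defer s.mu.Unlock()

	metas, err := s.getLocked(indices)
	if err != nil {
		return err
	}
	fmt.Println("committing", len(metas), "blobs")
	return nil
}

func main() {
	s := &metaStore{metas: map[uint64][32]byte{0: {}, 1: {}}}
	if err := s.Commit([]uint64{0, 1}); err != nil {
		fmt.Println("commit failed:", err)
	}
}
```

Keeping the helper lock-free is what makes this safe: sync.Mutex is not reentrant in Go, so locking again inside the helper would deadlock.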
type StorageManager struct { DownloadThreadNum int - shardManager *ShardManager - localL1 int64 // local view of most-recent-finalized L1 block - mu sync.Mutex // protect lastKvIdx, shardManager and blobMeta read/write state - lastKvIdx uint64 // lastKvIndex in the most-recent-finalized L1 block - l1Source Il1Source - blobMetas map[uint64][32]byte + shardManager *ShardManager + localL1 int64 // local view of most-recent-finalized L1 block + mu sync.Mutex // protect lastKvIdx, shardManager and blobMeta read/write state + lastKvIdx uint64 // lastKvIndex in the most-recent-finalized L1 block + l1Source Il1Source + blobMetas map[uint64][32]byte } func NewStorageManager(sm *ShardManager, l1Source Il1Source) *StorageManager { return &StorageManager{ - shardManager: sm, - l1Source: l1Source, - blobMetas: map[uint64][32]byte{}, + shardManager: sm, + l1Source: l1Source, + blobMetas: map[uint64][32]byte{}, } } @@ -108,8 +108,8 @@ func (s *StorageManager) DownloadFinished(newL1 int64, kvIndices []uint64, blobs wg.Wait() for i := 0; i < taskIdx; i++ { - res := <- chanRes - if (res != nil) { + res := <-chanRes + if res != nil { return res } } @@ -118,11 +118,10 @@ func (s *StorageManager) DownloadFinished(newL1 int64, kvIndices []uint64, blobs if err != nil { return err } - oldLastKvIdx := s.lastKvIdx s.lastKvIdx = lastKvIdx s.localL1 = newL1 - s.updateLocalMetas(kvIndices, commits, oldLastKvIdx) + s.updateLocalMetas(kvIndices, commits) return nil } @@ -175,7 +174,6 @@ func (s *StorageManager) CommitBlobs(kvIndices []uint64, blobs [][]byte, commits encoded[i] = true } - s.mu.Lock() defer s.mu.Unlock() @@ -345,17 +343,6 @@ func (s *StorageManager) DownloadAllMetas(batchSize uint64) error { } log.Info("All the metas has been downloaded", "first", first, "end", end, "time", time.Since(ts).Seconds()) - ts = time.Now() - - // empty blobs - for i := end; i < limit; i++ { - meta := [32]byte{} - new(big.Int).SetInt64(int64(i)).FillBytes(meta[0:5]) - - s.blobMetas[i] = meta - } - - log.Info("Empty metas has been filled", "first", end, "limit", limit, "time", time.Since(ts).Seconds()) } return nil @@ -366,7 +353,7 @@ func (s *StorageManager) downloadMetaInParallel(from, to, batchSize uint64) erro taskNum := uint64(MetaDownloadThread) // We don't need to download in parallel if the meta amount is small - if to - from < uint64(taskNum) * batchSize { + if to-from < uint64(taskNum)*batchSize { return s.downloadMetaInRange(from, to, batchSize, 0) } @@ -377,7 +364,7 @@ func (s *StorageManager) downloadMetaInParallel(from, to, batchSize uint64) erro for taskIdx := uint64(0); taskIdx < taskNum; taskIdx++ { rangeStart := taskIdx * rangeSize rangeEnd := (taskIdx + 1) * rangeSize - if taskIdx == taskNum - 1 { + if taskIdx == taskNum-1 { rangeEnd = to } wg.Add(1) @@ -393,8 +380,8 @@ func (s *StorageManager) downloadMetaInParallel(from, to, batchSize uint64) erro wg.Wait() for i := uint64(0); i < taskNum; i++ { - res := <- chanRes - if (res != nil) { + res := <-chanRes + if res != nil { return res } } @@ -440,7 +427,7 @@ func (s *StorageManager) downloadMetaInRange(from, to, batchSize, taskId uint64) s.mu.Unlock() continue } - for i, meta := range(metas) { + for i, meta := range metas { s.blobMetas[kvIndices[i]] = meta } s.mu.Unlock() @@ -449,7 +436,7 @@ func (s *StorageManager) downloadMetaInRange(from, to, batchSize, taskId uint64) "One batch metas has been downloaded", "first", from, "batchLimit", batchLimit, "to", to, - "progress", fmt.Sprintf("%.1f%%", float64((from - rangeStart)*100)/float64(to - rangeStart)), + 
"progress", fmt.Sprintf("%.1f%%", float64((from-rangeStart)*100)/float64(to-rangeStart)), "taskId", taskId) from = batchLimit @@ -459,7 +446,7 @@ func (s *StorageManager) downloadMetaInRange(from, to, batchSize, taskId uint64) // This function is only called by DownloadFinished which already uses s.mu to protect the s.blobMetas, so // we don't need to lock in this function -func (s *StorageManager) updateLocalMetas(kvIndices []uint64, commits []common.Hash, oldLastKvIndex uint64) { +func (s *StorageManager) updateLocalMetas(kvIndices []uint64, commits []common.Hash) { for i, idx := range kvIndices { meta := [32]byte{} new(big.Int).SetInt64(int64(idx)).FillBytes(meta[0:5]) @@ -468,12 +455,10 @@ func (s *StorageManager) updateLocalMetas(kvIndices []uint64, commits []common.H s.blobMetas[idx] = meta } - // In case the lastKvIdx is smaller than oldLastKvIdx because of removal, we need to reset the removal metas - for i := s.lastKvIdx; i < oldLastKvIndex; i++ { - meta := [32]byte{} - new(big.Int).SetInt64(int64(i)).FillBytes(meta[0:5]) - - s.blobMetas[i] = meta + // In case the lastKvIdx is smaller than oldLastKvIdx because of removal, we need to remove those metas + LocalMetaLen := len(s.blobMetas) + for i := int(s.lastKvIdx); i < LocalMetaLen; i++ { + delete(s.blobMetas, uint64(i)) } } @@ -485,7 +470,13 @@ func (s *StorageManager) getKvMetas(kvIndices []uint64) ([][32]byte, error) { if ok { metas = append(metas, meta) } else { - return nil, errors.New("meta not found in blobMetas") + if i >= s.lastKvIdx { + meta := [32]byte{} + new(big.Int).SetInt64(int64(i)).FillBytes(meta[0:5]) + metas = append(metas, meta) + } else { + return nil, errors.New("meta not found in blobMetas") + } } } return metas, nil From bc60d5a13c4d49d23400a77d989e37b9eb3b58d8 Mon Sep 17 00:00:00 2001 From: pingke Date: Thu, 30 Nov 2023 10:22:44 +0800 Subject: [PATCH 40/68] merge p2p/protocol/metrics.go to metrics/metrics.go --- ethstorage/metrics/metrics.go | 602 ++++++++++++++++++++++++++ ethstorage/p2p/protocol/metrics.go | 403 ----------------- ethstorage/p2p/protocol/sync_test.go | 43 +- ethstorage/p2p/protocol/syncclient.go | 9 +- ethstorage/p2p/protocol/syncserver.go | 9 +- 5 files changed, 634 insertions(+), 432 deletions(-) delete mode 100644 ethstorage/p2p/protocol/metrics.go diff --git a/ethstorage/metrics/metrics.go b/ethstorage/metrics/metrics.go index 180945eb..f4236426 100644 --- a/ethstorage/metrics/metrics.go +++ b/ethstorage/metrics/metrics.go @@ -1,13 +1,424 @@ +// Copyright 2022-2023, EthStorage. 
+// For license information, see https://github.com/ethstorage/es-node/blob/main/LICENSE + package metrics import ( + "context" + "fmt" + "net" + "strconv" + "time" + + ophttp "github.com/ethereum-optimism/optimism/op-node/http" + "github.com/ethereum-optimism/optimism/op-service/metrics" pb "github.com/libp2p/go-libp2p-pubsub/pb" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/collectors" + "github.com/prometheus/client_golang/prometheus/promhttp" +) + +const ( + Namespace = "es_node" + + SyncServerSubsystem = "sync_server" + SyncClientSubsystem = "sync_client" + ContractMetrics = "contract_data" ) +type Metricer interface { + SetLastKVIndexAndMaxShardId(lastL1Block, lastKVIndex uint64, maxShardId uint64) + SetMiningInfo(shardId uint64, difficulty, minedTime, blockMined uint64) + + ClientGetBlobsByRangeEvent(peerID string, resultCode byte, duration time.Duration) + ClientGetBlobsByListEvent(peerID string, resultCode byte, duration time.Duration) + ClientFillEmptyBlobsEvent(count uint64, duration time.Duration) + ClientOnBlobsByRange(peerID string, reqCount, retBlobCount, insertedCount uint64, duration time.Duration) + ClientOnBlobsByList(peerID string, reqCount, retBlobCount, insertedCount uint64, duration time.Duration) + ClientRecordTimeUsed(method string) func() + IncDropPeerCount() + IncPeerCount() + DecPeerCount() + ServerGetBlobsByRangeEvent(peerID string, resultCode byte, duration time.Duration) + ServerGetBlobsByListEvent(peerID string, resultCode byte, duration time.Duration) + ServerReadBlobs(peerID string, read, sucRead uint64, timeUse time.Duration) + ServerRecordTimeUsed(method string) func() + Document() []metrics.DocumentedMetric + RecordGossipEvent(evType int32) + SetPeerScores(map[string]float64) + RecordUp() + RecordInfo(version string) + Serve(ctx context.Context, hostname string, port int) error +} + +// Metrics tracks all the metrics for the op-node. type Metrics struct { + lastSubmissionTimes map[uint64]uint64 + + // Contract Status + LastL1Block prometheus.Gauge + LastKVIndex prometheus.Gauge + Shards prometheus.Gauge + Difficulties *prometheus.GaugeVec + LastSubmissionTime *prometheus.GaugeVec + MinedTime *prometheus.GaugeVec + BlockMined *prometheus.GaugeVec + + // P2P Metrics PeerScores *prometheus.GaugeVec GossipEventsTotal *prometheus.CounterVec + + SyncClientRequestsTotal *prometheus.CounterVec + SyncClientRequestDurationSeconds *prometheus.HistogramVec + SyncClientState *prometheus.GaugeVec + SyncClientPeerRequestsTotal *prometheus.CounterVec + SyncClientPeerRequestDurationSeconds *prometheus.HistogramVec + SyncClientPeerState *prometheus.GaugeVec + + SyncClientPerfCallTotal *prometheus.CounterVec + SyncClientPerfCallDurationSeconds *prometheus.HistogramVec + + PeerCount prometheus.Gauge + DropPeerCount prometheus.Counter + + SyncServerHandleReqTotal *prometheus.CounterVec + SyncServerHandleReqDurationSeconds *prometheus.HistogramVec + SyncServerHandleReqState *prometheus.GaugeVec + SyncServerHandleReqTotalPerPeer *prometheus.CounterVec + SyncServerHandleReqDurationSecondsPerPeer *prometheus.HistogramVec + SyncServerHandleReqStatePerPeer *prometheus.GaugeVec + SyncServerPerfCallTotal *prometheus.CounterVec + SyncServerPerfCallDurationSeconds *prometheus.HistogramVec + + Info *prometheus.GaugeVec + Up prometheus.Gauge + + registry *prometheus.Registry + factory metrics.Factory +} + +var _ Metricer = (*Metrics)(nil) + +// NewMetrics creates a new [Metrics] instance with the given process name. 
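NewMetrics registers every collector on a private registry, and Serve exposes that registry over HTTP until the context is cancelled. A rough wiring sketch follows; the import path, port, and version string are assumptions, since the actual es-node startup code is not part of this hunk:

```go
package main

import (
	"context"
	"log"

	"github.com/ethstorage/es-node/ethstorage/metrics" // assumed module path
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	m := metrics.NewMetrics("default")
	m.RecordInfo("v0.1.2") // hypothetical version string
	m.RecordUp()

	// Serve blocks until ctx is cancelled and the HTTP server shuts down;
	// the host and port here are placeholders.
	if err := m.Serve(ctx, "0.0.0.0", 7300); err != nil {
		log.Println("metrics server stopped:", err)
	}
}
```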
+func NewMetrics(procName string) *Metrics { + if procName == "" { + procName = "default" + } + ns := Namespace + "_" + procName + + registry := prometheus.NewRegistry() + registry.MustRegister(collectors.NewProcessCollector(collectors.ProcessCollectorOpts{})) + registry.MustRegister(collectors.NewGoCollector()) + factory := metrics.With(registry) + return &Metrics{ + lastSubmissionTimes: make(map[uint64]uint64), + LastL1Block: factory.NewGauge(prometheus.GaugeOpts{ + Namespace: ns, + Subsystem: ContractMetrics, + Name: "last_l1_block", + Help: "the last l1 block monitored", + }), + + LastKVIndex: factory.NewGauge(prometheus.GaugeOpts{ + Namespace: ns, + Subsystem: ContractMetrics, + Name: "last_kv_index", + Help: "the last kv index in the l1 miner contract", + }), + + Shards: factory.NewGauge(prometheus.GaugeOpts{ + Namespace: ns, + Subsystem: ContractMetrics, + Name: "max_shard_id", + Help: "the max shard id support by the l1 miner contract", + }), + + Difficulties: factory.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: ns, + Subsystem: ContractMetrics, + Name: "difficulty_of_shards", + Help: "The difficulty of shards in the l1 miner contract", + }, []string{ + "shard_id", + }), + + LastSubmissionTime: factory.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: ns, + Subsystem: ContractMetrics, + Name: "last_submission_time_of_shards", + Help: "The last time of shards in the l1 miner contract", + }, []string{ + "shard_id", + }), + + MinedTime: factory.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: ns, + Subsystem: ContractMetrics, + Name: "last_mined_time_of_shards", + Help: "The time used by mining of shards in the l1 miner contract", + }, []string{ + "shard_id", + "block_mined", + }), + + BlockMined: factory.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: ns, + Subsystem: ContractMetrics, + Name: "block_mined_of_shards", + Help: "The block mined of shards in the l1 miner contract", + }, []string{ + "shard_id", + }), + + SyncClientRequestsTotal: factory.NewCounterVec(prometheus.CounterOpts{ + Namespace: ns, + Subsystem: SyncClientSubsystem, + Name: "requests_total", + Help: "Total P2P requests sent", + }, []string{ + "p2p_method", + "result_code", + }), + + SyncClientRequestDurationSeconds: factory.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: ns, + Subsystem: SyncClientSubsystem, + Name: "request_duration_seconds", + Buckets: []float64{}, + Help: "Duration of P2P requests", + }, []string{ + "p2p_method", + "result_code", + }), + + SyncClientState: factory.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: ns, + Subsystem: SyncClientSubsystem, + Name: "sync_state", + Help: "The state of sync client", + }, []string{ + "state", + }), + + SyncClientPeerRequestsTotal: factory.NewCounterVec(prometheus.CounterOpts{ + Namespace: ns, + Subsystem: SyncClientSubsystem, + Name: "requests_total_for_peer", + Help: "Total P2P requests sent by a peer", + }, []string{ + "peer_id", + "p2p_method", + "result_code", + }), + + SyncClientPeerRequestDurationSeconds: factory.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: ns, + Subsystem: SyncClientSubsystem, + Name: "request_duration_seconds_for_peer", + Buckets: []float64{}, + Help: "Duration of P2P requests per peer", + }, []string{ + "peer_id", + "p2p_method", + "result_code", + }), + + SyncClientPeerState: factory.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: ns, + Subsystem: SyncClientSubsystem, + Name: "sync_state_for_peer", + Help: "The sync state of peer", + }, []string{ + "peer_id", + "state", + }), + + SyncClientPerfCallTotal: 
factory.NewCounterVec(prometheus.CounterOpts{ + Namespace: ns, + Subsystem: SyncClientSubsystem, + Name: "calls_total", + Help: "Number of call for method which need performance data", + }, []string{ + "method", + }), + + SyncClientPerfCallDurationSeconds: factory.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: ns, + Subsystem: SyncClientSubsystem, + Name: "call_duration_seconds", + Buckets: []float64{}, + Help: "Duration of calls", + }, []string{ + "method", + }), + + PeerCount: factory.NewGauge(prometheus.GaugeOpts{ + Namespace: ns, + Subsystem: SyncClientSubsystem, + Name: "peer_count", + Help: "Count of currently connected p2p peers", + }), + + DropPeerCount: factory.NewGauge(prometheus.GaugeOpts{ + Namespace: ns, + Subsystem: SyncClientSubsystem, + Name: "drop_peer_count", + Help: "Count of peers drop by sync client deal to peer limit", + }), + + SyncServerHandleReqTotal: factory.NewCounterVec(prometheus.CounterOpts{ + Namespace: ns, + Subsystem: SyncServerSubsystem, + Name: "handle_req_total", + Help: "Number of P2P requests handle by sync server", + }, []string{ + "p2p_method", + "result_code", + }), + + SyncServerHandleReqDurationSeconds: factory.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: ns, + Subsystem: SyncServerSubsystem, + Name: "handle_req_duration_seconds", + Buckets: []float64{}, + Help: "Duration of P2P requests", + }, []string{ + "p2p_method", + "result_code", + }), + + SyncServerHandleReqState: factory.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: ns, + Subsystem: SyncServerSubsystem, + Name: "handle_req_state", + Help: "The handle request state of sync server", + }, []string{ + "state", + }), + + SyncServerHandleReqTotalPerPeer: factory.NewCounterVec(prometheus.CounterOpts{ + Namespace: ns, + Subsystem: SyncServerSubsystem, + Name: "handle_req_total_per_peer", + Help: "Number of P2P requests per peer", + }, []string{ + "peer_id", + "p2p_method", + "result_code", + }), + + SyncServerHandleReqDurationSecondsPerPeer: factory.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: ns, + Subsystem: SyncServerSubsystem, + Name: "handle_req_duration_seconds_per_peer", + Buckets: []float64{}, + Help: "Duration of P2P requests per peer", + }, []string{ + "peer_id", + "p2p_method", + "result_code", + }), + + SyncServerHandleReqStatePerPeer: factory.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: ns, + Subsystem: SyncServerSubsystem, + Name: "handle_req_state_of_peer", + Help: "The handle request state of peer", + }, []string{ + "peer_id", + "state", + }), + + SyncServerPerfCallTotal: factory.NewCounterVec(prometheus.CounterOpts{ + Namespace: ns, + Subsystem: SyncServerSubsystem, + Name: "calls_total", + Help: "Number of call for method which need performance data", + }, []string{ + "method", + }), + + SyncServerPerfCallDurationSeconds: factory.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: ns, + Subsystem: SyncServerSubsystem, + Name: "call_duration_seconds", + Buckets: []float64{}, + Help: "Duration of calls", + }, []string{ + "method", + }), + + PeerScores: factory.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: ns, + Subsystem: "p2p", + Name: "peer_scores", + Help: "Count of peer scores grouped by score", + }, []string{ + "band", + }), + + GossipEventsTotal: factory.NewCounterVec(prometheus.CounterOpts{ + Namespace: ns, + Subsystem: "p2p", + Name: "gossip_events_total", + Help: "Count of gossip events by type", + }, []string{ + "type", + }), + + Info: factory.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: ns, + Name: "info", + Help: "Pseudo-metric 
tracking version and config info", + }, []string{ + "version", + }), + + Up: factory.NewGauge(prometheus.GaugeOpts{ + Namespace: ns, + Name: "up", + Help: "1 if the op node has finished starting up", + }), + + registry: registry, + + factory: factory, + } +} + +func (m *Metrics) Document() []metrics.DocumentedMetric { + return m.factory.Document() +} + +// Serve starts the metrics server on the given hostname and port. +// The server will be closed when the passed-in context is cancelled. +func (m *Metrics) Serve(ctx context.Context, hostname string, port int) error { + addr := net.JoinHostPort(hostname, strconv.Itoa(port)) + server := ophttp.NewHttpServer(promhttp.InstrumentMetricHandler( + m.registry, promhttp.HandlerFor(m.registry, promhttp.HandlerOpts{}), + )) + server.Addr = addr + go func() { + <-ctx.Done() + server.Close() + }() + return server.ListenAndServe() +} + +func (m *Metrics) SetLastKVIndexAndMaxShardId(lastL1Block, lastKVIndex uint64, maxShardId uint64) { + m.LastL1Block.Set(float64(lastL1Block)) + m.LastKVIndex.Set(float64(lastKVIndex)) + m.Shards.Set(float64(maxShardId)) +} + +func (m *Metrics) SetMiningInfo(shardId uint64, difficulty, minedTime, blockMined uint64) { + m.Difficulties.WithLabelValues(fmt.Sprintf("%d", shardId)).Set(float64(difficulty)) + m.LastSubmissionTime.WithLabelValues(fmt.Sprintf("%d", shardId)).Set(float64(minedTime)) + m.BlockMined.WithLabelValues(fmt.Sprintf("%d", shardId)).Set(float64(blockMined)) + if t, ok := m.lastSubmissionTimes[shardId]; ok && t != minedTime { + m.MinedTime.WithLabelValues(fmt.Sprintf("%d", shardId), fmt.Sprintf("%d", blockMined)).Set(float64(minedTime - t)) + } + m.lastSubmissionTimes[shardId] = minedTime } func (m *Metrics) RecordGossipEvent(evType int32) { @@ -21,3 +432,194 @@ func (m *Metrics) SetPeerScores(scores map[string]float64) { m.PeerScores.WithLabelValues(label).Set(score) } } + +func (m *Metrics) ClientGetBlobsByRangeEvent(peerID string, resultCode byte, duration time.Duration) { + code := strconv.FormatUint(uint64(resultCode), 10) + m.SyncClientRequestsTotal.WithLabelValues("get_blobs_by_range", code).Inc() + m.SyncClientRequestDurationSeconds.WithLabelValues("get_blobs_by_range", code).Observe(float64(duration) / float64(time.Second)) + m.SyncClientPeerRequestsTotal.WithLabelValues(peerID, "get_blobs_by_range", code).Inc() + m.SyncClientPeerRequestDurationSeconds.WithLabelValues(peerID, "get_blobs_by_range", code).Observe(float64(duration) / float64(time.Second)) +} + +func (m *Metrics) ClientGetBlobsByListEvent(peerID string, resultCode byte, duration time.Duration) { + code := strconv.FormatUint(uint64(resultCode), 10) + m.SyncClientRequestsTotal.WithLabelValues("get_blobs_by_list", code).Inc() + m.SyncClientRequestDurationSeconds.WithLabelValues("get_blobs_by_list", code).Observe(float64(duration) / float64(time.Second)) + m.SyncClientPeerRequestsTotal.WithLabelValues(peerID, "get_blobs_by_list", code).Inc() + m.SyncClientPeerRequestDurationSeconds.WithLabelValues(peerID, "get_blobs_by_list", code).Observe(float64(duration) / float64(time.Second)) +} + +func (m *Metrics) ClientFillEmptyBlobsEvent(count uint64, duration time.Duration) { + method := "fillEmpty" + m.SyncClientPerfCallTotal.WithLabelValues(method).Add(float64(count)) + m.SyncClientPerfCallDurationSeconds.WithLabelValues(method).Observe(float64(duration) / float64(time.Second) / float64(count)) +} + +func (m *Metrics) ClientOnBlobsByRange(peerID string, reqBlobCount, retBlobCount, insertedCount uint64, duration time.Duration) { + 
m.SyncClientState.WithLabelValues("reqBlobCount").Add(float64(reqBlobCount)) + m.SyncClientState.WithLabelValues("retBlobCount").Add(float64(retBlobCount)) + m.SyncClientState.WithLabelValues("insertedBlobCount").Add(float64(insertedCount)) + + m.SyncClientPeerState.WithLabelValues(peerID, "reqBlobCount").Add(float64(reqBlobCount)) + m.SyncClientPeerState.WithLabelValues(peerID, "retBlobCount").Add(float64(retBlobCount)) + m.SyncClientPeerState.WithLabelValues(peerID, "insertedBlobCount").Add(float64(insertedCount)) + + method := "onBlobsByRange" + m.SyncClientPerfCallTotal.WithLabelValues(method).Inc() + m.SyncClientPerfCallDurationSeconds.WithLabelValues(method).Observe(float64(duration) / float64(time.Second)) +} + +func (m *Metrics) ClientOnBlobsByList(peerID string, reqCount, retBlobCount, insertedCount uint64, duration time.Duration) { + m.SyncClientState.WithLabelValues("reqBlobCount").Add(float64(reqCount)) + m.SyncClientState.WithLabelValues("retBlobCount").Add(float64(retBlobCount)) + m.SyncClientState.WithLabelValues("insertedBlobCount").Add(float64(insertedCount)) + + m.SyncClientPeerState.WithLabelValues(peerID, "reqBlobCount").Add(float64(reqCount)) + m.SyncClientPeerState.WithLabelValues(peerID, "retBlobCount").Add(float64(retBlobCount)) + m.SyncClientPeerState.WithLabelValues(peerID, "insertedBlobCount").Add(float64(insertedCount)) + + method := "onBlobsByList" + m.SyncClientPerfCallTotal.WithLabelValues(method).Inc() + m.SyncClientPerfCallDurationSeconds.WithLabelValues(method).Observe(float64(duration) / float64(time.Second)) +} + +func (m *Metrics) ClientRecordTimeUsed(method string) func() { + m.SyncClientPerfCallTotal.WithLabelValues(method).Inc() + timer := prometheus.NewTimer(m.SyncClientPerfCallDurationSeconds.WithLabelValues(method)) + return func() { + timer.ObserveDuration() + } +} + +func (m *Metrics) IncDropPeerCount() { + m.DropPeerCount.Inc() +} + +func (m *Metrics) IncPeerCount() { + m.PeerCount.Inc() +} + +func (m *Metrics) DecPeerCount() { + m.PeerCount.Dec() +} + +func (m *Metrics) ServerGetBlobsByRangeEvent(peerID string, resultCode byte, duration time.Duration) { + code := strconv.FormatUint(uint64(resultCode), 10) + m.SyncServerHandleReqTotal.WithLabelValues("get_blobs_by_range", code).Inc() + m.SyncServerHandleReqDurationSeconds.WithLabelValues("get_blobs_by_range", code).Observe(float64(duration) / float64(time.Second)) + + m.SyncServerHandleReqTotalPerPeer.WithLabelValues(peerID, "get_blobs_by_range", code).Inc() + m.SyncServerHandleReqDurationSecondsPerPeer.WithLabelValues(peerID, "get_blobs_by_range", code).Observe(float64(duration) / float64(time.Second)) +} + +func (m *Metrics) ServerGetBlobsByListEvent(peerID string, resultCode byte, duration time.Duration) { + code := strconv.FormatUint(uint64(resultCode), 10) + m.SyncServerHandleReqTotal.WithLabelValues("get_blobs_by_list", code).Inc() + m.SyncServerHandleReqDurationSeconds.WithLabelValues("get_blobs_by_list", code).Observe(float64(duration) / float64(time.Second)) + + m.SyncServerHandleReqTotalPerPeer.WithLabelValues(peerID, "get_blobs_by_list", code).Inc() + m.SyncServerHandleReqDurationSecondsPerPeer.WithLabelValues(peerID, "get_blobs_by_list", code).Observe(float64(duration) / float64(time.Second)) +} + +func (m *Metrics) ServerReadBlobs(peerID string, read, sucRead uint64, timeUse time.Duration) { + m.SyncServerHandleReqState.WithLabelValues("read").Add(float64(read)) + m.SyncServerHandleReqState.WithLabelValues("sucRead").Add(float64(sucRead)) + 
m.SyncServerHandleReqStatePerPeer.WithLabelValues(peerID, "read").Add(float64(read)) + m.SyncServerHandleReqStatePerPeer.WithLabelValues(peerID, "sucRead").Add(float64(sucRead)) + + method := "readBlobs" + m.SyncServerPerfCallTotal.WithLabelValues(method).Inc() + m.SyncServerPerfCallDurationSeconds.WithLabelValues(method).Observe(float64(timeUse) / float64(time.Second)) +} + +func (m *Metrics) ServerRecordTimeUsed(method string) func() { + m.SyncServerPerfCallTotal.WithLabelValues(method).Inc() + timer := prometheus.NewTimer(m.SyncServerPerfCallDurationSeconds.WithLabelValues(method)) + return func() { + timer.ObserveDuration() + } +} + +// RecordInfo sets a pseudo-metric that contains versioning and +// config info for the opnode. +func (m *Metrics) RecordInfo(version string) { + m.Info.WithLabelValues(version).Set(1) +} + +// RecordUp sets the up metric to 1. +func (m *Metrics) RecordUp() { + prometheus.MustRegister() + m.Up.Set(1) +} + +type noopMetricer struct{} + +var NoopMetrics = new(noopMetricer) + +func (n *noopMetricer) Document() []metrics.DocumentedMetric { + return nil +} + +func (m *noopMetricer) Serve(ctx context.Context, hostname string, port int) error { + return nil +} + +func (m *noopMetricer) SetLastKVIndexAndMaxShardId(lastL1Block, lastKVIndex uint64, maxShardId uint64) { +} + +func (m *noopMetricer) SetMiningInfo(shardId uint64, difficulty, minedTime, blockMined uint64) { +} + +func (n *noopMetricer) ClientGetBlobsByRangeEvent(peerID string, resultCode byte, duration time.Duration) { +} + +func (n *noopMetricer) ClientGetBlobsByListEvent(peerID string, resultCode byte, duration time.Duration) { +} + +func (n *noopMetricer) ClientFillEmptyBlobsEvent(count uint64, duration time.Duration) { +} + +func (n *noopMetricer) ClientOnBlobsByRange(peerID string, reqCount, retBlobCount, insertedCount uint64, duration time.Duration) { + +} + +func (n *noopMetricer) ClientOnBlobsByList(peerID string, reqCount, retBlobCount, insertedCount uint64, duration time.Duration) { +} + +func (n *noopMetricer) ClientRecordTimeUsed(method string) func() { + return func() {} +} + +func (n *noopMetricer) IncDropPeerCount() { +} + +func (n *noopMetricer) IncPeerCount() { +} + +func (n *noopMetricer) DecPeerCount() { +} + +func (n *noopMetricer) ServerGetBlobsByRangeEvent(peerID string, resultCode byte, duration time.Duration) { +} + +func (n *noopMetricer) ServerGetBlobsByListEvent(peerID string, resultCode byte, duration time.Duration) { +} + +func (n *noopMetricer) ServerReadBlobs(peerID string, read, sucRead uint64, timeUse time.Duration) { +} + +func (n *noopMetricer) ServerRecordTimeUsed(method string) func() { + return func() {} +} + +func (m *noopMetricer) RecordGossipEvent(evType int32) { +} + +func (m *noopMetricer) SetPeerScores(scores map[string]float64) { +} + +func (n *noopMetricer) RecordInfo(version string) { +} + +func (n *noopMetricer) RecordUp() { +} diff --git a/ethstorage/p2p/protocol/metrics.go b/ethstorage/p2p/protocol/metrics.go deleted file mode 100644 index a7e3eef8..00000000 --- a/ethstorage/p2p/protocol/metrics.go +++ /dev/null @@ -1,403 +0,0 @@ -// Copyright 2022-2023, EthStorage. 
-// For license information, see https://github.com/ethstorage/es-node/blob/main/LICENSE - -package protocol - -import ( - "strconv" - "time" - - "github.com/ethereum-optimism/optimism/op-service/metrics" - - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/collectors" -) - -const ( - Namespace = "p2p" - - SyncServerSubsystem = "sync_server" - SyncClientSubsystem = "sync_client" -) - -type Metricer interface { - ClientGetBlobsByRangeEvent(peerID string, resultCode byte, duration time.Duration) - ClientGetBlobsByListEvent(peerID string, resultCode byte, duration time.Duration) - ClientFillEmptyBlobsEvent(count uint64, duration time.Duration) - ClientOnBlobsByRange(peerID string, reqCount, retBlobCount, insertedCount uint64, duration time.Duration) - ClientOnBlobsByList(peerID string, reqCount, retBlobCount, insertedCount uint64, duration time.Duration) - ClientRecordTimeUsed(method string) func() - IncDropPeerCount() - IncPeerCount() - DecPeerCount() - ServerGetBlobsByRangeEvent(peerID string, resultCode byte, duration time.Duration) - ServerGetBlobsByListEvent(peerID string, resultCode byte, duration time.Duration) - ServerReadBlobs(peerID string, read, sucRead uint64, timeUse time.Duration) - ServerRecordTimeUsed(method string) func() -} - -// Metrics tracks all the metrics for the op-node. -type Metrics struct { - SyncClientRequestsTotal *prometheus.CounterVec - SyncClientRequestDurationSeconds *prometheus.HistogramVec - SyncClientState *prometheus.GaugeVec - SyncClientPeerRequestsTotal *prometheus.CounterVec - SyncClientPeerRequestDurationSeconds *prometheus.HistogramVec - SyncClientPeerState *prometheus.GaugeVec - - SyncClientPerfCallTotal *prometheus.CounterVec - SyncClientPerfCallDurationSeconds *prometheus.HistogramVec - - // P2P Metrics - PeerCount prometheus.Gauge - DropPeerCount prometheus.Counter - - SyncServerHandleReqTotal *prometheus.CounterVec - SyncServerHandleReqDurationSeconds *prometheus.HistogramVec - SyncServerHandleReqState *prometheus.GaugeVec - SyncServerHandleReqTotalPerPeer *prometheus.CounterVec - SyncServerHandleReqDurationSecondsPerPeer *prometheus.HistogramVec - SyncServerHandleReqStatePerPeer *prometheus.GaugeVec - SyncServerPerfCallTotal *prometheus.CounterVec - SyncServerPerfCallDurationSeconds *prometheus.HistogramVec -} - -var _ Metricer = (*Metrics)(nil) - -// NewMetrics creates a new [Metrics] instance with the given process name. 
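The deleted p2p/protocol metrics and the new merged package share the same shape: a Metricer interface, a Prometheus-backed implementation, and a no-op implementation so callers never have to nil-check. A condensed sketch of that pattern, using a hypothetical two-method interface rather than the full es-node one:

```go
package main

import (
	"fmt"
	"time"
)

// Metricer is a cut-down, hypothetical stand-in for the real interface.
type Metricer interface {
	IncPeerCount()
	ServerRecordTimeUsed(method string) func()
}

// promMetrics stands in for the Prometheus-backed Metrics struct.
type promMetrics struct{ peers int }

func (m *promMetrics) IncPeerCount() { m.peers++ }

func (m *promMetrics) ServerRecordTimeUsed(method string) func() {
	start := time.Now()
	return func() { fmt.Printf("%s took %s\n", method, time.Since(start)) }
}

// noopMetrics mirrors noopMetricer: every method does nothing.
type noopMetrics struct{}

func (noopMetrics) IncPeerCount()                      {}
func (noopMetrics) ServerRecordTimeUsed(string) func() { return func() {} }

func handleRequest(m Metricer) {
	// The returned closure is deferred, so it observes the elapsed time on exit.
	defer m.ServerRecordTimeUsed("handleRequest")()
	m.IncPeerCount()
}

func main() {
	handleRequest(&promMetrics{}) // real metrics
	handleRequest(noopMetrics{})  // metrics disabled, e.g. in tests
}
```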
-func NewMetrics(procName string) *Metrics { - if procName == "" { - procName = "default" - } - ns := Namespace + "_" + procName - - registry := prometheus.NewRegistry() - registry.MustRegister(collectors.NewProcessCollector(collectors.ProcessCollectorOpts{})) - registry.MustRegister(collectors.NewGoCollector()) - factory := metrics.With(registry) - return &Metrics{ - SyncClientRequestsTotal: factory.NewCounterVec(prometheus.CounterOpts{ - Namespace: ns, - Subsystem: SyncClientSubsystem, - Name: "requests_total", - Help: "Total P2P requests sent", - }, []string{ - "p2p_method", - "result_code", - }), - - SyncClientRequestDurationSeconds: factory.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: ns, - Subsystem: SyncClientSubsystem, - Name: "request_duration_seconds", - Buckets: []float64{}, - Help: "Duration of P2P requests", - }, []string{ - "p2p_method", - "result_code", - }), - - SyncClientState: factory.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: ns, - Subsystem: SyncClientSubsystem, - Name: "sync_state", - Help: "The state of sync client", - }, []string{ - "state", - }), - - SyncClientPeerRequestsTotal: factory.NewCounterVec(prometheus.CounterOpts{ - Namespace: ns, - Subsystem: SyncClientSubsystem, - Name: "requests_total_for_peer", - Help: "Total P2P requests sent by a peer", - }, []string{ - "peer_id", - "p2p_method", - "result_code", - }), - - SyncClientPeerRequestDurationSeconds: factory.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: ns, - Subsystem: SyncClientSubsystem, - Name: "request_duration_seconds_for_peer", - Buckets: []float64{}, - Help: "Duration of P2P requests per peer", - }, []string{ - "peer_id", - "p2p_method", - "result_code", - }), - - SyncClientPeerState: factory.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: ns, - Subsystem: SyncClientSubsystem, - Name: "sync_state_for_peer", - Help: "The sync state of peer", - }, []string{ - "peer_id", - "state", - }), - - SyncClientPerfCallTotal: factory.NewCounterVec(prometheus.CounterOpts{ - Namespace: ns, - Subsystem: SyncClientSubsystem, - Name: "calls_total", - Help: "Number of call for method which need performance data", - }, []string{ - "method", - }), - - SyncClientPerfCallDurationSeconds: factory.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: ns, - Subsystem: SyncClientSubsystem, - Name: "call_duration_seconds", - Buckets: []float64{}, - Help: "Duration of calls", - }, []string{ - "method", - }), - - PeerCount: factory.NewGauge(prometheus.GaugeOpts{ - Namespace: ns, - Subsystem: SyncClientSubsystem, - Name: "peer_count", - Help: "Count of currently connected p2p peers", - }), - - DropPeerCount: factory.NewGauge(prometheus.GaugeOpts{ - Namespace: ns, - Subsystem: SyncClientSubsystem, - Name: "drop_peer_count", - Help: "Count of peers drop by sync client deal to peer limit", - }), - - SyncServerHandleReqTotal: factory.NewCounterVec(prometheus.CounterOpts{ - Namespace: ns, - Subsystem: SyncServerSubsystem, - Name: "handle_req_total", - Help: "Number of P2P requests handle by sync server", - }, []string{ - "p2p_method", - "result_code", - }), - - SyncServerHandleReqDurationSeconds: factory.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: ns, - Subsystem: SyncServerSubsystem, - Name: "handle_req_duration_seconds", - Buckets: []float64{}, - Help: "Duration of P2P requests", - }, []string{ - "p2p_method", - "result_code", - }), - - SyncServerHandleReqState: factory.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: ns, - Subsystem: SyncServerSubsystem, - Name: "handle_req_state", - Help: "The 
handle request state of sync server", - }, []string{ - "state", - }), - - SyncServerHandleReqTotalPerPeer: factory.NewCounterVec(prometheus.CounterOpts{ - Namespace: ns, - Subsystem: SyncServerSubsystem, - Name: "handle_req_total_per_peer", - Help: "Number of P2P requests per peer", - }, []string{ - "peer_id", - "p2p_method", - "result_code", - }), - - SyncServerHandleReqDurationSecondsPerPeer: factory.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: ns, - Subsystem: SyncServerSubsystem, - Name: "handle_req_duration_seconds_per_peer", - Buckets: []float64{}, - Help: "Duration of P2P requests per peer", - }, []string{ - "peer_id", - "p2p_method", - "result_code", - }), - - SyncServerHandleReqStatePerPeer: factory.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: ns, - Subsystem: SyncServerSubsystem, - Name: "handle_req_state_of_peer", - Help: "The handle request state of peer", - }, []string{ - "peer_id", - "state", - }), - - SyncServerPerfCallTotal: factory.NewCounterVec(prometheus.CounterOpts{ - Namespace: ns, - Subsystem: SyncServerSubsystem, - Name: "calls_total", - Help: "Number of call for method which need performance data", - }, []string{ - "method", - }), - - SyncServerPerfCallDurationSeconds: factory.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: ns, - Subsystem: SyncServerSubsystem, - Name: "call_duration_seconds", - Buckets: []float64{}, - Help: "Duration of calls", - }, []string{ - "method", - }), - } -} - -func (m *Metrics) ClientGetBlobsByRangeEvent(peerID string, resultCode byte, duration time.Duration) { - code := strconv.FormatUint(uint64(resultCode), 10) - m.SyncClientRequestsTotal.WithLabelValues("get_blobs_by_range", code).Inc() - m.SyncClientRequestDurationSeconds.WithLabelValues("get_blobs_by_range", code).Observe(float64(duration) / float64(time.Second)) - m.SyncClientPeerRequestsTotal.WithLabelValues(peerID, "get_blobs_by_range", code).Inc() - m.SyncClientPeerRequestDurationSeconds.WithLabelValues(peerID, "get_blobs_by_range", code).Observe(float64(duration) / float64(time.Second)) -} - -func (m *Metrics) ClientGetBlobsByListEvent(peerID string, resultCode byte, duration time.Duration) { - code := strconv.FormatUint(uint64(resultCode), 10) - m.SyncClientRequestsTotal.WithLabelValues("get_blobs_by_list", code).Inc() - m.SyncClientRequestDurationSeconds.WithLabelValues("get_blobs_by_list", code).Observe(float64(duration) / float64(time.Second)) - m.SyncClientPeerRequestsTotal.WithLabelValues(peerID, "get_blobs_by_list", code).Inc() - m.SyncClientPeerRequestDurationSeconds.WithLabelValues(peerID, "get_blobs_by_list", code).Observe(float64(duration) / float64(time.Second)) -} - -func (m *Metrics) ClientFillEmptyBlobsEvent(count uint64, duration time.Duration) { - method := "fillEmpty" - m.SyncClientPerfCallTotal.WithLabelValues(method).Add(float64(count)) - m.SyncClientPerfCallTotal.WithLabelValues(method).Add(float64(duration) / float64(time.Second) / float64(count)) -} - -func (m *Metrics) ClientOnBlobsByRange(peerID string, reqBlobCount, retBlobCount, insertedCount uint64, duration time.Duration) { - m.SyncClientState.WithLabelValues("reqBlobCount").Add(float64(reqBlobCount)) - m.SyncClientState.WithLabelValues("retBlobCount").Add(float64(retBlobCount)) - m.SyncClientState.WithLabelValues("insertedBlobCount").Add(float64(insertedCount)) - - m.SyncClientPeerState.WithLabelValues(peerID, "reqBlobCount").Add(float64(reqBlobCount)) - m.SyncClientPeerState.WithLabelValues(peerID, "retBlobCount").Add(float64(retBlobCount)) - 
m.SyncClientPeerState.WithLabelValues(peerID, "insertedBlobCount").Add(float64(insertedCount)) - - method := "onBlobsByRange" - m.SyncClientPerfCallTotal.WithLabelValues(method).Inc() - m.SyncClientPerfCallTotal.WithLabelValues(method).Add(float64(duration) / float64(time.Second)) -} - -func (m *Metrics) ClientOnBlobsByList(peerID string, reqCount, retBlobCount, insertedCount uint64, duration time.Duration) { - - method := "onBlobsByList" - m.SyncClientPerfCallTotal.WithLabelValues(method).Inc() - m.SyncClientPerfCallTotal.WithLabelValues(method).Add(float64(duration) / float64(time.Second)) -} - -func (m *Metrics) ClientRecordTimeUsed(method string) func() { - m.SyncClientPerfCallTotal.WithLabelValues(method).Inc() - timer := prometheus.NewTimer(m.SyncClientPerfCallDurationSeconds.WithLabelValues(method)) - return func() { - timer.ObserveDuration() - } -} - -func (m *Metrics) IncDropPeerCount() { - m.DropPeerCount.Inc() -} - -func (m *Metrics) IncPeerCount() { - m.PeerCount.Inc() -} - -func (m *Metrics) DecPeerCount() { - m.PeerCount.Dec() -} - -func (m *Metrics) ServerGetBlobsByRangeEvent(peerID string, resultCode byte, duration time.Duration) { - code := strconv.FormatUint(uint64(resultCode), 10) - m.SyncServerHandleReqTotal.WithLabelValues("get_blobs_by_range", code).Inc() - m.SyncServerHandleReqDurationSeconds.WithLabelValues("get_blobs_by_range", code).Observe(float64(duration) / float64(time.Second)) - - m.SyncServerHandleReqTotalPerPeer.WithLabelValues(peerID, "get_blobs_by_range", code).Inc() - m.SyncServerHandleReqDurationSecondsPerPeer.WithLabelValues(peerID, "get_blobs_by_range", code).Observe(float64(duration) / float64(time.Second)) -} - -func (m *Metrics) ServerGetBlobsByListEvent(peerID string, resultCode byte, duration time.Duration) { - code := strconv.FormatUint(uint64(resultCode), 10) - m.SyncServerHandleReqTotal.WithLabelValues("get_blobs_by_list", code).Inc() - m.SyncServerHandleReqDurationSeconds.WithLabelValues("get_blobs_by_list", code).Observe(float64(duration) / float64(time.Second)) - - m.SyncServerHandleReqTotalPerPeer.WithLabelValues(peerID, "get_blobs_by_list", code).Inc() - m.SyncServerHandleReqDurationSecondsPerPeer.WithLabelValues(peerID, "get_blobs_by_list", code).Observe(float64(duration) / float64(time.Second)) -} - -func (m *Metrics) ServerReadBlobs(peerID string, read, sucRead uint64, timeUse time.Duration) { - m.SyncServerHandleReqState.WithLabelValues("read").Add(float64(read)) - m.SyncServerHandleReqState.WithLabelValues("sucRead").Add(float64(sucRead)) - m.SyncServerHandleReqStatePerPeer.WithLabelValues(peerID, "read").Add(float64(read)) - m.SyncServerHandleReqStatePerPeer.WithLabelValues(peerID, "sucRead").Add(float64(sucRead)) - - method := "readBlobs" - m.SyncServerPerfCallTotal.WithLabelValues(method).Inc() - m.SyncServerPerfCallDurationSeconds.WithLabelValues(method).Observe(float64(timeUse) / float64(time.Second)) -} - -func (m *Metrics) ServerRecordTimeUsed(method string) func() { - m.SyncServerPerfCallTotal.WithLabelValues(method).Inc() - timer := prometheus.NewTimer(m.SyncServerPerfCallDurationSeconds.WithLabelValues(method)) - return func() { - timer.ObserveDuration() - } -} - -type noopMetricer struct{} - -var NoopMetrics Metricer = new(noopMetricer) - -func (n *noopMetricer) ClientGetBlobsByRangeEvent(peerID string, resultCode byte, duration time.Duration) { -} - -func (n *noopMetricer) ClientGetBlobsByListEvent(peerID string, resultCode byte, duration time.Duration) { -} - -func (n *noopMetricer) ClientFillEmptyBlobsEvent(count 
uint64, duration time.Duration) { -} - -func (n *noopMetricer) ClientOnBlobsByRange(peerID string, reqCount, retBlobCount, insertedCount uint64, duration time.Duration) { - -} - -func (n *noopMetricer) ClientOnBlobsByList(peerID string, reqCount, retBlobCount, insertedCount uint64, duration time.Duration) { -} - -func (n *noopMetricer) ClientRecordTimeUsed(method string) func() { - return func() {} -} - -func (n *noopMetricer) IncDropPeerCount() { -} - -func (n *noopMetricer) IncPeerCount() { -} - -func (n *noopMetricer) DecPeerCount() { -} - -func (n *noopMetricer) ServerGetBlobsByRangeEvent(peerID string, resultCode byte, duration time.Duration) { -} - -func (n *noopMetricer) ServerGetBlobsByListEvent(peerID string, resultCode byte, duration time.Duration) { -} - -func (n *noopMetricer) ServerReadBlobs(peerID string, read, sucRead uint64, timeUse time.Duration) { -} - -func (n *noopMetricer) ServerRecordTimeUsed(method string) func() { - return func() {} -} diff --git a/ethstorage/p2p/protocol/sync_test.go b/ethstorage/p2p/protocol/sync_test.go index aaf9d98e..27889ff6 100644 --- a/ethstorage/p2p/protocol/sync_test.go +++ b/ethstorage/p2p/protocol/sync_test.go @@ -25,6 +25,7 @@ import ( "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/log" "github.com/ethstorage/go-ethstorage/ethstorage" + "github.com/ethstorage/go-ethstorage/ethstorage/metrics" prv "github.com/ethstorage/go-ethstorage/ethstorage/prover" "github.com/ethstorage/go-ethstorage/ethstorage/rollup" "github.com/libp2p/go-libp2p/core/host" @@ -515,7 +516,7 @@ func TestSync_RequestL2Range(t *testing.T) { db = rawdb.NewMemoryDatabase() mux = new(event.Feed) shards = make(map[common.Address][]uint64) - metrics = NewMetrics("sync_test") + m = metrics.NewMetrics("sync_test") rollupCfg = &rollup.EsConfig{ L2ChainID: new(big.Int).SetUint64(3333), MetricsEnable: false, @@ -557,9 +558,9 @@ func TestSync_RequestL2Range(t *testing.T) { } // create local and remote hosts, set up sync client and server - localHost, syncCl := createLocalHostAndSyncClient(t, testLog, rollupCfg, db, sm, metrics, mux) + localHost, syncCl := createLocalHostAndSyncClient(t, testLog, rollupCfg, db, sm, m, mux) syncCl.loadSyncStatus() - remoteHost := createRemoteHost(t, ctx, rollupCfg, smr, metrics, testLog) + remoteHost := createRemoteHost(t, ctx, rollupCfg, smr, m, testLog) connect(t, localHost, remoteHost, shards, shards) time.Sleep(2 * time.Second) @@ -582,7 +583,7 @@ func TestSync_RequestL2List(t *testing.T) { db = rawdb.NewMemoryDatabase() mux = new(event.Feed) shards = make(map[common.Address][]uint64) - metrics = NewMetrics("sync_test") + m = metrics.NewMetrics("sync_test") rollupCfg = &rollup.EsConfig{ L2ChainID: new(big.Int).SetUint64(3333), MetricsEnable: false, @@ -624,9 +625,9 @@ func TestSync_RequestL2List(t *testing.T) { } // create local and remote hosts, set up sync client and server - localHost, syncCl := createLocalHostAndSyncClient(t, testLog, rollupCfg, db, sm, metrics, mux) + localHost, syncCl := createLocalHostAndSyncClient(t, testLog, rollupCfg, db, sm, m, mux) syncCl.loadSyncStatus() - remoteHost := createRemoteHost(t, ctx, rollupCfg, smr, metrics, testLog) + remoteHost := createRemoteHost(t, ctx, rollupCfg, smr, m, testLog) connect(t, localHost, remoteHost, shards, shards) indexes := make([]uint64, 0) @@ -650,7 +651,7 @@ func TestSaveAndLoadSyncStatus(t *testing.T) { lastKvIndex = entries*3 - 20 db = rawdb.NewMemoryDatabase() mux = new(event.Feed) - metrics = NewMetrics("sync_test") + m = 
metrics.NewMetrics("sync_test") expectedTimeUsed = time.Second * 10 rollupCfg = &rollup.EsConfig{ L2ChainID: new(big.Int).SetUint64(3333), @@ -671,7 +672,7 @@ func TestSaveAndLoadSyncStatus(t *testing.T) { l1 := NewMockL1Source(lastKvIndex, metafileName) sm := ethstorage.NewStorageManager(shardManager, l1) - _, syncCl := createLocalHostAndSyncClient(t, testLog, rollupCfg, db, sm, metrics, mux) + _, syncCl := createLocalHostAndSyncClient(t, testLog, rollupCfg, db, sm, m, mux) syncCl.loadSyncStatus() indexes := []uint64{30, 5, 8} syncCl.tasks[0].healTask.insert(indexes) @@ -748,7 +749,7 @@ func testSync(t *testing.T, chunkSize, kvSize, kvEntries uint64, localShards []u ctx, cancel = context.WithCancel(context.Background()) mux = new(event.Feed) localShardMap = make(map[common.Address][]uint64) - metrics = NewMetrics("sync_test") + m = metrics.NewMetrics("sync_test") rollupCfg = &rollup.EsConfig{ L2ChainID: new(big.Int).SetUint64(3333), MetricsEnable: true, @@ -779,7 +780,7 @@ func testSync(t *testing.T, chunkSize, kvSize, kvEntries uint64, localShards []u l1 := NewMockL1Source(lastKvIndex, metafileName) sm := ethstorage.NewStorageManager(shardManager, l1) data := makeKVStorage(contract, localShards, chunkSize, kvSize, kvEntries, lastKvIndex, common.Address{}, encodeType, metafile) - localHost, syncCl := createLocalHostAndSyncClient(t, testLog, rollupCfg, db, sm, metrics, mux) + localHost, syncCl := createLocalHostAndSyncClient(t, testLog, rollupCfg, db, sm, m, mux) syncCl.Start() finalExcludedList := remotePeers[0].excludedList @@ -799,7 +800,7 @@ func testSync(t *testing.T, chunkSize, kvSize, kvEntries uint64, localShards []u } rShardMap := make(map[common.Address][]uint64) rShardMap[contract] = rPeer.shards - remoteHost := createRemoteHost(t, ctx, rollupCfg, smr, metrics, testLog) + remoteHost := createRemoteHost(t, ctx, rollupCfg, smr, m, testLog) connect(t, localHost, remoteHost, localShardMap, rShardMap) } @@ -976,7 +977,7 @@ func TestAddPeerDuringSyncing(t *testing.T) { shards = []uint64{0} shardMap = make(map[common.Address][]uint64) excludedList = getRandomU64InRange(make(map[uint64]struct{}), 0, 15, 3) - metrics = NewMetrics("sync_test") + m = metrics.NewMetrics("sync_test") rollupCfg = &rollup.EsConfig{ L2ChainID: new(big.Int).SetUint64(3333), MetricsEnable: true, @@ -1006,7 +1007,7 @@ func TestAddPeerDuringSyncing(t *testing.T) { // fill empty to excludedList for verify KVs fillEmpty(shardManager, excludedList) - localHost, syncCl := createLocalHostAndSyncClient(t, testLog, rollupCfg, db, sm, metrics, mux) + localHost, syncCl := createLocalHostAndSyncClient(t, testLog, rollupCfg, db, sm, m, mux) syncCl.Start() data := makeKVStorage(contract, shards, defaultChunkSize, kvSize, kvEntries, lastKvIndex, common.Address{}, encodeType, metafile) @@ -1020,7 +1021,7 @@ func TestAddPeerDuringSyncing(t *testing.T) { shardMiner: common.Address{}, blobPayloads: pData, } - remoteHost0 := createRemoteHost(t, ctx, rollupCfg, smr0, metrics, testLog) + remoteHost0 := createRemoteHost(t, ctx, rollupCfg, smr0, m, testLog) connect(t, localHost, remoteHost0, shardMap, shardMap) time.Sleep(2 * time.Second) @@ -1038,7 +1039,7 @@ func TestAddPeerDuringSyncing(t *testing.T) { shardMiner: common.Address{}, blobPayloads: data[contract], } - remoteHost1 := createRemoteHost(t, ctx, rollupCfg, smr1, metrics, testLog) + remoteHost1 := createRemoteHost(t, ctx, rollupCfg, smr1, m, testLog) connect(t, localHost, remoteHost1, shardMap, shardMap) checkStall(t, 3, mux, cancel) @@ -1058,7 +1059,7 @@ func 
TestCloseSyncWhileFillEmpty(t *testing.T) { mux = new(event.Feed) shards = []uint64{0} shardMap = make(map[common.Address][]uint64) - metrics = NewMetrics("sync_test") + m = metrics.NewMetrics("sync_test") rollupCfg = &rollup.EsConfig{ L2ChainID: new(big.Int).SetUint64(3333), MetricsEnable: true, @@ -1086,7 +1087,7 @@ func TestCloseSyncWhileFillEmpty(t *testing.T) { l1 := NewMockL1Source(lastKvIndex, metafileName) sm := ethstorage.NewStorageManager(shardManager, l1) - _, syncCl := createLocalHostAndSyncClient(t, testLog, rollupCfg, db, sm, metrics, mux) + _, syncCl := createLocalHostAndSyncClient(t, testLog, rollupCfg, db, sm, m, mux) syncCl.Start() time.Sleep(10 * time.Millisecond) syncCl.Close() @@ -1112,7 +1113,7 @@ func TestAddPeerAfterSyncDone(t *testing.T) { shards = []uint64{0} shardMap = make(map[common.Address][]uint64) excludedList = make(map[uint64]struct{}) - metrics = NewMetrics("sync_test") + m = metrics.NewMetrics("sync_test") rollupCfg = &rollup.EsConfig{ L2ChainID: new(big.Int).SetUint64(3333), MetricsEnable: true, @@ -1142,7 +1143,7 @@ func TestAddPeerAfterSyncDone(t *testing.T) { // fill empty to excludedList for verify KVs fillEmpty(shardManager, excludedList) - localHost, syncCl := createLocalHostAndSyncClient(t, testLog, rollupCfg, db, sm, metrics, mux) + localHost, syncCl := createLocalHostAndSyncClient(t, testLog, rollupCfg, db, sm, m, mux) syncCl.Start() data := makeKVStorage(contract, shards, defaultChunkSize, kvSize, kvEntries, lastKvIndex, common.Address{}, encodeType, metafile) @@ -1155,7 +1156,7 @@ func TestAddPeerAfterSyncDone(t *testing.T) { shardMiner: common.Address{}, blobPayloads: data[contract], } - remoteHost0 := createRemoteHost(t, ctx, rollupCfg, smr0, metrics, testLog) + remoteHost0 := createRemoteHost(t, ctx, rollupCfg, smr0, m, testLog) connect(t, localHost, remoteHost0, shardMap, shardMap) checkStall(t, 3, mux, cancel) @@ -1173,7 +1174,7 @@ func TestAddPeerAfterSyncDone(t *testing.T) { shardMiner: common.Address{}, blobPayloads: data[contract], } - remoteHost1 := createRemoteHost(t, ctx, rollupCfg, smr1, metrics, testLog) + remoteHost1 := createRemoteHost(t, ctx, rollupCfg, smr1, m, testLog) connect(t, localHost, remoteHost1, shardMap, shardMap) time.Sleep(10 * time.Millisecond) diff --git a/ethstorage/p2p/protocol/syncclient.go b/ethstorage/p2p/protocol/syncclient.go index c02bfb9c..c43bab81 100644 --- a/ethstorage/p2p/protocol/syncclient.go +++ b/ethstorage/p2p/protocol/syncclient.go @@ -20,6 +20,7 @@ import ( "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/log" "github.com/ethstorage/go-ethstorage/ethstorage" + "github.com/ethstorage/go-ethstorage/ethstorage/metrics" prv "github.com/ethstorage/go-ethstorage/ethstorage/prover" "github.com/ethstorage/go-ethstorage/ethstorage/rollup" "github.com/libp2p/go-libp2p/core/network" @@ -182,7 +183,7 @@ type SyncClient struct { } func NewSyncClient(log log.Logger, cfg *rollup.EsConfig, newStream newStreamFn, storageManager StorageManager, params *SyncerParams, - db ethdb.Database, metrics SyncClientMetrics, mux *event.Feed) *SyncClient { + db ethdb.Database, m SyncClientMetrics, mux *event.Feed) *SyncClient { ctx, cancel := context.WithCancel(context.Background()) maxFillEmptyTaskTreads = int32(runtime.NumCPU() - 2) if maxFillEmptyTaskTreads < 1 { @@ -190,8 +191,8 @@ func NewSyncClient(log log.Logger, cfg *rollup.EsConfig, newStream newStreamFn, } maxKvCountPerReq = params.MaxRequestSize / storageManager.MaxKvSize() shardCount := len(storageManager.Shards()) - if metrics == nil { 
- metrics = NoopMetrics + if m == nil { + m = metrics.NoopMetrics } c := &SyncClient{ @@ -199,7 +200,7 @@ func NewSyncClient(log log.Logger, cfg *rollup.EsConfig, newStream newStreamFn, mux: mux, cfg: cfg, db: db, - metrics: metrics, + metrics: m, newStreamFn: newStream, idlerPeers: make(map[peer.ID]struct{}), peers: make(map[peer.ID]*Peer), diff --git a/ethstorage/p2p/protocol/syncserver.go b/ethstorage/p2p/protocol/syncserver.go index 9ea59568..51e9773b 100644 --- a/ethstorage/p2p/protocol/syncserver.go +++ b/ethstorage/p2p/protocol/syncserver.go @@ -14,6 +14,7 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" "github.com/ethstorage/go-ethstorage/ethstorage" + "github.com/ethstorage/go-ethstorage/ethstorage/metrics" "github.com/ethstorage/go-ethstorage/ethstorage/rollup" "github.com/hashicorp/golang-lru/v2/simplelru" "github.com/libp2p/go-libp2p/core/network" @@ -67,7 +68,7 @@ type SyncServer struct { globalRequestsRL *rate.Limiter } -func NewSyncServer(cfg *rollup.EsConfig, storageManager StorageManagerReader, metrics SyncServerMetrics) *SyncServer { +func NewSyncServer(cfg *rollup.EsConfig, storageManager StorageManagerReader, m SyncServerMetrics) *SyncServer { // We should never allow over 1000 different peers to churn through quickly, // so it's fine to prune rate-limit details past this. @@ -75,13 +76,13 @@ func NewSyncServer(cfg *rollup.EsConfig, storageManager StorageManagerReader, me // 3 sync requests per second, with 2 burst globalRequestsRL := rate.NewLimiter(globalServerBlocksRateLimit, globalServerBlocksBurst) - if metrics == nil { - metrics = NoopMetrics + if m == nil { + m = metrics.NoopMetrics } return &SyncServer{ cfg: cfg, storageManager: storageManager, - metrics: metrics, + metrics: m, peerRateLimits: peerRateLimits, globalRequestsRL: globalRequestsRL, } From bdba4bae4e5c1d531a43c77a9004a0b37220dca6 Mon Sep 17 00:00:00 2001 From: "iteyelmp@gmail.com" Date: Thu, 30 Nov 2023 10:53:37 +0800 Subject: [PATCH 41/68] add code lint --- .github/workflows/golangci-lint.yml | 55 ++++++ .gitignore | 1 + golangci.yml | 282 ++++++++++++++++++++++++++++ 3 files changed, 338 insertions(+) create mode 100644 .github/workflows/golangci-lint.yml create mode 100644 golangci.yml diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml new file mode 100644 index 00000000..dcf1a355 --- /dev/null +++ b/.github/workflows/golangci-lint.yml @@ -0,0 +1,55 @@ +name: golangci-lint +on: + push: + branches: + - main + - lint + pull_request: + +permissions: + contents: read + # Optional: allow read access to pull request. Use with `only-new-issues` option. + # pull-requests: read + +jobs: + golangci: + name: lint + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: '1.20' + cache: false + - name: golangci-lint + uses: golangci/golangci-lint-action@v3 + with: + # Require: The version of golangci-lint to use. + # When `install-mode` is `binary` (default) the value can be v1.2 or v1.2.3 or `latest` to use the latest version. + # When `install-mode` is `goinstall` the value can be v1.2.3, `latest`, or the hash of a commit. + version: v1.54 + + # Optional: working directory, useful for monorepos + # working-directory: somedir + + # Optional: golangci-lint command line arguments. + # + # Note: By default, the `.golangci.yml` file should be at the root of the repository. 
+ # The location of the configuration file can be changed by using `--config=` + # args: --timeout=30m --config=/my/path/.golangci.yml --issues-exit-code=0 + + # Optional: show only new issues if it's a pull request. The default value is `false`. + # only-new-issues: true + + # Optional: if set to true, then all caching functionality will be completely disabled, + # takes precedence over all other caching options. + # skip-cache: true + + # Optional: if set to true, then the action won't cache or restore ~/go/pkg. + # skip-pkg-cache: true + + # Optional: if set to true, then the action won't cache or restore ~/.cache/go-build. + # skip-build-cache: true + + # Optional: The mode to install golangci-lint. It can be 'binary' or 'goinstall'. + # install-mode: "goinstall" diff --git a/.gitignore b/.gitignore index 3cb65c12..804973cb 100644 --- a/.gitignore +++ b/.gitignore @@ -68,3 +68,4 @@ profile.cov **/yarn-error.log logs/ +/golangci_back.yml diff --git a/golangci.yml b/golangci.yml new file mode 100644 index 00000000..854d694e --- /dev/null +++ b/golangci.yml @@ -0,0 +1,282 @@ +# Options for analysis running. +run: + # The default concurrency value is the number of available CPU. + concurrency: 4 + # Timeout for analysis, e.g. 30s, 5m. + # Default: 1m + timeout: 5m + # If set we pass it to "go list -mod={option}". From "go help modules": + # If invoked with -mod=readonly, the go command is disallowed from the implicit + # automatic updating of go.mod described above. Instead, it fails when any changes + # to go.mod are needed. This setting is most useful to check that go.mod does + # not need updates, such as in a continuous integration and testing system. + # If invoked with -mod=vendor, the go command assumes that the vendor + # directory holds the correct copies of dependencies and ignores + # the dependency descriptions in go.mod. + # + # Allowed values: readonly|vendor|mod + # By default, it isn't set. + modules-download-mode: readonly + # Exit code when at least one issue was found. + # Default: 1 + issues-exit-code: 10 + # Include test files or not. + # Default: true + tests: true + # Which dirs to skip: issues from them won't be reported. + # Can use regexp here: `generated.*`, regexp is applied on full path, + # including the path prefix if one is set. + # Default value is empty list, + # but default dirs are skipped independently of this option's value (see skip-dirs-use-default). + # "/" will be replaced by current OS file path separator to properly work on Windows. + skip-dirs: + - bin + - vendor + # Enables skipping of directories: + # - vendor$, third_party$, testdata$, examples$, Godeps$, builtin$ + # Default: true + skip-dirs-use-default: true + # Which files to skip: they will be analyzed, but issues from them won't be reported. + # Default value is empty list, + # but there is no need to include all autogenerated files, + # we confidently recognize autogenerated files. + # If it's not please let us know. + # "/" will be replaced by current OS file path separator to properly work on Windows. + skip-files: + +# output configuration options +output: + # Format: colored-line-number|line-number|json|colored-tab|tab|checkstyle|code-climate|junit-xml|github-actions|teamcity + # + # Multiple can be specified by separating them by comma, output can be provided + # for each of them by separating format name and path by colon symbol. + # Output path can be either `stdout`, `stderr` or path to the file to write to. 
+ # Example: "checkstyle:report.xml,json:stdout,colored-line-number" + # + # Default: colored-line-number + format: colored-line-number + # Print lines of code with issue. + # Default: true + print-issued-lines: true + # Print linter name in the end of issue text. + # Default: true + print-linter-name: true + +# lint setting +linters-settings: + errcheck: + # Report about not checking of errors in type assertions: `a := b.(MyStruct)`. + # Such cases aren't reported by default. + # Default: false + check-type-assertions: false + # report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`. + # Such cases aren't reported by default. + # Default: false + check-blank: true + # DEPRECATED comma-separated list of pairs of the form pkg:regex + # + # the regex is used to ignore names within pkg. (default "fmt:.*"). + # see https://github.com/kisielk/errcheck#the-deprecated-method for details + ignore: fmt:.*,io/ioutil:^Read.* + + funlen: + # Checks the number of lines in a function. + # If lower than 0, disable the check. + # Default: 60 + lines: 60 + # Checks the number of statements in a function. + # If lower than 0, disable the check. + # Default: 40 + statements: 40 + + govet: + # Report about shadowed variables. + # Default: false + check-shadowing: true + # Enable analyzers by name. + # (in addition to default: + # appends, asmdecl, assign, atomic, bools, buildtag, cgocall, composites, copylocks, defers, directive, errorsas, + # framepointer, httpresponse, ifaceassert, loopclosure, lostcancel, nilfunc, printf, shift, sigchanyzer, slog, + # stdmethods, stringintconv, structtag, testinggoroutine, tests, timeformat, unmarshal, unreachable, unsafeptr, + # unusedresult + # ). + # Run `go tool vet help` to see all analyzers. + # Default: [] + enable: + # - atomicalign + enable-all: false + disable: + # - shadow + disable-all: false + + golint: + # minimal confidence for issues, default is 0.8 + min-confidence: 0.8 + + gocyclo: + # Minimal code complexity to report. + # Default: 30 (but we recommend 10-20) + min-complexity: 35 + + gocognit: + # Minimal code complexity to report. + # Default: 30 (but we recommend 10-20) + min-complexity: 35 + + maligned: + # Print struct with more effective memory layout or not. + # Default: false + suggest-new: true + auto-fix: true + + dupl: + # tokens count to trigger issue, 150 by default + threshold: 100 + + goconst: + # minimal length of string constant, 3 by default + min-len: 3 + # minimal occurrences count to trigger, 3 by default + min-occurrences: 3 + + depguard: + list-type: blacklist + include-go-root: false + + lll: + line-length: 240 + tab-width: 1 + + misspell: + # Correct spellings using locale preferences for US or UK. + # Default is to use a neutral variety of English. + # Setting locale to US will correct the British spelling of 'colour' to 'color'. + locale: US + ignore-words: + - someword + + nakedret: + # Make an issue if func has more lines of code than this setting, and it has naked returns. 
+ # Default: 30 + max-func-lines: 31 + + unused: + check-exported: true + unparam: + check-exported: false + gocritic: + enabled-checks: + disabled-checks: + enabled-tags: + - performance + settings: # settings passed to gocritic + captLocal: # must be valid enabled check name + paramsOnly: true + rangeValCopy: + sizeThreshold: 32 + +linters: + enable: + - bodyclose + - deadcode + - depguard + - dogsled + - errcheck + - gochecknoinits + - goconst + - gocritic + - gocyclo + - gofmt + - goimports + - gosec + - gosimple + - govet + - ineffassign + - interfacer + - misspell + - scopelint + - staticcheck + - typecheck + - unconvert + - unparam + - varcheck + - godox + - structcheck + - maligned + enable-all: false + disable: + - unused + - stylecheck + - funlen + - whitespace + - dupl + - golint + - lll + - wsl + - nakedret + - gochecknoglobals + disable-all: false + presets: + - bugs + fast: false + +issues: + # Show only new issues: if there are unstaged changes or untracked files, + # only those changes are analyzed, else only changes in HEAD~ are analyzed. + # It's a super-useful option for integration of golangci-lint into existing large codebase. + # It's not practical to fix all existing issues at the moment of integration: + # much better don't allow issues in new code. + # + # Default: false. + new: false + # List of regexps of issue texts to exclude. + # + # But independently of this option we use default exclude patterns, + # it can be disabled by `exclude-use-default: false`. + # To list all excluded by default patterns execute `golangci-lint run --help` + # + # Default: https://golangci-lint.run/usage/false-positives/#default-exclusions + exclude: + - /bin + # Excluding configuration per-path, per-linter, per-text and per-source + exclude-rules: + # Exclude some linters from running on tests files. + - path: _test\.go + linters: + - gocyclo + - errcheck + - dupl + - gosec + # Run some linter only for test files by excluding its issues for everything else. + - path-except: _test\.go + linters: + - forbidigo + # Exclude known linters from partially hard-vendored code, + # which is impossible to exclude via `nolint` comments. + # `/` will be replaced by current OS file path separator to properly work on Windows. + - path: internal/hmac/ + text: "weak cryptographic primitive" + linters: + - gosec + # Exclude some `staticcheck` messages. + - linters: + - staticcheck + text: "SA9003:" + # Exclude `lll` issues for long lines with `go:generate`. + - linters: + - lll + source: "^//go:generate " + # Independently of option `exclude` we use default exclude patterns, + # it can be disabled by this option. + # To list all excluded by default patterns execute `golangci-lint run --help`. + # Default: true. + exclude-use-default: false + # Maximum issues count per one linter. + # Set to 0 to disable. + # Default: 50 + max-issues-per-linter: 0 + # Maximum count of issues with the same text. + # Set to 0 to disable. 
+ # Default: 3 + max-same-issues: 0 + From 9038472cbc0c0a612ebb933b30352f1590fc3a8c Mon Sep 17 00:00:00 2001 From: "iteyelmp@gmail.com" Date: Thu, 30 Nov 2023 14:11:43 +0800 Subject: [PATCH 42/68] open gofmt --- golangci.yml | 69 +++++++++++++++++++++------------------------------- 1 file changed, 28 insertions(+), 41 deletions(-) diff --git a/golangci.yml b/golangci.yml index 854d694e..624dcbd6 100644 --- a/golangci.yml +++ b/golangci.yml @@ -176,49 +176,36 @@ linters-settings: sizeThreshold: 32 linters: + disable-all: true + # Enable specific linter + # https://golangci-lint.run/usage/linters/#enabled-by-default enable: - - bodyclose - - deadcode - - depguard - - dogsled - - errcheck - - gochecknoinits - - goconst - - gocritic - - gocyclo +# - bodyclose +# - deadcode +# - depguard +# - dogsled +# - errcheck +# - gochecknoinits +# - goconst +# - gocritic +# - gocyclo - gofmt - - goimports - - gosec - - gosimple - - govet - - ineffassign - - interfacer - - misspell - - scopelint - - staticcheck - - typecheck - - unconvert - - unparam - - varcheck - - godox - - structcheck - - maligned - enable-all: false - disable: - - unused - - stylecheck - - funlen - - whitespace - - dupl - - golint - - lll - - wsl - - nakedret - - gochecknoglobals - disable-all: false - presets: - - bugs - fast: false +# - goimports +# - gosec +# - gosimple +# - govet +# - ineffassign +# - interfacer +# - misspell +# - scopelint +# - staticcheck +# - typecheck +# - unconvert +# - unparam +# - varcheck +# - godox +# - structcheck +# - maligned issues: # Show only new issues: if there are unstaged changes or untracked files, From 404f6389e1a3376918688084fcdceb224876151f Mon Sep 17 00:00:00 2001 From: Qiang Zhu Date: Thu, 30 Nov 2023 14:19:27 +0800 Subject: [PATCH 43/68] resolve comment --- ethstorage/storage_manager.go | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/ethstorage/storage_manager.go b/ethstorage/storage_manager.go index 570b5727..6a4cfc97 100644 --- a/ethstorage/storage_manager.go +++ b/ethstorage/storage_manager.go @@ -469,14 +469,12 @@ func (s *StorageManager) getKvMetas(kvIndices []uint64) ([][32]byte, error) { meta, ok := s.blobMetas[i] if ok { metas = append(metas, meta) + } else if i >= s.lastKvIdx { + meta := [32]byte{} + new(big.Int).SetInt64(int64(i)).FillBytes(meta[0:5]) + metas = append(metas, meta) } else { - if i >= s.lastKvIdx { - meta := [32]byte{} - new(big.Int).SetInt64(int64(i)).FillBytes(meta[0:5]) - metas = append(metas, meta) - } else { - return nil, errors.New("meta not found in blobMetas") - } + return nil, errors.New("meta not found in blobMetas") } } return metas, nil From f2de3e4917c13b731c2d12b7edb1b5e6ffc1e1b4 Mon Sep 17 00:00:00 2001 From: "iteyelmp@gmail.com" Date: Thu, 30 Nov 2023 14:24:17 +0800 Subject: [PATCH 44/68] change path --- .github/workflows/golangci-lint.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index dcf1a355..d4e5dc17 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -37,6 +37,7 @@ jobs: # Note: By default, the `.golangci.yml` file should be at the root of the repository. # The location of the configuration file can be changed by using `--config=` # args: --timeout=30m --config=/my/path/.golangci.yml --issues-exit-code=0 + golangci-lint: --config=/golangci.yml # Optional: show only new issues if it's a pull request. The default value is `false`. 
# only-new-issues: true From c3ac99b0b0a79264f7f60771b8425bba80bb5bee Mon Sep 17 00:00:00 2001 From: "iteyelmp@gmail.com" Date: Thu, 30 Nov 2023 14:25:42 +0800 Subject: [PATCH 45/68] change path --- .github/workflows/golangci-lint.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index d4e5dc17..ab0d9ca8 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -37,7 +37,7 @@ jobs: # Note: By default, the `.golangci.yml` file should be at the root of the repository. # The location of the configuration file can be changed by using `--config=` # args: --timeout=30m --config=/my/path/.golangci.yml --issues-exit-code=0 - golangci-lint: --config=/golangci.yml + golangci-lint: --config=../../golangci.yml # Optional: show only new issues if it's a pull request. The default value is `false`. # only-new-issues: true From d4904d7a634e381cf208bc43ba7e1a33ad94f81e Mon Sep 17 00:00:00 2001 From: "iteyelmp@gmail.com" Date: Thu, 30 Nov 2023 14:33:44 +0800 Subject: [PATCH 46/68] change path --- .github/workflows/golangci-lint.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index ab0d9ca8..b34dcb38 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -37,7 +37,7 @@ jobs: # Note: By default, the `.golangci.yml` file should be at the root of the repository. # The location of the configuration file can be changed by using `--config=` # args: --timeout=30m --config=/my/path/.golangci.yml --issues-exit-code=0 - golangci-lint: --config=../../golangci.yml + golangci-lint: --config=./golangci.yml # Optional: show only new issues if it's a pull request. The default value is `false`. # only-new-issues: true From 710c2c07da1232f6aad375cfb9a0bddc46cb972b Mon Sep 17 00:00:00 2001 From: "iteyelmp@gmail.com" Date: Thu, 30 Nov 2023 14:48:00 +0800 Subject: [PATCH 47/68] change cofig --- .github/workflows/golangci-lint.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index b34dcb38..974546b1 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -37,7 +37,7 @@ jobs: # Note: By default, the `.golangci.yml` file should be at the root of the repository. # The location of the configuration file can be changed by using `--config=` # args: --timeout=30m --config=/my/path/.golangci.yml --issues-exit-code=0 - golangci-lint: --config=./golangci.yml + args: --config=./golangci.yml # Optional: show only new issues if it's a pull request. The default value is `false`. 
# only-new-issues: true From fcda943dbd1305d10ea2dbbd6a7004e15336c817 Mon Sep 17 00:00:00 2001 From: "iteyelmp@gmail.com" Date: Thu, 30 Nov 2023 14:56:22 +0800 Subject: [PATCH 48/68] fix fmt error --- cmd/es-utils/utils/utils.go | 6 +- ethstorage/downloader/blob_cache.go | 74 +++++++-------- ethstorage/downloader/config.go | 6 +- ethstorage/downloader/downloader.go | 141 ++++++++++++++-------------- ethstorage/eth/beacon_client.go | 24 ++--- ethstorage/eth/polling_client.go | 1 - ethstorage/node/es_api.go | 12 +-- ethstorage/node/eth_api.go | 9 +- ethstorage/node/server.go | 10 +- ethstorage/p2p/host.go | 1 - ethstorage/signer/client.go | 2 +- ethstorage/storage_manager.go | 4 +- 12 files changed, 143 insertions(+), 147 deletions(-) diff --git a/cmd/es-utils/utils/utils.go b/cmd/es-utils/utils/utils.go index 83bd003f..1a2834a8 100644 --- a/cmd/es-utils/utils/utils.go +++ b/cmd/es-utils/utils/utils.go @@ -57,7 +57,7 @@ func SendBlobTx( h := crypto.Keccak256Hash([]byte(`upfrontPayment()`)) callMsg := ethereum.CallMsg{ - To: &to, + To: &to, Data: h[:], } bs, err := client.CallContract(context.Background(), callMsg, new(big.Int).SetInt64(-2)) @@ -76,8 +76,8 @@ func SendBlobTx( log.Crit("Invalid value param") } - if res[0].(* big.Int).Cmp(val) == 1 { - val = res[0].(* big.Int) + if res[0].(*big.Int).Cmp(val) == 1 { + val = res[0].(*big.Int) } value256, overflow := uint256.FromBig(val) diff --git a/ethstorage/downloader/blob_cache.go b/ethstorage/downloader/blob_cache.go index 45468e6f..bdcae02c 100644 --- a/ethstorage/downloader/blob_cache.go +++ b/ethstorage/downloader/blob_cache.go @@ -13,61 +13,61 @@ import ( ) type BlobCache struct { - blocks map[common.Hash]*blockBlobs - mu sync.RWMutex + blocks map[common.Hash]*blockBlobs + mu sync.RWMutex } func NewBlobCache() *BlobCache { - return &BlobCache{ - blocks: map[common.Hash]*blockBlobs{}, - } + return &BlobCache{ + blocks: map[common.Hash]*blockBlobs{}, + } } func (c *BlobCache) SetBlockBlobs(block *blockBlobs) { - c.mu.Lock() - defer c.mu.Unlock() - c.blocks[block.hash] = block + c.mu.Lock() + defer c.mu.Unlock() + c.blocks[block.hash] = block } func (c *BlobCache) Blobs(hash common.Hash) []blob { - c.mu.RLock() - defer c.mu.RUnlock() + c.mu.RLock() + defer c.mu.RUnlock() - if _, exist := c.blocks[hash]; !exist { - return nil - } + if _, exist := c.blocks[hash]; !exist { + return nil + } - res := []blob{} - for _, blob := range(c.blocks[hash].blobs) { - res = append(res, *blob) - } - return res + res := []blob{} + for _, blob := range c.blocks[hash].blobs { + res = append(res, *blob) + } + return res } func (c *BlobCache) GetKeyValueByIndex(idx uint64, hash common.Hash) []byte { - c.mu.RLock() - defer c.mu.RUnlock() - - for _, block := range(c.blocks) { - for _, blob := range(block.blobs) { - if blob.kvIndex.Uint64() == idx && bytes.Equal(blob.hash[0:ethstorage.HashSizeInContract], hash[0:ethstorage.HashSizeInContract]) { - return blob.data - } - } - } - return nil + c.mu.RLock() + defer c.mu.RUnlock() + + for _, block := range c.blocks { + for _, blob := range block.blobs { + if blob.kvIndex.Uint64() == idx && bytes.Equal(blob.hash[0:ethstorage.HashSizeInContract], hash[0:ethstorage.HashSizeInContract]) { + return blob.data + } + } + } + return nil } // TODO: @Qiang An edge case that may need to be handled when Ethereum block is NOT finalized for a long time // We may need to add a counter in SetBlockBlobs(), if the counter is greater than a threshold which means // there has been a long time after last Cleanup, so we need to Cleanup anyway 
in SetBlockBlobs. func (c *BlobCache) Cleanup(finalized uint64) { - c.mu.Lock() - defer c.mu.Unlock() + c.mu.Lock() + defer c.mu.Unlock() - for hash, block := range(c.blocks) { - if block.number <= finalized { - delete(c.blocks, hash) - } - } -} \ No newline at end of file + for hash, block := range c.blocks { + if block.number <= finalized { + delete(c.blocks, hash) + } + } +} diff --git a/ethstorage/downloader/config.go b/ethstorage/downloader/config.go index b2cfe34c..b094f199 100644 --- a/ethstorage/downloader/config.go +++ b/ethstorage/downloader/config.go @@ -4,7 +4,7 @@ package downloader type Config struct { - DownloadStart int64 // which block should we download the blobs from - DownloadDump string // where to dump the download blobs - DownloadThreadNum int // how many threads that will be used to download the blobs into storage file + DownloadStart int64 // which block should we download the blobs from + DownloadDump string // where to dump the download blobs + DownloadThreadNum int // how many threads that will be used to download the blobs into storage file } diff --git a/ethstorage/downloader/downloader.go b/ethstorage/downloader/downloader.go index 968c9c7a..96dfb81c 100644 --- a/ethstorage/downloader/downloader.go +++ b/ethstorage/downloader/downloader.go @@ -26,20 +26,20 @@ import ( ) const ( - TrackLatest = iota // 0 - TrackSafe // 1 - TrackFinalized // 2 + TrackLatest = iota // 0 + TrackSafe // 1 + TrackFinalized // 2 - downloadBatchSize = 64 // 2 epoch + downloadBatchSize = 64 // 2 epoch ) var ( - downloaderPrefix = []byte("dl-") - lastDownloadKey = []byte("last-download-block") + downloaderPrefix = []byte("dl-") + lastDownloadKey = []byte("last-download-block") ) type Downloader struct { - Cache *BlobCache + Cache *BlobCache // latestHead and finalizedHead are shared among multiple threads and thus locks must be required when being accessed // others are only accessed by the downloader thread so it is safe to access them in DL thread without locks @@ -53,62 +53,61 @@ type Downloader struct { latestHead int64 dumpDir string minDurationForBlobsRequest uint64 - + // Request to download new blobs - dlLatestReq chan struct{} - dlFinalizedReq chan struct{} + dlLatestReq chan struct{} + dlFinalizedReq chan struct{} - log log.Logger - done chan struct{} - wg sync.WaitGroup - mu sync.Mutex + log log.Logger + done chan struct{} + wg sync.WaitGroup + mu sync.Mutex } type blob struct { - kvIndex *big.Int - kvSize *big.Int - hash common.Hash - data []byte + kvIndex *big.Int + kvSize *big.Int + hash common.Hash + data []byte } type blockBlobs struct { - timestamp uint64 - number uint64 - hash common.Hash - blobs []*blob + timestamp uint64 + number uint64 + hash common.Hash + blobs []*blob } - func NewDownloader( - l1Source *eth.PollingClient, + l1Source *eth.PollingClient, l1Beacon *eth.BeaconClient, db ethdb.Database, - sm *ethstorage.StorageManager, - downloadStart int64, - downloadDump string, + sm *ethstorage.StorageManager, + downloadStart int64, + downloadDump string, minDurationForBlobsRequest uint64, downloadThreadNum int, log log.Logger, -) *Downloader{ +) *Downloader { sm.DownloadThreadNum = downloadThreadNum return &Downloader{ - Cache: NewBlobCache(), - l1Source: l1Source, - l1Beacon: l1Beacon, - db: db, - sm: sm, - dumpDir: downloadDump, + Cache: NewBlobCache(), + l1Source: l1Source, + l1Beacon: l1Beacon, + db: db, + sm: sm, + dumpDir: downloadDump, minDurationForBlobsRequest: minDurationForBlobsRequest, - dlLatestReq: make(chan struct{}, 1), - dlFinalizedReq: make(chan 
struct{}, 1), - log: log, - done: make(chan struct{}), - lastDownloadBlock: downloadStart, + dlLatestReq: make(chan struct{}, 1), + dlFinalizedReq: make(chan struct{}, 1), + log: log, + done: make(chan struct{}), + lastDownloadBlock: downloadStart, } } // Start starts up the state loop. -func (s *Downloader) Start() error { +func (s *Downloader) Start() error { // user does NOT specify a download start in the flag if s.lastDownloadBlock == 0 { bs, err := s.db.Get(append(downloaderPrefix, lastDownloadKey...)) @@ -158,7 +157,7 @@ func (s *Downloader) OnL1Finalized(finalized uint64) { } s.finalizedHead = int64(finalized) s.mu.Unlock() - + select { case s.dlFinalizedReq <- struct{}{}: return @@ -169,7 +168,7 @@ func (s *Downloader) OnL1Finalized(finalized uint64) { } func (s *Downloader) OnNewL1Head(head eth.L1BlockRef) { - s.mu.Lock() + s.mu.Lock() if s.latestHead > int64(head.Number) { s.log.Info("The tracking head is greater than new one, a reorg may happen", "tracking", s.latestHead, "new", head) } @@ -209,7 +208,7 @@ func (s *Downloader) downloadToCache() { return } end := s.latestHead - start := s.lastCacheBlock + start := s.lastCacheBlock if start == 0 { start = s.finalizedHead } @@ -217,8 +216,8 @@ func (s *Downloader) downloadToCache() { // @Qiang devnet-4 have issues to get blob event for the latest block, so if we need roll back to devnet-4 // we may need to change it to s.downloadRange(start, end, true) - _, err := s.downloadRange(start + 1, end, true) - + _, err := s.downloadRange(start+1, end, true) + if err == nil { s.lastCacheBlock = end } else { @@ -231,9 +230,9 @@ func (s *Downloader) download() { trackHead := s.finalizedHead s.mu.Unlock() - if (s.lastDownloadBlock > 0) && (trackHead - s.lastDownloadBlock > int64(s.minDurationForBlobsRequest)) { + if (s.lastDownloadBlock > 0) && (trackHead-s.lastDownloadBlock > int64(s.minDurationForBlobsRequest)) { // TODO: @Qiang we can also enter into an recovery mode (e.g., scan local blobs to obtain a heal list, more complicated, will do later) - prompt := "Ethereum only keep blobs for one month, but it has been over one month since last blob download." + + prompt := "Ethereum only keep blobs for one month, but it has been over one month since last blob download." + "You may need to restart this node with full re-sync" s.log.Error(prompt) return @@ -270,14 +269,14 @@ func (s *Downloader) download() { // save lastDownloadedBlock into database bs := make([]byte, 8) binary.LittleEndian.PutUint64(bs, uint64(end)) - + err = s.db.Put(append(downloaderPrefix, lastDownloadKey...), bs) if err != nil { s.log.Error("Save lastDownloadedBlock into db error", "err", err) return } s.log.Info("LastDownloadedBlock saved into db", "lastDownloadedBlock", end) - + s.dumpBlobsIfNeeded(blobs) s.lastDownloadBlock = end @@ -288,17 +287,17 @@ func (s *Downloader) download() { s.Cache.Cleanup(uint64(trackHead)) } -// The entire downloading process consists of two phases: +// The entire downloading process consists of two phases: // 1. Downloading the blobs into the cache when they are not finalized, with the option toCache set to true. // 2. Writing the blobs into the shard file when they are finalized, with the option toCache set to false. // we will attempt to read the blobs from the cache initially. If they don't exist in the cache, we will download them instead. 
func (s *Downloader) downloadRange(start int64, end int64, toCache bool) ([]blob, error) { ts := time.Now() - + if end < start { end = start } - + events, err := s.l1Source.FilterLogsByBlockRange(big.NewInt(int64(start)), big.NewInt(int64(end))) if err != nil { return nil, err @@ -306,7 +305,7 @@ func (s *Downloader) downloadRange(start int64, end int64, toCache bool) ([]blob elBlocks, err := s.eventsToBlocks(events) if err != nil { return nil, err - } + } blobs := []blob{} for _, elBlock := range elBlocks { // attempt to read the blobs from the cache first @@ -317,22 +316,22 @@ func (s *Downloader) downloadRange(start int64, end int64, toCache bool) ([]blob continue } else { s.log.Info( - "Don't find blob in the cache, will try to download directly", - "blockNumber", elBlock.number, - "start", start, + "Don't find blob in the cache, will try to download directly", + "blockNumber", elBlock.number, + "start", start, "end", end, "toCache", toCache, ) } - + clBlobs, err := s.l1Beacon.DownloadBlobs(s.l1Beacon.Timestamp2Slot(elBlock.timestamp)) if err != nil { s.log.Error("L1 beacon download blob error", "err", err) return nil, err } - + for _, elBlob := range elBlock.blobs { - clBlob, exists := clBlobs[elBlob.hash]; + clBlob, exists := clBlobs[elBlob.hash] if !exists { s.log.Error("Did not find the event specified blob in the CL") @@ -344,7 +343,7 @@ func (s *Downloader) downloadRange(start int64, end int64, toCache bool) ([]blob s.Cache.SetBlockBlobs(elBlock) } } - + s.log.Info("Download range", "cache", toCache, "start", start, "end", end, "blobNumber", len(blobs), "duration(ms)", time.Since(ts).Milliseconds()) return blobs, nil @@ -352,7 +351,7 @@ func (s *Downloader) downloadRange(start int64, end int64, toCache bool) ([]blob func (s *Downloader) dumpBlobsIfNeeded(blobs []blob) { if s.dumpDir != "" { - for _, blob := range blobs { + for _, blob := range blobs { fileName := filepath.Join(s.dumpDir, fmt.Sprintf("%s.dat", hex.EncodeToString(blob.data[:5]))) f, err := os.Create(fileName) if err != nil { @@ -360,7 +359,7 @@ func (s *Downloader) dumpBlobsIfNeeded(blobs []blob) { return } defer f.Close() - + writer := bufio.NewWriter(f) writer.WriteString(string(blob.data)) writer.Flush() @@ -381,23 +380,23 @@ func (s *Downloader) eventsToBlocks(events []types.Log) ([]*blockBlobs, error) { lastBlockNumber = event.BlockNumber blocks = append(blocks, &blockBlobs{ timestamp: res.Time, - number: event.BlockNumber, - hash: event.BlockHash, - blobs: []*blob{}, - }) + number: event.BlockNumber, + hash: event.BlockHash, + blobs: []*blob{}, + }) } - block := blocks[len(blocks) - 1] + block := blocks[len(blocks)-1] hash := common.Hash{} copy(hash[:], event.Topics[3][:]) - + blob := blob{ kvIndex: big.NewInt(0).SetBytes(event.Topics[1][:]), - kvSize: big.NewInt(0).SetBytes(event.Topics[2][:]), - hash: hash, + kvSize: big.NewInt(0).SetBytes(event.Topics[2][:]), + hash: hash, } block.blobs = append(block.blobs, &blob) } return blocks, nil -} \ No newline at end of file +} diff --git a/ethstorage/eth/beacon_client.go b/ethstorage/eth/beacon_client.go index 074563ee..4605e863 100644 --- a/ethstorage/eth/beacon_client.go +++ b/ethstorage/eth/beacon_client.go @@ -14,15 +14,15 @@ import ( ) type BeaconClient struct { - beaconURL string - basedTime uint64 - basedSlot uint64 - slotTime uint64 + beaconURL string + basedTime uint64 + basedSlot uint64 + slotTime uint64 } type Blob struct { - VersionedHash common.Hash - Data []byte + VersionedHash common.Hash + Data []byte } type beaconBlobs struct { @@ -45,13 +45,13 @@ 
func NewBeaconClient(url string, basedTime uint64, basedSlot uint64, slotTime ui beaconURL: url, basedTime: basedTime, basedSlot: basedSlot, - slotTime: slotTime, + slotTime: slotTime, } return res } func (c *BeaconClient) Timestamp2Slot(time uint64) uint64 { - return (time - c.basedTime) / c.slotTime + c.basedSlot + return (time-c.basedTime)/c.slotTime + c.basedSlot } func (c *BeaconClient) DownloadBlobs(slot uint64) (map[common.Hash]Blob, error) { @@ -82,19 +82,19 @@ func (c *BeaconClient) DownloadBlobs(slot uint64) (map[common.Hash]Blob, error) if err != nil { return nil, err } - res[hash] = Blob{VersionedHash: hash, Data:asciiBytes} + res[hash] = Blob{VersionedHash: hash, Data: asciiBytes} } return res, nil } func kzgToVersionedHash(commit string) (common.Hash, error) { - b, err := hex.DecodeString(commit[2:]); + b, err := hex.DecodeString(commit[2:]) if err != nil { return common.Hash{}, err } c := [48]byte{} - copy(c[:], b[:]) + copy(c[:], b[:]) return common.Hash(eth.KZGToVersionedHash(c)), nil -} \ No newline at end of file +} diff --git a/ethstorage/eth/polling_client.go b/ethstorage/eth/polling_client.go index c152b218..10bc2eb5 100644 --- a/ethstorage/eth/polling_client.go +++ b/ethstorage/eth/polling_client.go @@ -270,4 +270,3 @@ func (w *PollingClient) GetKvMetas(kvIndices []uint64, blockNumber int64) ([][32 return res[0].([][32]byte), nil } - diff --git a/ethstorage/node/es_api.go b/ethstorage/node/es_api.go index b4fbce7c..a93ea835 100644 --- a/ethstorage/node/es_api.go +++ b/ethstorage/node/es_api.go @@ -42,20 +42,20 @@ func NewESAPI(config *RPCConfig, sm *ethstorage.StorageManager, dl *downloader.D func (api *esAPI) GetBlob(kvIndex uint64, blobHash common.Hash, decodeType DecodeType, off, size uint64) (hexutil.Bytes, error) { blob := api.dl.Cache.GetKeyValueByIndex(kvIndex, blobHash) - + if blob == nil { commit, _, err := api.sm.TryReadMeta(kvIndex) if err != nil { return nil, err } - + if !bytes.Equal(commit[0:ethstorage.HashSizeInContract], blobHash[0:ethstorage.HashSizeInContract]) { return nil, errors.New("commits not same") } - + readCommit := common.Hash{} copy(readCommit[0:ethstorage.HashSizeInContract], blobHash[0:ethstorage.HashSizeInContract]) - + var found bool blob, found, err = api.sm.TryRead(kvIndex, int(api.sm.MaxKvSize()), readCommit) if err != nil { @@ -72,9 +72,9 @@ func (api *esAPI) GetBlob(kvIndex uint64, blobHash common.Hash, decodeType Decod ret = utils.DecodeBlob(blob) } - if len(ret) < int(off + size) { + if len(ret) < int(off+size) { return nil, errors.New("beyond the range of blob size") } - return ret[off:off+size], nil + return ret[off : off+size], nil } diff --git a/ethstorage/node/eth_api.go b/ethstorage/node/eth_api.go index 94a86d9a..39b3be13 100644 --- a/ethstorage/node/eth_api.go +++ b/ethstorage/node/eth_api.go @@ -20,7 +20,7 @@ type ethAPI struct { } const ( - ESChainID = 333 + ESChainID = 333 defaultCallTimeout = 2 * time.Second ) @@ -28,7 +28,6 @@ var ( rpcCli *rpc.Client ) - func NewETHAPI(config *RPCConfig, log log.Logger) *ethAPI { return ðAPI{ rpcCfg: config, @@ -98,9 +97,9 @@ func (api *ethAPI) Call(ctx context.Context, args TransactionArgs, blockNrOrHash defer cancel() var hex hexutil.Bytes err = rpcCli.CallContext( - callCtx, - &hex, - "eth_esCall", + callCtx, + &hex, + "eth_esCall", args, blockNrOrHash) return hex, err } diff --git a/ethstorage/node/server.go b/ethstorage/node/server.go index eba8dd57..cc5fd87e 100644 --- a/ethstorage/node/server.go +++ b/ethstorage/node/server.go @@ -30,11 +30,11 @@ type rpcServer struct { } 
func newRPCServer( - ctx context.Context, - rpcCfg *RPCConfig, - sm *ethstorage.StorageManager, - dl *downloader.Downloader, - log log.Logger, + ctx context.Context, + rpcCfg *RPCConfig, + sm *ethstorage.StorageManager, + dl *downloader.Downloader, + log log.Logger, appVersion string, ) (*rpcServer, error) { esAPI := NewESAPI(rpcCfg, sm, dl, log) diff --git a/ethstorage/p2p/host.go b/ethstorage/p2p/host.go index 27689bfa..a435a655 100644 --- a/ethstorage/p2p/host.go +++ b/ethstorage/p2p/host.go @@ -247,7 +247,6 @@ func YamuxC() libp2p.Option { return libp2p.Muxer("/yamux/1.0.0", yamux.DefaultTransport) } - func NoiseC() libp2p.Option { return libp2p.Security(noise.ID, noise.New) } diff --git a/ethstorage/signer/client.go b/ethstorage/signer/client.go index 485f0e55..2c6e5917 100644 --- a/ethstorage/signer/client.go +++ b/ethstorage/signer/client.go @@ -52,7 +52,7 @@ func (s *SignerClient) pingVersion() (string, error) { } func (s *SignerClient) SignTransaction(ctx context.Context, chainId *big.Int, from common.Address, tx *types.Transaction) (*types.Transaction, error) { - args := NewTransactionArgsFromTransaction(chainId, from, tx) + args := NewTransactionArgsFromTransaction(chainId, from, tx) signed := &signTransactionResult{} if err := s.client.CallContext(ctx, &signed, "account_signTransaction", args); err != nil { return nil, fmt.Errorf("account_signTransaction failed: %w", err) diff --git a/ethstorage/storage_manager.go b/ethstorage/storage_manager.go index 9a96c6f3..52a713c7 100644 --- a/ethstorage/storage_manager.go +++ b/ethstorage/storage_manager.go @@ -102,8 +102,8 @@ func (s *StorageManager) DownloadFinished(newL1 int64, kvIndices []uint64, blobs wg.Wait() for i := 0; i < taskIdx; i++ { - res := <- chanRes - if (res != nil) { + res := <-chanRes + if res != nil { return res } } From 5eae1c9a62744cc43811f97879a6b00b212a1e46 Mon Sep 17 00:00:00 2001 From: "iteyelmp@gmail.com" Date: Thu, 30 Nov 2023 15:33:14 +0800 Subject: [PATCH 49/68] remove lint branch --- .github/workflows/golangci-lint.yml | 1 - golangci.yml | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index 974546b1..b6690dc9 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -3,7 +3,6 @@ on: push: branches: - main - - lint pull_request: permissions: diff --git a/golangci.yml b/golangci.yml index 624dcbd6..99657d40 100644 --- a/golangci.yml +++ b/golangci.yml @@ -72,7 +72,7 @@ linters-settings: # report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`. # Such cases aren't reported by default. # Default: false - check-blank: true + check-blank: false # DEPRECATED comma-separated list of pairs of the form pkg:regex # # the regex is used to ignore names within pkg. (default "fmt:.*"). 
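As of patch 49, the net effect of the golangci-lint workflow changes above is that the lint step looks roughly like the sketch below. This is a condensed reconstruction from the hunks in patches 41 and 44-49, not a new file in the series; the unchanged checkout and setup-go steps from patch 41 are omitted, and indentation is illustrative.

      - name: golangci-lint
        uses: golangci/golangci-lint-action@v3
        with:
          # version pinned in patch 41
          version: v1.54
          # config key and path as settled by patch 47 (the repo keeps the
          # config at ./golangci.yml rather than the action's default .golangci.yml)
          args: --config=./golangci.yml

Locally, the same check should be reproducible with `golangci-lint run --config=./golangci.yml`, assuming a golangci-lint install matching the v1.54 version pinned above.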
From 334c65a5ea36d180f23861b398b1d6f0ee97eba7 Mon Sep 17 00:00:00 2001 From: "iteyelmp@gmail.com" Date: Thu, 30 Nov 2023 15:57:36 +0800 Subject: [PATCH 50/68] fix merge fmt error --- .github/workflows/golangci-lint.yml | 1 - ethstorage/flags/p2p_flags.go | 4 ++-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index b6690dc9..bba70d9f 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -3,7 +3,6 @@ on: push: branches: - main - pull_request: permissions: contents: read diff --git a/ethstorage/flags/p2p_flags.go b/ethstorage/flags/p2p_flags.go index 37b972a2..9d88c0c1 100644 --- a/ethstorage/flags/p2p_flags.go +++ b/ethstorage/flags/p2p_flags.go @@ -170,8 +170,8 @@ var ( EnvVar: p2pEnv("MAX_CONCURRENCY"), } MetaDownloadBatchSize = cli.Uint64Flag{ - Name: "p2p.meta.download.batch", - Usage: "Batch size for requesting the blob metadatas stored in the storage contract in one RPC call.", + Name: "p2p.meta.download.batch", + Usage: "Batch size for requesting the blob metadatas stored in the storage contract in one RPC call.", Required: false, Value: 8000, // The upper limit of devnet-11 geth node EnvVar: p2pEnv("META_BATCH_SIZE"), From dad2e8a5a16872e290523a58bd1631b4597b0522 Mon Sep 17 00:00:00 2001 From: "iteyelmp@gmail.com" Date: Thu, 30 Nov 2023 16:13:22 +0800 Subject: [PATCH 51/68] remove ignore --- .gitignore | 1 - 1 file changed, 1 deletion(-) diff --git a/.gitignore b/.gitignore index 804973cb..3cb65c12 100644 --- a/.gitignore +++ b/.gitignore @@ -68,4 +68,3 @@ profile.cov **/yarn-error.log logs/ -/golangci_back.yml From dace0caaa60a8c44b063944f7cc153ede83f8a10 Mon Sep 17 00:00:00 2001 From: pingke Date: Thu, 30 Nov 2023 20:20:56 +0800 Subject: [PATCH 52/68] resolve comments --- ethstorage/metrics/metrics.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ethstorage/metrics/metrics.go b/ethstorage/metrics/metrics.go index f4236426..b0b1dac2 100644 --- a/ethstorage/metrics/metrics.go +++ b/ethstorage/metrics/metrics.go @@ -51,7 +51,7 @@ type Metricer interface { Serve(ctx context.Context, hostname string, port int) error } -// Metrics tracks all the metrics for the op-node. +// Metrics tracks all the metrics for the es-node. type Metrics struct { lastSubmissionTimes map[uint64]uint64 @@ -377,7 +377,7 @@ func NewMetrics(procName string) *Metrics { Up: factory.NewGauge(prometheus.GaugeOpts{ Namespace: ns, Name: "up", - Help: "1 if the op node has finished starting up", + Help: "1 if the es node has finished starting up", }), registry: registry, @@ -541,7 +541,7 @@ func (m *Metrics) ServerRecordTimeUsed(method string) func() { } // RecordInfo sets a pseudo-metric that contains versioning and -// config info for the opnode. +// config info for the es node. 
func (m *Metrics) RecordInfo(version string) { m.Info.WithLabelValues(version).Set(1) } From d247be24bcda7270a22dbbc3ce0e4bd2f41f6caf Mon Sep 17 00:00:00 2001 From: pingke Date: Thu, 30 Nov 2023 20:37:17 +0800 Subject: [PATCH 53/68] expose miningInfo Attributes to other package --- ethstorage/miner/l1_mining_api.go | 6 +++--- ethstorage/miner/miner.go | 12 ++++++------ ethstorage/miner/worker.go | 6 +++--- 3 files changed, 12 insertions(+), 12 deletions(-) diff --git a/ethstorage/miner/l1_mining_api.go b/ethstorage/miner/l1_mining_api.go index 35b07033..1c38dd78 100644 --- a/ethstorage/miner/l1_mining_api.go +++ b/ethstorage/miner/l1_mining_api.go @@ -52,9 +52,9 @@ func (m *l1MiningAPI) GetMiningInfo(ctx context.Context, contract common.Address return nil, err } mi := &miningInfo{ - lastMineTime: res[0].(*big.Int).Uint64(), - difficulty: res[1].(*big.Int), - blockMined: res[2].(*big.Int), + LastMineTime: res[0].(*big.Int).Uint64(), + Difficulty: res[1].(*big.Int), + BlockMined: res[2].(*big.Int), } return mi, nil } diff --git a/ethstorage/miner/miner.go b/ethstorage/miner/miner.go index ab73a321..71580b48 100644 --- a/ethstorage/miner/miner.go +++ b/ethstorage/miner/miner.go @@ -31,17 +31,17 @@ type MiningProver interface { } type miningInfo struct { - lastMineTime uint64 - difficulty *big.Int - blockMined *big.Int + LastMineTime uint64 + Difficulty *big.Int + BlockMined *big.Int } func (a *miningInfo) String() string { return fmt.Sprintf( "LastMineTime: %d, Difficulty: %s, BlockMined: %s", - a.lastMineTime, - a.difficulty.String(), - a.blockMined.String(), + a.LastMineTime, + a.Difficulty.String(), + a.BlockMined.String(), ) } diff --git a/ethstorage/miner/worker.go b/ethstorage/miner/worker.go index 37607a0f..371e617c 100644 --- a/ethstorage/miner/worker.go +++ b/ethstorage/miner/worker.go @@ -235,11 +235,11 @@ func (w *worker) updateDifficulty(shardIdx, blockTime uint64) (*big.Int, error) w.lg.Warn("Failed to get es mining info", "error", err.Error()) return nil, err } - w.lg.Info("Mining info retrieved", "shard", shardIdx, "lastMineTime", info.lastMineTime, "difficulty", info.difficulty, "proofsSubmitted", info.blockMined) + w.lg.Info("Mining info retrieved", "shard", shardIdx, "LastMineTime", info.LastMineTime, "Difficulty", info.Difficulty, "proofsSubmitted", info.BlockMined) reqDiff := new(big.Int).Div(maxUint256, expectedDiff( - info.lastMineTime, + info.LastMineTime, blockTime, - info.difficulty, + info.Difficulty, w.config.Cutoff, w.config.DiffAdjDivisor, w.config.MinimumDiff, From ac5921fd6cbbfe0bd21223bac0e116fb787cf90c Mon Sep 17 00:00:00 2001 From: pingke Date: Fri, 1 Dec 2023 14:45:27 +0800 Subject: [PATCH 54/68] fix build --- ethstorage/p2p/node.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/ethstorage/p2p/node.go b/ethstorage/p2p/node.go index 0e799836..b7b293c6 100644 --- a/ethstorage/p2p/node.go +++ b/ethstorage/p2p/node.go @@ -13,6 +13,7 @@ import ( "github.com/ethereum/go-ethereum/p2p/discover" "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethstorage/go-ethstorage/ethstorage" + me "github.com/ethstorage/go-ethstorage/ethstorage/metrics" "github.com/ethstorage/go-ethstorage/ethstorage/p2p/protocol" "github.com/ethstorage/go-ethstorage/ethstorage/rollup" "github.com/hashicorp/go-multierror" @@ -87,9 +88,9 @@ func (n *NodeP2P) init(resourcesCtx context.Context, rollupCfg *rollup.EsConfig, n.gater = extra.ConnectionGater() n.connMgr = extra.ConnectionManager() } - m := (protocol.Metricer)(nil) + m := (me.Metricer)(nil) if 
rollupCfg.MetricsEnable { - m = protocol.NewMetrics("sync") + m = me.NewMetrics("sync") } // Activate the P2P req-resp sync n.syncCl = protocol.NewSyncClient(log, rollupCfg, n.host.NewStream, storageManager, setup.SyncerParams(), db, m, feed) From 9fe1a9e6380424ec36dc43702f71488d79733c45 Mon Sep 17 00:00:00 2001 From: pingke Date: Fri, 1 Dec 2023 17:08:56 +0800 Subject: [PATCH 55/68] add comment --- ethstorage/p2p/discovery.go | 1 + 1 file changed, 1 insertion(+) diff --git a/ethstorage/p2p/discovery.go b/ethstorage/p2p/discovery.go index 22d35459..dced38ab 100644 --- a/ethstorage/p2p/discovery.go +++ b/ethstorage/p2p/discovery.go @@ -64,6 +64,7 @@ func (conf *Config) Discovery(log log.Logger, l1ChainID uint64, tcpPort uint16, localNode.SetStaticIP(ip) break } + // TODO: if no external IP found, use NAT protocol to find external IP } if conf.AdvertiseUDPPort != 0 { // explicitly advertised port gets priority localNode.SetFallbackUDP(int(conf.AdvertiseUDPPort)) From a40f1afc64cc5cacf9c663baef8c771381d55a61 Mon Sep 17 00:00:00 2001 From: "iteyelmp@gmail.com" Date: Fri, 1 Dec 2023 17:39:08 +0800 Subject: [PATCH 56/68] add merge ci --- .github/workflows/github-actions-es.yml | 14 +++++++++++++- .github/workflows/golangci-lint.yml | 11 +++++++++++ 2 files changed, 24 insertions(+), 1 deletion(-) diff --git a/.github/workflows/github-actions-es.yml b/.github/workflows/github-actions-es.yml index 16d105b7..cdd641e3 100644 --- a/.github/workflows/github-actions-es.yml +++ b/.github/workflows/github-actions-es.yml @@ -4,7 +4,13 @@ name: GitHub Actions ES run-name: ${{ github.actor }} is push code to main 🚀 on: push: - branches: [ "main" ] + branches: + - main + - merge + pull_request: + branches: + - main + - merge jobs: build: runs-on: ubuntu-latest @@ -21,3 +27,9 @@ jobs: - name: Test run: go test -v ./... -tags ci + + - name: Check build and test results + if: failure() + run: | + echo "FMT build and test failed. Please fix the issues before merging." + exit 1 diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index bba70d9f..c08a11d2 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -3,6 +3,11 @@ on: push: branches: - main + - merge + pull_request: + branches: + - main + - merge permissions: contents: read @@ -52,3 +57,9 @@ jobs: # Optional: The mode to install golangci-lint. It can be 'binary' or 'goinstall'. # install-mode: "goinstall" + + - name: Check fmt results + if: failure() + run: | + echo "FMT check failed. Please fix the issues before merging." + exit 1 From 8dca6f6679dd28f9a9e9fe5bf37c94364b942df5 Mon Sep 17 00:00:00 2001 From: "iteyelmp@gmail.com" Date: Fri, 1 Dec 2023 17:45:21 +0800 Subject: [PATCH 57/68] update pr state --- .github/workflows/github-actions-es.yml | 23 +++++++++++++++++++---- .github/workflows/golangci-lint.yml | 23 +++++++++++++++++++---- 2 files changed, 38 insertions(+), 8 deletions(-) diff --git a/.github/workflows/github-actions-es.yml b/.github/workflows/github-actions-es.yml index cdd641e3..8a0a8bb1 100644 --- a/.github/workflows/github-actions-es.yml +++ b/.github/workflows/github-actions-es.yml @@ -29,7 +29,22 @@ jobs: run: go test -v ./... -tags ci - name: Check build and test results - if: failure() - run: | - echo "FMT build and test failed. Please fix the issues before merging." 
- exit 1 + uses: actions/github-script@v4 + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + script: | + const { data: pr } = await github.pulls.get({ + owner: context.repo.owner, + repo: context.repo.repo, + pull_number: context.issue.number + }); + + if (job.status === 'failure') { + await github.pulls.updateBranch({ + owner: context.repo.owner, + repo: context.repo.repo, + pull_number: context.issue.number, + expected_head_sha: pr.head.sha, + status: 'behind' + }); + } diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index c08a11d2..44a73eb1 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -59,7 +59,22 @@ jobs: # install-mode: "goinstall" - name: Check fmt results - if: failure() - run: | - echo "FMT check failed. Please fix the issues before merging." - exit 1 + uses: actions/github-script@v4 + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + script: | + const { data: pr } = await github.pulls.get({ + owner: context.repo.owner, + repo: context.repo.repo, + pull_number: context.issue.number + }); + + if (job.status === 'failure') { + await github.pulls.updateBranch({ + owner: context.repo.owner, + repo: context.repo.repo, + pull_number: context.issue.number, + expected_head_sha: pr.head.sha, + status: 'behind' + }); + } From 0440d5114bab0b38d3fcd3719c763bf0f537152e Mon Sep 17 00:00:00 2001 From: "iteyelmp@gmail.com" Date: Fri, 1 Dec 2023 17:53:59 +0800 Subject: [PATCH 58/68] remove check --- .github/workflows/github-actions-es.yml | 21 --------------------- .github/workflows/golangci-lint.yml | 21 --------------------- 2 files changed, 42 deletions(-) diff --git a/.github/workflows/github-actions-es.yml b/.github/workflows/github-actions-es.yml index 8a0a8bb1..d45430e1 100644 --- a/.github/workflows/github-actions-es.yml +++ b/.github/workflows/github-actions-es.yml @@ -27,24 +27,3 @@ jobs: - name: Test run: go test -v ./... -tags ci - - - name: Check build and test results - uses: actions/github-script@v4 - with: - github-token: ${{ secrets.GITHUB_TOKEN }} - script: | - const { data: pr } = await github.pulls.get({ - owner: context.repo.owner, - repo: context.repo.repo, - pull_number: context.issue.number - }); - - if (job.status === 'failure') { - await github.pulls.updateBranch({ - owner: context.repo.owner, - repo: context.repo.repo, - pull_number: context.issue.number, - expected_head_sha: pr.head.sha, - status: 'behind' - }); - } diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index 44a73eb1..bd190243 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -57,24 +57,3 @@ jobs: # Optional: The mode to install golangci-lint. It can be 'binary' or 'goinstall'. 
# install-mode: "goinstall" - - - name: Check fmt results - uses: actions/github-script@v4 - with: - github-token: ${{ secrets.GITHUB_TOKEN }} - script: | - const { data: pr } = await github.pulls.get({ - owner: context.repo.owner, - repo: context.repo.repo, - pull_number: context.issue.number - }); - - if (job.status === 'failure') { - await github.pulls.updateBranch({ - owner: context.repo.owner, - repo: context.repo.repo, - pull_number: context.issue.number, - expected_head_sha: pr.head.sha, - status: 'behind' - }); - } From 731265b0ed45afffd660e712ea59b46799f459be Mon Sep 17 00:00:00 2001 From: "iteyelmp@gmail.com" Date: Mon, 4 Dec 2023 16:33:28 +0800 Subject: [PATCH 59/68] Add blocking merge --- .github/workflows/github-actions-es.yml | 7 +++++++ .github/workflows/golangci-lint.yml | 7 +++++++ 2 files changed, 14 insertions(+) diff --git a/.github/workflows/github-actions-es.yml b/.github/workflows/github-actions-es.yml index d45430e1..62ed41ca 100644 --- a/.github/workflows/github-actions-es.yml +++ b/.github/workflows/github-actions-es.yml @@ -27,3 +27,10 @@ jobs: - name: Test run: go test -v ./... -tags ci + + - name: Verify execution results + run: | + if [ $? -ne 0 ]; then + echo "Verification failed, blocking merge" + exit 1 + fi diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index bd190243..2150b225 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -57,3 +57,10 @@ jobs: # Optional: The mode to install golangci-lint. It can be 'binary' or 'goinstall'. # install-mode: "goinstall" + + - name: Verify execution results + run: | + if [ $? -ne 0 ]; then + echo "Verification failed, blocking merge" + exit 1 + fi From 24c56e094663eed7cb14e3d3dd0a1db2eb260d1e Mon Sep 17 00:00:00 2001 From: "iteyelmp@gmail.com" Date: Mon, 4 Dec 2023 16:43:28 +0800 Subject: [PATCH 60/68] Add blocking merge --- .github/workflows/github-actions-es.yml | 5 ++--- .github/workflows/golangci-lint.yml | 7 +++---- 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/.github/workflows/github-actions-es.yml b/.github/workflows/github-actions-es.yml index 62ed41ca..2be0342e 100644 --- a/.github/workflows/github-actions-es.yml +++ b/.github/workflows/github-actions-es.yml @@ -29,8 +29,7 @@ jobs: run: go test -v ./... -tags ci - name: Verify execution results + if: failure() run: | - if [ $? -ne 0 ]; then - echo "Verification failed, blocking merge" + echo "Previous steps failed. Cannot merge pull request." exit 1 - fi diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index 2150b225..7e2c12f0 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -59,8 +59,7 @@ jobs: # install-mode: "goinstall" - name: Verify execution results + if: failure() run: | - if [ $? -ne 0 ]; then - echo "Verification failed, blocking merge" - exit 1 - fi + echo "Previous steps failed. Cannot merge pull request." 
+ exit 1 From f85ffad96f48c206eccbf1af758d55bfacd6ce1b Mon Sep 17 00:00:00 2001 From: Qiang Zhu Date: Mon, 4 Dec 2023 16:50:31 +0800 Subject: [PATCH 61/68] Upgrade to devnet-12, and use geth master branch as dependency --- go.mod | 9 +++------ go.sum | 14 ++++++-------- 2 files changed, 9 insertions(+), 14 deletions(-) diff --git a/go.mod b/go.mod index 5689e320..7e32094d 100644 --- a/go.mod +++ b/go.mod @@ -4,13 +4,13 @@ go 1.20 require ( github.com/consensys/gnark-crypto v0.12.1 - github.com/crate-crypto/go-kzg-4844 v0.6.1-0.20231019121413-3621cc59f0c7 + github.com/crate-crypto/go-kzg-4844 v0.7.0 github.com/crate-crypto/go-proto-danksharding-crypto v0.0.0-20230312204821-9a244123c812 github.com/detailyang/go-fallocate v0.0.0-20180908115635-432fa640bd2e github.com/edsrzf/mmap-go v1.1.0 github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3 github.com/ethereum-optimism/optimism v1.0.9 - github.com/ethereum/go-ethereum v1.11.6 + github.com/ethereum/go-ethereum v1.13.5 github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d github.com/holiman/uint256 v1.2.3 @@ -42,7 +42,7 @@ require ( github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/deckarep/golang-set/v2 v2.1.0 // indirect - github.com/ethereum/c-kzg-4844 v0.3.2-0.20231019020040-748283cced54 // indirect + github.com/ethereum/c-kzg-4844 v0.4.0 // indirect github.com/fjl/memsize v0.0.1 // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/go-stack/stack v1.8.1 // indirect @@ -57,7 +57,6 @@ require ( github.com/huin/goupnp v1.3.0 // indirect github.com/jackpal/go-nat-pmp v1.0.2 // indirect github.com/kilic/bls12-381 v0.1.1-0.20220929213557-ca162e8a70f4 // indirect - github.com/libp2p/go-mplex v0.7.0 // indirect github.com/olekukonko/tablewriter v0.0.5 // indirect github.com/rivo/uniseg v0.4.3 // indirect github.com/rs/cors v1.8.2 // indirect @@ -184,5 +183,3 @@ require ( ) replace github.com/iden3/go-iden3-crypto => github.com/ethstorage/go-iden3-crypto v0.0.0-20230406080944-d89aec086425 - -replace github.com/ethereum/go-ethereum => github.com/lightclient/go-ethereum v1.10.10-0.20231019143932-4d161dee0c4c diff --git a/go.sum b/go.sum index 5022d0e7..849f2bc9 100644 --- a/go.sum +++ b/go.sum @@ -103,8 +103,8 @@ github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:ma github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/crate-crypto/go-kzg-4844 v0.6.1-0.20231019121413-3621cc59f0c7 h1:VpZxBC99nEW8Rkz1EBBf7JmaM20H+ZkSmqdxpYEoXuo= -github.com/crate-crypto/go-kzg-4844 v0.6.1-0.20231019121413-3621cc59f0c7/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXkYxeOi8ZF1sYioxhc= +github.com/crate-crypto/go-kzg-4844 v0.7.0 h1:C0vgZRk4q4EZ/JgPfzuSoxdCq3C3mOZMBShovmncxvA= +github.com/crate-crypto/go-kzg-4844 v0.7.0/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXkYxeOi8ZF1sYioxhc= github.com/crate-crypto/go-proto-danksharding-crypto v0.0.0-20230312204821-9a244123c812 h1:fvpzeIO449sb44y2Nqd0MVziJHvp0OFCG66t3ZjuYqU= github.com/crate-crypto/go-proto-danksharding-crypto v0.0.0-20230312204821-9a244123c812/go.mod h1:ZNzUrSnC7IXKtQWnROzWVfQSivVSCPkMtwXekLDj4qI= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= @@ -150,8 
+150,10 @@ github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3 h1:RWHKLhCrQThMfch+QJ1Z github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3/go.mod h1:QziizLAiF0KqyLdNJYD7O5cpDlaFMNZzlxYNcWsJUxs= github.com/ethereum-optimism/optimism v1.0.9 h1:ey7qw26/jPdUhde7LZtHUTM9Kpa2Dg4/Vv+Nru8NXhI= github.com/ethereum-optimism/optimism v1.0.9/go.mod h1:Z+2H7W3Oz50spYUl72DNUn1LnKqDL0SOn/uUGlL+Lys= -github.com/ethereum/c-kzg-4844 v0.3.2-0.20231019020040-748283cced54 h1:jDyQvQjauRyb7TJAF9W7J3NOjn3ukXahd3l+rd1Fak8= -github.com/ethereum/c-kzg-4844 v0.3.2-0.20231019020040-748283cced54/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0= +github.com/ethereum/c-kzg-4844 v0.4.0 h1:3MS1s4JtA868KpJxroZoepdV0ZKBp3u/O5HcZ7R3nlY= +github.com/ethereum/c-kzg-4844 v0.4.0/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0= +github.com/ethereum/go-ethereum v1.13.5 h1:U6TCRciCqZRe4FPXmy1sMGxTfuk8P7u2UoinF3VbaFk= +github.com/ethereum/go-ethereum v1.13.5/go.mod h1:yMTu38GSuyxaYzQMViqNmQ1s3cE84abZexQmTgenWk0= github.com/ethstorage/go-iden3-crypto v0.0.0-20230406080944-d89aec086425 h1:dKQu1oXrt6ndFl4XtAZsfBubU8c5W59T85L8MGtWawE= github.com/ethstorage/go-iden3-crypto v0.0.0-20230406080944-d89aec086425/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E= github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8= @@ -406,8 +408,6 @@ github.com/libp2p/go-libp2p-asn-util v0.3.0/go.mod h1:B1mcOrKUE35Xq/ASTmQ4tN3LNz github.com/libp2p/go-libp2p-pubsub v0.9.0 h1:mcLb4WzwhUG4OKb0rp1/bYMd/DYhvMyzJheQH3LMd1s= github.com/libp2p/go-libp2p-pubsub v0.9.0/go.mod h1:OEsj0Cc/BpkqikXRTrVspWU/Hx7bMZwHP+6vNMd+c7I= github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA= -github.com/libp2p/go-mplex v0.7.0 h1:BDhFZdlk5tbr0oyFq/xv/NPGfjbnrsDam1EvutpBDbY= -github.com/libp2p/go-mplex v0.7.0/go.mod h1:rW8ThnRcYWft/Jb2jeORBmPd6xuG3dGxWN/W168L9EU= github.com/libp2p/go-msgio v0.3.0 h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0= github.com/libp2p/go-msgio v0.3.0/go.mod h1:nyRM819GmVaF9LX3l03RMh10QdOroF++NBbxAb0mmDM= github.com/libp2p/go-nat v0.1.0 h1:MfVsH6DLcpa04Xr+p8hmVRG4juse0s3J8HyNWYHffXg= @@ -420,8 +420,6 @@ github.com/libp2p/go-reuseport v0.2.0/go.mod h1:bvVho6eLMm6Bz5hmU0LYN3ixd3nPPvtI github.com/libp2p/go-sockaddr v0.0.2/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= github.com/libp2p/go-yamux/v4 v4.0.0 h1:+Y80dV2Yx/kv7Y7JKu0LECyVdMXm1VUoko+VQ9rBfZQ= github.com/libp2p/go-yamux/v4 v4.0.0/go.mod h1:NWjl8ZTLOGlozrXSOZ/HlfG++39iKNnM5wwmtQP1YB4= -github.com/lightclient/go-ethereum v1.10.10-0.20231019143932-4d161dee0c4c h1:UsDsJSV7/j0C4aGqECXoPvdL6a6y7myXMptcnQjflEY= -github.com/lightclient/go-ethereum v1.10.10-0.20231019143932-4d161dee0c4c/go.mod h1:CLSRGaP4Ev4DJOP+JSk3NHyJIillQLJc/ZAyCLI8NOs= github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= From 7063345b432875532fbf6ab1d37bce803f568851 Mon Sep 17 00:00:00 2001 From: "iteyelmp@gmail.com" Date: Mon, 4 Dec 2023 17:03:59 +0800 Subject: [PATCH 62/68] remove other code --- .github/workflows/github-actions-es.yml | 8 -------- .github/workflows/golangci-lint.yml | 8 -------- 2 files changed, 16 deletions(-) diff --git a/.github/workflows/github-actions-es.yml b/.github/workflows/github-actions-es.yml index 2be0342e..1eeadd30 100644 
--- a/.github/workflows/github-actions-es.yml +++ b/.github/workflows/github-actions-es.yml @@ -6,11 +6,9 @@ on: push: branches: - main - - merge pull_request: branches: - main - - merge jobs: build: runs-on: ubuntu-latest @@ -27,9 +25,3 @@ jobs: - name: Test run: go test -v ./... -tags ci - - - name: Verify execution results - if: failure() - run: | - echo "Previous steps failed. Cannot merge pull request." - exit 1 diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index 7e2c12f0..f7e88a28 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -3,11 +3,9 @@ on: push: branches: - main - - merge pull_request: branches: - main - - merge permissions: contents: read @@ -57,9 +55,3 @@ jobs: # Optional: The mode to install golangci-lint. It can be 'binary' or 'goinstall'. # install-mode: "goinstall" - - - name: Verify execution results - if: failure() - run: | - echo "Previous steps failed. Cannot merge pull request." - exit 1 From b33e84a1cc673c87770e1ada7ab0914fedbdfd8e Mon Sep 17 00:00:00 2001 From: "iteyelmp@gmail.com" Date: Mon, 4 Dec 2023 18:03:33 +0800 Subject: [PATCH 63/68] add test --- .github/workflows/github-actions-es.yml | 1 + .github/workflows/golangci-lint.yml | 1 + 2 files changed, 2 insertions(+) diff --git a/.github/workflows/github-actions-es.yml b/.github/workflows/github-actions-es.yml index 1eeadd30..f37157a4 100644 --- a/.github/workflows/github-actions-es.yml +++ b/.github/workflows/github-actions-es.yml @@ -9,6 +9,7 @@ on: pull_request: branches: - main + - merge jobs: build: runs-on: ubuntu-latest diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index f7e88a28..8118bcfc 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -6,6 +6,7 @@ on: pull_request: branches: - main + - merge permissions: contents: read From 01b3ab95ec6a521eededbaa70e5ef3b6cf4042ff Mon Sep 17 00:00:00 2001 From: "iteyelmp@gmail.com" Date: Mon, 4 Dec 2023 18:25:01 +0800 Subject: [PATCH 64/68] add ci name --- .github/workflows/github-actions-es.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/github-actions-es.yml b/.github/workflows/github-actions-es.yml index f37157a4..e837b6f6 100644 --- a/.github/workflows/github-actions-es.yml +++ b/.github/workflows/github-actions-es.yml @@ -12,6 +12,7 @@ on: - merge jobs: build: + name: build-and-test runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 From 37a4b44009cb59ecd99e8686a4cc052d971d5b03 Mon Sep 17 00:00:00 2001 From: "iteyelmp@gmail.com" Date: Mon, 4 Dec 2023 18:56:31 +0800 Subject: [PATCH 65/68] remove branch --- .github/workflows/github-actions-es.yml | 1 - .github/workflows/golangci-lint.yml | 1 - 2 files changed, 2 deletions(-) diff --git a/.github/workflows/github-actions-es.yml b/.github/workflows/github-actions-es.yml index e837b6f6..9a67caca 100644 --- a/.github/workflows/github-actions-es.yml +++ b/.github/workflows/github-actions-es.yml @@ -9,7 +9,6 @@ on: pull_request: branches: - main - - merge jobs: build: name: build-and-test diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index 8118bcfc..f7e88a28 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -6,7 +6,6 @@ on: pull_request: branches: - main - - merge permissions: contents: read From 1bcd5a2506dc5d331415c096e860dffff95caa18 Mon Sep 17 00:00:00 2001 From: "iteyelmp@gmail.com" Date: Mon, 4 Dec 2023 19:07:41 +0800 Subject: 
[PATCH 66/68] remove push action --- .github/workflows/github-actions-es.yml | 3 --- .github/workflows/golangci-lint.yml | 5 +---- 2 files changed, 1 insertion(+), 7 deletions(-) diff --git a/.github/workflows/github-actions-es.yml b/.github/workflows/github-actions-es.yml index 9a67caca..a80c5617 100644 --- a/.github/workflows/github-actions-es.yml +++ b/.github/workflows/github-actions-es.yml @@ -3,9 +3,6 @@ name: GitHub Actions ES run-name: ${{ github.actor }} is push code to main 🚀 on: - push: - branches: - - main pull_request: branches: - main diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index f7e88a28..e28cdb25 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -1,8 +1,5 @@ -name: golangci-lint +name: GoLang CI Lint on: - push: - branches: - - main pull_request: branches: - main From 30397724881c76fc3b3dba4d0236fd0ee65c985c Mon Sep 17 00:00:00 2001 From: "iteyelmp@gmail.com" Date: Mon, 4 Dec 2023 19:11:45 +0800 Subject: [PATCH 67/68] change name --- .../{github-actions-es.yml => golangci-build-test.yml} | 2 +- .github/workflows/publish.yml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) rename .github/workflows/{github-actions-es.yml => golangci-build-test.yml} (95%) diff --git a/.github/workflows/github-actions-es.yml b/.github/workflows/golangci-build-test.yml similarity index 95% rename from .github/workflows/github-actions-es.yml rename to .github/workflows/golangci-build-test.yml index a80c5617..73c30aae 100644 --- a/.github/workflows/github-actions-es.yml +++ b/.github/workflows/golangci-build-test.yml @@ -1,6 +1,6 @@ # This workflow will build a golang project # For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-go -name: GitHub Actions ES +name: GoLang CI Build-Test run-name: ${{ github.actor }} is push code to main 🚀 on: pull_request: diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 1959a259..129b21d1 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -1,6 +1,6 @@ # This workflow will publish an es-node release with a pre-build executable -name: GitHub Actions ES Publish +name: GoLang CI Publish run-name: ${{ github.actor }} is publishing a release 🚀 on: push: @@ -39,4 +39,4 @@ jobs: es-node.${{github.ref_name}}.darwin-amd64 es-node.${{github.ref_name}}.darwin-arm64 es-node.${{github.ref_name}}.windows-amd64 - generate_release_notes: true \ No newline at end of file + generate_release_notes: true From dc8398f3870ea37f14144feb699f50559f9af074 Mon Sep 17 00:00:00 2001 From: "iteyelmp@gmail.com" Date: Mon, 4 Dec 2023 19:12:54 +0800 Subject: [PATCH 68/68] change name --- .github/workflows/publish.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 129b21d1..f80669a9 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -1,6 +1,6 @@ # This workflow will publish an es-node release with a pre-build executable -name: GoLang CI Publish +name: Publish run-name: ${{ github.actor }} is publishing a release 🚀 on: push: