From 10523663553407d77fb8bfffb5d3525d0e7c0106 Mon Sep 17 00:00:00 2001 From: syntrust Date: Mon, 1 Jul 2024 18:51:16 +0800 Subject: [PATCH 01/61] load blob from cache --- ethstorage/miner/miner.go | 15 +++++++++++++-- ethstorage/miner/worker.go | 27 +++++++++++++-------------- ethstorage/node/node.go | 17 ++++++++++++++++- 3 files changed, 42 insertions(+), 17 deletions(-) diff --git a/ethstorage/miner/miner.go b/ethstorage/miner/miner.go index a4b02f22..4a36395f 100644 --- a/ethstorage/miner/miner.go +++ b/ethstorage/miner/miner.go @@ -31,6 +31,8 @@ type MiningProver interface { GetStorageProof(encodedKVs [][]byte, encodingKey []common.Hash, sampleIdxInKv []uint64) ([]*big.Int, [][]byte, [][]byte, error) } +type GetBlobFn func(kvIdx uint64, blobHash common.Hash) ([]byte, error) + type miningInfo struct { LastMineTime uint64 Difficulty *big.Int @@ -58,7 +60,16 @@ type Miner struct { lg log.Logger } -func New(config *Config, db ethdb.Database, storageMgr *ethstorage.StorageManager, api L1API, prover MiningProver, feed *event.Feed, lg log.Logger) *Miner { +func New( + config *Config, + db ethdb.Database, + storageMgr *ethstorage.StorageManager, + api L1API, + getBlob GetBlobFn, + prover MiningProver, + feed *event.Feed, + lg log.Logger, +) *Miner { chainHeadCh := make(chan eth.L1BlockRef, chainHeadChanSize) miner := &Miner{ feed: feed, @@ -67,7 +78,7 @@ func New(config *Config, db ethdb.Database, storageMgr *ethstorage.StorageManage startCh: make(chan struct{}), stopCh: make(chan struct{}), lg: lg, - worker: newWorker(*config, db, storageMgr, api, chainHeadCh, prover, lg), + worker: newWorker(*config, db, storageMgr, api, getBlob, chainHeadCh, prover, lg), } miner.wg.Add(1) go miner.update() diff --git a/ethstorage/miner/worker.go b/ethstorage/miner/worker.go index 157bc388..7e84c727 100644 --- a/ethstorage/miner/worker.go +++ b/ethstorage/miner/worker.go @@ -99,6 +99,7 @@ type result struct { type worker struct { config Config l1API L1API + getBlob GetBlobFn prover MiningProver db ethdb.Database storageMgr *es.StorageManager @@ -126,6 +127,7 @@ func newWorker( db ethdb.Database, storageMgr *es.StorageManager, api L1API, + getBlob GetBlobFn, chainHeadCh chan eth.L1BlockRef, prover MiningProver, lg log.Logger, @@ -139,6 +141,7 @@ func newWorker( worker := &worker{ config: config, l1API: api, + getBlob: getBlob, prover: prover, chainHeadCh: chainHeadCh, shardTaskMap: make(map[uint64]task), @@ -610,23 +613,19 @@ func (w *worker) getMiningData(t *task, sampleIdx []uint64) ([][]byte, []uint64, return nil, nil, nil, nil, nil, err } for i := uint64(0); i < checksLen; i++ { - kvData, exist, err := w.storageMgr.TryRead(kvIdxs[i], int(w.storageMgr.MaxKvSize()), kvHashes[i]) - if exist && err == nil { - dataSet[i] = kvData - sampleIdxsInKv[i] = sampleIdx[i] % (1 << sampleLenBits) - encodingKeys[i] = es.CalcEncodeKey(kvHashes[i], kvIdxs[i], t.miner) - encodedSample, err := w.storageMgr.ReadSampleUnlocked(t.shardIdx, sampleIdx[i]) - if err != nil { - return nil, nil, nil, nil, nil, err - } - encodedSamples[i] = encodedSample - } else { - if !exist { - err = fmt.Errorf("kv not found: index=%d", kvIdxs[i]) - } + kvData, err := w.getBlob(kvIdxs[i], kvHashes[i]) + if err != nil { w.lg.Error("Get data error", "index", kvIdxs[i], "error", err.Error()) return nil, nil, nil, nil, nil, err } + dataSet[i] = kvData + sampleIdxsInKv[i] = sampleIdx[i] % (1 << sampleLenBits) + encodingKeys[i] = es.CalcEncodeKey(kvHashes[i], kvIdxs[i], t.miner) + encodedSample, err := w.storageMgr.ReadSampleUnlocked(t.shardIdx, 
sampleIdx[i]) + if err != nil { + return nil, nil, nil, nil, nil, err + } + encodedSamples[i] = encodedSample } return dataSet, kvIdxs, sampleIdxsInKv, encodingKeys, encodedSamples, nil } diff --git a/ethstorage/node/node.go b/ethstorage/node/node.go index ee95bd96..018a05e9 100644 --- a/ethstorage/node/node.go +++ b/ethstorage/node/node.go @@ -13,6 +13,7 @@ import ( "time" "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/event" @@ -300,7 +301,21 @@ func (n *EsNode) initMiner(ctx context.Context, cfg *Config) error { cfg.Mining.ZKProverMode, n.log, ) - n.miner = miner.New(cfg.Mining, n.db, n.storageManager, l1api, &pvr, n.feed, n.log) + getBlobFn := func(kvIdx uint64, kvHash common.Hash) ([]byte, error) { + blob := n.downloader.Cache.GetKeyValueByIndex(kvIdx, kvHash) + if blob != nil { + return blob, nil + } + kvData, exist, err := n.storageManager.TryRead(kvIdx, int(n.storageManager.MaxKvSize()), kvHash) + if err != nil { + return nil, err + } + if !exist { + return nil, fmt.Errorf("kv not found: index=%d", kvIdx) + } + return kvData, nil + } + n.miner = miner.New(cfg.Mining, n.db, n.storageManager, l1api, getBlobFn, &pvr, n.feed, n.log) log.Info("Initialized miner") return nil } From 88922a57437e175234f9399fd9399157f9eeef18 Mon Sep 17 00:00:00 2001 From: syntrust Date: Mon, 1 Jul 2024 19:08:03 +0800 Subject: [PATCH 02/61] fix lint --- ethstorage/miner/miner_test.go | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/ethstorage/miner/miner_test.go b/ethstorage/miner/miner_test.go index 15871f05..fe9e9b80 100644 --- a/ethstorage/miner/miner_test.go +++ b/ethstorage/miner/miner_test.go @@ -6,6 +6,7 @@ package miner import ( + "fmt" "math/big" "os" "path/filepath" @@ -69,7 +70,16 @@ func newMiner(t *testing.T, storageMgr *es.StorageManager, client *eth.PollingCl pvr := prover.NewKZGPoseidonProver(zkWorkingDir, defaultConfig.ZKeyFileName, defaultConfig.ZKProverMode, lg) fd := new(event.Feed) db := rawdb.NewMemoryDatabase() - miner := New(defaultConfig, db, storageMgr, l1api, &pvr, fd, lg) + miner := New(defaultConfig, db, storageMgr, l1api, func(kvIdx uint64, kvHash common.Hash) ([]byte, error) { + kvData, exist, err := storageMgr.TryRead(kvIdx, int(storageMgr.MaxKvSize()), kvHash) + if err != nil { + return nil, err + } + if !exist { + return nil, fmt.Errorf("kv not found: index=%d", kvIdx) + } + return kvData, nil + }, &pvr, fd, lg) return miner } From 3cf2a9d2a4e9b8f44103af490fdfc3d7cc1d4320 Mon Sep 17 00:00:00 2001 From: syntrust Date: Mon, 1 Jul 2024 19:14:40 +0800 Subject: [PATCH 03/61] fix lint --- integration_tests/node_mine_test.go | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/integration_tests/node_mine_test.go b/integration_tests/node_mine_test.go index 6be924e5..568aed09 100644 --- a/integration_tests/node_mine_test.go +++ b/integration_tests/node_mine_test.go @@ -77,7 +77,16 @@ func TestMining(t *testing.T) { l1api := miner.NewL1MiningAPI(pClient, nil, lg) pvr := prover.NewKZGPoseidonProver(miningConfig.ZKWorkingDir, miningConfig.ZKeyFileName, 2, lg) db := rawdb.NewMemoryDatabase() - mnr := miner.New(miningConfig, db, storageManager, l1api, &pvr, feed, lg) + mnr := miner.New(miningConfig, db, storageManager, l1api, func(kvIdx uint64, kvHash common.Hash) ([]byte, error) { + kvData, exist, err := storageManager.TryRead(kvIdx, int(storageManager.MaxKvSize()), kvHash) + if err != 
nil { + return nil, err + } + if !exist { + return nil, fmt.Errorf("kv not found: index=%d", kvIdx) + } + return kvData, nil + }, &pvr, feed, lg) lg.Info("Initialized miner") l1HeadsSub := event.ResubscribeErr(time.Second*10, func(ctx context.Context, err error) (event.Subscription, error) { From bf0863044965dead7e9335b94486cc864a978be6 Mon Sep 17 00:00:00 2001 From: syntrust Date: Tue, 2 Jul 2024 09:54:50 +0800 Subject: [PATCH 04/61] add log --- ethstorage/node/node.go | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/ethstorage/node/node.go b/ethstorage/node/node.go index 018a05e9..8d0c74c2 100644 --- a/ethstorage/node/node.go +++ b/ethstorage/node/node.go @@ -304,19 +304,21 @@ func (n *EsNode) initMiner(ctx context.Context, cfg *Config) error { getBlobFn := func(kvIdx uint64, kvHash common.Hash) ([]byte, error) { blob := n.downloader.Cache.GetKeyValueByIndex(kvIdx, kvHash) if blob != nil { + n.log.Info("Loaded blob from downloader cache", "kvIdx", kvIdx) return blob, nil } - kvData, exist, err := n.storageManager.TryRead(kvIdx, int(n.storageManager.MaxKvSize()), kvHash) + blob, exist, err := n.storageManager.TryRead(kvIdx, int(n.storageManager.MaxKvSize()), kvHash) if err != nil { return nil, err } if !exist { return nil, fmt.Errorf("kv not found: index=%d", kvIdx) } - return kvData, nil + n.log.Info("Loaded blob from storage manager", "kvIdx", kvIdx) + return blob, nil } n.miner = miner.New(cfg.Mining, n.db, n.storageManager, l1api, getBlobFn, &pvr, n.feed, n.log) - log.Info("Initialized miner") + n.log.Info("Initialized miner") return nil } From fdd537493fe640a51c6da97b24a1e58f54bc1538 Mon Sep 17 00:00:00 2001 From: syntrust Date: Wed, 3 Jul 2024 15:31:25 +0800 Subject: [PATCH 05/61] encode sample --- ethstorage/miner/miner.go | 2 +- ethstorage/miner/worker.go | 18 ++++++++++++++---- ethstorage/node/node.go | 10 +++++----- 3 files changed, 20 insertions(+), 10 deletions(-) diff --git a/ethstorage/miner/miner.go b/ethstorage/miner/miner.go index 8bac5ac9..c93eeaa8 100644 --- a/ethstorage/miner/miner.go +++ b/ethstorage/miner/miner.go @@ -32,7 +32,7 @@ type MiningProver interface { GetStorageProof(encodedKVs [][]byte, encodingKey []common.Hash, sampleIdxInKv []uint64) ([]*big.Int, [][]byte, [][]byte, error) } -type GetBlobFn func(kvIdx uint64, blobHash common.Hash) ([]byte, error) +type GetBlobFn func(kvIdx uint64, blobHash common.Hash) ([]byte, bool, error) type miningInfo struct { LastMineTime uint64 diff --git a/ethstorage/miner/worker.go b/ethstorage/miner/worker.go index ee9f96d7..15bc4268 100644 --- a/ethstorage/miner/worker.go +++ b/ethstorage/miner/worker.go @@ -613,7 +613,7 @@ func (w *worker) getMiningData(t *task, sampleIdx []uint64) ([][]byte, []uint64, return nil, nil, nil, nil, nil, err } for i := uint64(0); i < checksLen; i++ { - kvData, err := w.getBlob(kvIdxs[i], kvHashes[i]) + kvData, fromCache, err := w.getBlob(kvIdxs[i], kvHashes[i]) if err != nil { w.lg.Error("Get data error", "index", kvIdxs[i], "error", err.Error()) return nil, nil, nil, nil, nil, err @@ -621,9 +621,19 @@ func (w *worker) getMiningData(t *task, sampleIdx []uint64) ([][]byte, []uint64, dataSet[i] = kvData sampleIdxsInKv[i] = sampleIdx[i] % (1 << sampleLenBits) encodingKeys[i] = es.CalcEncodeKey(kvHashes[i], kvIdxs[i], t.miner) - encodedSample, err := w.storageMgr.ReadSampleUnlocked(t.shardIdx, sampleIdx[i]) - if err != nil { - return nil, nil, nil, nil, nil, err + var encodedSample common.Hash + if fromCache { + encodeType, _ := w.storageMgr.GetShardEncodeType(t.shardIdx) + 
sampleSize := uint64(1 << sampleSizeBits) + sampleIdxByte := sampleIdxsInKv[i] * sampleSize + sample := kvData[sampleIdxByte : sampleIdxByte+sampleSize] + encodedBytes := es.EncodeChunk(sampleSize, sample, encodeType, encodingKeys[i]) + encodedSample = common.BytesToHash(encodedBytes) + } else { + encodedSample, err = w.storageMgr.ReadSampleUnlocked(t.shardIdx, sampleIdx[i]) + if err != nil { + return nil, nil, nil, nil, nil, err + } } encodedSamples[i] = encodedSample } diff --git a/ethstorage/node/node.go b/ethstorage/node/node.go index 78b63c29..08480d17 100644 --- a/ethstorage/node/node.go +++ b/ethstorage/node/node.go @@ -302,21 +302,21 @@ func (n *EsNode) initMiner(ctx context.Context, cfg *Config) error { cfg.Mining.ZKProverImpl, n.log, ) - getBlobFn := func(kvIdx uint64, kvHash common.Hash) ([]byte, error) { + getBlobFn := func(kvIdx uint64, kvHash common.Hash) ([]byte, bool, error) { blob := n.downloader.Cache.GetKeyValueByIndex(kvIdx, kvHash) if blob != nil { n.log.Info("Loaded blob from downloader cache", "kvIdx", kvIdx) - return blob, nil + return blob, true, nil } blob, exist, err := n.storageManager.TryRead(kvIdx, int(n.storageManager.MaxKvSize()), kvHash) if err != nil { - return nil, err + return nil, false, err } if !exist { - return nil, fmt.Errorf("kv not found: index=%d", kvIdx) + return nil, false, fmt.Errorf("kv not found: index=%d", kvIdx) } n.log.Info("Loaded blob from storage manager", "kvIdx", kvIdx) - return blob, nil + return blob, false, nil } n.miner = miner.New(cfg.Mining, n.db, n.storageManager, l1api, getBlobFn, &pvr, n.feed, n.log) n.log.Info("Initialized miner") From cd7120c63203cd1ccb27b2946b7e031c99988123 Mon Sep 17 00:00:00 2001 From: syntrust Date: Wed, 3 Jul 2024 18:44:51 +0800 Subject: [PATCH 06/61] encode sample --- ethstorage/miner/miner_test.go | 8 ++++---- ethstorage/miner/worker.go | 11 +++++++++-- ethstorage/prover/utils.go | 15 +++++++++++++++ integration_tests/node_mine_test.go | 8 ++++---- 4 files changed, 32 insertions(+), 10 deletions(-) diff --git a/ethstorage/miner/miner_test.go b/ethstorage/miner/miner_test.go index 7c042cea..abdd0e55 100644 --- a/ethstorage/miner/miner_test.go +++ b/ethstorage/miner/miner_test.go @@ -71,15 +71,15 @@ func newMiner(t *testing.T, storageMgr *es.StorageManager, client *eth.PollingCl pvr := prover.NewKZGPoseidonProver(zkWorkingDir, defaultConfig.ZKeyFileName, defaultConfig.ZKProverMode, defaultConfig.ZKProverImpl, lg) fd := new(event.Feed) db := rawdb.NewMemoryDatabase() - miner := New(defaultConfig, db, storageMgr, l1api, func(kvIdx uint64, kvHash common.Hash) ([]byte, error) { + miner := New(defaultConfig, db, storageMgr, l1api, func(kvIdx uint64, kvHash common.Hash) ([]byte, bool, error) { kvData, exist, err := storageMgr.TryRead(kvIdx, int(storageMgr.MaxKvSize()), kvHash) if err != nil { - return nil, err + return nil, false, err } if !exist { - return nil, fmt.Errorf("kv not found: index=%d", kvIdx) + return nil, false, fmt.Errorf("kv not found: index=%d", kvIdx) } - return kvData, nil + return kvData, false, nil }, &pvr, fd, lg) return miner } diff --git a/ethstorage/miner/worker.go b/ethstorage/miner/worker.go index 15bc4268..f02d19b8 100644 --- a/ethstorage/miner/worker.go +++ b/ethstorage/miner/worker.go @@ -20,6 +20,7 @@ import ( "github.com/ethereum/go-ethereum/params" es "github.com/ethstorage/go-ethstorage/ethstorage" "github.com/ethstorage/go-ethstorage/ethstorage/eth" + "github.com/ethstorage/go-ethstorage/ethstorage/prover" ) const ( @@ -623,15 +624,21 @@ func (w *worker) getMiningData(t 
*task, sampleIdx []uint64) ([][]byte, []uint64, encodingKeys[i] = es.CalcEncodeKey(kvHashes[i], kvIdxs[i], t.miner) var encodedSample common.Hash if fromCache { - encodeType, _ := w.storageMgr.GetShardEncodeType(t.shardIdx) sampleSize := uint64(1 << sampleSizeBits) sampleIdxByte := sampleIdxsInKv[i] * sampleSize sample := kvData[sampleIdxByte : sampleIdxByte+sampleSize] - encodedBytes := es.EncodeChunk(sampleSize, sample, encodeType, encodingKeys[i]) + mask, err := prover.GenerateMask(encodingKeys[i], sampleIdxsInKv[i]) + if err != nil { + w.lg.Error("Generate mask error", "encodingKey", encodingKeys[i], "sampleIdx", sampleIdxsInKv[i], + "error", err.Error()) + return nil, nil, nil, nil, nil, err + } + encodedBytes := es.MaskDataInPlace(mask, sample) encodedSample = common.BytesToHash(encodedBytes) } else { encodedSample, err = w.storageMgr.ReadSampleUnlocked(t.shardIdx, sampleIdx[i]) if err != nil { + w.lg.Error("Read sample error", "kvIdx", kvIdxs[i], "sampleIdx", sampleIdx[i], "error", err.Error()) return nil, nil, nil, nil, nil, err } } diff --git a/ethstorage/prover/utils.go b/ethstorage/prover/utils.go index 59810e7b..9f648fe4 100644 --- a/ethstorage/prover/utils.go +++ b/ethstorage/prover/utils.go @@ -14,6 +14,7 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethstorage/go-ethstorage/ethstorage/encoder" "github.com/iden3/go-rapidsnark/types" ) @@ -274,3 +275,17 @@ func GenerateInput(encodingKey common.Hash, sampleIdx uint64) ([]byte, error) { } return json.Marshal(inputObj) } + +func GenerateMask(encodingKey common.Hash, sampleIdx uint64) ([]byte, error) { + if int(sampleIdx) >= eth.FieldElementsPerBlob { + return nil, fmt.Errorf("sample index out of scope") + } + encodingKeyMod := fr.Modulus().Mod(encodingKey.Big(), fr.Modulus()) + masks, err := encoder.Encode(common.BigToHash(encodingKeyMod), eth.FieldElementsPerBlob*32) + if err != nil { + return nil, err + } + bytesIdx := sampleIdx * 32 + mask := masks[bytesIdx : bytesIdx+32] + return mask, nil +} diff --git a/integration_tests/node_mine_test.go b/integration_tests/node_mine_test.go index 4fecd264..b6354a38 100644 --- a/integration_tests/node_mine_test.go +++ b/integration_tests/node_mine_test.go @@ -82,15 +82,15 @@ func TestMining(t *testing.T) { lg, ) db := rawdb.NewMemoryDatabase() - mnr := miner.New(miningConfig, db, storageManager, l1api, func(kvIdx uint64, kvHash common.Hash) ([]byte, error) { + mnr := miner.New(miningConfig, db, storageManager, l1api, func(kvIdx uint64, kvHash common.Hash) ([]byte, bool, error) { kvData, exist, err := storageManager.TryRead(kvIdx, int(storageManager.MaxKvSize()), kvHash) if err != nil { - return nil, err + return nil, false, err } if !exist { - return nil, fmt.Errorf("kv not found: index=%d", kvIdx) + return nil, false, fmt.Errorf("kv not found: index=%d", kvIdx) } - return kvData, nil + return kvData, false, nil }, &pvr, feed, lg) lg.Info("Initialized miner") From 7b1f602fa2aab28beb64175131f4ec5a8c81acbb Mon Sep 17 00:00:00 2001 From: syntrust Date: Thu, 4 Jul 2024 17:07:44 +0800 Subject: [PATCH 07/61] refactor --- ethstorage/downloader/blob_cache.go | 14 +++++ ethstorage/miner/l1_mining_api.go | 2 +- ethstorage/miner/miner.go | 10 ++-- ethstorage/miner/miner_test.go | 17 ++---- ethstorage/miner/worker.go | 48 ++++++---------- ethstorage/node/blob_querier.go | 86 +++++++++++++++++++++++++++++ ethstorage/node/node.go | 20 +------ integration_tests/node_mine_test.go | 16 
++---- 8 files changed, 137 insertions(+), 76 deletions(-) create mode 100644 ethstorage/node/blob_querier.go diff --git a/ethstorage/downloader/blob_cache.go b/ethstorage/downloader/blob_cache.go index bdcae02c..4ce86f8b 100644 --- a/ethstorage/downloader/blob_cache.go +++ b/ethstorage/downloader/blob_cache.go @@ -58,6 +58,20 @@ func (c *BlobCache) GetKeyValueByIndex(idx uint64, hash common.Hash) []byte { return nil } +func (c *BlobCache) GetKeyValueByIndexUnsafe(idx uint64) ([]byte, common.Hash) { + c.mu.RLock() + defer c.mu.RUnlock() + + for _, block := range c.blocks { + for _, blob := range block.blobs { + if blob.kvIndex.Uint64() == idx { + return blob.data, blob.hash + } + } + } + return nil, common.Hash{} +} + // TODO: @Qiang An edge case that may need to be handled when Ethereum block is NOT finalized for a long time // We may need to add a counter in SetBlockBlobs(), if the counter is greater than a threshold which means // there has been a long time after last Cleanup, so we need to Cleanup anyway in SetBlockBlobs. diff --git a/ethstorage/miner/l1_mining_api.go b/ethstorage/miner/l1_mining_api.go index 66722bfa..eeee8d06 100644 --- a/ethstorage/miner/l1_mining_api.go +++ b/ethstorage/miner/l1_mining_api.go @@ -78,7 +78,7 @@ func (m *l1MiningAPI) GetDataHashes(ctx context.Context, contract common.Address for i := 0; i < len(metas); i++ { var dhash common.Hash copy(dhash[:], metas[i][32-ethstorage.HashSizeInContract:32]) - m.lg.Info("Get data hash", "kvIndex", kvIdxes[i], "hash", dhash.Hex()) + m.lg.Debug("Get data hash", "kvIndex", kvIdxes[i], "hash", dhash.Hex()) hashes = append(hashes, dhash) } return hashes, nil diff --git a/ethstorage/miner/miner.go b/ethstorage/miner/miner.go index c93eeaa8..2d2f4ac5 100644 --- a/ethstorage/miner/miner.go +++ b/ethstorage/miner/miner.go @@ -31,8 +31,10 @@ type L1API interface { type MiningProver interface { GetStorageProof(encodedKVs [][]byte, encodingKey []common.Hash, sampleIdxInKv []uint64) ([]*big.Int, [][]byte, [][]byte, error) } - -type GetBlobFn func(kvIdx uint64, blobHash common.Hash) ([]byte, bool, error) +type DataQuerier interface { + GetBlob(kvIdxe uint64, blobHash common.Hash) ([]byte, error) + ReadSample(shardIdx, sampleIdx uint64) (common.Hash, error) +} type miningInfo struct { LastMineTime uint64 @@ -66,7 +68,7 @@ func New( db ethdb.Database, storageMgr *ethstorage.StorageManager, api L1API, - getBlob GetBlobFn, + dataQuerier DataQuerier, prover MiningProver, feed *event.Feed, lg log.Logger, @@ -79,7 +81,7 @@ func New( startCh: make(chan struct{}), stopCh: make(chan struct{}), lg: lg, - worker: newWorker(*config, db, storageMgr, api, getBlob, chainHeadCh, prover, lg), + worker: newWorker(*config, db, storageMgr, api, dataQuerier, chainHeadCh, prover, lg), } miner.wg.Add(1) go miner.update() diff --git a/ethstorage/miner/miner_test.go b/ethstorage/miner/miner_test.go index abdd0e55..213483e3 100644 --- a/ethstorage/miner/miner_test.go +++ b/ethstorage/miner/miner_test.go @@ -6,7 +6,6 @@ package miner import ( - "fmt" "math/big" "os" "path/filepath" @@ -19,8 +18,10 @@ import ( "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/event" es "github.com/ethstorage/go-ethstorage/ethstorage" + "github.com/ethstorage/go-ethstorage/ethstorage/downloader" "github.com/ethstorage/go-ethstorage/ethstorage/eth" esLog "github.com/ethstorage/go-ethstorage/ethstorage/log" + "github.com/ethstorage/go-ethstorage/ethstorage/node" "github.com/ethstorage/go-ethstorage/ethstorage/p2p/protocol" 
"github.com/ethstorage/go-ethstorage/ethstorage/prover" ) @@ -71,16 +72,10 @@ func newMiner(t *testing.T, storageMgr *es.StorageManager, client *eth.PollingCl pvr := prover.NewKZGPoseidonProver(zkWorkingDir, defaultConfig.ZKeyFileName, defaultConfig.ZKProverMode, defaultConfig.ZKProverImpl, lg) fd := new(event.Feed) db := rawdb.NewMemoryDatabase() - miner := New(defaultConfig, db, storageMgr, l1api, func(kvIdx uint64, kvHash common.Hash) ([]byte, bool, error) { - kvData, exist, err := storageMgr.TryRead(kvIdx, int(storageMgr.MaxKvSize()), kvHash) - if err != nil { - return nil, false, err - } - if !exist { - return nil, false, fmt.Errorf("kv not found: index=%d", kvIdx) - } - return kvData, false, nil - }, &pvr, fd, lg) + bq := node.NewBlobQuerier(&downloader.Downloader{ + Cache: downloader.NewBlobCache(), + }, storageMgr, client, lg) + miner := New(defaultConfig, db, storageMgr, l1api, bq, &pvr, fd, lg) return miner } diff --git a/ethstorage/miner/worker.go b/ethstorage/miner/worker.go index f02d19b8..9456661f 100644 --- a/ethstorage/miner/worker.go +++ b/ethstorage/miner/worker.go @@ -20,7 +20,6 @@ import ( "github.com/ethereum/go-ethereum/params" es "github.com/ethstorage/go-ethstorage/ethstorage" "github.com/ethstorage/go-ethstorage/ethstorage/eth" - "github.com/ethstorage/go-ethstorage/ethstorage/prover" ) const ( @@ -97,12 +96,12 @@ type result struct { // worker is the main object which takes care of storage mining // and submit the mining result tx to the L1 chain. type worker struct { - config Config - l1API L1API - getBlob GetBlobFn - prover MiningProver - db ethdb.Database - storageMgr *es.StorageManager + config Config + l1API L1API + dataQuerier DataQuerier + prover MiningProver + db ethdb.Database + storageMgr *es.StorageManager chainHeadCh chan eth.L1BlockRef startCh chan uint64 @@ -127,7 +126,7 @@ func newWorker( db ethdb.Database, storageMgr *es.StorageManager, api L1API, - getBlob GetBlobFn, + dataQuerier DataQuerier, chainHeadCh chan eth.L1BlockRef, prover MiningProver, lg log.Logger, @@ -141,7 +140,7 @@ func newWorker( worker := &worker{ config: config, l1API: api, - getBlob: getBlob, + dataQuerier: dataQuerier, prover: prover, chainHeadCh: chainHeadCh, shardTaskMap: make(map[uint64]task), @@ -252,7 +251,7 @@ func (w *worker) newWorkLoop() { if !w.isRunning() { break } - w.lg.Info("Updating tasks with L1 new head", "blockNumber", block.Number, "blockTime", block.Time, "blockHash", block.Hash, "now", uint64(time.Now().Unix())) + w.lg.Debug("Updating tasks with L1 new head", "blockNumber", block.Number, "blockTime", block.Time, "blockHash", block.Hash, "now", uint64(time.Now().Unix())) // TODO suspend mining if: // 1) a mining tx is already submitted; or // 2) if the last mining time is too close (the reward is not enough). 
@@ -310,7 +309,7 @@ func (w *worker) assignTasks(task task, block eth.L1BlockRef, reqDiff *big.Int) w.lg.Debug("Mining task queued", "shard", ti.shardIdx, "thread", ti.thread, "block", ti.blockNumber, "blockTime", block.Time, "now", uint64(time.Now().Unix())) } } - w.lg.Info("Mining tasks assigned", "miner", task.miner, "shard", task.shardIdx, "threads", w.config.ThreadsPerShard, "block", block.Number, "nonces", w.config.NonceLimit) + w.lg.Debug("Mining tasks assigned", "miner", task.miner, "shard", task.shardIdx, "threads", w.config.ThreadsPerShard, "block", block.Number, "nonces", w.config.NonceLimit) } func (w *worker) updateDifficulty(shardIdx, blockTime uint64) (*big.Int, error) { @@ -593,7 +592,7 @@ func (w *worker) computeHash(shardIdx uint64, hash0 common.Hash) (common.Hash, [ w.storageMgr.MaxKvSizeBits(), sampleSizeBits, shardIdx, w.config.RandomChecks, - w.storageMgr.ReadSampleUnlocked, + w.dataQuerier.ReadSample, hash0, ) } @@ -614,7 +613,7 @@ func (w *worker) getMiningData(t *task, sampleIdx []uint64) ([][]byte, []uint64, return nil, nil, nil, nil, nil, err } for i := uint64(0); i < checksLen; i++ { - kvData, fromCache, err := w.getBlob(kvIdxs[i], kvHashes[i]) + kvData, err := w.dataQuerier.GetBlob(kvIdxs[i], kvHashes[i]) if err != nil { w.lg.Error("Get data error", "index", kvIdxs[i], "error", err.Error()) return nil, nil, nil, nil, nil, err @@ -622,25 +621,10 @@ func (w *worker) getMiningData(t *task, sampleIdx []uint64) ([][]byte, []uint64, dataSet[i] = kvData sampleIdxsInKv[i] = sampleIdx[i] % (1 << sampleLenBits) encodingKeys[i] = es.CalcEncodeKey(kvHashes[i], kvIdxs[i], t.miner) - var encodedSample common.Hash - if fromCache { - sampleSize := uint64(1 << sampleSizeBits) - sampleIdxByte := sampleIdxsInKv[i] * sampleSize - sample := kvData[sampleIdxByte : sampleIdxByte+sampleSize] - mask, err := prover.GenerateMask(encodingKeys[i], sampleIdxsInKv[i]) - if err != nil { - w.lg.Error("Generate mask error", "encodingKey", encodingKeys[i], "sampleIdx", sampleIdxsInKv[i], - "error", err.Error()) - return nil, nil, nil, nil, nil, err - } - encodedBytes := es.MaskDataInPlace(mask, sample) - encodedSample = common.BytesToHash(encodedBytes) - } else { - encodedSample, err = w.storageMgr.ReadSampleUnlocked(t.shardIdx, sampleIdx[i]) - if err != nil { - w.lg.Error("Read sample error", "kvIdx", kvIdxs[i], "sampleIdx", sampleIdx[i], "error", err.Error()) - return nil, nil, nil, nil, nil, err - } + encodedSample, err := w.dataQuerier.ReadSample(t.shardIdx, sampleIdx[i]) + if err != nil { + w.lg.Error("Read sample error", "index", sampleIdx[i], "error", err.Error()) + return nil, nil, nil, nil, nil, err } encodedSamples[i] = encodedSample } diff --git a/ethstorage/node/blob_querier.go b/ethstorage/node/blob_querier.go new file mode 100644 index 00000000..7eb87a6f --- /dev/null +++ b/ethstorage/node/blob_querier.go @@ -0,0 +1,86 @@ +// Copyright 2022-2023, EthStorage. 
+// For license information, see https://github.com/ethstorage/es-node/blob/main/LICENSE + +package node + +import ( + "fmt" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" + "github.com/ethstorage/go-ethstorage/ethstorage" + "github.com/ethstorage/go-ethstorage/ethstorage/downloader" + "github.com/ethstorage/go-ethstorage/ethstorage/eth" + "github.com/ethstorage/go-ethstorage/ethstorage/prover" +) + +const sampleSizeBits = 5 + +type BlobQuerier struct { + dlr *downloader.Downloader + sm *ethstorage.StorageManager + l1 *eth.PollingClient + lg log.Logger +} + +func NewBlobQuerier(dlr *downloader.Downloader, sm *ethstorage.StorageManager, l1 *eth.PollingClient, lg log.Logger) *BlobQuerier { + return &BlobQuerier{ + dlr: dlr, + sm: sm, + l1: l1, + lg: lg, + } +} + +func (n *BlobQuerier) GetBlob(kvIdx uint64, kvHash common.Hash) ([]byte, error) { + blob := n.dlr.Cache.GetKeyValueByIndex(kvIdx, kvHash) + if blob != nil { + n.lg.Debug("Loaded blob from downloader cache", "kvIdx", kvIdx) + return blob, nil + } + blob, exist, err := n.sm.TryRead(kvIdx, int(n.sm.MaxKvSize()), kvHash) + if err != nil { + return nil, err + } + if !exist { + return nil, fmt.Errorf("kv not found: index=%d", kvIdx) + } + n.lg.Debug("Loaded blob from storage manager", "kvIdx", kvIdx) + return blob, nil +} + +func (n *BlobQuerier) ReadSample(shardIdx, sampleIdx uint64) (common.Hash, error) { + sampleLenBits := n.sm.MaxKvSizeBits() - sampleSizeBits + kvIdx := sampleIdx >> sampleLenBits + + kvData, kvHash := n.dlr.Cache.GetKeyValueByIndexUnsafe(kvIdx) + if kvData == nil { + encodedSample, err := n.sm.ReadSampleUnlocked(shardIdx, sampleIdx) + if err != nil { + return common.Hash{}, err + } + return encodedSample, nil + } + miner, ok := n.sm.GetShardMiner(shardIdx) + if !ok { + n.lg.Error("Miner not found for shard", "shard", shardIdx) + } + encodingKey := ethstorage.CalcEncodeKey(kvHash, kvIdx, miner) + sampleIdxInKv := sampleIdx % (1 << sampleLenBits) + + start := time.Now() + mask, err := prover.GenerateMask(encodingKey, sampleIdxInKv) + fmt.Printf("kvIdx %d took %s\n", kvIdx, time.Since(start)) + + if err != nil { + n.lg.Error("Generate mask error", "encodingKey", encodingKey, "sampleIdx", sampleIdxInKv, + "error", err.Error()) + return common.Hash{}, err + } + sampleSize := uint64(1 << sampleSizeBits) + sampleIdxByte := sampleIdxInKv * sampleSize + sample := kvData[sampleIdxByte : sampleIdxByte+sampleSize] + encodedBytes := ethstorage.MaskDataInPlace(mask, sample) + return common.BytesToHash(encodedBytes), nil +} diff --git a/ethstorage/node/node.go b/ethstorage/node/node.go index 08480d17..41b7c4ea 100644 --- a/ethstorage/node/node.go +++ b/ethstorage/node/node.go @@ -13,7 +13,6 @@ import ( "time" "github.com/ethereum/go-ethereum" - "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/event" @@ -302,23 +301,8 @@ func (n *EsNode) initMiner(ctx context.Context, cfg *Config) error { cfg.Mining.ZKProverImpl, n.log, ) - getBlobFn := func(kvIdx uint64, kvHash common.Hash) ([]byte, bool, error) { - blob := n.downloader.Cache.GetKeyValueByIndex(kvIdx, kvHash) - if blob != nil { - n.log.Info("Loaded blob from downloader cache", "kvIdx", kvIdx) - return blob, true, nil - } - blob, exist, err := n.storageManager.TryRead(kvIdx, int(n.storageManager.MaxKvSize()), kvHash) - if err != nil { - return nil, false, err - } - if !exist { - return nil, false, fmt.Errorf("kv not found: 
index=%d", kvIdx) - } - n.log.Info("Loaded blob from storage manager", "kvIdx", kvIdx) - return blob, false, nil - } - n.miner = miner.New(cfg.Mining, n.db, n.storageManager, l1api, getBlobFn, &pvr, n.feed, n.log) + dq := NewBlobQuerier(n.downloader, n.storageManager, n.l1Source, n.log) + n.miner = miner.New(cfg.Mining, n.db, n.storageManager, l1api, dq, &pvr, n.feed, n.log) n.log.Info("Initialized miner") return nil } diff --git a/integration_tests/node_mine_test.go b/integration_tests/node_mine_test.go index b6354a38..50e6ffe4 100644 --- a/integration_tests/node_mine_test.go +++ b/integration_tests/node_mine_test.go @@ -22,8 +22,10 @@ import ( "github.com/ethereum/go-ethereum/rpc" "github.com/ethstorage/go-ethstorage/cmd/es-utils/utils" "github.com/ethstorage/go-ethstorage/ethstorage" + "github.com/ethstorage/go-ethstorage/ethstorage/downloader" "github.com/ethstorage/go-ethstorage/ethstorage/eth" "github.com/ethstorage/go-ethstorage/ethstorage/miner" + "github.com/ethstorage/go-ethstorage/ethstorage/node" "github.com/ethstorage/go-ethstorage/ethstorage/p2p/protocol" "github.com/ethstorage/go-ethstorage/ethstorage/prover" "github.com/ethstorage/go-ethstorage/ethstorage/signer" @@ -82,16 +84,10 @@ func TestMining(t *testing.T) { lg, ) db := rawdb.NewMemoryDatabase() - mnr := miner.New(miningConfig, db, storageManager, l1api, func(kvIdx uint64, kvHash common.Hash) ([]byte, bool, error) { - kvData, exist, err := storageManager.TryRead(kvIdx, int(storageManager.MaxKvSize()), kvHash) - if err != nil { - return nil, false, err - } - if !exist { - return nil, false, fmt.Errorf("kv not found: index=%d", kvIdx) - } - return kvData, false, nil - }, &pvr, feed, lg) + bq := node.NewBlobQuerier(&downloader.Downloader{ + Cache: downloader.NewBlobCache(), + }, storageManager, pClient, lg) + mnr := miner.New(miningConfig, db, storageManager, l1api, bq, &pvr, feed, lg) lg.Info("Initialized miner") l1HeadsSub := event.ResubscribeErr(time.Second*10, func(ctx context.Context, err error) (event.Subscription, error) { From 6db138619568410273dc72cfa6db0591e34cd0a9 Mon Sep 17 00:00:00 2001 From: syntrust Date: Thu, 4 Jul 2024 19:59:10 +0800 Subject: [PATCH 08/61] sync querier with cache --- ethstorage/blobs/blob_querier.go | 98 +++++++++++++++++++++++++++++ ethstorage/downloader/blob_cache.go | 22 ++++++- ethstorage/downloader/downloader.go | 31 +++++---- ethstorage/miner/l1_mining_api.go | 2 +- ethstorage/miner/miner_test.go | 4 +- ethstorage/node/blob_querier.go | 86 ------------------------- ethstorage/node/node.go | 3 +- integration_tests/node_mine_test.go | 4 +- 8 files changed, 145 insertions(+), 105 deletions(-) create mode 100644 ethstorage/blobs/blob_querier.go delete mode 100644 ethstorage/node/blob_querier.go diff --git a/ethstorage/blobs/blob_querier.go b/ethstorage/blobs/blob_querier.go new file mode 100644 index 00000000..317c824e --- /dev/null +++ b/ethstorage/blobs/blob_querier.go @@ -0,0 +1,98 @@ +// Copyright 2022-2023, EthStorage. 
+// For license information, see https://github.com/ethstorage/es-node/blob/main/LICENSE + +package blobs + +import ( + "fmt" + "sync" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" + "github.com/ethstorage/go-ethstorage/ethstorage" + "github.com/ethstorage/go-ethstorage/ethstorage/downloader" + "github.com/ethstorage/go-ethstorage/ethstorage/eth" +) + +const sampleSizeBits = 5 + +type BlobQuerier struct { + dlr *downloader.Downloader + sm *ethstorage.StorageManager + l1 *eth.PollingClient + cache sync.Map + lg log.Logger +} + +func NewBlobQuerier(dlr *downloader.Downloader, sm *ethstorage.StorageManager, l1 *eth.PollingClient, lg log.Logger) *BlobQuerier { + n := &BlobQuerier{ + dlr: dlr, + sm: sm, + l1: l1, + lg: lg, + } + n.init() + return n +} + +func (n *BlobQuerier) init() { + ch := make(chan common.Hash) + downloader.SubscribeNewBlobs("blob_querier", ch) + go func() { + for { + blockHash := <-ch + n.lg.Info("Handling block from downloader cache", "blockHash", blockHash) + for _, blob := range n.dlr.Cache.Blobs(blockHash) { + encodedBlob := n.encodeBlob(blob) + n.cache.Store(blob.KvIdx(), encodedBlob) + } + } + }() +} + +func (n *BlobQuerier) encodeBlob(blob downloader.Blob) []byte { + shardIdx := blob.KvIdx() >> n.sm.KvEntriesBits() + encodeType, _ := n.sm.GetShardEncodeType(shardIdx) + miner, _ := n.sm.GetShardMiner(shardIdx) + encodeKey := ethstorage.CalcEncodeKey(blob.Hash(), blob.KvIdx(), miner) + encodedBlob := ethstorage.EncodeChunk(blob.Size(), blob.Data(), encodeType, encodeKey) + n.lg.Info("Encoded blob", "kvIdx", blob.KvIdx(), "miner", miner) + return encodedBlob +} + +func (n *BlobQuerier) GetBlob(kvIdx uint64, kvHash common.Hash) ([]byte, error) { + blob := n.dlr.Cache.GetKeyValueByIndex(kvIdx, kvHash) + if blob != nil { + n.lg.Debug("Loaded blob from downloader cache", "kvIdx", kvIdx) + return blob, nil + } + blob, exist, err := n.sm.TryRead(kvIdx, int(n.sm.MaxKvSize()), kvHash) + if err != nil { + return nil, err + } + if !exist { + return nil, fmt.Errorf("kv not found: index=%d", kvIdx) + } + n.lg.Debug("Loaded blob from storage manager", "kvIdx", kvIdx) + return blob, nil +} + +func (n *BlobQuerier) ReadSample(shardIdx, sampleIdx uint64) (common.Hash, error) { + sampleLenBits := n.sm.MaxKvSizeBits() - sampleSizeBits + kvIdx := sampleIdx >> sampleLenBits + + if value, ok := n.cache.Load(kvIdx); ok { + encodedBlob := value.([]byte) + sampleIdxInKv := sampleIdx % (1 << sampleLenBits) + sampleSize := uint64(1 << sampleSizeBits) + sampleIdxByte := sampleIdxInKv * sampleSize + sample := encodedBlob[sampleIdxByte : sampleIdxByte+sampleSize] + return common.BytesToHash(sample), nil + } + + encodedSample, err := n.sm.ReadSampleUnlocked(shardIdx, sampleIdx) + if err != nil { + return common.Hash{}, err + } + return encodedSample, nil +} diff --git a/ethstorage/downloader/blob_cache.go b/ethstorage/downloader/blob_cache.go index 4ce86f8b..f64a9e60 100644 --- a/ethstorage/downloader/blob_cache.go +++ b/ethstorage/downloader/blob_cache.go @@ -12,6 +12,22 @@ import ( "github.com/ethstorage/go-ethstorage/ethstorage" ) +var dataSubscribers = make(map[string]chan common.Hash) + +func SubscribeNewBlobs(key string, ch chan common.Hash) { + dataSubscribers[key] = ch +} + +func Unsubscribe(key string) { + delete(dataSubscribers, key) +} + +func notifySubscribers(data common.Hash) { + for _, ch := range dataSubscribers { + ch <- data + } +} + type BlobCache struct { blocks map[common.Hash]*blockBlobs mu sync.RWMutex @@ -27,9 +43,11 @@ func (c 
*BlobCache) SetBlockBlobs(block *blockBlobs) { c.mu.Lock() defer c.mu.Unlock() c.blocks[block.hash] = block + + notifySubscribers(block.hash) } -func (c *BlobCache) Blobs(hash common.Hash) []blob { +func (c *BlobCache) Blobs(hash common.Hash) []Blob { c.mu.RLock() defer c.mu.RUnlock() @@ -37,7 +55,7 @@ func (c *BlobCache) Blobs(hash common.Hash) []blob { return nil } - res := []blob{} + res := []Blob{} for _, blob := range c.blocks[hash].blobs { res = append(res, *blob) } diff --git a/ethstorage/downloader/downloader.go b/ethstorage/downloader/downloader.go index 4ab8fffa..3f7f0ebe 100644 --- a/ethstorage/downloader/downloader.go +++ b/ethstorage/downloader/downloader.go @@ -65,18 +65,23 @@ type Downloader struct { mu sync.Mutex } -type blob struct { +type Blob struct { kvIndex *big.Int kvSize *big.Int hash common.Hash data []byte } +func (b *Blob) KvIdx() uint64 { return b.kvIndex.Uint64() } +func (b *Blob) Size() uint64 { return b.kvSize.Uint64() } +func (b *Blob) Data() []byte { return b.data } +func (b *Blob) Hash() common.Hash { return b.hash } + type blockBlobs struct { timestamp uint64 number uint64 hash common.Hash - blobs []*blob + blobs []*Blob } func NewDownloader( @@ -270,7 +275,9 @@ func (s *Downloader) download() { s.log.Error("Save blobs error", "err", err) return } - log.Info("DownloadFinished", "duration(ms)", time.Since(ts).Milliseconds(), "blobs", len(blobs)) + if len(blobs) > 0 { + log.Info("DownloadFinished", "duration(ms)", time.Since(ts).Milliseconds(), "blobs", len(blobs)) + } // save lastDownloadedBlock into database bs := make([]byte, 8) @@ -281,7 +288,7 @@ func (s *Downloader) download() { s.log.Error("Save lastDownloadedBlock into db error", "err", err) return } - s.log.Info("LastDownloadedBlock saved into db", "lastDownloadedBlock", end) + s.log.Debug("LastDownloadedBlock saved into db", "lastDownloadedBlock", end) s.dumpBlobsIfNeeded(blobs) @@ -297,7 +304,7 @@ func (s *Downloader) download() { // 1. Downloading the blobs into the cache when they are not finalized, with the option toCache set to true. // 2. Writing the blobs into the shard file when they are finalized, with the option toCache set to false. // we will attempt to read the blobs from the cache initially. If they don't exist in the cache, we will download them instead. 
-func (s *Downloader) downloadRange(start int64, end int64, toCache bool) ([]blob, error) { +func (s *Downloader) downloadRange(start int64, end int64, toCache bool) ([]Blob, error) { ts := time.Now() if end < start { @@ -312,7 +319,7 @@ func (s *Downloader) downloadRange(start int64, end int64, toCache bool) ([]blob if err != nil { return nil, err } - blobs := []blob{} + blobs := []Blob{} for _, elBlock := range elBlocks { // attempt to read the blobs from the cache first res := s.Cache.Blobs(elBlock.hash) @@ -360,18 +367,20 @@ func (s *Downloader) downloadRange(start int64, end int64, toCache bool) ([]blob } elBlob.data = clBlob.Data blobs = append(blobs, *elBlob) + s.log.Info("Download range", "cache", toCache, "kvIdx", elBlob.kvIndex) } if toCache { s.Cache.SetBlockBlobs(elBlock) } } - - s.log.Info("Download range", "cache", toCache, "start", start, "end", end, "blobNumber", len(blobs), "duration(ms)", time.Since(ts).Milliseconds()) + if len(blobs) > 0 { + s.log.Info("Download range", "cache", toCache, "start", start, "end", end, "blobNumber", len(blobs), "duration(ms)", time.Since(ts).Milliseconds()) + } return blobs, nil } -func (s *Downloader) dumpBlobsIfNeeded(blobs []blob) { +func (s *Downloader) dumpBlobsIfNeeded(blobs []Blob) { if s.dumpDir != "" { for _, blob := range blobs { fileName := filepath.Join(s.dumpDir, fmt.Sprintf("%s.dat", hex.EncodeToString(blob.data[:5]))) @@ -404,7 +413,7 @@ func (s *Downloader) eventsToBlocks(events []types.Log) ([]*blockBlobs, error) { timestamp: res.Time, number: event.BlockNumber, hash: event.BlockHash, - blobs: []*blob{}, + blobs: []*Blob{}, }) } @@ -412,7 +421,7 @@ func (s *Downloader) eventsToBlocks(events []types.Log) ([]*blockBlobs, error) { hash := common.Hash{} copy(hash[:], event.Topics[3][:]) - blob := blob{ + blob := Blob{ kvIndex: big.NewInt(0).SetBytes(event.Topics[1][:]), kvSize: big.NewInt(0).SetBytes(event.Topics[2][:]), hash: hash, diff --git a/ethstorage/miner/l1_mining_api.go b/ethstorage/miner/l1_mining_api.go index eeee8d06..66722bfa 100644 --- a/ethstorage/miner/l1_mining_api.go +++ b/ethstorage/miner/l1_mining_api.go @@ -78,7 +78,7 @@ func (m *l1MiningAPI) GetDataHashes(ctx context.Context, contract common.Address for i := 0; i < len(metas); i++ { var dhash common.Hash copy(dhash[:], metas[i][32-ethstorage.HashSizeInContract:32]) - m.lg.Debug("Get data hash", "kvIndex", kvIdxes[i], "hash", dhash.Hex()) + m.lg.Info("Get data hash", "kvIndex", kvIdxes[i], "hash", dhash.Hex()) hashes = append(hashes, dhash) } return hashes, nil diff --git a/ethstorage/miner/miner_test.go b/ethstorage/miner/miner_test.go index 213483e3..69ff32ab 100644 --- a/ethstorage/miner/miner_test.go +++ b/ethstorage/miner/miner_test.go @@ -18,10 +18,10 @@ import ( "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/event" es "github.com/ethstorage/go-ethstorage/ethstorage" + "github.com/ethstorage/go-ethstorage/ethstorage/blobs" "github.com/ethstorage/go-ethstorage/ethstorage/downloader" "github.com/ethstorage/go-ethstorage/ethstorage/eth" esLog "github.com/ethstorage/go-ethstorage/ethstorage/log" - "github.com/ethstorage/go-ethstorage/ethstorage/node" "github.com/ethstorage/go-ethstorage/ethstorage/p2p/protocol" "github.com/ethstorage/go-ethstorage/ethstorage/prover" ) @@ -72,7 +72,7 @@ func newMiner(t *testing.T, storageMgr *es.StorageManager, client *eth.PollingCl pvr := prover.NewKZGPoseidonProver(zkWorkingDir, defaultConfig.ZKeyFileName, defaultConfig.ZKProverMode, defaultConfig.ZKProverImpl, lg) fd := new(event.Feed) db 
:= rawdb.NewMemoryDatabase() - bq := node.NewBlobQuerier(&downloader.Downloader{ + bq := blobs.NewBlobQuerier(&downloader.Downloader{ Cache: downloader.NewBlobCache(), }, storageMgr, client, lg) miner := New(defaultConfig, db, storageMgr, l1api, bq, &pvr, fd, lg) diff --git a/ethstorage/node/blob_querier.go b/ethstorage/node/blob_querier.go deleted file mode 100644 index 7eb87a6f..00000000 --- a/ethstorage/node/blob_querier.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2022-2023, EthStorage. -// For license information, see https://github.com/ethstorage/es-node/blob/main/LICENSE - -package node - -import ( - "fmt" - "time" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/log" - "github.com/ethstorage/go-ethstorage/ethstorage" - "github.com/ethstorage/go-ethstorage/ethstorage/downloader" - "github.com/ethstorage/go-ethstorage/ethstorage/eth" - "github.com/ethstorage/go-ethstorage/ethstorage/prover" -) - -const sampleSizeBits = 5 - -type BlobQuerier struct { - dlr *downloader.Downloader - sm *ethstorage.StorageManager - l1 *eth.PollingClient - lg log.Logger -} - -func NewBlobQuerier(dlr *downloader.Downloader, sm *ethstorage.StorageManager, l1 *eth.PollingClient, lg log.Logger) *BlobQuerier { - return &BlobQuerier{ - dlr: dlr, - sm: sm, - l1: l1, - lg: lg, - } -} - -func (n *BlobQuerier) GetBlob(kvIdx uint64, kvHash common.Hash) ([]byte, error) { - blob := n.dlr.Cache.GetKeyValueByIndex(kvIdx, kvHash) - if blob != nil { - n.lg.Debug("Loaded blob from downloader cache", "kvIdx", kvIdx) - return blob, nil - } - blob, exist, err := n.sm.TryRead(kvIdx, int(n.sm.MaxKvSize()), kvHash) - if err != nil { - return nil, err - } - if !exist { - return nil, fmt.Errorf("kv not found: index=%d", kvIdx) - } - n.lg.Debug("Loaded blob from storage manager", "kvIdx", kvIdx) - return blob, nil -} - -func (n *BlobQuerier) ReadSample(shardIdx, sampleIdx uint64) (common.Hash, error) { - sampleLenBits := n.sm.MaxKvSizeBits() - sampleSizeBits - kvIdx := sampleIdx >> sampleLenBits - - kvData, kvHash := n.dlr.Cache.GetKeyValueByIndexUnsafe(kvIdx) - if kvData == nil { - encodedSample, err := n.sm.ReadSampleUnlocked(shardIdx, sampleIdx) - if err != nil { - return common.Hash{}, err - } - return encodedSample, nil - } - miner, ok := n.sm.GetShardMiner(shardIdx) - if !ok { - n.lg.Error("Miner not found for shard", "shard", shardIdx) - } - encodingKey := ethstorage.CalcEncodeKey(kvHash, kvIdx, miner) - sampleIdxInKv := sampleIdx % (1 << sampleLenBits) - - start := time.Now() - mask, err := prover.GenerateMask(encodingKey, sampleIdxInKv) - fmt.Printf("kvIdx %d took %s\n", kvIdx, time.Since(start)) - - if err != nil { - n.lg.Error("Generate mask error", "encodingKey", encodingKey, "sampleIdx", sampleIdxInKv, - "error", err.Error()) - return common.Hash{}, err - } - sampleSize := uint64(1 << sampleSizeBits) - sampleIdxByte := sampleIdxInKv * sampleSize - sample := kvData[sampleIdxByte : sampleIdxByte+sampleSize] - encodedBytes := ethstorage.MaskDataInPlace(mask, sample) - return common.BytesToHash(encodedBytes), nil -} diff --git a/ethstorage/node/node.go b/ethstorage/node/node.go index 41b7c4ea..bd5c4b9d 100644 --- a/ethstorage/node/node.go +++ b/ethstorage/node/node.go @@ -20,6 +20,7 @@ import ( ethRPC "github.com/ethereum/go-ethereum/rpc" "github.com/ethstorage/go-ethstorage/ethstorage" "github.com/ethstorage/go-ethstorage/ethstorage/archiver" + "github.com/ethstorage/go-ethstorage/ethstorage/blobs" "github.com/ethstorage/go-ethstorage/ethstorage/downloader" 
"github.com/ethstorage/go-ethstorage/ethstorage/eth" "github.com/ethstorage/go-ethstorage/ethstorage/metrics" @@ -301,7 +302,7 @@ func (n *EsNode) initMiner(ctx context.Context, cfg *Config) error { cfg.Mining.ZKProverImpl, n.log, ) - dq := NewBlobQuerier(n.downloader, n.storageManager, n.l1Source, n.log) + dq := blobs.NewBlobQuerier(n.downloader, n.storageManager, n.l1Source, n.log) n.miner = miner.New(cfg.Mining, n.db, n.storageManager, l1api, dq, &pvr, n.feed, n.log) n.log.Info("Initialized miner") return nil diff --git a/integration_tests/node_mine_test.go b/integration_tests/node_mine_test.go index 50e6ffe4..6f47c45b 100644 --- a/integration_tests/node_mine_test.go +++ b/integration_tests/node_mine_test.go @@ -22,10 +22,10 @@ import ( "github.com/ethereum/go-ethereum/rpc" "github.com/ethstorage/go-ethstorage/cmd/es-utils/utils" "github.com/ethstorage/go-ethstorage/ethstorage" + "github.com/ethstorage/go-ethstorage/ethstorage/blobs" "github.com/ethstorage/go-ethstorage/ethstorage/downloader" "github.com/ethstorage/go-ethstorage/ethstorage/eth" "github.com/ethstorage/go-ethstorage/ethstorage/miner" - "github.com/ethstorage/go-ethstorage/ethstorage/node" "github.com/ethstorage/go-ethstorage/ethstorage/p2p/protocol" "github.com/ethstorage/go-ethstorage/ethstorage/prover" "github.com/ethstorage/go-ethstorage/ethstorage/signer" @@ -84,7 +84,7 @@ func TestMining(t *testing.T) { lg, ) db := rawdb.NewMemoryDatabase() - bq := node.NewBlobQuerier(&downloader.Downloader{ + bq := blobs.NewBlobQuerier(&downloader.Downloader{ Cache: downloader.NewBlobCache(), }, storageManager, pClient, lg) mnr := miner.New(miningConfig, db, storageManager, l1api, bq, &pvr, feed, lg) From 00b52fa5cbf3fe8fac6a716bc198cbddbe1f0599 Mon Sep 17 00:00:00 2001 From: syntrust Date: Fri, 5 Jul 2024 12:51:54 +0800 Subject: [PATCH 09/61] fixes --- ethstorage/blobs/blob_querier.go | 79 +++++++++++++++++++---------- ethstorage/data_file.go | 12 +++-- ethstorage/downloader/blob_cache.go | 14 ----- ethstorage/miner/miner.go | 4 ++ ethstorage/miner/worker.go | 9 ++-- 5 files changed, 67 insertions(+), 51 deletions(-) diff --git a/ethstorage/blobs/blob_querier.go b/ethstorage/blobs/blob_querier.go index 317c824e..a3582271 100644 --- a/ethstorage/blobs/blob_querier.go +++ b/ethstorage/blobs/blob_querier.go @@ -1,4 +1,4 @@ -// Copyright 2022-2023, EthStorage. +// Copyright 2022-2023, es. 
// For license information, see https://github.com/ethstorage/es-node/blob/main/LICENSE package blobs @@ -9,54 +9,71 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" - "github.com/ethstorage/go-ethstorage/ethstorage" + es "github.com/ethstorage/go-ethstorage/ethstorage" "github.com/ethstorage/go-ethstorage/ethstorage/downloader" "github.com/ethstorage/go-ethstorage/ethstorage/eth" ) -const sampleSizeBits = 5 +const ( + BlobQuerierName = "blob-querier" +) type BlobQuerier struct { - dlr *downloader.Downloader - sm *ethstorage.StorageManager - l1 *eth.PollingClient - cache sync.Map - lg log.Logger + encodedBlobs sync.Map + dlr *downloader.Downloader + sm *es.StorageManager + l1 *eth.PollingClient + wg sync.WaitGroup + exitCh chan struct{} + lg log.Logger } -func NewBlobQuerier(dlr *downloader.Downloader, sm *ethstorage.StorageManager, l1 *eth.PollingClient, lg log.Logger) *BlobQuerier { +func NewBlobQuerier(dlr *downloader.Downloader, sm *es.StorageManager, l1 *eth.PollingClient, lg log.Logger) *BlobQuerier { n := &BlobQuerier{ - dlr: dlr, - sm: sm, - l1: l1, - lg: lg, + dlr: dlr, + sm: sm, + l1: l1, + lg: lg, + exitCh: make(chan struct{}), } - n.init() + n.sync() return n } -func (n *BlobQuerier) init() { +func (n *BlobQuerier) sync() { ch := make(chan common.Hash) - downloader.SubscribeNewBlobs("blob_querier", ch) + downloader.SubscribeNewBlobs(BlobQuerierName, ch) go func() { + defer func() { + close(ch) + downloader.Unsubscribe(BlobQuerierName) + n.lg.Info("Downloader cache unsubscribed", "name", BlobQuerierName) + n.wg.Done() + }() for { - blockHash := <-ch - n.lg.Info("Handling block from downloader cache", "blockHash", blockHash) - for _, blob := range n.dlr.Cache.Blobs(blockHash) { - encodedBlob := n.encodeBlob(blob) - n.cache.Store(blob.KvIdx(), encodedBlob) + select { + case blockHash := <-ch: + for _, blob := range n.dlr.Cache.Blobs(blockHash) { + encodedBlob := n.encodeBlob(blob) + n.encodedBlobs.Store(blob.KvIdx(), encodedBlob) + } + case <-n.exitCh: + n.lg.Info("BlobQuerier is exiting from downloader sync loop") + return } } }() + n.wg.Add(1) } func (n *BlobQuerier) encodeBlob(blob downloader.Blob) []byte { shardIdx := blob.KvIdx() >> n.sm.KvEntriesBits() encodeType, _ := n.sm.GetShardEncodeType(shardIdx) miner, _ := n.sm.GetShardMiner(shardIdx) - encodeKey := ethstorage.CalcEncodeKey(blob.Hash(), blob.KvIdx(), miner) - encodedBlob := ethstorage.EncodeChunk(blob.Size(), blob.Data(), encodeType, encodeKey) - n.lg.Info("Encoded blob", "kvIdx", blob.KvIdx(), "miner", miner) + n.lg.Info("Encoding blob from downloader", "kvIdx", blob.KvIdx(), "shardIdx", shardIdx, "encodeType", encodeType, "miner", miner) + encodeKey := es.CalcEncodeKey(blob.Hash(), blob.KvIdx(), miner) + encodedBlob := es.EncodeChunk(blob.Size(), blob.Data(), encodeType, encodeKey) + n.lg.Info("Encoding blob from downloader done", "kvIdx", blob.KvIdx()) return encodedBlob } @@ -78,14 +95,14 @@ func (n *BlobQuerier) GetBlob(kvIdx uint64, kvHash common.Hash) ([]byte, error) } func (n *BlobQuerier) ReadSample(shardIdx, sampleIdx uint64) (common.Hash, error) { - sampleLenBits := n.sm.MaxKvSizeBits() - sampleSizeBits + sampleLenBits := n.sm.MaxKvSizeBits() - es.SampleSizeBits kvIdx := sampleIdx >> sampleLenBits - if value, ok := n.cache.Load(kvIdx); ok { + if value, ok := n.encodedBlobs.Load(kvIdx); ok { encodedBlob := value.([]byte) sampleIdxInKv := sampleIdx % (1 << sampleLenBits) - sampleSize := uint64(1 << sampleSizeBits) - sampleIdxByte := sampleIdxInKv * sampleSize + 
sampleSize := uint64(1 << es.SampleSizeBits) + sampleIdxByte := sampleIdxInKv << es.SampleSizeBits sample := encodedBlob[sampleIdxByte : sampleIdxByte+sampleSize] return common.BytesToHash(sample), nil } @@ -96,3 +113,9 @@ func (n *BlobQuerier) ReadSample(shardIdx, sampleIdx uint64) (common.Hash, error } return encodedSample, nil } + +func (n *BlobQuerier) Close() { + n.lg.Info("Closing blob querier") + close(n.exitCh) + n.wg.Wait() +} diff --git a/ethstorage/data_file.go b/ethstorage/data_file.go index 3c27bdfb..4f945539 100644 --- a/ethstorage/data_file.go +++ b/ethstorage/data_file.go @@ -25,6 +25,8 @@ const ( VERSION = uint64(1) HEADER_SIZE = 4096 + + SampleSizeBits = 5 // 32 bytes ) // A DataFile represents a local file for a consecutive chunks @@ -131,7 +133,7 @@ func (df *DataFile) ContainsKv(kvIdx uint64) bool { } func (df *DataFile) ContainsSample(sampleIdx uint64) bool { - return df.Contains(sampleIdx * 32 / df.chunkSize) + return df.Contains(sampleIdx << SampleSizeBits / df.chunkSize) } func (df *DataFile) ChunkIdxEnd() uint64 { @@ -174,13 +176,13 @@ func (df *DataFile) ReadSample(sampleIdx uint64) (common.Hash, error) { if !df.ContainsSample(sampleIdx) { return common.Hash{}, fmt.Errorf("sample not found") } - - md := make([]byte, 32) - n, err := df.file.ReadAt(md, HEADER_SIZE+int64(sampleIdx*32)-int64(df.chunkIdxStart*df.chunkSize)) + sampleSize := 1 << SampleSizeBits + md := make([]byte, sampleSize) + n, err := df.file.ReadAt(md, HEADER_SIZE+int64(sampleIdx<> sampleLenBits } From 3fe05f04c39235137d7d6a5919e1526e945b77df Mon Sep 17 00:00:00 2001 From: syntrust Date: Fri, 5 Jul 2024 14:39:52 +0800 Subject: [PATCH 10/61] safe copy --- ethstorage/downloader/downloader.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/ethstorage/downloader/downloader.go b/ethstorage/downloader/downloader.go index 3f7f0ebe..b3a5c1db 100644 --- a/ethstorage/downloader/downloader.go +++ b/ethstorage/downloader/downloader.go @@ -74,8 +74,12 @@ type Blob struct { func (b *Blob) KvIdx() uint64 { return b.kvIndex.Uint64() } func (b *Blob) Size() uint64 { return b.kvSize.Uint64() } -func (b *Blob) Data() []byte { return b.data } func (b *Blob) Hash() common.Hash { return b.hash } +func (b *Blob) Data() []byte { + var blob []byte + copy(blob[:], b.data) + return blob +} type blockBlobs struct { timestamp uint64 From c363a524881852d49f00fde5e3ad3015818a50ea Mon Sep 17 00:00:00 2001 From: syntrust Date: Fri, 5 Jul 2024 14:40:22 +0800 Subject: [PATCH 11/61] rename --- .../blobs/{blob_querier.go => blob_reader.go} | 29 +++++++++---------- ethstorage/miner/miner.go | 12 ++++---- ethstorage/miner/miner_test.go | 2 +- ethstorage/miner/worker.go | 22 +++++++------- ethstorage/node/node.go | 2 +- integration_tests/node_mine_test.go | 2 +- 6 files changed, 34 insertions(+), 35 deletions(-) rename ethstorage/blobs/{blob_querier.go => blob_reader.go} (75%) diff --git a/ethstorage/blobs/blob_querier.go b/ethstorage/blobs/blob_reader.go similarity index 75% rename from ethstorage/blobs/blob_querier.go rename to ethstorage/blobs/blob_reader.go index a3582271..afb88f5e 100644 --- a/ethstorage/blobs/blob_querier.go +++ b/ethstorage/blobs/blob_reader.go @@ -15,10 +15,10 @@ import ( ) const ( - BlobQuerierName = "blob-querier" + BlobReaderSubKey = "blob-reader" ) -type BlobQuerier struct { +type BlobReader struct { encodedBlobs sync.Map dlr *downloader.Downloader sm *es.StorageManager @@ -28,8 +28,8 @@ type BlobQuerier struct { lg log.Logger } -func NewBlobQuerier(dlr *downloader.Downloader, sm 
*es.StorageManager, l1 *eth.PollingClient, lg log.Logger) *BlobQuerier { - n := &BlobQuerier{ +func NewBlobReader(dlr *downloader.Downloader, sm *es.StorageManager, l1 *eth.PollingClient, lg log.Logger) *BlobReader { + n := &BlobReader{ dlr: dlr, sm: sm, l1: l1, @@ -40,14 +40,14 @@ func NewBlobQuerier(dlr *downloader.Downloader, sm *es.StorageManager, l1 *eth.P return n } -func (n *BlobQuerier) sync() { +func (n *BlobReader) sync() { ch := make(chan common.Hash) - downloader.SubscribeNewBlobs(BlobQuerierName, ch) + downloader.SubscribeNewBlobs(BlobReaderSubKey, ch) go func() { defer func() { close(ch) - downloader.Unsubscribe(BlobQuerierName) - n.lg.Info("Downloader cache unsubscribed", "name", BlobQuerierName) + downloader.Unsubscribe(BlobReaderSubKey) + n.lg.Info("Blob reader unsubscribed downloader cache.") n.wg.Done() }() for { @@ -58,7 +58,7 @@ func (n *BlobQuerier) sync() { n.encodedBlobs.Store(blob.KvIdx(), encodedBlob) } case <-n.exitCh: - n.lg.Info("BlobQuerier is exiting from downloader sync loop") + n.lg.Info("Blob reader is exiting from downloader sync loop...") return } } @@ -66,18 +66,17 @@ func (n *BlobQuerier) sync() { n.wg.Add(1) } -func (n *BlobQuerier) encodeBlob(blob downloader.Blob) []byte { +func (n *BlobReader) encodeBlob(blob downloader.Blob) []byte { shardIdx := blob.KvIdx() >> n.sm.KvEntriesBits() encodeType, _ := n.sm.GetShardEncodeType(shardIdx) miner, _ := n.sm.GetShardMiner(shardIdx) n.lg.Info("Encoding blob from downloader", "kvIdx", blob.KvIdx(), "shardIdx", shardIdx, "encodeType", encodeType, "miner", miner) encodeKey := es.CalcEncodeKey(blob.Hash(), blob.KvIdx(), miner) encodedBlob := es.EncodeChunk(blob.Size(), blob.Data(), encodeType, encodeKey) - n.lg.Info("Encoding blob from downloader done", "kvIdx", blob.KvIdx()) return encodedBlob } -func (n *BlobQuerier) GetBlob(kvIdx uint64, kvHash common.Hash) ([]byte, error) { +func (n *BlobReader) GetBlob(kvIdx uint64, kvHash common.Hash) ([]byte, error) { blob := n.dlr.Cache.GetKeyValueByIndex(kvIdx, kvHash) if blob != nil { n.lg.Debug("Loaded blob from downloader cache", "kvIdx", kvIdx) @@ -94,7 +93,7 @@ func (n *BlobQuerier) GetBlob(kvIdx uint64, kvHash common.Hash) ([]byte, error) return blob, nil } -func (n *BlobQuerier) ReadSample(shardIdx, sampleIdx uint64) (common.Hash, error) { +func (n *BlobReader) ReadSample(shardIdx, sampleIdx uint64) (common.Hash, error) { sampleLenBits := n.sm.MaxKvSizeBits() - es.SampleSizeBits kvIdx := sampleIdx >> sampleLenBits @@ -114,8 +113,8 @@ func (n *BlobQuerier) ReadSample(shardIdx, sampleIdx uint64) (common.Hash, error return encodedSample, nil } -func (n *BlobQuerier) Close() { - n.lg.Info("Closing blob querier") +func (n *BlobReader) Close() { + n.lg.Info("Blob reader is being closed...") close(n.exitCh) n.wg.Wait() } diff --git a/ethstorage/miner/miner.go b/ethstorage/miner/miner.go index 82952da7..c7d873cf 100644 --- a/ethstorage/miner/miner.go +++ b/ethstorage/miner/miner.go @@ -31,7 +31,7 @@ type L1API interface { type MiningProver interface { GetStorageProof(encodedKVs [][]byte, encodingKey []common.Hash, sampleIdxInKv []uint64) ([]*big.Int, [][]byte, [][]byte, error) } -type DataQuerier interface { +type DataReader interface { GetBlob(kvIdxe uint64, blobHash common.Hash) ([]byte, error) ReadSample(shardIdx, sampleIdx uint64) (common.Hash, error) Close() @@ -54,7 +54,7 @@ func (a *miningInfo) String() string { // Miner creates blocks and searches for proof-of-work values. 
type Miner struct { - dataQuerier DataQuerier + dataReader DataReader feed *event.Feed worker *worker exitCh chan struct{} @@ -70,21 +70,21 @@ func New( db ethdb.Database, storageMgr *ethstorage.StorageManager, api L1API, - dataQuerier DataQuerier, + dr DataReader, prover MiningProver, feed *event.Feed, lg log.Logger, ) *Miner { chainHeadCh := make(chan eth.L1BlockRef, chainHeadChanSize) miner := &Miner{ - dataQuerier: dataQuerier, + dataReader: dr, feed: feed, ChainHeadCh: chainHeadCh, exitCh: make(chan struct{}), startCh: make(chan struct{}), stopCh: make(chan struct{}), lg: lg, - worker: newWorker(*config, db, storageMgr, api, dataQuerier, chainHeadCh, prover, lg), + worker: newWorker(*config, db, storageMgr, api, dr, chainHeadCh, prover, lg), } miner.wg.Add(1) go miner.update() @@ -148,7 +148,7 @@ func (miner *Miner) Stop() { } func (miner *Miner) Close() { - miner.dataQuerier.Close() + miner.dataReader.Close() miner.Stop() miner.lg.Warn("Miner is being closed...") close(miner.exitCh) diff --git a/ethstorage/miner/miner_test.go b/ethstorage/miner/miner_test.go index 69ff32ab..7dd57e84 100644 --- a/ethstorage/miner/miner_test.go +++ b/ethstorage/miner/miner_test.go @@ -72,7 +72,7 @@ func newMiner(t *testing.T, storageMgr *es.StorageManager, client *eth.PollingCl pvr := prover.NewKZGPoseidonProver(zkWorkingDir, defaultConfig.ZKeyFileName, defaultConfig.ZKProverMode, defaultConfig.ZKProverImpl, lg) fd := new(event.Feed) db := rawdb.NewMemoryDatabase() - bq := blobs.NewBlobQuerier(&downloader.Downloader{ + bq := blobs.NewBlobReader(&downloader.Downloader{ Cache: downloader.NewBlobCache(), }, storageMgr, client, lg) miner := New(defaultConfig, db, storageMgr, l1api, bq, &pvr, fd, lg) diff --git a/ethstorage/miner/worker.go b/ethstorage/miner/worker.go index f9173e03..80010026 100644 --- a/ethstorage/miner/worker.go +++ b/ethstorage/miner/worker.go @@ -95,12 +95,12 @@ type result struct { // worker is the main object which takes care of storage mining // and submit the mining result tx to the L1 chain. 
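// The worker fetches the kv data and encoded samples needed for each proof
// attempt through the DataReader rather than reading the storage files directly.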
type worker struct { - config Config - l1API L1API - dataQuerier DataQuerier - prover MiningProver - db ethdb.Database - storageMgr *es.StorageManager + config Config + l1API L1API + dataReader DataReader + prover MiningProver + db ethdb.Database + storageMgr *es.StorageManager chainHeadCh chan eth.L1BlockRef startCh chan uint64 @@ -125,7 +125,7 @@ func newWorker( db ethdb.Database, storageMgr *es.StorageManager, api L1API, - dataQuerier DataQuerier, + dr DataReader, chainHeadCh chan eth.L1BlockRef, prover MiningProver, lg log.Logger, @@ -139,7 +139,7 @@ func newWorker( worker := &worker{ config: config, l1API: api, - dataQuerier: dataQuerier, + dataReader: dr, prover: prover, chainHeadCh: chainHeadCh, shardTaskMap: make(map[uint64]task), @@ -593,7 +593,7 @@ func (w *worker) computeHash(shardIdx uint64, hash0 common.Hash) (common.Hash, [ es.SampleSizeBits, shardIdx, w.config.RandomChecks, - w.dataQuerier.ReadSample, + w.dataReader.ReadSample, hash0, ) } @@ -614,7 +614,7 @@ func (w *worker) getMiningData(t *task, sampleIdx []uint64) ([][]byte, []uint64, return nil, nil, nil, nil, nil, err } for i := uint64(0); i < checksLen; i++ { - kvData, err := w.dataQuerier.GetBlob(kvIdxs[i], kvHashes[i]) + kvData, err := w.dataReader.GetBlob(kvIdxs[i], kvHashes[i]) if err != nil { w.lg.Error("Get data error", "index", kvIdxs[i], "error", err.Error()) return nil, nil, nil, nil, nil, err @@ -622,7 +622,7 @@ func (w *worker) getMiningData(t *task, sampleIdx []uint64) ([][]byte, []uint64, dataSet[i] = kvData sampleIdxsInKv[i] = sampleIdx[i] % (1 << sampleLenBits) encodingKeys[i] = es.CalcEncodeKey(kvHashes[i], kvIdxs[i], t.miner) - encodedSample, err := w.dataQuerier.ReadSample(t.shardIdx, sampleIdx[i]) + encodedSample, err := w.dataReader.ReadSample(t.shardIdx, sampleIdx[i]) if err != nil { w.lg.Error("Read sample error", "index", sampleIdx[i], "error", err.Error()) return nil, nil, nil, nil, nil, err diff --git a/ethstorage/node/node.go b/ethstorage/node/node.go index bd5c4b9d..d09aeaf7 100644 --- a/ethstorage/node/node.go +++ b/ethstorage/node/node.go @@ -302,7 +302,7 @@ func (n *EsNode) initMiner(ctx context.Context, cfg *Config) error { cfg.Mining.ZKProverImpl, n.log, ) - dq := blobs.NewBlobQuerier(n.downloader, n.storageManager, n.l1Source, n.log) + dq := blobs.NewBlobReader(n.downloader, n.storageManager, n.l1Source, n.log) n.miner = miner.New(cfg.Mining, n.db, n.storageManager, l1api, dq, &pvr, n.feed, n.log) n.log.Info("Initialized miner") return nil diff --git a/integration_tests/node_mine_test.go b/integration_tests/node_mine_test.go index 6f47c45b..9d4e7375 100644 --- a/integration_tests/node_mine_test.go +++ b/integration_tests/node_mine_test.go @@ -84,7 +84,7 @@ func TestMining(t *testing.T) { lg, ) db := rawdb.NewMemoryDatabase() - bq := blobs.NewBlobQuerier(&downloader.Downloader{ + bq := blobs.NewBlobReader(&downloader.Downloader{ Cache: downloader.NewBlobCache(), }, storageManager, pClient, lg) mnr := miner.New(miningConfig, db, storageManager, l1api, bq, &pvr, feed, lg) From 2682b53395eec4e99cdd26b638bcd0156249959f Mon Sep 17 00:00:00 2001 From: syntrust Date: Fri, 5 Jul 2024 15:00:06 +0800 Subject: [PATCH 12/61] revert --- ethstorage/prover/utils.go | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/ethstorage/prover/utils.go b/ethstorage/prover/utils.go index 9f648fe4..59810e7b 100644 --- a/ethstorage/prover/utils.go +++ b/ethstorage/prover/utils.go @@ -14,7 +14,6 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/common" 
"github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethstorage/go-ethstorage/ethstorage/encoder" "github.com/iden3/go-rapidsnark/types" ) @@ -275,17 +274,3 @@ func GenerateInput(encodingKey common.Hash, sampleIdx uint64) ([]byte, error) { } return json.Marshal(inputObj) } - -func GenerateMask(encodingKey common.Hash, sampleIdx uint64) ([]byte, error) { - if int(sampleIdx) >= eth.FieldElementsPerBlob { - return nil, fmt.Errorf("sample index out of scope") - } - encodingKeyMod := fr.Modulus().Mod(encodingKey.Big(), fr.Modulus()) - masks, err := encoder.Encode(common.BigToHash(encodingKeyMod), eth.FieldElementsPerBlob*32) - if err != nil { - return nil, err - } - bytesIdx := sampleIdx * 32 - mask := masks[bytesIdx : bytesIdx+32] - return mask, nil -} From 527de46c6e864545a7c5931c6ef0098a8f070520 Mon Sep 17 00:00:00 2001 From: syntrust Date: Fri, 5 Jul 2024 15:00:36 +0800 Subject: [PATCH 13/61] comments --- ethstorage/blobs/blob_reader.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/ethstorage/blobs/blob_reader.go b/ethstorage/blobs/blob_reader.go index afb88f5e..4bca733b 100644 --- a/ethstorage/blobs/blob_reader.go +++ b/ethstorage/blobs/blob_reader.go @@ -18,6 +18,8 @@ const ( BlobReaderSubKey = "blob-reader" ) +// BlobReader provides unified interface for the miner to read blobs and samples +// from StorageManager and downloader cache. type BlobReader struct { encodedBlobs sync.Map dlr *downloader.Downloader @@ -40,6 +42,8 @@ func NewBlobReader(dlr *downloader.Downloader, sm *es.StorageManager, l1 *eth.Po return n } +// In order to provide miner with encoded samples in a timely manner, +// BlobReader is tracing the downloader and encoding newly cached blobs. func (n *BlobReader) sync() { ch := make(chan common.Hash) downloader.SubscribeNewBlobs(BlobReaderSubKey, ch) From c92993a4b90e2e6e2bc523316ae12b4d7f0e87fe Mon Sep 17 00:00:00 2001 From: syntrust Date: Fri, 5 Jul 2024 15:00:50 +0800 Subject: [PATCH 14/61] minor --- ethstorage/node/node.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ethstorage/node/node.go b/ethstorage/node/node.go index d09aeaf7..56ffb952 100644 --- a/ethstorage/node/node.go +++ b/ethstorage/node/node.go @@ -302,8 +302,8 @@ func (n *EsNode) initMiner(ctx context.Context, cfg *Config) error { cfg.Mining.ZKProverImpl, n.log, ) - dq := blobs.NewBlobReader(n.downloader, n.storageManager, n.l1Source, n.log) - n.miner = miner.New(cfg.Mining, n.db, n.storageManager, l1api, dq, &pvr, n.feed, n.log) + br := blobs.NewBlobReader(n.downloader, n.storageManager, n.l1Source, n.log) + n.miner = miner.New(cfg.Mining, n.db, n.storageManager, l1api, br, &pvr, n.feed, n.log) n.log.Info("Initialized miner") return nil } From 784bfea0afa420925db38669f2477aa90c5e9353 Mon Sep 17 00:00:00 2001 From: syntrust Date: Fri, 5 Jul 2024 18:19:20 +0800 Subject: [PATCH 15/61] use billy --- ethstorage/downloader/blob_cache.go | 112 ++++++++++++++++++++++++---- ethstorage/downloader/downloader.go | 12 ++- ethstorage/node/node.go | 2 +- go.mod | 1 + go.sum | 2 + 5 files changed, 111 insertions(+), 18 deletions(-) diff --git a/ethstorage/downloader/blob_cache.go b/ethstorage/downloader/blob_cache.go index bdcae02c..6a2a3f70 100644 --- a/ethstorage/downloader/blob_cache.go +++ b/ethstorage/downloader/blob_cache.go @@ -5,40 +5,82 @@ package downloader import ( "bytes" + "os" + "path/filepath" "sync" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rlp" + 
"github.com/holiman/billy" "github.com/ethstorage/go-ethstorage/ethstorage" ) +const ( + blobSize = params.BlobTxFieldElementsPerBlob * params.BlobTxBytesPerFieldElement + maxBlobsPerTransaction = params.MaxBlobGasPerBlock / params.BlobTxBlobGasPerBlob + blobCacheDir = "cached_blobs" +) + type BlobCache struct { - blocks map[common.Hash]*blockBlobs + store billy.Database + lookup map[common.Hash]uint64 // Lookup table mapping hashes to blob billy entries mu sync.RWMutex } func NewBlobCache() *BlobCache { return &BlobCache{ - blocks: map[common.Hash]*blockBlobs{}, + lookup: make(map[common.Hash]uint64), + } +} + +func (c *BlobCache) Init(datadir string) error { + cbdir := filepath.Join(datadir, blobCacheDir) + if err := os.MkdirAll(cbdir, 0700); err != nil { + return err + } + store, err := billy.Open(billy.Options{Path: cbdir, Repair: true}, newSlotter(), nil) + if err != nil { + return err } + c.store = store + return nil } -func (c *BlobCache) SetBlockBlobs(block *blockBlobs) { +func (c *BlobCache) SetBlockBlobs(block *blockBlobs) error { + rlpBlock, err := rlp.EncodeToBytes(block) + if err != nil { + log.Error("Failed to encode transaction for storage", "hash", block.hash, "err", err) + return err + } + id, err := c.store.Put(rlpBlock) + if err != nil { + log.Error("Failed to write blob into storage", "hash", block.hash, "err", err) + return err + } + c.mu.Lock() - defer c.mu.Unlock() - c.blocks[block.hash] = block + c.lookup[block.hash] = id + c.mu.Unlock() + return nil } func (c *BlobCache) Blobs(hash common.Hash) []blob { c.mu.RLock() - defer c.mu.RUnlock() - - if _, exist := c.blocks[hash]; !exist { + id, ok := c.lookup[hash] + c.mu.RUnlock() + if !ok { + return nil + } + block, err := c.fromIdToBlock(id) + if err != nil { return nil } res := []blob{} - for _, blob := range c.blocks[hash].blobs { + for _, blob := range block.blobs { res = append(res, *blob) } return res @@ -47,10 +89,14 @@ func (c *BlobCache) Blobs(hash common.Hash) []blob { func (c *BlobCache) GetKeyValueByIndex(idx uint64, hash common.Hash) []byte { c.mu.RLock() defer c.mu.RUnlock() - - for _, block := range c.blocks { + for _, id := range c.lookup { + block, err := c.fromIdToBlock(id) + if err != nil { + return nil + } for _, blob := range block.blobs { - if blob.kvIndex.Uint64() == idx && bytes.Equal(blob.hash[0:ethstorage.HashSizeInContract], hash[0:ethstorage.HashSizeInContract]) { + if blob.kvIndex.Uint64() == idx && + bytes.Equal(blob.hash[0:ethstorage.HashSizeInContract], hash[0:ethstorage.HashSizeInContract]) { return blob.data } } @@ -65,9 +111,47 @@ func (c *BlobCache) Cleanup(finalized uint64) { c.mu.Lock() defer c.mu.Unlock() - for hash, block := range c.blocks { + for hash, id := range c.lookup { + block, err := c.fromIdToBlock(id) + if err != nil { + log.Warn("Failed to get block from id", "id", id, "err", err) + continue + } if block.number <= finalized { - delete(c.blocks, hash) + if err := c.store.Delete(id); err != nil { + log.Error("Failed to delete block from id", "id", id, "err", err) + } + delete(c.lookup, hash) } } } + +func (c *BlobCache) fromIdToBlock(id uint64) (*blockBlobs, error) { + data, err := c.store.Get(id) + if err != nil { + log.Error("Failed to get block from id", "id", id, "err", err) + return nil, err + } + item := new(blockBlobs) + if err := rlp.DecodeBytes(data, item); err != nil { + log.Error("Failed to decode block", "id", id, "err", err) + return nil, err + } + return item, nil +} + +func (c *BlobCache) Close() error { + return c.store.Close() +} + +// newSlotter creates 
a helper method for the Billy datastore that returns the +// individual shelf sizes used to store blobs in. +func newSlotter() func() (uint32, bool) { + var slotsize uint32 + + return func() (size uint32, done bool) { + slotsize += blobSize + finished := slotsize > maxBlobsPerTransaction*blobSize + return slotsize, finished + } +} diff --git a/ethstorage/downloader/downloader.go b/ethstorage/downloader/downloader.go index 0ade7bdb..70f311ad 100644 --- a/ethstorage/downloader/downloader.go +++ b/ethstorage/downloader/downloader.go @@ -110,7 +110,10 @@ func NewDownloader( } // Start starts up the state loop. -func (s *Downloader) Start() error { +func (s *Downloader) Start(datadir string) error { + if err := s.Cache.Init(datadir); err != nil { + return err + } // user does NOT specify a download start in the flag if s.lastDownloadBlock == 0 { bs, err := s.db.Get(append(downloaderPrefix, lastDownloadKey...)) @@ -153,7 +156,7 @@ func (s *Downloader) Start() error { func (s *Downloader) Close() error { s.done <- struct{}{} s.wg.Wait() - return nil + return s.Cache.Close() } func (s *Downloader) OnL1Finalized(finalized uint64) { @@ -368,7 +371,10 @@ func (s *Downloader) downloadRange(start int64, end int64, toCache bool) ([]blob blobs = append(blobs, *elBlob) } if toCache { - s.Cache.SetBlockBlobs(elBlock) + if err := s.Cache.SetBlockBlobs(elBlock); err != nil { + s.log.Error("Failed to cache blobs", "block", elBlock.number, "err", err) + return nil, err + } } } diff --git a/ethstorage/node/node.go b/ethstorage/node/node.go index aa3deae1..4a94251b 100644 --- a/ethstorage/node/node.go +++ b/ethstorage/node/node.go @@ -327,7 +327,7 @@ func (n *EsNode) Start(ctx context.Context, cfg *Config) error { n.miner.Start() } - if err := n.downloader.Start(); err != nil { + if err := n.downloader.Start(cfg.DataDir); err != nil { n.log.Error("Could not start a downloader", "err", err) return err } diff --git a/go.mod b/go.mod index 932ee4c2..3444e81a 100644 --- a/go.mod +++ b/go.mod @@ -58,6 +58,7 @@ require ( github.com/hashicorp/go-bexpr v0.1.11 // indirect github.com/hashicorp/golang-lru/arc/v2 v2.0.5 // indirect github.com/herumi/bls-eth-go-binary v1.28.1 // indirect + github.com/holiman/billy v0.0.0-20240322075458-72a4e81ec6da // indirect github.com/huin/goupnp v1.3.0 // indirect github.com/jackpal/go-nat-pmp v1.0.2 // indirect github.com/kilic/bls12-381 v0.1.1-0.20220929213557-ca162e8a70f4 // indirect diff --git a/go.sum b/go.sum index b807f7b8..dec203a3 100644 --- a/go.sum +++ b/go.sum @@ -300,6 +300,8 @@ github.com/herumi/bls-eth-go-binary v1.28.1 h1:fcIZ48y5EE9973k05XjE8+P3YiQgjZz4J github.com/herumi/bls-eth-go-binary v1.28.1/go.mod h1:luAnRm3OsMQeokhGzpYmc0ZKwawY7o87PUEP11Z7r7U= github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7 h1:3JQNjnMRil1yD0IfZKHF9GxxWKDJGj8I0IqOUol//sw= github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7/go.mod h1:5GuXa7vkL8u9FkFuWdVvfR5ix8hRB7DbOAaYULamFpc= +github.com/holiman/billy v0.0.0-20240322075458-72a4e81ec6da h1:8qEhdMGSUx67L2s5aGQinJhOwLfIRKLRBHPQq8m6WxE= +github.com/holiman/billy v0.0.0-20240322075458-72a4e81ec6da/go.mod h1:5GuXa7vkL8u9FkFuWdVvfR5ix8hRB7DbOAaYULamFpc= github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao= github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA= github.com/holiman/uint256 v1.2.3 h1:K8UWO1HUJpRMXBxbmaY1Y8IAMZC/RsKB+ArEnnK4l5o= From 374b3a138984661d78fefc52f8018359b6c10270 Mon Sep 17 00:00:00 2001 From: syntrust Date: Fri, 5 Jul 2024 19:20:01 
+0800 Subject: [PATCH 16/61] fix comments --- ethstorage/blobs/blob_reader.go | 15 +++++++++++---- ethstorage/downloader/blob_cache.go | 26 +++++++++++++++++++------- ethstorage/downloader/downloader.go | 2 +- 3 files changed, 31 insertions(+), 12 deletions(-) diff --git a/ethstorage/blobs/blob_reader.go b/ethstorage/blobs/blob_reader.go index 4bca733b..e712b038 100644 --- a/ethstorage/blobs/blob_reader.go +++ b/ethstorage/blobs/blob_reader.go @@ -45,22 +45,29 @@ func NewBlobReader(dlr *downloader.Downloader, sm *es.StorageManager, l1 *eth.Po // In order to provide miner with encoded samples in a timely manner, // BlobReader is tracing the downloader and encoding newly cached blobs. func (n *BlobReader) sync() { - ch := make(chan common.Hash) - downloader.SubscribeNewBlobs(BlobReaderSubKey, ch) + var ( + iCh = make(chan common.Hash) + oCh = make(chan common.Hash) + ) + downloader.SubscribeCachedBlobs(BlobReaderSubKey, iCh, oCh) go func() { defer func() { - close(ch) + close(iCh) + close(oCh) downloader.Unsubscribe(BlobReaderSubKey) n.lg.Info("Blob reader unsubscribed downloader cache.") n.wg.Done() }() for { select { - case blockHash := <-ch: + case blockHash := <-iCh: for _, blob := range n.dlr.Cache.Blobs(blockHash) { encodedBlob := n.encodeBlob(blob) n.encodedBlobs.Store(blob.KvIdx(), encodedBlob) } + case blockHash := <-oCh: + n.encodedBlobs.Delete(blockHash) + n.lg.Info("Blob reader deleted encoded blobs of block", "blockHash", blockHash) case <-n.exitCh: n.lg.Info("Blob reader is exiting from downloader sync loop...") return diff --git a/ethstorage/downloader/blob_cache.go b/ethstorage/downloader/blob_cache.go index 814c18f5..e145e366 100644 --- a/ethstorage/downloader/blob_cache.go +++ b/ethstorage/downloader/blob_cache.go @@ -12,18 +12,29 @@ import ( "github.com/ethstorage/go-ethstorage/ethstorage" ) -var dataSubscribers = make(map[string]chan common.Hash) +var ( + storeSubscribers = make(map[string]chan common.Hash) + clearSubscribers = make(map[string]chan common.Hash) +) -func SubscribeNewBlobs(key string, ch chan common.Hash) { - dataSubscribers[key] = ch +func SubscribeCachedBlobs(key string, ich, och chan common.Hash) { + storeSubscribers[key] = ich + clearSubscribers[key] = och } func Unsubscribe(key string) { - delete(dataSubscribers, key) + delete(storeSubscribers, key) + delete(clearSubscribers, key) +} + +func notifyStore(data common.Hash) { + for _, ch := range storeSubscribers { + ch <- data + } } -func notifySubscribers(data common.Hash) { - for _, ch := range dataSubscribers { +func notifyClear(data common.Hash) { + for _, ch := range clearSubscribers { ch <- data } } @@ -44,7 +55,7 @@ func (c *BlobCache) SetBlockBlobs(block *blockBlobs) { defer c.mu.Unlock() c.blocks[block.hash] = block - notifySubscribers(block.hash) + notifyStore(block.hash) } func (c *BlobCache) Blobs(hash common.Hash) []Blob { @@ -86,6 +97,7 @@ func (c *BlobCache) Cleanup(finalized uint64) { for hash, block := range c.blocks { if block.number <= finalized { delete(c.blocks, hash) + notifyClear(hash) } } } diff --git a/ethstorage/downloader/downloader.go b/ethstorage/downloader/downloader.go index f145a734..02f74f61 100644 --- a/ethstorage/downloader/downloader.go +++ b/ethstorage/downloader/downloader.go @@ -76,7 +76,7 @@ func (b *Blob) KvIdx() uint64 { return b.kvIndex.Uint64() } func (b *Blob) Size() uint64 { return b.kvSize.Uint64() } func (b *Blob) Hash() common.Hash { return b.hash } func (b *Blob) Data() []byte { - var blob []byte + blob := make([]byte, len(b.data)) copy(blob[:], b.data) 
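	// hand out a copy so callers cannot mutate the cached blob data in place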
return blob } From 71837dec042b65f8bfdd7aa9db78193b50e57cc3 Mon Sep 17 00:00:00 2001 From: syntrust Date: Mon, 8 Jul 2024 10:32:58 +0800 Subject: [PATCH 17/61] close the dependency last --- ethstorage/miner/miner.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ethstorage/miner/miner.go b/ethstorage/miner/miner.go index c7d873cf..0042f45b 100644 --- a/ethstorage/miner/miner.go +++ b/ethstorage/miner/miner.go @@ -148,11 +148,11 @@ func (miner *Miner) Stop() { } func (miner *Miner) Close() { - miner.dataReader.Close() miner.Stop() miner.lg.Warn("Miner is being closed...") close(miner.exitCh) miner.wg.Wait() + miner.dataReader.Close() } func (miner *Miner) Mining() bool { From dcedfa7fe021b55dc0e1fab937c27f17d56e0c62 Mon Sep 17 00:00:00 2001 From: syntrust Date: Mon, 8 Jul 2024 19:03:36 +0800 Subject: [PATCH 18/61] tests --- ethstorage/downloader/blob_cache.go | 19 +++-- ethstorage/downloader/blob_cache_test.go | 93 ++++++++++++++++++++++++ ethstorage/downloader/downloader.go | 68 +++++++++++++++++ 3 files changed, 174 insertions(+), 6 deletions(-) create mode 100644 ethstorage/downloader/blob_cache_test.go diff --git a/ethstorage/downloader/blob_cache.go b/ethstorage/downloader/blob_cache.go index 6a2a3f70..62c2d9a9 100644 --- a/ethstorage/downloader/blob_cache.go +++ b/ethstorage/downloader/blob_cache.go @@ -26,7 +26,7 @@ const ( type BlobCache struct { store billy.Database - lookup map[common.Hash]uint64 // Lookup table mapping hashes to blob billy entries + lookup map[common.Hash]uint64 // Lookup table mapping hashes to blob billy entries id mu sync.RWMutex } @@ -64,6 +64,8 @@ func (c *BlobCache) SetBlockBlobs(block *blockBlobs) error { c.mu.Lock() c.lookup[block.hash] = id c.mu.Unlock() + + log.Info("Set blockBlobs to cache", "id", id, "block", block.number) return nil } @@ -74,7 +76,8 @@ func (c *BlobCache) Blobs(hash common.Hash) []blob { if !ok { return nil } - block, err := c.fromIdToBlock(id) + log.Info("Blobs from cache", "hash", hash, "id", id) + block, err := c.getBlockBlobsById(id) if err != nil { return nil } @@ -89,8 +92,9 @@ func (c *BlobCache) Blobs(hash common.Hash) []blob { func (c *BlobCache) GetKeyValueByIndex(idx uint64, hash common.Hash) []byte { c.mu.RLock() defer c.mu.RUnlock() + for _, id := range c.lookup { - block, err := c.fromIdToBlock(id) + block, err := c.getBlockBlobsById(id) if err != nil { return nil } @@ -112,7 +116,7 @@ func (c *BlobCache) Cleanup(finalized uint64) { defer c.mu.Unlock() for hash, id := range c.lookup { - block, err := c.fromIdToBlock(id) + block, err := c.getBlockBlobsById(id) if err != nil { log.Warn("Failed to get block from id", "id", id, "err", err) continue @@ -122,11 +126,12 @@ func (c *BlobCache) Cleanup(finalized uint64) { log.Error("Failed to delete block from id", "id", id, "err", err) } delete(c.lookup, hash) + log.Info("Cleanup deleted", "finalized", finalized, "block", block.number, "id", id) } } } -func (c *BlobCache) fromIdToBlock(id uint64) (*blockBlobs, error) { +func (c *BlobCache) getBlockBlobsById(id uint64) (*blockBlobs, error) { data, err := c.store.Get(id) if err != nil { log.Error("Failed to get block from id", "id", id, "err", err) @@ -137,6 +142,7 @@ func (c *BlobCache) fromIdToBlock(id uint64) (*blockBlobs, error) { log.Error("Failed to decode block", "id", id, "err", err) return nil, err } + log.Debug("Get blockBlobs by id", "id", id, "blockBlobs", item) return item, nil } @@ -151,7 +157,8 @@ func newSlotter() func() (uint32, bool) { return func() (size uint32, done bool) { slotsize += 
blobSize - finished := slotsize > maxBlobsPerTransaction*blobSize + finished := slotsize >= maxBlobsPerTransaction*blobSize + log.Debug("new slotter", "slotSize", slotsize, "finished", finished) return slotsize, finished } } diff --git a/ethstorage/downloader/blob_cache_test.go b/ethstorage/downloader/blob_cache_test.go new file mode 100644 index 00000000..55c986c4 --- /dev/null +++ b/ethstorage/downloader/blob_cache_test.go @@ -0,0 +1,93 @@ +package downloader + +import ( + "fmt" + "math/big" + "os" + "path/filepath" + "reflect" + "testing" + + "github.com/ethereum/go-ethereum/common" +) + +func TestBlobCache(t *testing.T) { + tmpDir := t.TempDir() + datadir := filepath.Join(tmpDir, "datadir") + err := os.MkdirAll(datadir, 0700) + if err != nil { + t.Fatalf("Failed to create datadir: %v", err) + } + t.Logf("datadir %s", datadir) + cache := NewBlobCache() + + err = cache.Init(datadir) + if err != nil { + t.Fatalf("Failed to initialize BlobCache: %v", err) + } + + var blobLen uint64 = 4 + block := newBlockBlobs(0, blobLen) + block.number = 10 + + err = cache.SetBlockBlobs(block) + if err != nil { + t.Fatalf("Failed to set block blobs: %v", err) + } + + blobs := cache.Blobs(block.hash) + if len(blobs) != len(block.blobs) { + t.Fatalf("Unexpected number of blobs: got %d, want %d", len(blobs), len(block.blobs)) + } + + for i, blob := range block.blobs { + blobData := cache.GetKeyValueByIndex(uint64(i), blob.hash) + if !reflect.DeepEqual(blobData, blob.data) { + t.Fatalf("Unexpected blob data at index %d: got %+v, want %+v", i, blobData, blob.data) + } + } + + cache.Cleanup(5) + blobsAfterCleanup := cache.Blobs(block.hash) + if len(blobsAfterCleanup) != len(block.blobs) { + t.Fatalf("Unexpected number of blobs after cleanup: got %d, want %d", len(blobsAfterCleanup), len(block.blobs)) + } + + err = cache.Close() + if err != nil { + t.Fatalf("Failed to close BlobCache: %v", err) + } +} + +func newBlockBlobs(blockIdx, blobLen uint64) *blockBlobs { + block := &blockBlobs{ + hash: common.BigToHash(new(big.Int).SetUint64(blockIdx)), + blobs: make([]*blob, blobLen), + } + for i := uint64(0); i < blobLen; i++ { + kvIdx := new(big.Int).SetUint64(blockIdx*blobLen + i) + blob := &blob{ + kvIndex: kvIdx, + hash: common.BigToHash(kvIdx), + data: []byte(fmt.Sprintf("blob data %d", i)), + } + block.blobs[i] = blob + } + return block +} + +func TestNewSlotter(t *testing.T) { + slotter := newSlotter() + var lastSize uint32 + for i := 0; i < 10; i++ { + size, done := slotter() + lastSize = size + if done { + break + } + } + expected := uint32(maxBlobsPerTransaction * blobSize) + if lastSize != expected { + t.Errorf("Slotter returned incorrect total size: got %d, want %d", lastSize, expected) + } +} diff --git a/ethstorage/downloader/downloader.go b/ethstorage/downloader/downloader.go index 70f311ad..0144bf6c 100644 --- a/ethstorage/downloader/downloader.go +++ b/ethstorage/downloader/downloader.go @@ -9,6 +9,7 @@ import ( "encoding/binary" "encoding/hex" "fmt" + "io" "math/big" "os" "path/filepath" @@ -19,6 +20,7 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rpc" "github.com/ethstorage/go-ethstorage/ethstorage" @@ -72,6 +74,39 @@ type blob struct { data []byte } +func (b *blob) String() string { + return fmt.Sprintf("blob{kvIndex: %d, hash: %x, data: %s}", b.kvIndex, b.hash, b.data) +} + +func (b *blob) EncodeRLP(w io.Writer) error { + return 
rlp.Encode(w, []interface{}{ + b.kvIndex, + b.kvSize, + b.hash, + b.data, + }) +} + +func (b *blob) DecodeRLP(s *rlp.Stream) error { + var decodedData struct { + KvIndex *big.Int + KvSize *big.Int + Hash common.Hash + Data []byte + } + + if err := s.Decode(&decodedData); err != nil { + return err + } + + b.kvIndex = decodedData.KvIndex + b.kvSize = decodedData.KvSize + b.hash = decodedData.Hash + b.data = decodedData.Data + + return nil +} + type blockBlobs struct { timestamp uint64 number uint64 @@ -79,6 +114,39 @@ type blockBlobs struct { blobs []*blob } +func (b *blockBlobs) String() string { + return fmt.Sprintf("blockBlobs{number: %d, timestamp: %d, hash: %x, blobs: %d}", b.number, b.timestamp, b.hash, len(b.blobs)) +} + +func (bb *blockBlobs) EncodeRLP(w io.Writer) error { + return rlp.Encode(w, []interface{}{ + bb.timestamp, + bb.number, + bb.hash, + bb.blobs, + }) +} + +func (bb *blockBlobs) DecodeRLP(s *rlp.Stream) error { + var decodedData struct { + Timestamp uint64 + Number uint64 + Hash common.Hash + Blobs []*blob + } + + if err := s.Decode(&decodedData); err != nil { + return err + } + + bb.timestamp = decodedData.Timestamp + bb.number = decodedData.Number + bb.hash = decodedData.Hash + bb.blobs = decodedData.Blobs + + return nil +} + func NewDownloader( l1Source *eth.PollingClient, l1Beacon *eth.BeaconClient, From c7431c9d45052bed89d74c286136bf20385afc17 Mon Sep 17 00:00:00 2001 From: syntrust Date: Tue, 9 Jul 2024 11:41:06 +0800 Subject: [PATCH 19/61] fix encode chunk size --- ethstorage/blobs/blob_reader.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ethstorage/blobs/blob_reader.go b/ethstorage/blobs/blob_reader.go index e712b038..dd02167a 100644 --- a/ethstorage/blobs/blob_reader.go +++ b/ethstorage/blobs/blob_reader.go @@ -83,7 +83,7 @@ func (n *BlobReader) encodeBlob(blob downloader.Blob) []byte { miner, _ := n.sm.GetShardMiner(shardIdx) n.lg.Info("Encoding blob from downloader", "kvIdx", blob.KvIdx(), "shardIdx", shardIdx, "encodeType", encodeType, "miner", miner) encodeKey := es.CalcEncodeKey(blob.Hash(), blob.KvIdx(), miner) - encodedBlob := es.EncodeChunk(blob.Size(), blob.Data(), encodeType, encodeKey) + encodedBlob := es.EncodeChunk(n.sm.MaxKvSize(), blob.Data(), encodeType, encodeKey) return encodedBlob } From 3b25881f8974538c5a6c26cd7460c8e8e2fee5b1 Mon Sep 17 00:00:00 2001 From: syntrust Date: Tue, 9 Jul 2024 18:28:30 +0800 Subject: [PATCH 20/61] use encoded blob in cache --- ethstorage/blobs/blob_reader.go | 93 +++++------------------------ ethstorage/downloader/blob_cache.go | 60 +++++++------------ ethstorage/downloader/downloader.go | 39 ++++++------ ethstorage/miner/miner.go | 2 - ethstorage/miner/miner_test.go | 4 +- ethstorage/node/node.go | 15 +++-- ethstorage/storage_manager.go | 11 +++- integration_tests/node_mine_test.go | 4 +- 8 files changed, 79 insertions(+), 149 deletions(-) diff --git a/ethstorage/blobs/blob_reader.go b/ethstorage/blobs/blob_reader.go index dd02167a..af6cde4a 100644 --- a/ethstorage/blobs/blob_reader.go +++ b/ethstorage/blobs/blob_reader.go @@ -5,90 +5,35 @@ package blobs import ( "fmt" - "sync" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" es "github.com/ethstorage/go-ethstorage/ethstorage" - "github.com/ethstorage/go-ethstorage/ethstorage/downloader" - "github.com/ethstorage/go-ethstorage/ethstorage/eth" ) -const ( - BlobReaderSubKey = "blob-reader" -) +type BlobCacheReader interface { + GetKeyValueByIndex(index uint64, hash common.Hash) []byte + 
GetKeyValueByIndexUnchecked(index uint64) []byte +} // BlobReader provides unified interface for the miner to read blobs and samples // from StorageManager and downloader cache. type BlobReader struct { - encodedBlobs sync.Map - dlr *downloader.Downloader - sm *es.StorageManager - l1 *eth.PollingClient - wg sync.WaitGroup - exitCh chan struct{} - lg log.Logger + cr BlobCacheReader + sm *es.StorageManager + lg log.Logger } -func NewBlobReader(dlr *downloader.Downloader, sm *es.StorageManager, l1 *eth.PollingClient, lg log.Logger) *BlobReader { - n := &BlobReader{ - dlr: dlr, - sm: sm, - l1: l1, - lg: lg, - exitCh: make(chan struct{}), +func NewBlobReader(cr BlobCacheReader, sm *es.StorageManager, lg log.Logger) *BlobReader { + return &BlobReader{ + cr: cr, + sm: sm, + lg: lg, } - n.sync() - return n -} - -// In order to provide miner with encoded samples in a timely manner, -// BlobReader is tracing the downloader and encoding newly cached blobs. -func (n *BlobReader) sync() { - var ( - iCh = make(chan common.Hash) - oCh = make(chan common.Hash) - ) - downloader.SubscribeCachedBlobs(BlobReaderSubKey, iCh, oCh) - go func() { - defer func() { - close(iCh) - close(oCh) - downloader.Unsubscribe(BlobReaderSubKey) - n.lg.Info("Blob reader unsubscribed downloader cache.") - n.wg.Done() - }() - for { - select { - case blockHash := <-iCh: - for _, blob := range n.dlr.Cache.Blobs(blockHash) { - encodedBlob := n.encodeBlob(blob) - n.encodedBlobs.Store(blob.KvIdx(), encodedBlob) - } - case blockHash := <-oCh: - n.encodedBlobs.Delete(blockHash) - n.lg.Info("Blob reader deleted encoded blobs of block", "blockHash", blockHash) - case <-n.exitCh: - n.lg.Info("Blob reader is exiting from downloader sync loop...") - return - } - } - }() - n.wg.Add(1) -} - -func (n *BlobReader) encodeBlob(blob downloader.Blob) []byte { - shardIdx := blob.KvIdx() >> n.sm.KvEntriesBits() - encodeType, _ := n.sm.GetShardEncodeType(shardIdx) - miner, _ := n.sm.GetShardMiner(shardIdx) - n.lg.Info("Encoding blob from downloader", "kvIdx", blob.KvIdx(), "shardIdx", shardIdx, "encodeType", encodeType, "miner", miner) - encodeKey := es.CalcEncodeKey(blob.Hash(), blob.KvIdx(), miner) - encodedBlob := es.EncodeChunk(n.sm.MaxKvSize(), blob.Data(), encodeType, encodeKey) - return encodedBlob } func (n *BlobReader) GetBlob(kvIdx uint64, kvHash common.Hash) ([]byte, error) { - blob := n.dlr.Cache.GetKeyValueByIndex(kvIdx, kvHash) + blob := n.cr.GetKeyValueByIndex(kvIdx, kvHash) if blob != nil { n.lg.Debug("Loaded blob from downloader cache", "kvIdx", kvIdx) return blob, nil @@ -108,12 +53,12 @@ func (n *BlobReader) ReadSample(shardIdx, sampleIdx uint64) (common.Hash, error) sampleLenBits := n.sm.MaxKvSizeBits() - es.SampleSizeBits kvIdx := sampleIdx >> sampleLenBits - if value, ok := n.encodedBlobs.Load(kvIdx); ok { - encodedBlob := value.([]byte) + if blob := n.cr.GetKeyValueByIndexUnchecked(kvIdx); blob != nil { + n.lg.Debug("Loaded blob from downloader cache", "kvIdx", kvIdx) sampleIdxInKv := sampleIdx % (1 << sampleLenBits) sampleSize := uint64(1 << es.SampleSizeBits) sampleIdxByte := sampleIdxInKv << es.SampleSizeBits - sample := encodedBlob[sampleIdxByte : sampleIdxByte+sampleSize] + sample := blob[sampleIdxByte : sampleIdxByte+sampleSize] return common.BytesToHash(sample), nil } @@ -123,9 +68,3 @@ func (n *BlobReader) ReadSample(shardIdx, sampleIdx uint64) (common.Hash, error) } return encodedSample, nil } - -func (n *BlobReader) Close() { - n.lg.Info("Blob reader is being closed...") - close(n.exitCh) - n.wg.Wait() -} diff --git 
a/ethstorage/downloader/blob_cache.go b/ethstorage/downloader/blob_cache.go index e145e366..7a2070b9 100644 --- a/ethstorage/downloader/blob_cache.go +++ b/ethstorage/downloader/blob_cache.go @@ -12,53 +12,24 @@ import ( "github.com/ethstorage/go-ethstorage/ethstorage" ) -var ( - storeSubscribers = make(map[string]chan common.Hash) - clearSubscribers = make(map[string]chan common.Hash) -) - -func SubscribeCachedBlobs(key string, ich, och chan common.Hash) { - storeSubscribers[key] = ich - clearSubscribers[key] = och -} - -func Unsubscribe(key string) { - delete(storeSubscribers, key) - delete(clearSubscribers, key) -} - -func notifyStore(data common.Hash) { - for _, ch := range storeSubscribers { - ch <- data - } -} - -func notifyClear(data common.Hash) { - for _, ch := range clearSubscribers { - ch <- data - } -} - -type BlobCache struct { +type BlobMemCache struct { blocks map[common.Hash]*blockBlobs mu sync.RWMutex } -func NewBlobCache() *BlobCache { - return &BlobCache{ +func NewBlobMemCache() *BlobMemCache { + return &BlobMemCache{ blocks: map[common.Hash]*blockBlobs{}, } } -func (c *BlobCache) SetBlockBlobs(block *blockBlobs) { +func (c *BlobMemCache) SetBlockBlobs(block *blockBlobs) { c.mu.Lock() defer c.mu.Unlock() c.blocks[block.hash] = block - - notifyStore(block.hash) } -func (c *BlobCache) Blobs(hash common.Hash) []Blob { +func (c *BlobMemCache) Blobs(hash common.Hash) []blob { c.mu.RLock() defer c.mu.RUnlock() @@ -66,14 +37,14 @@ func (c *BlobCache) Blobs(hash common.Hash) []Blob { return nil } - res := []Blob{} + res := []blob{} for _, blob := range c.blocks[hash].blobs { res = append(res, *blob) } return res } -func (c *BlobCache) GetKeyValueByIndex(idx uint64, hash common.Hash) []byte { +func (c *BlobMemCache) GetKeyValueByIndex(idx uint64, hash common.Hash) []byte { c.mu.RLock() defer c.mu.RUnlock() @@ -87,17 +58,30 @@ func (c *BlobCache) GetKeyValueByIndex(idx uint64, hash common.Hash) []byte { return nil } +func (c *BlobMemCache) GetKeyValueByIndexUnchecked(idx uint64) []byte { + c.mu.RLock() + defer c.mu.RUnlock() + + for _, block := range c.blocks { + for _, blob := range block.blobs { + if blob.kvIndex.Uint64() == idx { + return blob.data + } + } + } + return nil +} + // TODO: @Qiang An edge case that may need to be handled when Ethereum block is NOT finalized for a long time // We may need to add a counter in SetBlockBlobs(), if the counter is greater than a threshold which means // there has been a long time after last Cleanup, so we need to Cleanup anyway in SetBlockBlobs. 
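// A minimal sketch of the counter-based fallback suggested in the TODO above:
// force a Cleanup after a fixed number of writes even when no newly finalized
// block has been observed. The wrapper type and the threshold value are
// illustrative assumptions, not part of this change.
type countingBlobCache struct {
	inner    *BlobMemCache
	setCount int
}

func (c *countingBlobCache) SetBlockBlobs(block *blockBlobs, finalized uint64) {
	const cleanupEvery = 1024 // assumed threshold
	c.inner.SetBlockBlobs(block)
	c.setCount++
	if c.setCount >= cleanupEvery {
		c.inner.Cleanup(finalized)
		c.setCount = 0
	}
}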
-func (c *BlobCache) Cleanup(finalized uint64) { +func (c *BlobMemCache) Cleanup(finalized uint64) { c.mu.Lock() defer c.mu.Unlock() for hash, block := range c.blocks { if block.number <= finalized { delete(c.blocks, hash) - notifyClear(hash) } } } diff --git a/ethstorage/downloader/downloader.go b/ethstorage/downloader/downloader.go index 02f74f61..b058b5a8 100644 --- a/ethstorage/downloader/downloader.go +++ b/ethstorage/downloader/downloader.go @@ -38,8 +38,15 @@ var ( lastDownloadKey = []byte("last-download-block") ) +type BlobCache interface { + SetBlockBlobs(block *blockBlobs) + Blobs(hash common.Hash) []blob + GetKeyValueByIndex(idx uint64, hash common.Hash) []byte + Cleanup(finalized uint64) +} + type Downloader struct { - Cache *BlobCache + Cache BlobCache // latestHead and finalizedHead are shared among multiple threads and thus locks must be required when being accessed // others are only accessed by the downloader thread so it is safe to access them in DL thread without locks @@ -65,27 +72,18 @@ type Downloader struct { mu sync.Mutex } -type Blob struct { +type blob struct { kvIndex *big.Int kvSize *big.Int hash common.Hash data []byte } -func (b *Blob) KvIdx() uint64 { return b.kvIndex.Uint64() } -func (b *Blob) Size() uint64 { return b.kvSize.Uint64() } -func (b *Blob) Hash() common.Hash { return b.hash } -func (b *Blob) Data() []byte { - blob := make([]byte, len(b.data)) - copy(blob[:], b.data) - return blob -} - type blockBlobs struct { timestamp uint64 number uint64 hash common.Hash - blobs []*Blob + blobs []*blob } func NewDownloader( @@ -94,6 +92,7 @@ func NewDownloader( daClient *eth.DAClient, db ethdb.Database, sm *ethstorage.StorageManager, + cache BlobCache, downloadStart int64, downloadDump string, minDurationForBlobsRequest uint64, @@ -102,7 +101,7 @@ func NewDownloader( ) *Downloader { sm.DownloadThreadNum = downloadThreadNum return &Downloader{ - Cache: NewBlobCache(), + Cache: cache, l1Source: l1Source, l1Beacon: l1Beacon, daClient: daClient, @@ -314,7 +313,7 @@ func (s *Downloader) download() { // 1. Downloading the blobs into the cache when they are not finalized, with the option toCache set to true. // 2. Writing the blobs into the shard file when they are finalized, with the option toCache set to false. // we will attempt to read the blobs from the cache initially. If they don't exist in the cache, we will download them instead. 
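// A minimal sketch of the cache-first lookup described above; the helper name
// is illustrative and the consensus-layer download step is elided.
func (s *Downloader) blobsFromCacheFirst(block *blockBlobs) []blob {
	if cached := s.Cache.Blobs(block.hash); len(cached) > 0 {
		return cached // already downloaded while the block was unfinalized
	}
	// not cached: fall back to fetching the blobs from the CL
	// (the real logic lives in downloadRange below)
	return nil
}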
-func (s *Downloader) downloadRange(start int64, end int64, toCache bool) ([]Blob, error) { +func (s *Downloader) downloadRange(start int64, end int64, toCache bool) ([]blob, error) { ts := time.Now() if end < start { @@ -329,7 +328,7 @@ func (s *Downloader) downloadRange(start int64, end int64, toCache bool) ([]Blob if err != nil { return nil, err } - blobs := []Blob{} + blobs := []blob{} for _, elBlock := range elBlocks { // attempt to read the blobs from the cache first res := s.Cache.Blobs(elBlock.hash) @@ -375,7 +374,8 @@ func (s *Downloader) downloadRange(start int64, end int64, toCache bool) ([]Blob s.log.Error("Did not find the event specified blob in the CL") } - elBlob.data = clBlob.Data + // encode blobs so that miner can do sampling directly from cache + elBlob.data = s.sm.EncodeBlob(clBlob.Data, elBlob.hash, elBlob.kvIndex.Uint64()) blobs = append(blobs, *elBlob) s.log.Info("Download range", "cache", toCache, "kvIdx", elBlob.kvIndex) } @@ -383,6 +383,7 @@ func (s *Downloader) downloadRange(start int64, end int64, toCache bool) ([]Blob s.Cache.SetBlockBlobs(elBlock) } } + if len(blobs) > 0 { s.log.Info("Download range", "cache", toCache, "start", start, "end", end, "blobNumber", len(blobs), "duration(ms)", time.Since(ts).Milliseconds()) } @@ -390,7 +391,7 @@ func (s *Downloader) downloadRange(start int64, end int64, toCache bool) ([]Blob return blobs, nil } -func (s *Downloader) dumpBlobsIfNeeded(blobs []Blob) { +func (s *Downloader) dumpBlobsIfNeeded(blobs []blob) { if s.dumpDir != "" { for _, blob := range blobs { fileName := filepath.Join(s.dumpDir, fmt.Sprintf("%s.dat", hex.EncodeToString(blob.data[:5]))) @@ -423,7 +424,7 @@ func (s *Downloader) eventsToBlocks(events []types.Log) ([]*blockBlobs, error) { timestamp: res.Time, number: event.BlockNumber, hash: event.BlockHash, - blobs: []*Blob{}, + blobs: []*blob{}, }) } @@ -431,7 +432,7 @@ func (s *Downloader) eventsToBlocks(events []types.Log) ([]*blockBlobs, error) { hash := common.Hash{} copy(hash[:], event.Topics[3][:]) - blob := Blob{ + blob := blob{ kvIndex: big.NewInt(0).SetBytes(event.Topics[1][:]), kvSize: big.NewInt(0).SetBytes(event.Topics[2][:]), hash: hash, diff --git a/ethstorage/miner/miner.go b/ethstorage/miner/miner.go index 0042f45b..c9af8d9a 100644 --- a/ethstorage/miner/miner.go +++ b/ethstorage/miner/miner.go @@ -34,7 +34,6 @@ type MiningProver interface { type DataReader interface { GetBlob(kvIdxe uint64, blobHash common.Hash) ([]byte, error) ReadSample(shardIdx, sampleIdx uint64) (common.Hash, error) - Close() } type miningInfo struct { @@ -152,7 +151,6 @@ func (miner *Miner) Close() { miner.lg.Warn("Miner is being closed...") close(miner.exitCh) miner.wg.Wait() - miner.dataReader.Close() } func (miner *Miner) Mining() bool { diff --git a/ethstorage/miner/miner_test.go b/ethstorage/miner/miner_test.go index 7dd57e84..5f3fdfc6 100644 --- a/ethstorage/miner/miner_test.go +++ b/ethstorage/miner/miner_test.go @@ -72,9 +72,7 @@ func newMiner(t *testing.T, storageMgr *es.StorageManager, client *eth.PollingCl pvr := prover.NewKZGPoseidonProver(zkWorkingDir, defaultConfig.ZKeyFileName, defaultConfig.ZKProverMode, defaultConfig.ZKProverImpl, lg) fd := new(event.Feed) db := rawdb.NewMemoryDatabase() - bq := blobs.NewBlobReader(&downloader.Downloader{ - Cache: downloader.NewBlobCache(), - }, storageMgr, client, lg) + bq := blobs.NewBlobReader(downloader.NewBlobMemCache(), storageMgr, lg) miner := New(defaultConfig, db, storageMgr, l1api, bq, &pvr, fd, lg) return miner } diff --git a/ethstorage/node/node.go 
b/ethstorage/node/node.go index 56ffb952..8f13c216 100644 --- a/ethstorage/node/node.go +++ b/ethstorage/node/node.go @@ -41,11 +41,12 @@ type EsNode struct { l1FinalizedSub ethereum.Subscription // Subscription to get L1 Finalized blocks, a.k.a. justified data (polling) randaoHeadsSub ethereum.Subscription // Subscription to get randao heads (automatically re-subscribes on error) - randaoSource *eth.RandaoClient // RPC client to fetch randao from - l1Source *eth.PollingClient // L1 Client to fetch data from - l1Beacon *eth.BeaconClient // L1 Beacon Chain to fetch blobs from - daClient *eth.DAClient // L1 Data Availability Client - downloader *downloader.Downloader // L2 Engine to Sync + randaoSource *eth.RandaoClient // RPC client to fetch randao from + l1Source *eth.PollingClient // L1 Client to fetch data from + l1Beacon *eth.BeaconClient // L1 Beacon Chain to fetch blobs from + daClient *eth.DAClient // L1 Data Availability Client + blobCache *downloader.BlobMemCache // Cache for blobs + downloader *downloader.Downloader // L2 Engine to Sync // l2Source *sources.EngineClient // L2 Execution Engine RPC bindings // rpcSync *sources.SyncClient // Alt-sync RPC client, optional (may be nil) server *rpcServer // RPC server hosting the rollup-node API @@ -135,12 +136,14 @@ func (n *EsNode) init(ctx context.Context, cfg *Config) error { } func (n *EsNode) initL2(ctx context.Context, cfg *Config) error { + n.blobCache = downloader.NewBlobMemCache() n.downloader = downloader.NewDownloader( n.l1Source, n.l1Beacon, n.daClient, n.db, n.storageManager, + n.blobCache, cfg.Downloader.DownloadStart, cfg.Downloader.DownloadDump, cfg.L1.L1MinDurationForBlobsRequest, @@ -302,7 +305,7 @@ func (n *EsNode) initMiner(ctx context.Context, cfg *Config) error { cfg.Mining.ZKProverImpl, n.log, ) - br := blobs.NewBlobReader(n.downloader, n.storageManager, n.l1Source, n.log) + br := blobs.NewBlobReader(n.blobCache, n.storageManager, n.log) n.miner = miner.New(cfg.Mining, n.db, n.storageManager, l1api, br, &pvr, n.feed, n.log) n.log.Info("Initialized miner") return nil diff --git a/ethstorage/storage_manager.go b/ethstorage/storage_manager.go index 39cbae1a..e6095330 100644 --- a/ethstorage/storage_manager.go +++ b/ethstorage/storage_manager.go @@ -52,6 +52,15 @@ func NewStorageManager(sm *ShardManager, l1Source Il1Source) *StorageManager { } } +func (s *StorageManager) EncodeBlob(blob []byte, blobHash common.Hash, kvIdx uint64) []byte { + shardIdx := kvIdx >> s.KvEntriesBits() + encodeType, _ := s.GetShardEncodeType(shardIdx) + miner, _ := s.GetShardMiner(shardIdx) + log.Info("Encoding blob", "kvIdx", kvIdx, "shardIdx", shardIdx, "encodeType", encodeType, "miner", miner) + encodeKey := CalcEncodeKey(blobHash, kvIdx, miner) + return EncodeChunk(s.MaxKvSize(), blob, encodeType, encodeKey) +} + // DownloadFinished This function will be called when the node found new block are finalized, and it will update the // local L1 view and commit new blobs into local storage file. 
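// Because the downloader now caches blobs in encoded form, the blobs passed in
// here are already encoded and are committed via TryWriteEncoded rather than TryWrite.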
func (s *StorageManager) DownloadFinished(newL1 int64, kvIndices []uint64, blobs [][]byte, commits []common.Hash) error { @@ -94,7 +103,7 @@ func (s *StorageManager) DownloadFinished(newL1 int64, kvIndices []uint64, blobs for _, idx := range insertIdx { c := prepareCommit(commits[idx]) // if return false, just ignore because we are not interested in it - _, err = s.shardManager.TryWrite(kvIndices[idx], blobs[idx], c) + _, err = s.shardManager.TryWriteEncoded(kvIndices[idx], blobs[idx], c) if err != nil { break } diff --git a/integration_tests/node_mine_test.go b/integration_tests/node_mine_test.go index 9d4e7375..aac8878a 100644 --- a/integration_tests/node_mine_test.go +++ b/integration_tests/node_mine_test.go @@ -84,9 +84,7 @@ func TestMining(t *testing.T) { lg, ) db := rawdb.NewMemoryDatabase() - bq := blobs.NewBlobReader(&downloader.Downloader{ - Cache: downloader.NewBlobCache(), - }, storageManager, pClient, lg) + bq := blobs.NewBlobReader(downloader.NewBlobMemCache(), storageManager, lg) mnr := miner.New(miningConfig, db, storageManager, l1api, bq, &pvr, feed, lg) lg.Info("Initialized miner") From dc34e18ae2902d567ab549690fbfaf3be7445332 Mon Sep 17 00:00:00 2001 From: syntrust Date: Wed, 10 Jul 2024 11:53:24 +0800 Subject: [PATCH 21/61] unit test --- ethstorage/downloader/blob_cache_test.go | 91 ++++++++++++++++++++++++ ethstorage/storage_manager.go | 9 +++ 2 files changed, 100 insertions(+) create mode 100644 ethstorage/downloader/blob_cache_test.go diff --git a/ethstorage/downloader/blob_cache_test.go b/ethstorage/downloader/blob_cache_test.go new file mode 100644 index 00000000..9a997873 --- /dev/null +++ b/ethstorage/downloader/blob_cache_test.go @@ -0,0 +1,91 @@ +// Copyright 2022-2023, EthStorage. +// For license information, see https://github.com/ethstorage/es-node/blob/main/LICENSE + +package downloader + +import ( + "bytes" + "fmt" + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethstorage/go-ethstorage/ethstorage" +) + +var ( + kvIndex uint64 = 0 + fileName = "test_shard_0.dat" + blobData = "blob data of kvIndex %d" + minerAddr = common.BigToAddress(common.Big0) + kvSize uint64 = 1 << 17 + kvEntries uint64 = 16 + shardID = uint64(0) + bc BlobCache +) + +func init() { + bc = NewBlobMemCache() +} + +func TestBlobCache_GetKeyValueByIndex(t *testing.T) { + tests := []struct { + blockNum uint64 + blobLen uint64 + kvIdxWant uint64 + }{ + {0, 1, 0}, + {1000, 5, 5}, + } + + df, err := ethstorage.Create(fileName, shardID, kvEntries, 0, kvSize, ethstorage.ENCODE_BLOB_POSEIDON, minerAddr, kvSize) + if err != nil { + t.Fatalf("Create failed %v", err) + } + shardMgr := ethstorage.NewShardManager(common.Address{}, kvSize, kvEntries, kvSize) + shardMgr.AddDataShard(shardID) + shardMgr.AddDataFile(df) + sm := ethstorage.NewStorageManager(shardMgr, nil) + for i, tt := range tests { + bb := newBlockBlobs(tt.blockNum, tt.blobLen) + t.Run(fmt.Sprintf("test %d", i), func(t *testing.T) { + for i, b := range bb.blobs { + bb.blobs[i].data = sm.EncodeBlob(b.data, b.hash, b.kvIndex.Uint64()) + } + bc.SetBlockBlobs(bb) + kvHash, ok, err := sm.TryReadMeta(tt.kvIdxWant) + if err != nil { + t.Fatalf("TryReadMeta() error = %v", err) + } + if !ok { + t.Fatalf("TryReadMeta() got = %v, want %v", ok, true) + } + blobEncoded := bc.GetKeyValueByIndex(tt.kvIdxWant, common.Hash(kvHash)) + blobDecoded := sm.DecodeBlob(blobEncoded, common.Hash(kvHash), tt.kvIdxWant) + bytesWant := []byte(fmt.Sprintf(blobData, tt.kvIdxWant)) + if !bytes.Equal(blobDecoded[:len(bytesWant)], 
bytesWant) { + t.Errorf("BlobMemCache.GetKeyValueByIndex() and decoded = %s, want %s", blobDecoded[:len(bytesWant)], bytesWant) + } + }) + } + +} + +func newBlockBlobs(blockNumber, blobLen uint64) *blockBlobs { + block := &blockBlobs{ + number: blockNumber, + hash: common.BigToHash(new(big.Int).SetUint64(blockNumber)), + blobs: make([]*blob, blobLen), + } + for i := uint64(0); i < blobLen; i++ { + kvIdx := new(big.Int).SetUint64(kvIndex) + blob := &blob{ + kvIndex: kvIdx, + hash: common.BigToHash(kvIdx), + data: []byte(fmt.Sprintf(blobData, kvIndex)), + } + block.blobs[i] = blob + kvIndex = kvIndex + 1 + } + return block +} diff --git a/ethstorage/storage_manager.go b/ethstorage/storage_manager.go index e6095330..dd9578ae 100644 --- a/ethstorage/storage_manager.go +++ b/ethstorage/storage_manager.go @@ -61,6 +61,15 @@ func (s *StorageManager) EncodeBlob(blob []byte, blobHash common.Hash, kvIdx uin return EncodeChunk(s.MaxKvSize(), blob, encodeType, encodeKey) } +func (s *StorageManager) DecodeBlob(blob []byte, blobHash common.Hash, kvIdx uint64) []byte { + shardIdx := kvIdx >> s.KvEntriesBits() + encodeType, _ := s.GetShardEncodeType(shardIdx) + miner, _ := s.GetShardMiner(shardIdx) + log.Info("Encoding blob", "kvIdx", kvIdx, "shardIdx", shardIdx, "encodeType", encodeType, "miner", miner) + encodeKey := CalcEncodeKey(blobHash, kvIdx, miner) + return DecodeChunk(s.MaxKvSize(), blob, encodeType, encodeKey) +} + // DownloadFinished This function will be called when the node found new block are finalized, and it will update the // local L1 view and commit new blobs into local storage file. func (s *StorageManager) DownloadFinished(newL1 int64, kvIndices []uint64, blobs [][]byte, commits []common.Hash) error { From 0f22b3c1bfc01dd90a02cf8fb7c0b73c3426b19c Mon Sep 17 00:00:00 2001 From: syntrust Date: Wed, 10 Jul 2024 13:47:37 +0800 Subject: [PATCH 22/61] cleanup --- ethstorage/blobs/blob_reader.go | 11 ++++++++--- ethstorage/downloader/blob_cache.go | 14 -------------- ethstorage/downloader/blob_cache_test.go | 6 ++++++ 3 files changed, 14 insertions(+), 17 deletions(-) diff --git a/ethstorage/blobs/blob_reader.go b/ethstorage/blobs/blob_reader.go index af6cde4a..07eaa432 100644 --- a/ethstorage/blobs/blob_reader.go +++ b/ethstorage/blobs/blob_reader.go @@ -13,7 +13,6 @@ import ( type BlobCacheReader interface { GetKeyValueByIndex(index uint64, hash common.Hash) []byte - GetKeyValueByIndexUnchecked(index uint64) []byte } // BlobReader provides unified interface for the miner to read blobs and samples @@ -52,8 +51,14 @@ func (n *BlobReader) GetBlob(kvIdx uint64, kvHash common.Hash) ([]byte, error) { func (n *BlobReader) ReadSample(shardIdx, sampleIdx uint64) (common.Hash, error) { sampleLenBits := n.sm.MaxKvSizeBits() - es.SampleSizeBits kvIdx := sampleIdx >> sampleLenBits - - if blob := n.cr.GetKeyValueByIndexUnchecked(kvIdx); blob != nil { + kvHash, ok, err := n.sm.TryReadMeta(kvIdx) + if err != nil { + return common.Hash{}, err + } + if !ok { + return common.Hash{}, fmt.Errorf("kv not found: index=%d", kvIdx) + } + if blob := n.cr.GetKeyValueByIndex(kvIdx, common.Hash(kvHash)); blob != nil { n.lg.Debug("Loaded blob from downloader cache", "kvIdx", kvIdx) sampleIdxInKv := sampleIdx % (1 << sampleLenBits) sampleSize := uint64(1 << es.SampleSizeBits) diff --git a/ethstorage/downloader/blob_cache.go b/ethstorage/downloader/blob_cache.go index 7a2070b9..b36d24eb 100644 --- a/ethstorage/downloader/blob_cache.go +++ b/ethstorage/downloader/blob_cache.go @@ -58,20 +58,6 @@ func (c *BlobMemCache) 
GetKeyValueByIndex(idx uint64, hash common.Hash) []byte { return nil } -func (c *BlobMemCache) GetKeyValueByIndexUnchecked(idx uint64) []byte { - c.mu.RLock() - defer c.mu.RUnlock() - - for _, block := range c.blocks { - for _, blob := range block.blobs { - if blob.kvIndex.Uint64() == idx { - return blob.data - } - } - } - return nil -} - // TODO: @Qiang An edge case that may need to be handled when Ethereum block is NOT finalized for a long time // We may need to add a counter in SetBlockBlobs(), if the counter is greater than a threshold which means // there has been a long time after last Cleanup, so we need to Cleanup anyway in SetBlockBlobs. diff --git a/ethstorage/downloader/blob_cache_test.go b/ethstorage/downloader/blob_cache_test.go index 9a997873..d5b652d4 100644 --- a/ethstorage/downloader/blob_cache_test.go +++ b/ethstorage/downloader/blob_cache_test.go @@ -7,6 +7,7 @@ import ( "bytes" "fmt" "math/big" + "os" "testing" "github.com/ethereum/go-ethereum/common" @@ -46,6 +47,11 @@ func TestBlobCache_GetKeyValueByIndex(t *testing.T) { shardMgr.AddDataShard(shardID) shardMgr.AddDataFile(df) sm := ethstorage.NewStorageManager(shardMgr, nil) + defer func() { + sm.Close() + os.Remove(fileName) + }() + for i, tt := range tests { bb := newBlockBlobs(tt.blockNum, tt.blobLen) t.Run(fmt.Sprintf("test %d", i), func(t *testing.T) { From 17be474acb657768c9ce0b4c3c0e6d666577705f Mon Sep 17 00:00:00 2001 From: syntrust Date: Wed, 10 Jul 2024 16:20:35 +0800 Subject: [PATCH 23/61] fix sampling --- ethstorage/blobs/blob_reader.go | 7 ++++--- ethstorage/downloader/blob_cache_test.go | 4 ++-- ethstorage/downloader/downloader.go | 4 ++-- ethstorage/storage_manager.go | 10 ++++------ 4 files changed, 12 insertions(+), 13 deletions(-) diff --git a/ethstorage/blobs/blob_reader.go b/ethstorage/blobs/blob_reader.go index 07eaa432..585033f1 100644 --- a/ethstorage/blobs/blob_reader.go +++ b/ethstorage/blobs/blob_reader.go @@ -32,11 +32,12 @@ func NewBlobReader(cr BlobCacheReader, sm *es.StorageManager, lg log.Logger) *Bl } func (n *BlobReader) GetBlob(kvIdx uint64, kvHash common.Hash) ([]byte, error) { - blob := n.cr.GetKeyValueByIndex(kvIdx, kvHash) - if blob != nil { + if blob := n.cr.GetKeyValueByIndex(kvIdx, kvHash); blob != nil { n.lg.Debug("Loaded blob from downloader cache", "kvIdx", kvIdx) - return blob, nil + blobDecoded := n.sm.DecodeBlob(blob, kvHash, kvIdx, n.sm.MaxKvSize()) + return blobDecoded, nil } + blob, exist, err := n.sm.TryRead(kvIdx, int(n.sm.MaxKvSize()), kvHash) if err != nil { return nil, err diff --git a/ethstorage/downloader/blob_cache_test.go b/ethstorage/downloader/blob_cache_test.go index d5b652d4..cbda335d 100644 --- a/ethstorage/downloader/blob_cache_test.go +++ b/ethstorage/downloader/blob_cache_test.go @@ -56,7 +56,7 @@ func TestBlobCache_GetKeyValueByIndex(t *testing.T) { bb := newBlockBlobs(tt.blockNum, tt.blobLen) t.Run(fmt.Sprintf("test %d", i), func(t *testing.T) { for i, b := range bb.blobs { - bb.blobs[i].data = sm.EncodeBlob(b.data, b.hash, b.kvIndex.Uint64()) + bb.blobs[i].data = sm.EncodeBlob(b.data, b.hash, b.kvIndex.Uint64(), kvSize) } bc.SetBlockBlobs(bb) kvHash, ok, err := sm.TryReadMeta(tt.kvIdxWant) @@ -67,7 +67,7 @@ func TestBlobCache_GetKeyValueByIndex(t *testing.T) { t.Fatalf("TryReadMeta() got = %v, want %v", ok, true) } blobEncoded := bc.GetKeyValueByIndex(tt.kvIdxWant, common.Hash(kvHash)) - blobDecoded := sm.DecodeBlob(blobEncoded, common.Hash(kvHash), tt.kvIdxWant) + blobDecoded := sm.DecodeBlob(blobEncoded, common.Hash(kvHash), tt.kvIdxWant, kvSize) 
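			// Decoding must reuse the same kv hash, kv index, and size that the
			// encode key was derived from; with a different key the original
			// data cannot be recovered.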
bytesWant := []byte(fmt.Sprintf(blobData, tt.kvIdxWant)) if !bytes.Equal(blobDecoded[:len(bytesWant)], bytesWant) { t.Errorf("BlobMemCache.GetKeyValueByIndex() and decoded = %s, want %s", blobDecoded[:len(bytesWant)], bytesWant) diff --git a/ethstorage/downloader/downloader.go b/ethstorage/downloader/downloader.go index b058b5a8..d42d79b0 100644 --- a/ethstorage/downloader/downloader.go +++ b/ethstorage/downloader/downloader.go @@ -375,9 +375,9 @@ func (s *Downloader) downloadRange(start int64, end int64, toCache bool) ([]blob } // encode blobs so that miner can do sampling directly from cache - elBlob.data = s.sm.EncodeBlob(clBlob.Data, elBlob.hash, elBlob.kvIndex.Uint64()) + elBlob.data = s.sm.EncodeBlob(clBlob.Data, elBlob.hash, elBlob.kvIndex.Uint64(), elBlob.kvSize.Uint64()) blobs = append(blobs, *elBlob) - s.log.Info("Download range", "cache", toCache, "kvIdx", elBlob.kvIndex) + s.log.Info("Download range", "blockNumber", elBlock.number, "kvIdx", elBlob.kvIndex) } if toCache { s.Cache.SetBlockBlobs(elBlock) diff --git a/ethstorage/storage_manager.go b/ethstorage/storage_manager.go index dd9578ae..7239cb74 100644 --- a/ethstorage/storage_manager.go +++ b/ethstorage/storage_manager.go @@ -52,22 +52,20 @@ func NewStorageManager(sm *ShardManager, l1Source Il1Source) *StorageManager { } } -func (s *StorageManager) EncodeBlob(blob []byte, blobHash common.Hash, kvIdx uint64) []byte { +func (s *StorageManager) EncodeBlob(blob []byte, blobHash common.Hash, kvIdx, size uint64) []byte { shardIdx := kvIdx >> s.KvEntriesBits() encodeType, _ := s.GetShardEncodeType(shardIdx) miner, _ := s.GetShardMiner(shardIdx) - log.Info("Encoding blob", "kvIdx", kvIdx, "shardIdx", shardIdx, "encodeType", encodeType, "miner", miner) encodeKey := CalcEncodeKey(blobHash, kvIdx, miner) - return EncodeChunk(s.MaxKvSize(), blob, encodeType, encodeKey) + return EncodeChunk(size, blob, encodeType, encodeKey) } -func (s *StorageManager) DecodeBlob(blob []byte, blobHash common.Hash, kvIdx uint64) []byte { +func (s *StorageManager) DecodeBlob(blob []byte, blobHash common.Hash, kvIdx, size uint64) []byte { shardIdx := kvIdx >> s.KvEntriesBits() encodeType, _ := s.GetShardEncodeType(shardIdx) miner, _ := s.GetShardMiner(shardIdx) - log.Info("Encoding blob", "kvIdx", kvIdx, "shardIdx", shardIdx, "encodeType", encodeType, "miner", miner) encodeKey := CalcEncodeKey(blobHash, kvIdx, miner) - return DecodeChunk(s.MaxKvSize(), blob, encodeType, encodeKey) + return DecodeChunk(size, blob, encodeType, encodeKey) } // DownloadFinished This function will be called when the node found new block are finalized, and it will update the From cd0de620f049979a844a75fdddb846550f9277c6 Mon Sep 17 00:00:00 2001 From: syntrust Date: Wed, 10 Jul 2024 17:05:33 +0800 Subject: [PATCH 24/61] decode for api --- ethstorage/node/es_api.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ethstorage/node/es_api.go b/ethstorage/node/es_api.go index 59c61676..e5376180 100644 --- a/ethstorage/node/es_api.go +++ b/ethstorage/node/es_api.go @@ -6,6 +6,7 @@ package node import ( "bytes" "errors" + "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" @@ -63,6 +64,8 @@ func (api *esAPI) GetBlob(kvIndex uint64, blobHash common.Hash, decodeType Decod if !found { return nil, ethereum.NotFound } + } else { + blob = api.sm.DecodeBlob(blob, blobHash, kvIndex, size) } ret := blob From a7244371908f595a96f75b859d435cbc217a7cda Mon Sep 17 00:00:00 2001 From: syntrust Date: Thu, 11 Jul 2024 10:37:28 
+0800 Subject: [PATCH 25/61] fix tests --- .gitignore | 1 + ethstorage/blobs/blob_reader.go | 15 ++--- ethstorage/downloader/blob_cache.go | 14 +++++ ethstorage/downloader/blob_cache_test.go | 79 ++++++++++++++---------- ethstorage/downloader/downloader.go | 1 + 5 files changed, 67 insertions(+), 43 deletions(-) diff --git a/.gitignore b/.gitignore index c260b153..a3e2a4ab 100644 --- a/.gitignore +++ b/.gitignore @@ -73,3 +73,4 @@ profile.cov **/yarn-error.log logs/ +*.test diff --git a/ethstorage/blobs/blob_reader.go b/ethstorage/blobs/blob_reader.go index 585033f1..f18d460c 100644 --- a/ethstorage/blobs/blob_reader.go +++ b/ethstorage/blobs/blob_reader.go @@ -13,6 +13,7 @@ import ( type BlobCacheReader interface { GetKeyValueByIndex(index uint64, hash common.Hash) []byte + GetKeyValueByIndexUnchecked(index uint64) []byte } // BlobReader provides unified interface for the miner to read blobs and samples @@ -52,14 +53,8 @@ func (n *BlobReader) GetBlob(kvIdx uint64, kvHash common.Hash) ([]byte, error) { func (n *BlobReader) ReadSample(shardIdx, sampleIdx uint64) (common.Hash, error) { sampleLenBits := n.sm.MaxKvSizeBits() - es.SampleSizeBits kvIdx := sampleIdx >> sampleLenBits - kvHash, ok, err := n.sm.TryReadMeta(kvIdx) - if err != nil { - return common.Hash{}, err - } - if !ok { - return common.Hash{}, fmt.Errorf("kv not found: index=%d", kvIdx) - } - if blob := n.cr.GetKeyValueByIndex(kvIdx, common.Hash(kvHash)); blob != nil { + // get blob without checking commit since kvHash is not available + if blob := n.cr.GetKeyValueByIndexUnchecked(kvIdx); blob != nil { n.lg.Debug("Loaded blob from downloader cache", "kvIdx", kvIdx) sampleIdxInKv := sampleIdx % (1 << sampleLenBits) sampleSize := uint64(1 << es.SampleSizeBits) @@ -68,9 +63,9 @@ func (n *BlobReader) ReadSample(shardIdx, sampleIdx uint64) (common.Hash, error) return common.BytesToHash(sample), nil } - encodedSample, err := n.sm.ReadSampleUnlocked(shardIdx, sampleIdx) + sample, err := n.sm.ReadSampleUnlocked(shardIdx, sampleIdx) if err != nil { return common.Hash{}, err } - return encodedSample, nil + return sample, nil } diff --git a/ethstorage/downloader/blob_cache.go b/ethstorage/downloader/blob_cache.go index b36d24eb..7a2070b9 100644 --- a/ethstorage/downloader/blob_cache.go +++ b/ethstorage/downloader/blob_cache.go @@ -58,6 +58,20 @@ func (c *BlobMemCache) GetKeyValueByIndex(idx uint64, hash common.Hash) []byte { return nil } +func (c *BlobMemCache) GetKeyValueByIndexUnchecked(idx uint64) []byte { + c.mu.RLock() + defer c.mu.RUnlock() + + for _, block := range c.blocks { + for _, blob := range block.blobs { + if blob.kvIndex.Uint64() == idx { + return blob.data + } + } + } + return nil +} + // TODO: @Qiang An edge case that may need to be handled when Ethereum block is NOT finalized for a long time // We may need to add a counter in SetBlockBlobs(), if the counter is greater than a threshold which means // there has been a long time after last Cleanup, so we need to Cleanup anyway in SetBlockBlobs. 
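For context on the ReadSample change above: once the encoded blob has been fetched from the cache by kvIndex alone, picking the sample out of it is plain offset arithmetic. A minimal sketch of that indexing, assuming 32-byte samples (SampleSizeBits = 5) rather than the real ethstorage constants; the helper name is illustrative, not part of the codebase:

func sampleFromCachedBlob(blob []byte, sampleIdx, maxKvSizeBits uint64) []byte {
	const sampleSizeBits = 5                          // 32-byte samples (assumption)
	sampleLenBits := maxKvSizeBits - sampleSizeBits   // log2 of samples per KV
	sampleIdxInKv := sampleIdx % (1 << sampleLenBits) // sample position within this KV
	off := sampleIdxInKv << sampleSizeBits            // byte offset into the encoded blob
	return blob[off : off+(1<<sampleSizeBits)]
}

The unchecked cache lookup is used only because kvHash is not available at sampling time, as the comment in blob_reader.go notes; callers that do have the hash keep going through GetKeyValueByIndex.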
diff --git a/ethstorage/downloader/blob_cache_test.go b/ethstorage/downloader/blob_cache_test.go index cbda335d..eb056e55 100644 --- a/ethstorage/downloader/blob_cache_test.go +++ b/ethstorage/downloader/blob_cache_test.go @@ -11,32 +11,36 @@ import ( "testing" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto/kzg4844" "github.com/ethstorage/go-ethstorage/ethstorage" + "github.com/protolambda/go-kzg/eth" ) var ( - kvIndex uint64 = 0 + bc BlobCache + kvHashes []common.Hash fileName = "test_shard_0.dat" blobData = "blob data of kvIndex %d" minerAddr = common.BigToAddress(common.Big0) kvSize uint64 = 1 << 17 kvEntries uint64 = 16 shardID = uint64(0) - bc BlobCache ) func init() { bc = NewBlobMemCache() } -func TestBlobCache_GetKeyValueByIndex(t *testing.T) { - tests := []struct { - blockNum uint64 - blobLen uint64 - kvIdxWant uint64 +func TestBlobCache_Encoding(t *testing.T) { + blockBlobsParams := []struct { + blockNum uint64 + blobLen uint64 }{ - {0, 1, 0}, - {1000, 5, 5}, + {0, 1}, + {1, 5}, + {1000, 4}, + {12345, 2}, + {2000000, 3}, } df, err := ethstorage.Create(fileName, shardID, kvEntries, 0, kvSize, ethstorage.ENCODE_BLOB_POSEIDON, minerAddr, kvSize) @@ -52,46 +56,55 @@ func TestBlobCache_GetKeyValueByIndex(t *testing.T) { os.Remove(fileName) }() - for i, tt := range tests { - bb := newBlockBlobs(tt.blockNum, tt.blobLen) - t.Run(fmt.Sprintf("test %d", i), func(t *testing.T) { - for i, b := range bb.blobs { - bb.blobs[i].data = sm.EncodeBlob(b.data, b.hash, b.kvIndex.Uint64(), kvSize) - } - bc.SetBlockBlobs(bb) - kvHash, ok, err := sm.TryReadMeta(tt.kvIdxWant) - if err != nil { - t.Fatalf("TryReadMeta() error = %v", err) - } - if !ok { - t.Fatalf("TryReadMeta() got = %v, want %v", ok, true) - } - blobEncoded := bc.GetKeyValueByIndex(tt.kvIdxWant, common.Hash(kvHash)) - blobDecoded := sm.DecodeBlob(blobEncoded, common.Hash(kvHash), tt.kvIdxWant, kvSize) - bytesWant := []byte(fmt.Sprintf(blobData, tt.kvIdxWant)) + for _, tt := range blockBlobsParams { + // download and save to cache + bb, err := newBlockBlobs(tt.blockNum, tt.blobLen) + if err != nil { + t.Fatalf("failed to create block blobs: %v", err) + } + for i, b := range bb.blobs { + bb.blobs[i].data = sm.EncodeBlob(b.data, b.hash, b.kvIndex.Uint64(), kvSize) + } + bc.SetBlockBlobs(bb) + } + + // load from cache and verify + for i, kvHash := range kvHashes { + kvIndex := uint64(i) + t.Run(fmt.Sprintf("test kv: %d", i), func(t *testing.T) { + blobEncoded := bc.GetKeyValueByIndexUnchecked(kvIndex) + blobDecoded := sm.DecodeBlob(blobEncoded, kvHash, kvIndex, kvSize) + bytesWant := []byte(fmt.Sprintf(blobData, kvIndex)) if !bytes.Equal(blobDecoded[:len(bytesWant)], bytesWant) { - t.Errorf("BlobMemCache.GetKeyValueByIndex() and decoded = %s, want %s", blobDecoded[:len(bytesWant)], bytesWant) + t.Errorf("GetKeyValueByIndex and decoded = %s, want %s", blobDecoded[:len(bytesWant)], bytesWant) } }) } - } -func newBlockBlobs(blockNumber, blobLen uint64) *blockBlobs { +func newBlockBlobs(blockNumber, blobLen uint64) (*blockBlobs, error) { block := &blockBlobs{ number: blockNumber, hash: common.BigToHash(new(big.Int).SetUint64(blockNumber)), blobs: make([]*blob, blobLen), } for i := uint64(0); i < blobLen; i++ { - kvIdx := new(big.Int).SetUint64(kvIndex) + kvIndex := len(kvHashes) + kvIdx := big.NewInt(int64(kvIndex)) blob := &blob{ kvIndex: kvIdx, - hash: common.BigToHash(kvIdx), data: []byte(fmt.Sprintf(blobData, kvIndex)), } + kzgBlob := kzg4844.Blob{} + copy(kzgBlob[:], blob.data) + commitment, err := 
kzg4844.BlobToCommitment(kzgBlob) + if err != nil { + return nil, fmt.Errorf( + "failed to create commitment for blob %d: %w", kvIndex, err) + } + blob.hash = common.Hash(eth.KZGToVersionedHash(eth.KZGCommitment(commitment))) block.blobs[i] = blob - kvIndex = kvIndex + 1 + kvHashes = append(kvHashes, blob.hash) } - return block + return block, nil } diff --git a/ethstorage/downloader/downloader.go b/ethstorage/downloader/downloader.go index d42d79b0..bef1b918 100644 --- a/ethstorage/downloader/downloader.go +++ b/ethstorage/downloader/downloader.go @@ -42,6 +42,7 @@ type BlobCache interface { SetBlockBlobs(block *blockBlobs) Blobs(hash common.Hash) []blob GetKeyValueByIndex(idx uint64, hash common.Hash) []byte + GetKeyValueByIndexUnchecked(idx uint64) []byte Cleanup(finalized uint64) } From 74cb4a854839897b45b17c7da708da15cac948a8 Mon Sep 17 00:00:00 2001 From: syntrust Date: Thu, 11 Jul 2024 10:55:25 +0800 Subject: [PATCH 26/61] minor --- ethstorage/downloader/blob_cache_test.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/ethstorage/downloader/blob_cache_test.go b/ethstorage/downloader/blob_cache_test.go index eb056e55..613ad98c 100644 --- a/ethstorage/downloader/blob_cache_test.go +++ b/ethstorage/downloader/blob_cache_test.go @@ -21,7 +21,7 @@ var ( kvHashes []common.Hash fileName = "test_shard_0.dat" blobData = "blob data of kvIndex %d" - minerAddr = common.BigToAddress(common.Big0) + minerAddr = common.BigToAddress(common.Big1) kvSize uint64 = 1 << 17 kvEntries uint64 = 16 shardID = uint64(0) @@ -38,6 +38,7 @@ func TestBlobCache_Encoding(t *testing.T) { }{ {0, 1}, {1, 5}, + {222, 6}, {1000, 4}, {12345, 2}, {2000000, 3}, @@ -56,8 +57,8 @@ func TestBlobCache_Encoding(t *testing.T) { os.Remove(fileName) }() + // download and save to cache for _, tt := range blockBlobsParams { - // download and save to cache bb, err := newBlockBlobs(tt.blockNum, tt.blobLen) if err != nil { t.Fatalf("failed to create block blobs: %v", err) From f7cfb90f4220124090633c51810a52d8d345b973 Mon Sep 17 00:00:00 2001 From: syntrust Date: Thu, 11 Jul 2024 11:06:22 +0800 Subject: [PATCH 27/61] refactor --- ethstorage/storage_manager.go | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/ethstorage/storage_manager.go b/ethstorage/storage_manager.go index 7239cb74..9d9be6ac 100644 --- a/ethstorage/storage_manager.go +++ b/ethstorage/storage_manager.go @@ -53,19 +53,21 @@ func NewStorageManager(sm *ShardManager, l1Source Il1Source) *StorageManager { } func (s *StorageManager) EncodeBlob(blob []byte, blobHash common.Hash, kvIdx, size uint64) []byte { - shardIdx := kvIdx >> s.KvEntriesBits() - encodeType, _ := s.GetShardEncodeType(shardIdx) - miner, _ := s.GetShardMiner(shardIdx) - encodeKey := CalcEncodeKey(blobHash, kvIdx, miner) + encodeType, encodeKey := s.getEncodingParams(kvIdx, blobHash) return EncodeChunk(size, blob, encodeType, encodeKey) } func (s *StorageManager) DecodeBlob(blob []byte, blobHash common.Hash, kvIdx, size uint64) []byte { + encodeType, encodeKey := s.getEncodingParams(kvIdx, blobHash) + return DecodeChunk(size, blob, encodeType, encodeKey) +} + +func (s *StorageManager) getEncodingParams(kvIdx uint64, blobHash common.Hash) (uint64, common.Hash) { shardIdx := kvIdx >> s.KvEntriesBits() encodeType, _ := s.GetShardEncodeType(shardIdx) miner, _ := s.GetShardMiner(shardIdx) encodeKey := CalcEncodeKey(blobHash, kvIdx, miner) - return DecodeChunk(size, blob, encodeType, encodeKey) + return encodeType, encodeKey } // DownloadFinished This function 
will be called when the node found new block are finalized, and it will update the From 9af73b0dbaac4827c004eaaefd8f7d67b4b9d553 Mon Sep 17 00:00:00 2001 From: syntrust Date: Thu, 11 Jul 2024 14:35:16 +0800 Subject: [PATCH 28/61] fix encoding err --- ethstorage/downloader/downloader.go | 2 +- ethstorage/node/es_api.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ethstorage/downloader/downloader.go b/ethstorage/downloader/downloader.go index bef1b918..f498436b 100644 --- a/ethstorage/downloader/downloader.go +++ b/ethstorage/downloader/downloader.go @@ -376,7 +376,7 @@ func (s *Downloader) downloadRange(start int64, end int64, toCache bool) ([]blob } // encode blobs so that miner can do sampling directly from cache - elBlob.data = s.sm.EncodeBlob(clBlob.Data, elBlob.hash, elBlob.kvIndex.Uint64(), elBlob.kvSize.Uint64()) + elBlob.data = s.sm.EncodeBlob(clBlob.Data, elBlob.hash, elBlob.kvIndex.Uint64(), s.sm.MaxKvSize()) blobs = append(blobs, *elBlob) s.log.Info("Download range", "blockNumber", elBlock.number, "kvIdx", elBlob.kvIndex) } diff --git a/ethstorage/node/es_api.go b/ethstorage/node/es_api.go index e5376180..ef6b3dd3 100644 --- a/ethstorage/node/es_api.go +++ b/ethstorage/node/es_api.go @@ -65,7 +65,7 @@ func (api *esAPI) GetBlob(kvIndex uint64, blobHash common.Hash, decodeType Decod return nil, ethereum.NotFound } } else { - blob = api.sm.DecodeBlob(blob, blobHash, kvIndex, size) + blob = api.sm.DecodeBlob(blob, blobHash, kvIndex, api.sm.MaxKvSize()) } ret := blob From b9ebdfd80a5ff5316fa0cbe840cd464d352c10ec Mon Sep 17 00:00:00 2001 From: syntrust Date: Fri, 12 Jul 2024 10:02:53 +0800 Subject: [PATCH 29/61] fix test --- ethstorage/downloader/blob_disk_cache.go | 2 +- ethstorage/downloader/blob_disk_cache_test.go | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/ethstorage/downloader/blob_disk_cache.go b/ethstorage/downloader/blob_disk_cache.go index a422d745..9b7d26f3 100644 --- a/ethstorage/downloader/blob_disk_cache.go +++ b/ethstorage/downloader/blob_disk_cache.go @@ -90,7 +90,7 @@ func (c *BlobDiskCache) Blobs(hash common.Hash) []blob { func (c *BlobDiskCache) GetKeyValueByIndex(idx uint64, hash common.Hash) []byte { blob := c.getBlobByIndex(idx) - if blob != nil || + if blob != nil && bytes.Equal(blob.hash[0:ethstorage.HashSizeInContract], hash[0:ethstorage.HashSizeInContract]) { return blob.data } diff --git a/ethstorage/downloader/blob_disk_cache_test.go b/ethstorage/downloader/blob_disk_cache_test.go index 1250af7e..72e330d3 100644 --- a/ethstorage/downloader/blob_disk_cache_test.go +++ b/ethstorage/downloader/blob_disk_cache_test.go @@ -15,6 +15,7 @@ func TestBlobCache(t *testing.T) { t.Fatalf("Failed to create datadir: %v", err) } t.Logf("datadir %s", datadir) + defer os.RemoveAll(datadir) cache := NewBlobDiskCache() err = cache.Init(datadir) From 04c03c1ff689bea7e14aa56a62ba2165c41ac434 Mon Sep 17 00:00:00 2001 From: syntrust Date: Fri, 12 Jul 2024 11:19:41 +0800 Subject: [PATCH 30/61] debug --- ethstorage/downloader/blob_disk_cache.go | 34 +++++++++------ ethstorage/downloader/blob_disk_cache_test.go | 41 +++++++++++++++---- ethstorage/node/node.go | 2 +- 3 files changed, 56 insertions(+), 21 deletions(-) diff --git a/ethstorage/downloader/blob_disk_cache.go b/ethstorage/downloader/blob_disk_cache.go index 9b7d26f3..0b1eac0d 100644 --- a/ethstorage/downloader/blob_disk_cache.go +++ b/ethstorage/downloader/blob_disk_cache.go @@ -5,6 +5,7 @@ package downloader import ( "bytes" + "fmt" "os" "path/filepath" "sync" @@ -26,22 +27,26 
@@ const ( type BlobDiskCache struct { store billy.Database lookup map[common.Hash]uint64 // Lookup table mapping hashes to blob billy entries id + lg log.Logger mu sync.RWMutex } -func NewBlobDiskCache() *BlobDiskCache { +func NewBlobDiskCache(lg log.Logger) *BlobDiskCache { return &BlobDiskCache{ lookup: make(map[common.Hash]uint64), + lg: lg, } } func (c *BlobDiskCache) Init(datadir string) error { cbdir := filepath.Join(datadir, blobCacheDir) if err := os.MkdirAll(cbdir, 0700); err != nil { + c.lg.Error("Failed to create cache directory", "dir", cbdir, "err", err) return err } store, err := billy.Open(billy.Options{Path: cbdir, Repair: true}, newSlotter(), nil) if err != nil { + c.lg.Error("Failed to open cache directory", "dir", cbdir, "err", err) return err } c.store = store @@ -51,12 +56,12 @@ func (c *BlobDiskCache) Init(datadir string) error { func (c *BlobDiskCache) SetBlockBlobs(block *blockBlobs) error { rlpBlock, err := rlp.EncodeToBytes(block) if err != nil { - log.Error("Failed to encode transaction for storage", "hash", block.hash, "err", err) + c.lg.Error("Failed to encode blockBlobs into RLP", "block", block.number, "err", err) return err } id, err := c.store.Put(rlpBlock) if err != nil { - log.Error("Failed to write blob into storage", "hash", block.hash, "err", err) + c.lg.Error("Failed to write blockBlobs into storage", "block", block.number, "err", err) return err } @@ -64,7 +69,7 @@ func (c *BlobDiskCache) SetBlockBlobs(block *blockBlobs) error { c.lookup[block.hash] = id c.mu.Unlock() - log.Info("Set blockBlobs to cache", "id", id, "block", block.number) + c.lg.Debug("Set blockBlobs to cache", "id", id, "block", block.number) return nil } @@ -75,12 +80,11 @@ func (c *BlobDiskCache) Blobs(hash common.Hash) []blob { if !ok { return nil } - log.Info("Blobs from cache", "hash", hash, "id", id) block, err := c.getBlockBlobsById(id) if err != nil { return nil } - + c.lg.Info("Blobs from cache", "block", block.number, "id", id) res := []blob{} for _, blob := range block.blobs { res = append(res, *blob) @@ -133,15 +137,15 @@ func (c *BlobDiskCache) Cleanup(finalized uint64) { for hash, id := range c.lookup { block, err := c.getBlockBlobsById(id) if err != nil { - log.Warn("Failed to get block from id", "id", id, "err", err) + c.lg.Warn("Failed to get block from id", "id", id, "err", err) continue } if block.number <= finalized { if err := c.store.Delete(id); err != nil { - log.Error("Failed to delete block from id", "id", id, "err", err) + c.lg.Error("Failed to delete block from id", "id", id, "err", err) } delete(c.lookup, hash) - log.Info("Cleanup deleted", "finalized", finalized, "block", block.number, "id", id) + c.lg.Info("Cleanup deleted", "finalized", finalized, "block", block.number, "id", id) } } } @@ -149,19 +153,24 @@ func (c *BlobDiskCache) Cleanup(finalized uint64) { func (c *BlobDiskCache) getBlockBlobsById(id uint64) (*blockBlobs, error) { data, err := c.store.Get(id) if err != nil { - log.Error("Failed to get block from id", "id", id, "err", err) + c.lg.Error("Failed to get block from id", "id", id, "err", err) return nil, err } + if len(data) == 0 { + c.lg.Warn("BlockBlobs not found", "id", id) + return nil, fmt.Errorf("not found: id=%d", id) + } item := new(blockBlobs) if err := rlp.DecodeBytes(data, item); err != nil { - log.Error("Failed to decode block", "id", id, "err", err) + c.lg.Error("Failed to decode block", "id", id, "err", err) return nil, err } - log.Debug("Get blockBlobs by id", "id", id, "blockBlobs", item) + c.lg.Debug("Get blockBlobs by 
id", "id", id, "blockBlobs", item) return item, nil } func (c *BlobDiskCache) Close() error { + c.lg.Warn("Closing BlobDiskCache") return c.store.Close() } @@ -173,7 +182,6 @@ func newSlotter() func() (uint32, bool) { return func() (size uint32, done bool) { slotsize += blobSize finished := slotsize >= maxBlobsPerTransaction*blobSize - log.Debug("new slotter", "slotSize", slotsize, "finished", finished) return slotsize, finished } } diff --git a/ethstorage/downloader/blob_disk_cache_test.go b/ethstorage/downloader/blob_disk_cache_test.go index 72e330d3..a6889211 100644 --- a/ethstorage/downloader/blob_disk_cache_test.go +++ b/ethstorage/downloader/blob_disk_cache_test.go @@ -1,10 +1,12 @@ package downloader import ( + "bytes" "os" "path/filepath" - "reflect" "testing" + + "github.com/ethstorage/go-ethstorage/ethstorage/log" ) func TestBlobCache(t *testing.T) { @@ -15,8 +17,21 @@ func TestBlobCache(t *testing.T) { t.Fatalf("Failed to create datadir: %v", err) } t.Logf("datadir %s", datadir) - defer os.RemoveAll(datadir) - cache := NewBlobDiskCache() + cache := NewBlobDiskCache(log.NewLogger(log.CLIConfig{ + Level: "debug", + Format: "text", + })) + + defer func() { + err := cache.Close() + if err != nil { + t.Fatalf("Failed to close BlobCache: %v", err) + } + err = os.RemoveAll(datadir) + if err != nil { + t.Fatalf("Failed to remove datadir: %v", err) + } + }() err = cache.Init(datadir) if err != nil { @@ -40,8 +55,8 @@ func TestBlobCache(t *testing.T) { for i, blob := range block.blobs { blobData := cache.GetKeyValueByIndex(uint64(i), blob.hash) - if !reflect.DeepEqual(blobData, blob.data) { - t.Fatalf("Unexpected blob data at index %d: got %+v, want %+v", i, blobData, blob.data) + if !bytes.Equal(blobData, blob.data) { + t.Fatalf("Unexpected blob data at index %d: got %x, want %x", i, blobData, blob.data) } } @@ -51,10 +66,22 @@ func TestBlobCache(t *testing.T) { t.Fatalf("Unexpected number of blobs after cleanup: got %d, want %d", len(blobsAfterCleanup), len(block.blobs)) } - err = cache.Close() + block, err = newBlockBlobs(20, 2) if err != nil { - t.Fatalf("Failed to close BlobCache: %v", err) + t.Fatalf("Failed to create new block blobs: %v", err) } + + err = cache.SetBlockBlobs(block) + if err != nil { + t.Fatalf("Failed to set block blobs: %v", err) + } + + cache.Cleanup(15) + blobsAfterCleanup = cache.Blobs(block.hash) + if len(blobsAfterCleanup) != len(block.blobs) { + t.Fatalf("Unexpected number of blobs after cleanup: got %d, want %d", len(blobsAfterCleanup), len(block.blobs)) + } + } func TestNewSlotter(t *testing.T) { diff --git a/ethstorage/node/node.go b/ethstorage/node/node.go index 9903aa2b..dc97b396 100644 --- a/ethstorage/node/node.go +++ b/ethstorage/node/node.go @@ -136,7 +136,7 @@ func (n *EsNode) init(ctx context.Context, cfg *Config) error { } func (n *EsNode) initL2(ctx context.Context, cfg *Config) error { - n.blobCache = downloader.NewBlobDiskCache() + n.blobCache = downloader.NewBlobDiskCache(n.log) n.downloader = downloader.NewDownloader( n.l1Source, n.l1Beacon, From 80146519c014e710070bb336a6245b40ce4c23af Mon Sep 17 00:00:00 2001 From: syntrust Date: Fri, 12 Jul 2024 15:36:33 +0800 Subject: [PATCH 31/61] fix test --- ethstorage/downloader/blob_cache_test.go | 115 ++++++++++++++++-- ethstorage/downloader/blob_disk_cache.go | 7 +- ethstorage/downloader/blob_disk_cache_test.go | 101 --------------- 3 files changed, 110 insertions(+), 113 deletions(-) delete mode 100644 ethstorage/downloader/blob_disk_cache_test.go diff --git 
a/ethstorage/downloader/blob_cache_test.go b/ethstorage/downloader/blob_cache_test.go index 613ad98c..764f550a 100644 --- a/ethstorage/downloader/blob_cache_test.go +++ b/ethstorage/downloader/blob_cache_test.go @@ -8,17 +8,20 @@ import ( "fmt" "math/big" "os" + "path/filepath" "testing" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto/kzg4844" "github.com/ethstorage/go-ethstorage/ethstorage" + "github.com/ethstorage/go-ethstorage/ethstorage/log" "github.com/protolambda/go-kzg/eth" ) var ( - bc BlobCache + cache BlobCache kvHashes []common.Hash + datadir string fileName = "test_shard_0.dat" blobData = "blob data of kvIndex %d" minerAddr = common.BigToAddress(common.Big1) @@ -27,18 +30,82 @@ var ( shardID = uint64(0) ) -func init() { - bc = NewBlobMemCache() +func TestNewSlotter(t *testing.T) { + slotter := newSlotter() + var lastSize uint32 + for i := 0; i < 10; i++ { + size, done := slotter() + lastSize = size + if done { + break + } + } + expected := uint32(maxBlobsPerTransaction * blobSize) + if lastSize != expected { + t.Errorf("Slotter returned incorrect total size: got %d, want %d", lastSize, expected) + } } -func TestBlobCache_Encoding(t *testing.T) { +func TestDiskBlobCache(t *testing.T) { + setup(t) + defer teardown(t) + + block, err := newBlockBlobs(10, 4) + if err != nil { + t.Fatalf("Failed to create new block blobs: %v", err) + } + + err = cache.SetBlockBlobs(block) + if err != nil { + t.Fatalf("Failed to set block blobs: %v", err) + } + + blobs := cache.Blobs(block.hash) + if len(blobs) != len(block.blobs) { + t.Fatalf("Unexpected number of blobs: got %d, want %d", len(blobs), len(block.blobs)) + } + + for i, blob := range block.blobs { + blobData := cache.GetKeyValueByIndex(uint64(i), blob.hash) + if !bytes.Equal(blobData, blob.data) { + t.Fatalf("Unexpected blob data at index %d: got %x, want %x", i, blobData, blob.data) + } + } + + cache.Cleanup(5) + blobsAfterCleanup := cache.Blobs(block.hash) + if len(blobsAfterCleanup) != len(block.blobs) { + t.Fatalf("Unexpected number of blobs after cleanup: got %d, want %d", len(blobsAfterCleanup), len(block.blobs)) + } + + block, err = newBlockBlobs(20, 6) + if err != nil { + t.Fatalf("Failed to create new block blobs: %v", err) + } + + err = cache.SetBlockBlobs(block) + if err != nil { + t.Fatalf("Failed to set block blobs: %v", err) + } + + cache.Cleanup(15) + blobsAfterCleanup = cache.Blobs(block.hash) + if len(blobsAfterCleanup) != len(block.blobs) { + t.Fatalf("Unexpected number of blobs after cleanup: got %d, want %d", len(blobsAfterCleanup), len(block.blobs)) + } +} + +func TestEncoding(t *testing.T) { + setup(t) + defer teardown(t) + blockBlobsParams := []struct { blockNum uint64 blobLen uint64 }{ {0, 1}, {1, 5}, - {222, 6}, + // {222, 6}, {1000, 4}, {12345, 2}, {2000000, 3}, @@ -66,14 +133,16 @@ func TestBlobCache_Encoding(t *testing.T) { for i, b := range bb.blobs { bb.blobs[i].data = sm.EncodeBlob(b.data, b.hash, b.kvIndex.Uint64(), kvSize) } - bc.SetBlockBlobs(bb) + if err := cache.SetBlockBlobs(bb); err != nil { + t.Fatalf("failed to set block blobs: %v", err) + } } // load from cache and verify for i, kvHash := range kvHashes { kvIndex := uint64(i) t.Run(fmt.Sprintf("test kv: %d", i), func(t *testing.T) { - blobEncoded := bc.GetKeyValueByIndexUnchecked(kvIndex) + blobEncoded := cache.GetKeyValueByIndexUnchecked(kvIndex) blobDecoded := sm.DecodeBlob(blobEncoded, kvHash, kvIndex, kvSize) bytesWant := []byte(fmt.Sprintf(blobData, kvIndex)) if !bytes.Equal(blobDecoded[:len(bytesWant)], 
bytesWant) { @@ -109,3 +178,35 @@ func newBlockBlobs(blockNumber, blobLen uint64) (*blockBlobs, error) { } return block, nil } + +func setup(t *testing.T) { + // cache = NewBlobMemCache() + tmpDir := t.TempDir() + datadir = filepath.Join(tmpDir, "datadir") + err := os.MkdirAll(datadir, 0700) + if err != nil { + t.Fatalf("Failed to create datadir: %v", err) + } + t.Logf("datadir %s", datadir) + cache = NewBlobDiskCache(log.NewLogger(log.CLIConfig{ + Level: "debug", + Format: "text", + })) + + err = cache.Init(datadir) + if err != nil { + t.Fatalf("Failed to initialize BlobCache: %v", err) + } +} + +func teardown(t *testing.T) { + err := cache.Close() + if err != nil { + t.Fatalf("Failed to close BlobCache: %v", err) + } + err = os.RemoveAll(datadir) + if err != nil { + t.Fatalf("Failed to remove datadir: %v", err) + } + kvHashes = nil +} diff --git a/ethstorage/downloader/blob_disk_cache.go b/ethstorage/downloader/blob_disk_cache.go index 0b1eac0d..13913679 100644 --- a/ethstorage/downloader/blob_disk_cache.go +++ b/ethstorage/downloader/blob_disk_cache.go @@ -115,7 +115,7 @@ func (c *BlobDiskCache) getBlobByIndex(idx uint64) *blob { for _, id := range c.lookup { block, err := c.getBlockBlobsById(id) - if err != nil { + if err != nil || block == nil { return nil } for _, blob := range block.blobs { @@ -127,9 +127,6 @@ func (c *BlobDiskCache) getBlobByIndex(idx uint64) *blob { return nil } -// TODO: @Qiang An edge case that may need to be handled when Ethereum block is NOT finalized for a long time -// We may need to add a counter in SetBlockBlobs(), if the counter is greater than a threshold which means -// there has been a long time after last Cleanup, so we need to Cleanup anyway in SetBlockBlobs. func (c *BlobDiskCache) Cleanup(finalized uint64) { c.mu.Lock() defer c.mu.Unlock() @@ -140,7 +137,7 @@ func (c *BlobDiskCache) Cleanup(finalized uint64) { c.lg.Warn("Failed to get block from id", "id", id, "err", err) continue } - if block.number <= finalized { + if block != nil && block.number <= finalized { if err := c.store.Delete(id); err != nil { c.lg.Error("Failed to delete block from id", "id", id, "err", err) } diff --git a/ethstorage/downloader/blob_disk_cache_test.go b/ethstorage/downloader/blob_disk_cache_test.go deleted file mode 100644 index a6889211..00000000 --- a/ethstorage/downloader/blob_disk_cache_test.go +++ /dev/null @@ -1,101 +0,0 @@ -package downloader - -import ( - "bytes" - "os" - "path/filepath" - "testing" - - "github.com/ethstorage/go-ethstorage/ethstorage/log" -) - -func TestBlobCache(t *testing.T) { - tmpDir := t.TempDir() - datadir := filepath.Join(tmpDir, "datadir") - err := os.MkdirAll(datadir, 0700) - if err != nil { - t.Fatalf("Failed to create datadir: %v", err) - } - t.Logf("datadir %s", datadir) - cache := NewBlobDiskCache(log.NewLogger(log.CLIConfig{ - Level: "debug", - Format: "text", - })) - - defer func() { - err := cache.Close() - if err != nil { - t.Fatalf("Failed to close BlobCache: %v", err) - } - err = os.RemoveAll(datadir) - if err != nil { - t.Fatalf("Failed to remove datadir: %v", err) - } - }() - - err = cache.Init(datadir) - if err != nil { - t.Fatalf("Failed to initialize BlobCache: %v", err) - } - - block, err := newBlockBlobs(10, 4) - if err != nil { - t.Fatalf("Failed to create new block blobs: %v", err) - } - - err = cache.SetBlockBlobs(block) - if err != nil { - t.Fatalf("Failed to set block blobs: %v", err) - } - - blobs := cache.Blobs(block.hash) - if len(blobs) != len(block.blobs) { - t.Fatalf("Unexpected number of blobs: got %d, want 
%d", len(blobs), len(block.blobs)) - } - - for i, blob := range block.blobs { - blobData := cache.GetKeyValueByIndex(uint64(i), blob.hash) - if !bytes.Equal(blobData, blob.data) { - t.Fatalf("Unexpected blob data at index %d: got %x, want %x", i, blobData, blob.data) - } - } - - cache.Cleanup(5) - blobsAfterCleanup := cache.Blobs(block.hash) - if len(blobsAfterCleanup) != len(block.blobs) { - t.Fatalf("Unexpected number of blobs after cleanup: got %d, want %d", len(blobsAfterCleanup), len(block.blobs)) - } - - block, err = newBlockBlobs(20, 2) - if err != nil { - t.Fatalf("Failed to create new block blobs: %v", err) - } - - err = cache.SetBlockBlobs(block) - if err != nil { - t.Fatalf("Failed to set block blobs: %v", err) - } - - cache.Cleanup(15) - blobsAfterCleanup = cache.Blobs(block.hash) - if len(blobsAfterCleanup) != len(block.blobs) { - t.Fatalf("Unexpected number of blobs after cleanup: got %d, want %d", len(blobsAfterCleanup), len(block.blobs)) - } - -} - -func TestNewSlotter(t *testing.T) { - slotter := newSlotter() - var lastSize uint32 - for i := 0; i < 10; i++ { - size, done := slotter() - lastSize = size - if done { - break - } - } - expected := uint32(maxBlobsPerTransaction * blobSize) - if lastSize != expected { - t.Errorf("Slotter returned incorrect total size: got %d, want %d", lastSize, expected) - } -} From e4602efd2b4a6c2ae3f3d4a991466ced847318eb Mon Sep 17 00:00:00 2001 From: syntrust Date: Fri, 12 Jul 2024 16:47:04 +0800 Subject: [PATCH 32/61] refactor --- ethstorage/downloader/blob_disk_cache.go | 23 +++++++++-------------- ethstorage/downloader/blob_mem_cache.go | 4 ---- ethstorage/downloader/downloader.go | 8 ++------ ethstorage/node/node.go | 17 ++++++++++------- 4 files changed, 21 insertions(+), 31 deletions(-) diff --git a/ethstorage/downloader/blob_disk_cache.go b/ethstorage/downloader/blob_disk_cache.go index 13913679..9557bf3e 100644 --- a/ethstorage/downloader/blob_disk_cache.go +++ b/ethstorage/downloader/blob_disk_cache.go @@ -31,26 +31,21 @@ type BlobDiskCache struct { mu sync.RWMutex } -func NewBlobDiskCache(lg log.Logger) *BlobDiskCache { - return &BlobDiskCache{ - lookup: make(map[common.Hash]uint64), - lg: lg, - } -} - -func (c *BlobDiskCache) Init(datadir string) error { +func NewBlobDiskCache(datadir string, lg log.Logger) *BlobDiskCache { cbdir := filepath.Join(datadir, blobCacheDir) if err := os.MkdirAll(cbdir, 0700); err != nil { - c.lg.Error("Failed to create cache directory", "dir", cbdir, "err", err) - return err + lg.Crit("Failed to create cache directory", "dir", cbdir, "err", err) } store, err := billy.Open(billy.Options{Path: cbdir, Repair: true}, newSlotter(), nil) if err != nil { - c.lg.Error("Failed to open cache directory", "dir", cbdir, "err", err) - return err + lg.Crit("Failed to open cache directory", "dir", cbdir, "err", err) + } + lg.Info("BlobDiskCache initialized", "dir", cbdir) + return &BlobDiskCache{ + store: store, + lookup: make(map[common.Hash]uint64), + lg: lg, } - c.store = store - return nil } func (c *BlobDiskCache) SetBlockBlobs(block *blockBlobs) error { diff --git a/ethstorage/downloader/blob_mem_cache.go b/ethstorage/downloader/blob_mem_cache.go index 55ca2ef5..32db298f 100644 --- a/ethstorage/downloader/blob_mem_cache.go +++ b/ethstorage/downloader/blob_mem_cache.go @@ -23,10 +23,6 @@ func NewBlobMemCache() *BlobMemCache { } } -func (c *BlobMemCache) Init(datadir string) error { - return nil -} - func (c *BlobMemCache) SetBlockBlobs(block *blockBlobs) error { c.mu.Lock() defer c.mu.Unlock() diff --git 
a/ethstorage/downloader/downloader.go b/ethstorage/downloader/downloader.go index d15f181d..ae1fdded 100644 --- a/ethstorage/downloader/downloader.go +++ b/ethstorage/downloader/downloader.go @@ -41,7 +41,6 @@ var ( ) type BlobCache interface { - Init(datadir string) error SetBlockBlobs(block *blockBlobs) error Blobs(hash common.Hash) []blob GetKeyValueByIndex(idx uint64, hash common.Hash) []byte @@ -190,9 +189,6 @@ func NewDownloader( // Start starts up the state loop. func (s *Downloader) Start(datadir string) error { - if err := s.Cache.Init(datadir); err != nil { - return err - } // user does NOT specify a download start in the flag if s.lastDownloadBlock == 0 { bs, err := s.db.Get(append(downloaderPrefix, lastDownloadKey...)) @@ -235,7 +231,7 @@ func (s *Downloader) Start(datadir string) error { func (s *Downloader) Close() error { s.done <- struct{}{} s.wg.Wait() - return s.Cache.Close() + return nil } func (s *Downloader) OnL1Finalized(finalized uint64) { @@ -451,7 +447,7 @@ func (s *Downloader) downloadRange(start int64, end int64, toCache bool) ([]blob // encode blobs so that miner can do sampling directly from cache elBlob.data = s.sm.EncodeBlob(clBlob.Data, elBlob.hash, elBlob.kvIndex.Uint64(), s.sm.MaxKvSize()) blobs = append(blobs, *elBlob) - s.log.Info("Download range", "blockNumber", elBlock.number, "kvIdx", elBlob.kvIndex) + s.log.Info("Downloaded and encoded", "blockNumber", elBlock.number, "kvIdx", elBlob.kvIndex) } if toCache { if err := s.Cache.SetBlockBlobs(elBlock); err != nil { diff --git a/ethstorage/node/node.go b/ethstorage/node/node.go index dc97b396..42021b1a 100644 --- a/ethstorage/node/node.go +++ b/ethstorage/node/node.go @@ -41,12 +41,12 @@ type EsNode struct { l1FinalizedSub ethereum.Subscription // Subscription to get L1 Finalized blocks, a.k.a. 
justified data (polling) randaoHeadsSub ethereum.Subscription // Subscription to get randao heads (automatically re-subscribes on error) - randaoSource *eth.RandaoClient // RPC client to fetch randao from - l1Source *eth.PollingClient // L1 Client to fetch data from - l1Beacon *eth.BeaconClient // L1 Beacon Chain to fetch blobs from - daClient *eth.DAClient // L1 Data Availability Client - blobCache *downloader.BlobDiskCache // Cache for blobs - downloader *downloader.Downloader // L2 Engine to Sync + randaoSource *eth.RandaoClient // RPC client to fetch randao from + l1Source *eth.PollingClient // L1 Client to fetch data from + l1Beacon *eth.BeaconClient // L1 Beacon Chain to fetch blobs from + daClient *eth.DAClient // L1 Data Availability Client + blobCache downloader.BlobCache // Cache for blobs + downloader *downloader.Downloader // L2 Engine to Sync // l2Source *sources.EngineClient // L2 Execution Engine RPC bindings // rpcSync *sources.SyncClient // Alt-sync RPC client, optional (may be nil) server *rpcServer // RPC server hosting the rollup-node API @@ -136,7 +136,7 @@ func (n *EsNode) init(ctx context.Context, cfg *Config) error { } func (n *EsNode) initL2(ctx context.Context, cfg *Config) error { - n.blobCache = downloader.NewBlobDiskCache(n.log) + n.blobCache = downloader.NewBlobDiskCache(cfg.DataDir, n.log) n.downloader = downloader.NewDownloader( n.l1Source, n.l1Beacon, @@ -511,6 +511,9 @@ func (n *EsNode) Close() error { if n.miner != nil { n.miner.Close() } + if n.blobCache != nil { + n.blobCache.Close() + } if n.archiverAPI != nil { n.archiverAPI.Stop(context.Background()) From fd7dcd35466eac89d3d4c2be0e13ab72197feae5 Mon Sep 17 00:00:00 2001 From: syntrust Date: Fri, 12 Jul 2024 17:12:43 +0800 Subject: [PATCH 33/61] fix test --- ethstorage/downloader/blob_cache_test.go | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/ethstorage/downloader/blob_cache_test.go b/ethstorage/downloader/blob_cache_test.go index 764f550a..e450c549 100644 --- a/ethstorage/downloader/blob_cache_test.go +++ b/ethstorage/downloader/blob_cache_test.go @@ -188,15 +188,10 @@ func setup(t *testing.T) { t.Fatalf("Failed to create datadir: %v", err) } t.Logf("datadir %s", datadir) - cache = NewBlobDiskCache(log.NewLogger(log.CLIConfig{ + cache = NewBlobDiskCache(datadir, log.NewLogger(log.CLIConfig{ Level: "debug", Format: "text", })) - - err = cache.Init(datadir) - if err != nil { - t.Fatalf("Failed to initialize BlobCache: %v", err) - } } func teardown(t *testing.T) { From 9e5c75130e8badb350e1ffb01e5842cea84c9823 Mon Sep 17 00:00:00 2001 From: syntrust Date: Fri, 12 Jul 2024 19:23:06 +0800 Subject: [PATCH 34/61] test perf --- ethstorage/downloader/blob_cache_test.go | 35 ++++++++++++++++++++++++ ethstorage/downloader/blob_disk_cache.go | 14 +++++++--- 2 files changed, 45 insertions(+), 4 deletions(-) diff --git a/ethstorage/downloader/blob_cache_test.go b/ethstorage/downloader/blob_cache_test.go index e450c549..87fc4c49 100644 --- a/ethstorage/downloader/blob_cache_test.go +++ b/ethstorage/downloader/blob_cache_test.go @@ -9,7 +9,9 @@ import ( "math/big" "os" "path/filepath" + "runtime/pprof" "testing" + "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto/kzg4844" @@ -152,6 +154,39 @@ func TestEncoding(t *testing.T) { } } +func TestPerf(t *testing.T) { + t.SkipNow() + + setup(t) + defer teardown(t) + + for i := uint64(0); i < 1000; i++ { + bb, err := newBlockBlobs(i, 6) + if err != nil { + t.Fatalf("failed to create block blobs: %v", err) + } + 
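		// Each of the 1000 generated blocks (6 blobs apiece) is cached here; the
		// profiled loop further down then looks every kvIndex up again. At this
		// point in the series GetKeyValueByIndexUnchecked still walks the lookup
		// map and RLP-decodes whole blockBlobs entries on every call, which is the
		// cost this benchmark measures (the per-kvIndex id index added a couple of
		// commits later avoids that scan).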
if err := cache.SetBlockBlobs(bb); err != nil { + t.Fatalf("failed to set block blobs: %v", err) + } + } + + cpuProfile, err := os.Create("cpu_profile.pprof") + if err != nil { + panic(err) + } + if err := pprof.StartCPUProfile(cpuProfile); err != nil { + panic(err) + } + defer pprof.StopCPUProfile() + + startTime := time.Now() + for i := range kvHashes { + kvIndex := uint64(i) + cache.GetKeyValueByIndexUnchecked(kvIndex) + } + t.Logf("Total spent %fs", time.Since(startTime).Seconds()) +} + func newBlockBlobs(blockNumber, blobLen uint64) (*blockBlobs, error) { block := &blockBlobs{ number: blockNumber, diff --git a/ethstorage/downloader/blob_disk_cache.go b/ethstorage/downloader/blob_disk_cache.go index 9557bf3e..c61c40c2 100644 --- a/ethstorage/downloader/blob_disk_cache.go +++ b/ethstorage/downloader/blob_disk_cache.go @@ -97,9 +97,16 @@ func (c *BlobDiskCache) GetKeyValueByIndex(idx uint64, hash common.Hash) []byte } func (c *BlobDiskCache) GetKeyValueByIndexUnchecked(idx uint64) []byte { - blob := c.getBlobByIndex(idx) - if blob != nil { - return blob.data + for _, id := range c.lookup { + block, err := c.getBlockBlobsById(id) + if err != nil || block == nil { + return nil + } + for _, blob := range block.blobs { + if blob.kvIndex.Uint64() == idx { + return blob.data + } + } } return nil } @@ -157,7 +164,6 @@ func (c *BlobDiskCache) getBlockBlobsById(id uint64) (*blockBlobs, error) { c.lg.Error("Failed to decode block", "id", id, "err", err) return nil, err } - c.lg.Debug("Get blockBlobs by id", "id", id, "blockBlobs", item) return item, nil } From a1370f7298e9021b3cbf868f1bf54dcd146586d6 Mon Sep 17 00:00:00 2001 From: syntrust Date: Mon, 15 Jul 2024 18:11:25 +0800 Subject: [PATCH 35/61] minor --- ethstorage/blobs/blob_reader.go | 1 - ethstorage/downloader/blob_disk_cache.go | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/ethstorage/blobs/blob_reader.go b/ethstorage/blobs/blob_reader.go index f18d460c..7c5055f1 100644 --- a/ethstorage/blobs/blob_reader.go +++ b/ethstorage/blobs/blob_reader.go @@ -55,7 +55,6 @@ func (n *BlobReader) ReadSample(shardIdx, sampleIdx uint64) (common.Hash, error) kvIdx := sampleIdx >> sampleLenBits // get blob without checking commit since kvHash is not available if blob := n.cr.GetKeyValueByIndexUnchecked(kvIdx); blob != nil { - n.lg.Debug("Loaded blob from downloader cache", "kvIdx", kvIdx) sampleIdxInKv := sampleIdx % (1 << sampleLenBits) sampleSize := uint64(1 << es.SampleSizeBits) sampleIdxByte := sampleIdxInKv << es.SampleSizeBits diff --git a/ethstorage/downloader/blob_disk_cache.go b/ethstorage/downloader/blob_disk_cache.go index c61c40c2..3bd4056e 100644 --- a/ethstorage/downloader/blob_disk_cache.go +++ b/ethstorage/downloader/blob_disk_cache.go @@ -96,6 +96,7 @@ func (c *BlobDiskCache) GetKeyValueByIndex(idx uint64, hash common.Hash) []byte return nil } +// Access without a lock or verification through a hash: only for miner sampling func (c *BlobDiskCache) GetKeyValueByIndexUnchecked(idx uint64) []byte { for _, id := range c.lookup { block, err := c.getBlockBlobsById(id) From 180b70e99a30e9560abac15f0fec7de32e4ea5a5 Mon Sep 17 00:00:00 2001 From: syntrust Date: Tue, 16 Jul 2024 17:12:16 +0800 Subject: [PATCH 36/61] add lock to store --- ethstorage/downloader/blob_disk_cache.go | 124 ++++++++++++++++------- 1 file changed, 87 insertions(+), 37 deletions(-) diff --git a/ethstorage/downloader/blob_disk_cache.go b/ethstorage/downloader/blob_disk_cache.go index 3bd4056e..54e3110a 100644 --- a/ethstorage/downloader/blob_disk_cache.go 
+++ b/ethstorage/downloader/blob_disk_cache.go @@ -27,8 +27,9 @@ const ( type BlobDiskCache struct { store billy.Database lookup map[common.Hash]uint64 // Lookup table mapping hashes to blob billy entries id + index map[uint64]uint64 // Lookup table mapping kvIndex to blob billy entries id + mu sync.RWMutex // protects store, lookup and index maps lg log.Logger - mu sync.RWMutex } func NewBlobDiskCache(datadir string, lg log.Logger) *BlobDiskCache { @@ -36,16 +37,63 @@ func NewBlobDiskCache(datadir string, lg log.Logger) *BlobDiskCache { if err := os.MkdirAll(cbdir, 0700); err != nil { lg.Crit("Failed to create cache directory", "dir", cbdir, "err", err) } - store, err := billy.Open(billy.Options{Path: cbdir, Repair: true}, newSlotter(), nil) + c := &BlobDiskCache{ + lookup: make(map[common.Hash]uint64), + index: make(map[uint64]uint64), + lg: lg, + } + var ( + fails []uint64 + totalSize uint32 + totalBlobs int + ) + indexer := func(id uint64, size uint32, data []byte) { + totalSize += size + blobs, err := c.parseBlockBlobs(id, data) + if err != nil { + fails = append(fails, id) + } + totalBlobs += blobs + } + + store, err := billy.Open(billy.Options{Path: cbdir, Repair: true}, newSlotter(), indexer) if err != nil { lg.Crit("Failed to open cache directory", "dir", cbdir, "err", err) } - lg.Info("BlobDiskCache initialized", "dir", cbdir) - return &BlobDiskCache{ - store: store, - lookup: make(map[common.Hash]uint64), - lg: lg, + c.store = store + + if len(fails) > 0 { + log.Warn("Dropping invalidated cached entries", "ids", fails) + for _, id := range fails { + if err := c.store.Delete(id); err != nil { + log.Error("Failed to delete invalidated cached entry", "id", id, "err", err) + } + } + } + lg.Info("BlobDiskCache initialized", "dir", cbdir, "entries", len(c.lookup), "blobs", totalBlobs, "size", totalSize) + return c +} + +// parseBlockBlobs is a callback method on pool creation that gets called for +// each blockBlobs on disk to create the in-memory index. 
+func (c *BlobDiskCache) parseBlockBlobs(id uint64, data []byte) (int, error) { + bb := new(blockBlobs) + if err := rlp.DecodeBytes(data, bb); err != nil { + c.lg.Error("Failed to decode blockBlobs from RLP", "id", id, "err", err) + return 0, err } + + c.mu.Lock() + defer c.mu.Unlock() + blobs := 0 + c.lookup[bb.hash] = id + c.lg.Info("Indexing blockBlobs in cache", "block", bb.number, "hash", bb.hash, "id", id) + for _, b := range bb.blobs { + blobs++ + c.index[b.kvIndex.Uint64()] = id + c.lg.Info("Indexing blob in cache", "kvIdx", b.kvIndex, "hash", b.hash, "id", id) + } + return blobs, nil } func (c *BlobDiskCache) SetBlockBlobs(block *blockBlobs) error { @@ -54,29 +102,32 @@ func (c *BlobDiskCache) SetBlockBlobs(block *blockBlobs) error { c.lg.Error("Failed to encode blockBlobs into RLP", "block", block.number, "err", err) return err } + c.mu.Lock() + defer c.mu.Unlock() + id, err := c.store.Put(rlpBlock) if err != nil { c.lg.Error("Failed to write blockBlobs into storage", "block", block.number, "err", err) return err } - - c.mu.Lock() c.lookup[block.hash] = id - c.mu.Unlock() - - c.lg.Debug("Set blockBlobs to cache", "id", id, "block", block.number) + for _, b := range block.blobs { + c.index[b.kvIndex.Uint64()] = id + c.lg.Debug("Indexing blob in cache", "kvIdx", b.kvIndex, "hash", b.hash, "id", id) + } + c.lg.Info("Set blockBlobs to cache", "block", block.number, "id", id) return nil } func (c *BlobDiskCache) Blobs(hash common.Hash) []blob { c.mu.RLock() id, ok := c.lookup[hash] - c.mu.RUnlock() if !ok { return nil } block, err := c.getBlockBlobsById(id) - if err != nil { + c.mu.RUnlock() + if err != nil || block == nil { return nil } c.lg.Info("Blobs from cache", "block", block.number, "id", id) @@ -96,35 +147,29 @@ func (c *BlobDiskCache) GetKeyValueByIndex(idx uint64, hash common.Hash) []byte return nil } -// Access without a lock or verification through a hash: only for miner sampling +// Access without verification through a hash: only for miner sampling func (c *BlobDiskCache) GetKeyValueByIndexUnchecked(idx uint64) []byte { - for _, id := range c.lookup { - block, err := c.getBlockBlobsById(id) - if err != nil || block == nil { - return nil - } - for _, blob := range block.blobs { - if blob.kvIndex.Uint64() == idx { - return blob.data - } - } + blob := c.getBlobByIndex(idx) + if blob != nil { + return blob.data } return nil } func (c *BlobDiskCache) getBlobByIndex(idx uint64) *blob { c.mu.RLock() - defer c.mu.RUnlock() - - for _, id := range c.lookup { - block, err := c.getBlockBlobsById(id) - if err != nil || block == nil { - return nil - } - for _, blob := range block.blobs { - if blob.kvIndex.Uint64() == idx { - return blob - } + id, ok := c.index[idx] + if !ok { + return nil + } + block, err := c.getBlockBlobsById(id) + c.mu.RUnlock() + if err != nil || block == nil { + return nil + } + for _, blob := range block.blobs { + if blob != nil && blob.kvIndex.Uint64() == idx { + return blob } } return nil @@ -137,7 +182,7 @@ func (c *BlobDiskCache) Cleanup(finalized uint64) { for hash, id := range c.lookup { block, err := c.getBlockBlobsById(id) if err != nil { - c.lg.Warn("Failed to get block from id", "id", id, "err", err) + c.lg.Error("Failed to get block from id", "id", id, "err", err) continue } if block != nil && block.number <= finalized { @@ -145,6 +190,11 @@ func (c *BlobDiskCache) Cleanup(finalized uint64) { c.lg.Error("Failed to delete block from id", "id", id, "err", err) } delete(c.lookup, hash) + for _, blob := range block.blobs { + if blob != nil && blob.kvIndex 
!= nil { + delete(c.index, blob.kvIndex.Uint64()) + } + } c.lg.Info("Cleanup deleted", "finalized", finalized, "block", block.number, "id", id) } } From 094d3a60c1741e89cfb01836264a66fe6550e814 Mon Sep 17 00:00:00 2001 From: syntrust Date: Tue, 16 Jul 2024 17:13:20 +0800 Subject: [PATCH 37/61] minor --- ethstorage/downloader/blob_cache_test.go | 35 ------------------------ ethstorage/node/node.go | 8 ++++-- 2 files changed, 6 insertions(+), 37 deletions(-) diff --git a/ethstorage/downloader/blob_cache_test.go b/ethstorage/downloader/blob_cache_test.go index 87fc4c49..e450c549 100644 --- a/ethstorage/downloader/blob_cache_test.go +++ b/ethstorage/downloader/blob_cache_test.go @@ -9,9 +9,7 @@ import ( "math/big" "os" "path/filepath" - "runtime/pprof" "testing" - "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto/kzg4844" @@ -154,39 +152,6 @@ func TestEncoding(t *testing.T) { } } -func TestPerf(t *testing.T) { - t.SkipNow() - - setup(t) - defer teardown(t) - - for i := uint64(0); i < 1000; i++ { - bb, err := newBlockBlobs(i, 6) - if err != nil { - t.Fatalf("failed to create block blobs: %v", err) - } - if err := cache.SetBlockBlobs(bb); err != nil { - t.Fatalf("failed to set block blobs: %v", err) - } - } - - cpuProfile, err := os.Create("cpu_profile.pprof") - if err != nil { - panic(err) - } - if err := pprof.StartCPUProfile(cpuProfile); err != nil { - panic(err) - } - defer pprof.StopCPUProfile() - - startTime := time.Now() - for i := range kvHashes { - kvIndex := uint64(i) - cache.GetKeyValueByIndexUnchecked(kvIndex) - } - t.Logf("Total spent %fs", time.Since(startTime).Seconds()) -} - func newBlockBlobs(blockNumber, blobLen uint64) (*blockBlobs, error) { block := &blockBlobs{ number: blockNumber, diff --git a/ethstorage/node/node.go b/ethstorage/node/node.go index 42021b1a..68fe05ca 100644 --- a/ethstorage/node/node.go +++ b/ethstorage/node/node.go @@ -485,12 +485,13 @@ func (n *EsNode) Close() error { result = multierror.Append(result, fmt.Errorf("failed to close p2p node: %w", err)) } } - + n.log.Info("P2p closed") if n.downloader != nil { if err := n.downloader.Close(); err != nil { result = multierror.Append(result, fmt.Errorf("failed to close downloader: %w", err)) } } + n.log.Info("Downloader closed") // if n.p2pSigner != nil { // if err := n.p2pSigner.Close(); err != nil { // result = multierror.Append(result, fmt.Errorf("failed to close p2p signer: %w", err)) @@ -511,8 +512,11 @@ func (n *EsNode) Close() error { if n.miner != nil { n.miner.Close() } + n.log.Info("Miner closed") if n.blobCache != nil { - n.blobCache.Close() + if err := n.blobCache.Close(); err != nil { + result = multierror.Append(result, fmt.Errorf("failed to close blob cache: %w", err)) + } } if n.archiverAPI != nil { From 20fec74363ae6ffc3cbafe669692b8f5acb31f6d Mon Sep 17 00:00:00 2001 From: qiang Date: Tue, 16 Jul 2024 17:51:20 +0800 Subject: [PATCH 38/61] support testnet --- run-l2.sh | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/run-l2.sh b/run-l2.sh index 6e2350b6..620dcd82 100755 --- a/run-l2.sh +++ b/run-l2.sh @@ -138,8 +138,8 @@ data_dir="./es-data" storage_file_0="$data_dir/shard-0.dat" common_flags=" --datadir $data_dir \ - --l1.rpc http://142.132.154.16:8545 \ - --storage.l1contract 0x90a708C0dca081ca48a9851a8A326775155f87Fd \ + --l1.rpc http://65.109.20.29:8545 \ + --storage.l1contract 0x64003adbdf3014f7E38FC6BE752EB047b95da89A \ --storage.miner $ES_NODE_STORAGE_MINER \ " @@ -153,13 +153,12 @@ es_node_start=" --network devnet \ 
--miner.zkey $zkey_name \ --storage.files $storage_file_0 \ --signer.private-key $ES_NODE_SIGNER_PRIVATE_KEY \ - --da.url http://142.132.154.16:8888 \ + --da.url http://65.109.20.29:8888 \ --randao.url http://88.99.30.186:8545 \ --l1.block_time 2 \ --download.thread 32 \ --p2p.listen.udp 30305 \ --p2p.sync.concurrency 32 \ - --p2p.bootnodes enr:-Li4QGUAA21O-0pgqnGoBLwvvminrlDjfxhqL6DvXhfOtvNdK871LELAT1Nn-NAa3hUi0Wmb-VIj1qi6fnbyA9yp5RGGAZALHvLnimV0aHN0b3JhZ2XbAYDY15SQpwjA3KCBykiphRqKMmd1FV-H_cGAgmlkgnY0gmlwhEFtMpGJc2VjcDI1NmsxoQJ8_OUONb_H7RMF6kXzZWDut2xriJ5JeKnH2cnb8en0e4N0Y3CCJAaDdWRwgnZh \ $@" # create data file for shard 0 if not yet From 7321930854a24da68c1449b5d191887123341dab Mon Sep 17 00:00:00 2001 From: syntrust Date: Tue, 16 Jul 2024 17:52:03 +0800 Subject: [PATCH 39/61] minor --- ethstorage/downloader/blob_disk_cache.go | 4 ++-- ethstorage/node/node.go | 3 --- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/ethstorage/downloader/blob_disk_cache.go b/ethstorage/downloader/blob_disk_cache.go index 54e3110a..9a4abadc 100644 --- a/ethstorage/downloader/blob_disk_cache.go +++ b/ethstorage/downloader/blob_disk_cache.go @@ -87,11 +87,11 @@ func (c *BlobDiskCache) parseBlockBlobs(id uint64, data []byte) (int, error) { defer c.mu.Unlock() blobs := 0 c.lookup[bb.hash] = id - c.lg.Info("Indexing blockBlobs in cache", "block", bb.number, "hash", bb.hash, "id", id) + c.lg.Debug("Indexing blockBlobs in cache", "block", bb.number, "hash", bb.hash, "id", id) for _, b := range bb.blobs { blobs++ c.index[b.kvIndex.Uint64()] = id - c.lg.Info("Indexing blob in cache", "kvIdx", b.kvIndex, "hash", b.hash, "id", id) + c.lg.Debug("Indexing blob in cache", "kvIdx", b.kvIndex, "hash", b.hash, "id", id) } return blobs, nil } diff --git a/ethstorage/node/node.go b/ethstorage/node/node.go index 68fe05ca..c08dfd9e 100644 --- a/ethstorage/node/node.go +++ b/ethstorage/node/node.go @@ -485,13 +485,11 @@ func (n *EsNode) Close() error { result = multierror.Append(result, fmt.Errorf("failed to close p2p node: %w", err)) } } - n.log.Info("P2p closed") if n.downloader != nil { if err := n.downloader.Close(); err != nil { result = multierror.Append(result, fmt.Errorf("failed to close downloader: %w", err)) } } - n.log.Info("Downloader closed") // if n.p2pSigner != nil { // if err := n.p2pSigner.Close(); err != nil { // result = multierror.Append(result, fmt.Errorf("failed to close p2p signer: %w", err)) @@ -512,7 +510,6 @@ func (n *EsNode) Close() error { if n.miner != nil { n.miner.Close() } - n.log.Info("Miner closed") if n.blobCache != nil { if err := n.blobCache.Close(); err != nil { result = multierror.Append(result, fmt.Errorf("failed to close blob cache: %w", err)) From 6f82f50be9f46912cb2294047f5f3f8f866c4fd4 Mon Sep 17 00:00:00 2001 From: qiang Date: Wed, 17 Jul 2024 10:40:43 +0800 Subject: [PATCH 40/61] new boot node --- run-l2.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/run-l2.sh b/run-l2.sh index 620dcd82..0b8ab4e4 100755 --- a/run-l2.sh +++ b/run-l2.sh @@ -159,6 +159,7 @@ es_node_start=" --network devnet \ --download.thread 32 \ --p2p.listen.udp 30305 \ --p2p.sync.concurrency 32 \ + --p2p.bootnodes enr:-Li4QA-fcxSHHu68uzHxsGnR8Q8lnvPir8L3cb5-RSq5fvU7cmxukZinZ9N-XRcnvWauQl6KK2tnlD3RZTwOxI4KgIaGAZC-hjTfimV0aHN0b3JhZ2XbAYDY15RkADrb3zAU9-OPxr51LrBHuV2omsGAgmlkgnY0gmlwhEFtcySJc2VjcDI1NmsxoQNY8raIsHIGPniQ738UiNmIvifax5L6R51YLPoflGzix4N0Y3CCJAaDdWRwgnZh \ $@" # create data file for shard 0 if not yet From 4e9587b61ddcfa1c7eb43eaf1605623ee3cc4f03 Mon Sep 17 00:00:00 2001 From: qiang Date: 
Wed, 17 Jul 2024 11:33:52 +0800 Subject: [PATCH 41/61] rpc change --- run-l2-rpc.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/run-l2-rpc.sh b/run-l2-rpc.sh index 812e30f1..f8f94b3c 100755 --- a/run-l2-rpc.sh +++ b/run-l2-rpc.sh @@ -8,8 +8,8 @@ data_dir="./es-data" storage_file_0="$data_dir/shard-0.dat" common_flags=" --datadir $data_dir \ - --l1.rpc http://142.132.154.16:8545 \ - --storage.l1contract 0x90a708C0dca081ca48a9851a8A326775155f87Fd \ + --l1.rpc http://65.109.20.29:8545 \ + --storage.l1contract 0x64003adbdf3014f7E38FC6BE752EB047b95da89A \ " # init shard 0 es_node_init="init --shard_index 0" @@ -18,13 +18,13 @@ es_node_init="init --shard_index 0" # TODO remove --network es_node_start=" --network devnet \ --storage.files $storage_file_0 \ - --da.url http://142.132.154.16:8888 \ + --da.url http://65.109.20.29:8888 \ --l1.block_time 2 \ --download.thread 32 \ --rpc.addr 0.0.0.0 \ --p2p.listen.udp 30305 \ --p2p.sync.concurrency 32 \ - --p2p.bootnodes enr:-Li4QGUAA21O-0pgqnGoBLwvvminrlDjfxhqL6DvXhfOtvNdK871LELAT1Nn-NAa3hUi0Wmb-VIj1qi6fnbyA9yp5RGGAZALHvLnimV0aHN0b3JhZ2XbAYDY15SQpwjA3KCBykiphRqKMmd1FV-H_cGAgmlkgnY0gmlwhEFtMpGJc2VjcDI1NmsxoQJ8_OUONb_H7RMF6kXzZWDut2xriJ5JeKnH2cnb8en0e4N0Y3CCJAaDdWRwgnZh \ + --p2p.bootnodes enr:-Li4QA-fcxSHHu68uzHxsGnR8Q8lnvPir8L3cb5-RSq5fvU7cmxukZinZ9N-XRcnvWauQl6KK2tnlD3RZTwOxI4KgIaGAZC-hjTfimV0aHN0b3JhZ2XbAYDY15RkADrb3zAU9-OPxr51LrBHuV2omsGAgmlkgnY0gmlwhEFtcySJc2VjcDI1NmsxoQNY8raIsHIGPniQ738UiNmIvifax5L6R51YLPoflGzix4N0Y3CCJAaDdWRwgnZh \ " # create data file for shard 0 if not yet if [ ! -e $storage_file_0 ]; then From e26e889f590fc38b105cf939e3c8dc2d32ec768e Mon Sep 17 00:00:00 2001 From: syntrust Date: Wed, 17 Jul 2024 15:57:22 +0800 Subject: [PATCH 42/61] fix slotter --- ethstorage/downloader/blob_cache_test.go | 29 +++++++++++++++--------- ethstorage/downloader/blob_disk_cache.go | 25 ++++++++++++++++---- 2 files changed, 39 insertions(+), 15 deletions(-) diff --git a/ethstorage/downloader/blob_cache_test.go b/ethstorage/downloader/blob_cache_test.go index e450c549..9f7f81ff 100644 --- a/ethstorage/downloader/blob_cache_test.go +++ b/ethstorage/downloader/blob_cache_test.go @@ -35,20 +35,25 @@ func TestNewSlotter(t *testing.T) { var lastSize uint32 for i := 0; i < 10; i++ { size, done := slotter() - lastSize = size + // shelf0 is for block with 1 blob + if !(size > uint32((i+1)*blobSize) && size < uint32((i+2)*blobSize)) { + t.Errorf("Slotter returned incorrect size at shelf %d", i) + } if done { + lastSize = size break } } - expected := uint32(maxBlobsPerTransaction * blobSize) - if lastSize != expected { - t.Errorf("Slotter returned incorrect total size: got %d, want %d", lastSize, expected) + if lastSize/blobSize != maxBlobsPerTransaction { + t.Errorf("Slotter did not return correct last size") } } func TestDiskBlobCache(t *testing.T) { setup(t) - defer teardown(t) + t.Cleanup(func() { + teardown(t) + }) block, err := newBlockBlobs(10, 4) if err != nil { @@ -97,16 +102,18 @@ func TestDiskBlobCache(t *testing.T) { func TestEncoding(t *testing.T) { setup(t) - defer teardown(t) + t.Cleanup(func() { + teardown(t) + }) blockBlobsParams := []struct { blockNum uint64 blobLen uint64 }{ {0, 1}, - {1, 5}, - // {222, 6}, {1000, 4}, + {1, 5}, + {222, 6}, {12345, 2}, {2000000, 3}, } @@ -189,7 +196,7 @@ func setup(t *testing.T) { } t.Logf("datadir %s", datadir) cache = NewBlobDiskCache(datadir, log.NewLogger(log.CLIConfig{ - Level: "debug", + Level: "warn", Format: "text", })) } @@ -197,11 +204,11 @@ func setup(t *testing.T) { 
func teardown(t *testing.T) { err := cache.Close() if err != nil { - t.Fatalf("Failed to close BlobCache: %v", err) + t.Errorf("Failed to close BlobCache: %v", err) } err = os.RemoveAll(datadir) if err != nil { - t.Fatalf("Failed to remove datadir: %v", err) + t.Errorf("Failed to remove datadir: %v", err) } kvHashes = nil } diff --git a/ethstorage/downloader/blob_disk_cache.go b/ethstorage/downloader/blob_disk_cache.go index 9a4abadc..1d91f068 100644 --- a/ethstorage/downloader/blob_disk_cache.go +++ b/ethstorage/downloader/blob_disk_cache.go @@ -223,14 +223,31 @@ func (c *BlobDiskCache) Close() error { return c.store.Close() } +var base = uint32(44) + // newSlotter creates a helper method for the Billy datastore that returns the // individual shelf sizes used to store blobs in. + +// | blobs | shelf size | data size| +// |--|--|--| +// | 1 | 131160 |131158| +// | 2 | 262276 |262275| +// | 3 | 393392 |393391| +// | 4 | 524508 |524505| +// | 5 | 655624 |655618| +// | 6 | 786740 |786734| + func newSlotter() func() (uint32, bool) { - var slotsize uint32 + var ( + slotsize uint32 = base + blobCount uint32 = 1 + ) return func() (size uint32, done bool) { - slotsize += blobSize - finished := slotsize >= maxBlobsPerTransaction*blobSize - return slotsize, finished + slotsize += blobSize + base + size = slotsize + done = blobCount == maxBlobsPerTransaction + blobCount++ + return } } From e496fb1772749db183ccf7bddacdc00b014d419a Mon Sep 17 00:00:00 2001 From: syntrust Date: Wed, 17 Jul 2024 16:04:29 +0800 Subject: [PATCH 43/61] minor --- ethstorage/miner/miner_test.go | 4 ++-- integration_tests/node_mine_test.go | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/ethstorage/miner/miner_test.go b/ethstorage/miner/miner_test.go index 5f3fdfc6..bc2e468d 100644 --- a/ethstorage/miner/miner_test.go +++ b/ethstorage/miner/miner_test.go @@ -72,8 +72,8 @@ func newMiner(t *testing.T, storageMgr *es.StorageManager, client *eth.PollingCl pvr := prover.NewKZGPoseidonProver(zkWorkingDir, defaultConfig.ZKeyFileName, defaultConfig.ZKProverMode, defaultConfig.ZKProverImpl, lg) fd := new(event.Feed) db := rawdb.NewMemoryDatabase() - bq := blobs.NewBlobReader(downloader.NewBlobMemCache(), storageMgr, lg) - miner := New(defaultConfig, db, storageMgr, l1api, bq, &pvr, fd, lg) + br := blobs.NewBlobReader(downloader.NewBlobMemCache(), storageMgr, lg) + miner := New(defaultConfig, db, storageMgr, l1api, br, &pvr, fd, lg) return miner } diff --git a/integration_tests/node_mine_test.go b/integration_tests/node_mine_test.go index aac8878a..ed2c24fc 100644 --- a/integration_tests/node_mine_test.go +++ b/integration_tests/node_mine_test.go @@ -84,8 +84,8 @@ func TestMining(t *testing.T) { lg, ) db := rawdb.NewMemoryDatabase() - bq := blobs.NewBlobReader(downloader.NewBlobMemCache(), storageManager, lg) - mnr := miner.New(miningConfig, db, storageManager, l1api, bq, &pvr, feed, lg) + br := blobs.NewBlobReader(downloader.NewBlobMemCache(), storageManager, lg) + mnr := miner.New(miningConfig, db, storageManager, l1api, br, &pvr, feed, lg) lg.Info("Initialized miner") l1HeadsSub := event.ResubscribeErr(time.Second*10, func(ctx context.Context, err error) (event.Subscription, error) { From c3207d2eccf5457b1965084eed3b95634ce8a246 Mon Sep 17 00:00:00 2001 From: syntrust Date: Wed, 17 Jul 2024 18:17:02 +0800 Subject: [PATCH 44/61] minor --- ethstorage/downloader/blob_cache_test.go | 2 +- ethstorage/downloader/downloader.go | 2 +- ethstorage/node/node.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff 
--git a/ethstorage/downloader/blob_cache_test.go b/ethstorage/downloader/blob_cache_test.go index 9f7f81ff..866db4e9 100644 --- a/ethstorage/downloader/blob_cache_test.go +++ b/ethstorage/downloader/blob_cache_test.go @@ -35,7 +35,7 @@ func TestNewSlotter(t *testing.T) { var lastSize uint32 for i := 0; i < 10; i++ { size, done := slotter() - // shelf0 is for block with 1 blob + // shelf0 is for block with 1 blob, and so on. if !(size > uint32((i+1)*blobSize) && size < uint32((i+2)*blobSize)) { t.Errorf("Slotter returned incorrect size at shelf %d", i) } diff --git a/ethstorage/downloader/downloader.go b/ethstorage/downloader/downloader.go index ae1fdded..66a18edc 100644 --- a/ethstorage/downloader/downloader.go +++ b/ethstorage/downloader/downloader.go @@ -188,7 +188,7 @@ func NewDownloader( } // Start starts up the state loop. -func (s *Downloader) Start(datadir string) error { +func (s *Downloader) Start() error { // user does NOT specify a download start in the flag if s.lastDownloadBlock == 0 { bs, err := s.db.Get(append(downloaderPrefix, lastDownloadKey...)) diff --git a/ethstorage/node/node.go b/ethstorage/node/node.go index c08dfd9e..5ec2af41 100644 --- a/ethstorage/node/node.go +++ b/ethstorage/node/node.go @@ -332,7 +332,7 @@ func (n *EsNode) Start(ctx context.Context, cfg *Config) error { n.miner.Start() } - if err := n.downloader.Start(cfg.DataDir); err != nil { + if err := n.downloader.Start(); err != nil { n.log.Error("Could not start a downloader", "err", err) return err } From 34287776caec9563244856306206f0658a484e30 Mon Sep 17 00:00:00 2001 From: syntrust Date: Thu, 18 Jul 2024 11:55:15 +0800 Subject: [PATCH 45/61] fix lock --- ethstorage/downloader/blob_disk_cache.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ethstorage/downloader/blob_disk_cache.go b/ethstorage/downloader/blob_disk_cache.go index 1d91f068..e59448bd 100644 --- a/ethstorage/downloader/blob_disk_cache.go +++ b/ethstorage/downloader/blob_disk_cache.go @@ -123,6 +123,7 @@ func (c *BlobDiskCache) Blobs(hash common.Hash) []blob { c.mu.RLock() id, ok := c.lookup[hash] if !ok { + c.mu.RUnlock() return nil } block, err := c.getBlockBlobsById(id) @@ -160,6 +161,7 @@ func (c *BlobDiskCache) getBlobByIndex(idx uint64) *blob { c.mu.RLock() id, ok := c.index[idx] if !ok { + c.mu.RUnlock() return nil } block, err := c.getBlockBlobsById(id) From a37a0731898cff12c1e1cd64f4087a7b769d3fd5 Mon Sep 17 00:00:00 2001 From: syntrust Date: Tue, 23 Jul 2024 15:31:39 +0800 Subject: [PATCH 46/61] clean cache when close --- ethstorage/downloader/blob_disk_cache.go | 56 +++++------------------- 1 file changed, 11 insertions(+), 45 deletions(-) diff --git a/ethstorage/downloader/blob_disk_cache.go b/ethstorage/downloader/blob_disk_cache.go index e59448bd..6f52d274 100644 --- a/ethstorage/downloader/blob_disk_cache.go +++ b/ethstorage/downloader/blob_disk_cache.go @@ -42,60 +42,17 @@ func NewBlobDiskCache(datadir string, lg log.Logger) *BlobDiskCache { index: make(map[uint64]uint64), lg: lg, } - var ( - fails []uint64 - totalSize uint32 - totalBlobs int - ) - indexer := func(id uint64, size uint32, data []byte) { - totalSize += size - blobs, err := c.parseBlockBlobs(id, data) - if err != nil { - fails = append(fails, id) - } - totalBlobs += blobs - } - store, err := billy.Open(billy.Options{Path: cbdir, Repair: true}, newSlotter(), indexer) + store, err := billy.Open(billy.Options{Path: cbdir, Repair: true}, newSlotter(), nil) if err != nil { lg.Crit("Failed to open cache directory", "dir", cbdir, "err", err) } c.store = store - 
if len(fails) > 0 { - log.Warn("Dropping invalidated cached entries", "ids", fails) - for _, id := range fails { - if err := c.store.Delete(id); err != nil { - log.Error("Failed to delete invalidated cached entry", "id", id, "err", err) - } - } - } - lg.Info("BlobDiskCache initialized", "dir", cbdir, "entries", len(c.lookup), "blobs", totalBlobs, "size", totalSize) + lg.Info("BlobDiskCache initialized", "dir", cbdir) return c } -// parseBlockBlobs is a callback method on pool creation that gets called for -// each blockBlobs on disk to create the in-memory index. -func (c *BlobDiskCache) parseBlockBlobs(id uint64, data []byte) (int, error) { - bb := new(blockBlobs) - if err := rlp.DecodeBytes(data, bb); err != nil { - c.lg.Error("Failed to decode blockBlobs from RLP", "id", id, "err", err) - return 0, err - } - - c.mu.Lock() - defer c.mu.Unlock() - blobs := 0 - c.lookup[bb.hash] = id - c.lg.Debug("Indexing blockBlobs in cache", "block", bb.number, "hash", bb.hash, "id", id) - for _, b := range bb.blobs { - blobs++ - c.index[b.kvIndex.Uint64()] = id - c.lg.Debug("Indexing blob in cache", "kvIdx", b.kvIndex, "hash", b.hash, "id", id) - } - return blobs, nil -} - func (c *BlobDiskCache) SetBlockBlobs(block *blockBlobs) error { rlpBlock, err := rlp.EncodeToBytes(block) if err != nil { @@ -222,6 +179,15 @@ func (c *BlobDiskCache) getBlockBlobsById(id uint64) (*blockBlobs, error) { func (c *BlobDiskCache) Close() error { c.lg.Warn("Closing BlobDiskCache") + c.mu.Lock() + defer c.mu.Unlock() + for _, id := range c.lookup { + if err := c.store.Delete(id); err != nil { + c.lg.Warn("Failed to delete block from id", "id", id, "err", err) + } + } + c.lookup = nil + c.index = nil return c.store.Close() } From ac80f2e597baad98b8666b70400ad4c696acae55 Mon Sep 17 00:00:00 2001 From: syntrust Date: Tue, 23 Jul 2024 15:42:24 +0800 Subject: [PATCH 47/61] use block number instead of hash to prevent re-org --- ethstorage/downloader/blob_cache_test.go | 6 +++--- ethstorage/downloader/blob_disk_cache.go | 14 +++++++------- ethstorage/downloader/downloader.go | 4 ++-- 3 files changed, 12 insertions(+), 12 deletions(-) diff --git a/ethstorage/downloader/blob_cache_test.go b/ethstorage/downloader/blob_cache_test.go index 866db4e9..c8491eba 100644 --- a/ethstorage/downloader/blob_cache_test.go +++ b/ethstorage/downloader/blob_cache_test.go @@ -65,7 +65,7 @@ func TestDiskBlobCache(t *testing.T) { t.Fatalf("Failed to set block blobs: %v", err) } - blobs := cache.Blobs(block.hash) + blobs := cache.Blobs(block.number) if len(blobs) != len(block.blobs) { t.Fatalf("Unexpected number of blobs: got %d, want %d", len(blobs), len(block.blobs)) } @@ -78,7 +78,7 @@ func TestDiskBlobCache(t *testing.T) { } cache.Cleanup(5) - blobsAfterCleanup := cache.Blobs(block.hash) + blobsAfterCleanup := cache.Blobs(block.number) if len(blobsAfterCleanup) != len(block.blobs) { t.Fatalf("Unexpected number of blobs after cleanup: got %d, want %d", len(blobsAfterCleanup), len(block.blobs)) } @@ -94,7 +94,7 @@ func TestDiskBlobCache(t *testing.T) { } cache.Cleanup(15) - blobsAfterCleanup = cache.Blobs(block.hash) + blobsAfterCleanup = cache.Blobs(block.number) if len(blobsAfterCleanup) != len(block.blobs) { t.Fatalf("Unexpected number of blobs after cleanup: got %d, want %d", len(blobsAfterCleanup), len(block.blobs)) } diff --git a/ethstorage/downloader/blob_disk_cache.go b/ethstorage/downloader/blob_disk_cache.go index 6f52d274..2ddcc930 100644 --- a/ethstorage/downloader/blob_disk_cache.go +++ b/ethstorage/downloader/blob_disk_cache.go @@ 
-26,9 +26,9 @@ const ( type BlobDiskCache struct { store billy.Database - lookup map[common.Hash]uint64 // Lookup table mapping hashes to blob billy entries id - index map[uint64]uint64 // Lookup table mapping kvIndex to blob billy entries id - mu sync.RWMutex // protects store, lookup and index maps + lookup map[uint64]uint64 // Lookup table mapping block number to blob billy entries id + index map[uint64]uint64 // Lookup table mapping kvIndex to blob billy entries id + mu sync.RWMutex // protects store, lookup and index maps lg log.Logger } @@ -38,7 +38,7 @@ func NewBlobDiskCache(datadir string, lg log.Logger) *BlobDiskCache { lg.Crit("Failed to create cache directory", "dir", cbdir, "err", err) } c := &BlobDiskCache{ - lookup: make(map[common.Hash]uint64), + lookup: make(map[uint64]uint64), index: make(map[uint64]uint64), lg: lg, } @@ -67,7 +67,7 @@ func (c *BlobDiskCache) SetBlockBlobs(block *blockBlobs) error { c.lg.Error("Failed to write blockBlobs into storage", "block", block.number, "err", err) return err } - c.lookup[block.hash] = id + c.lookup[block.number] = id for _, b := range block.blobs { c.index[b.kvIndex.Uint64()] = id c.lg.Debug("Indexing blob in cache", "kvIdx", b.kvIndex, "hash", b.hash, "id", id) @@ -76,9 +76,9 @@ func (c *BlobDiskCache) SetBlockBlobs(block *blockBlobs) error { return nil } -func (c *BlobDiskCache) Blobs(hash common.Hash) []blob { +func (c *BlobDiskCache) Blobs(number uint64) []blob { c.mu.RLock() - id, ok := c.lookup[hash] + id, ok := c.lookup[number] if !ok { c.mu.RUnlock() return nil diff --git a/ethstorage/downloader/downloader.go b/ethstorage/downloader/downloader.go index 66a18edc..0555f11d 100644 --- a/ethstorage/downloader/downloader.go +++ b/ethstorage/downloader/downloader.go @@ -42,7 +42,7 @@ var ( type BlobCache interface { SetBlockBlobs(block *blockBlobs) error - Blobs(hash common.Hash) []blob + Blobs(number uint64) []blob GetKeyValueByIndex(idx uint64, hash common.Hash) []byte GetKeyValueByIndexUnchecked(idx uint64) []byte Cleanup(finalized uint64) @@ -401,7 +401,7 @@ func (s *Downloader) downloadRange(start int64, end int64, toCache bool) ([]blob blobs := []blob{} for _, elBlock := range elBlocks { // attempt to read the blobs from the cache first - res := s.Cache.Blobs(elBlock.hash) + res := s.Cache.Blobs(elBlock.number) if res != nil { blobs = append(blobs, res...) 
s.log.Info("Blob found in the cache, continue to the next block", "blockNumber", elBlock.number) From 489eb95b8b061e823d3746b23c5ec32768f0f0ec Mon Sep 17 00:00:00 2001 From: syntrust Date: Tue, 23 Jul 2024 19:49:19 +0800 Subject: [PATCH 48/61] remove hash --- ethstorage/downloader/blob_cache_test.go | 1 - ethstorage/downloader/blob_disk_cache.go | 18 ++++++++---------- ethstorage/downloader/blob_mem_cache.go | 17 +++++++---------- ethstorage/downloader/downloader.go | 7 +------ 4 files changed, 16 insertions(+), 27 deletions(-) diff --git a/ethstorage/downloader/blob_cache_test.go b/ethstorage/downloader/blob_cache_test.go index c8491eba..e356486d 100644 --- a/ethstorage/downloader/blob_cache_test.go +++ b/ethstorage/downloader/blob_cache_test.go @@ -162,7 +162,6 @@ func TestEncoding(t *testing.T) { func newBlockBlobs(blockNumber, blobLen uint64) (*blockBlobs, error) { block := &blockBlobs{ number: blockNumber, - hash: common.BigToHash(new(big.Int).SetUint64(blockNumber)), blobs: make([]*blob, blobLen), } for i := uint64(0); i < blobLen; i++ { diff --git a/ethstorage/downloader/blob_disk_cache.go b/ethstorage/downloader/blob_disk_cache.go index 2ddcc930..5f6b9478 100644 --- a/ethstorage/downloader/blob_disk_cache.go +++ b/ethstorage/downloader/blob_disk_cache.go @@ -191,28 +191,26 @@ func (c *BlobDiskCache) Close() error { return c.store.Close() } -var base = uint32(44) - // newSlotter creates a helper method for the Billy datastore that returns the // individual shelf sizes used to store blobs in. // | blobs | shelf size | data size| // |--|--|--| -// | 1 | 131160 |131158| -// | 2 | 262276 |262275| -// | 3 | 393392 |393391| -// | 4 | 524508 |524505| -// | 5 | 655624 |655618| -// | 6 | 786740 |786734| +// | 1 | 131128 |131125| +// | 2 | 262244 |262242| +// | 3 | 393360 |393358| +// | 4 | 524476 |524472| +// | 5 | 655592 |655585| +// | 6 | 786708 |786701| func newSlotter() func() (uint32, bool) { var ( - slotsize uint32 = base + slotsize uint32 = 12 blobCount uint32 = 1 ) return func() (size uint32, done bool) { - slotsize += blobSize + base + slotsize += blobSize + 44 size = slotsize done = blobCount == maxBlobsPerTransaction blobCount++ diff --git a/ethstorage/downloader/blob_mem_cache.go b/ethstorage/downloader/blob_mem_cache.go index 32db298f..285ca018 100644 --- a/ethstorage/downloader/blob_mem_cache.go +++ b/ethstorage/downloader/blob_mem_cache.go @@ -13,33 +13,33 @@ import ( ) type BlobMemCache struct { - blocks map[common.Hash]*blockBlobs + blocks map[uint64]*blockBlobs mu sync.RWMutex } func NewBlobMemCache() *BlobMemCache { return &BlobMemCache{ - blocks: map[common.Hash]*blockBlobs{}, + blocks: map[uint64]*blockBlobs{}, } } func (c *BlobMemCache) SetBlockBlobs(block *blockBlobs) error { c.mu.Lock() defer c.mu.Unlock() - c.blocks[block.hash] = block + c.blocks[block.number] = block return nil } -func (c *BlobMemCache) Blobs(hash common.Hash) []blob { +func (c *BlobMemCache) Blobs(number uint64) []blob { c.mu.RLock() defer c.mu.RUnlock() - if _, exist := c.blocks[hash]; !exist { + if _, exist := c.blocks[number]; !exist { return nil } res := []blob{} - for _, blob := range c.blocks[hash].blobs { + for _, blob := range c.blocks[number].blobs { res = append(res, *blob) } return res @@ -88,9 +88,6 @@ func (c *BlobMemCache) Cleanup(finalized uint64) { } func (c *BlobMemCache) Close() error { - c.mu.Lock() - defer c.mu.Unlock() - - c.blocks = map[common.Hash]*blockBlobs{} + c.blocks = nil return nil } diff --git a/ethstorage/downloader/downloader.go b/ethstorage/downloader/downloader.go 
index 0555f11d..68350a2e 100644 --- a/ethstorage/downloader/downloader.go +++ b/ethstorage/downloader/downloader.go @@ -119,19 +119,17 @@ func (b *blob) DecodeRLP(s *rlp.Stream) error { type blockBlobs struct { timestamp uint64 number uint64 - hash common.Hash blobs []*blob } func (b *blockBlobs) String() string { - return fmt.Sprintf("blockBlobs{number: %d, timestamp: %d, hash: %x, blobs: %d}", b.number, b.timestamp, b.hash, len(b.blobs)) + return fmt.Sprintf("blockBlobs{number: %d, timestamp: %d, blobs: %d}", b.number, b.timestamp, len(b.blobs)) } func (bb *blockBlobs) EncodeRLP(w io.Writer) error { return rlp.Encode(w, []interface{}{ bb.timestamp, bb.number, - bb.hash, bb.blobs, }) } @@ -140,7 +138,6 @@ func (bb *blockBlobs) DecodeRLP(s *rlp.Stream) error { var decodedData struct { Timestamp uint64 Number uint64 - Hash common.Hash Blobs []*blob } @@ -150,7 +147,6 @@ func (bb *blockBlobs) DecodeRLP(s *rlp.Stream) error { bb.timestamp = decodedData.Timestamp bb.number = decodedData.Number - bb.hash = decodedData.Hash bb.blobs = decodedData.Blobs return nil @@ -496,7 +492,6 @@ func (s *Downloader) eventsToBlocks(events []types.Log) ([]*blockBlobs, error) { blocks = append(blocks, &blockBlobs{ timestamp: res.Time, number: event.BlockNumber, - hash: event.BlockHash, blobs: []*blob{}, }) } From 0f679787f2d5b123f041b74ff607e8787acaf46a Mon Sep 17 00:00:00 2001 From: syntrust Date: Wed, 24 Jul 2024 18:40:46 +0800 Subject: [PATCH 49/61] no lock for store --- ethstorage/downloader/blob_disk_cache.go | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/ethstorage/downloader/blob_disk_cache.go b/ethstorage/downloader/blob_disk_cache.go index 5f6b9478..5b5c59eb 100644 --- a/ethstorage/downloader/blob_disk_cache.go +++ b/ethstorage/downloader/blob_disk_cache.go @@ -28,7 +28,7 @@ type BlobDiskCache struct { store billy.Database lookup map[uint64]uint64 // Lookup table mapping block number to blob billy entries id index map[uint64]uint64 // Lookup table mapping kvIndex to blob billy entries id - mu sync.RWMutex // protects store, lookup and index maps + mu sync.RWMutex // protects lookup and index maps lg log.Logger } @@ -59,19 +59,18 @@ func (c *BlobDiskCache) SetBlockBlobs(block *blockBlobs) error { c.lg.Error("Failed to encode blockBlobs into RLP", "block", block.number, "err", err) return err } - c.mu.Lock() - defer c.mu.Unlock() - id, err := c.store.Put(rlpBlock) if err != nil { c.lg.Error("Failed to write blockBlobs into storage", "block", block.number, "err", err) return err } + c.mu.Lock() c.lookup[block.number] = id for _, b := range block.blobs { c.index[b.kvIndex.Uint64()] = id c.lg.Debug("Indexing blob in cache", "kvIdx", b.kvIndex, "hash", b.hash, "id", id) } + c.mu.Unlock() c.lg.Info("Set blockBlobs to cache", "block", block.number, "id", id) return nil } @@ -79,12 +78,11 @@ func (c *BlobDiskCache) SetBlockBlobs(block *blockBlobs) error { func (c *BlobDiskCache) Blobs(number uint64) []blob { c.mu.RLock() id, ok := c.lookup[number] + c.mu.RUnlock() if !ok { - c.mu.RUnlock() return nil } block, err := c.getBlockBlobsById(id) - c.mu.RUnlock() if err != nil || block == nil { return nil } @@ -117,12 +115,11 @@ func (c *BlobDiskCache) GetKeyValueByIndexUnchecked(idx uint64) []byte { func (c *BlobDiskCache) getBlobByIndex(idx uint64) *blob { c.mu.RLock() id, ok := c.index[idx] + c.mu.RUnlock() if !ok { - c.mu.RUnlock() return nil } block, err := c.getBlockBlobsById(id) - c.mu.RUnlock() if err != nil || block == nil { return nil } From 
ccd03c83a8e9c051e1ee35ae9ac241859522fe5a Mon Sep 17 00:00:00 2001 From: syntrust Date: Thu, 25 Jul 2024 14:49:49 +0800 Subject: [PATCH 50/61] fix ut --- ethstorage/miner/miner_test.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/ethstorage/miner/miner_test.go b/ethstorage/miner/miner_test.go index bc2e468d..9fe16090 100644 --- a/ethstorage/miner/miner_test.go +++ b/ethstorage/miner/miner_test.go @@ -69,6 +69,14 @@ func newMiner(t *testing.T, storageMgr *es.StorageManager, client *eth.PollingCl } l1api := NewL1MiningAPI(client, nil, lg) zkWorkingDir, _ := filepath.Abs("../prover") + zkey := filepath.Join(zkWorkingDir, prover.SnarkLib, defaultConfig.ZKeyFileName) + if _, err := os.Stat(zkey); os.IsNotExist(err) { + _, err := os.Create(zkey) + if err != nil { + t.Fatalf("Create failed %v", err) + } + defer os.Remove(zkey) + } pvr := prover.NewKZGPoseidonProver(zkWorkingDir, defaultConfig.ZKeyFileName, defaultConfig.ZKProverMode, defaultConfig.ZKProverImpl, lg) fd := new(event.Feed) db := rawdb.NewMemoryDatabase() From ce0367cc9cfd399e8c4fb671e0dd2e9a9b8f9e42 Mon Sep 17 00:00:00 2001 From: syntrust Date: Fri, 26 Jul 2024 19:40:21 +0800 Subject: [PATCH 51/61] get sample by billy --- ethstorage/blobs/blob_reader.go | 11 +- ethstorage/downloader/blob_disk_cache.go | 196 ++++++++++------------- ethstorage/downloader/downloader.go | 59 +------ go.mod | 2 +- go.sum | 6 +- 5 files changed, 97 insertions(+), 177 deletions(-) diff --git a/ethstorage/blobs/blob_reader.go b/ethstorage/blobs/blob_reader.go index 7c5055f1..5368ead7 100644 --- a/ethstorage/blobs/blob_reader.go +++ b/ethstorage/blobs/blob_reader.go @@ -13,7 +13,7 @@ import ( type BlobCacheReader interface { GetKeyValueByIndex(index uint64, hash common.Hash) []byte - GetKeyValueByIndexUnchecked(index uint64) []byte + GetSampleData(kvIndex, sampleIndexInKv uint64) []byte } // BlobReader provides unified interface for the miner to read blobs and samples @@ -53,12 +53,9 @@ func (n *BlobReader) GetBlob(kvIdx uint64, kvHash common.Hash) ([]byte, error) { func (n *BlobReader) ReadSample(shardIdx, sampleIdx uint64) (common.Hash, error) { sampleLenBits := n.sm.MaxKvSizeBits() - es.SampleSizeBits kvIdx := sampleIdx >> sampleLenBits - // get blob without checking commit since kvHash is not available - if blob := n.cr.GetKeyValueByIndexUnchecked(kvIdx); blob != nil { - sampleIdxInKv := sampleIdx % (1 << sampleLenBits) - sampleSize := uint64(1 << es.SampleSizeBits) - sampleIdxByte := sampleIdxInKv << es.SampleSizeBits - sample := blob[sampleIdxByte : sampleIdxByte+sampleSize] + sampleIdxInKv := sampleIdx % (1 << sampleLenBits) + + if sample := n.cr.GetSampleData(kvIdx, sampleIdxInKv); sample != nil { return common.BytesToHash(sample), nil } diff --git a/ethstorage/downloader/blob_disk_cache.go b/ethstorage/downloader/blob_disk_cache.go index 5b5c59eb..0a88c3d2 100644 --- a/ethstorage/downloader/blob_disk_cache.go +++ b/ethstorage/downloader/blob_disk_cache.go @@ -5,7 +5,7 @@ package downloader import ( "bytes" - "fmt" + "math/big" "os" "path/filepath" "sync" @@ -13,9 +13,8 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" - "github.com/ethereum/go-ethereum/rlp" + "github.com/ethstorage/billy" "github.com/ethstorage/go-ethstorage/ethstorage" - "github.com/holiman/billy" ) const ( @@ -24,11 +23,24 @@ const ( blobCacheDir = "cached_blobs" ) +type blockBlobsCached struct { + timestamp uint64 + number uint64 + blobs []*blobCached +} + +type blobCached struct { + 
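// Descriptive note on the struct introduced here (not a line of the original commit):
// blobCached keeps only the billy entry id (dataId) for each blob instead of the
// payload itself, so Blobs and GetKeyValueByIndex re-read the data from the store
// on demand rather than holding every cached blob in memory.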
kvIndex *big.Int + kvSize *big.Int + hash common.Hash + dataId uint64 +} + type BlobDiskCache struct { store billy.Database - lookup map[uint64]uint64 // Lookup table mapping block number to blob billy entries id - index map[uint64]uint64 // Lookup table mapping kvIndex to blob billy entries id - mu sync.RWMutex // protects lookup and index maps + lookup map[uint64]*blockBlobsCached // Lookup table mapping block number to blockBlob + index map[uint64]uint64 // Lookup table mapping kvIndex to blob billy entries id + mu sync.RWMutex // protects lookup and index maps lg log.Logger } @@ -38,7 +50,7 @@ func NewBlobDiskCache(datadir string, lg log.Logger) *BlobDiskCache { lg.Crit("Failed to create cache directory", "dir", cbdir, "err", err) } c := &BlobDiskCache{ - lookup: make(map[uint64]uint64), + lookup: make(map[uint64]*blockBlobsCached), index: make(map[uint64]uint64), lg: lg, } @@ -54,133 +66,124 @@ func NewBlobDiskCache(datadir string, lg log.Logger) *BlobDiskCache { } func (c *BlobDiskCache) SetBlockBlobs(block *blockBlobs) error { - rlpBlock, err := rlp.EncodeToBytes(block) - if err != nil { - c.lg.Error("Failed to encode blockBlobs into RLP", "block", block.number, "err", err) - return err - } - id, err := c.store.Put(rlpBlock) - if err != nil { - c.lg.Error("Failed to write blockBlobs into storage", "block", block.number, "err", err) - return err - } c.mu.Lock() - c.lookup[block.number] = id + defer c.mu.Unlock() + + var blobIds []uint64 + var bcs []*blobCached for _, b := range block.blobs { + id, err := c.store.Put(b.data) + if err != nil { + c.lg.Error("Failed to write blockBlobs into storage", "block", block.number, "err", err) + return err + } + blobIds = append(blobIds, id) c.index[b.kvIndex.Uint64()] = id - c.lg.Debug("Indexing blob in cache", "kvIdx", b.kvIndex, "hash", b.hash, "id", id) - } - c.mu.Unlock() - c.lg.Info("Set blockBlobs to cache", "block", block.number, "id", id) + bcs = append(bcs, &blobCached{ + kvIndex: b.kvIndex, + kvSize: b.kvSize, + hash: b.hash, + dataId: id, + }) + } + c.lookup[block.number] = &blockBlobsCached{ + timestamp: block.timestamp, + number: block.number, + blobs: bcs, + } + c.lg.Info("Set blockBlobs to cache", "block", block.number) return nil } func (c *BlobDiskCache) Blobs(number uint64) []blob { c.mu.RLock() - id, ok := c.lookup[number] + bb, ok := c.lookup[number] c.mu.RUnlock() if !ok { return nil } - block, err := c.getBlockBlobsById(id) - if err != nil || block == nil { - return nil - } - c.lg.Info("Blobs from cache", "block", block.number, "id", id) + c.lg.Info("Blobs from cache", "block", bb.number) res := []blob{} - for _, blob := range block.blobs { - res = append(res, *blob) + for _, bc := range bb.blobs { + data, err := c.store.Get(bc.dataId) + if err != nil { + c.lg.Error("Failed to get blockBlobs from storage", "block", number, "err", err) + return nil + } + res = append(res, blob{ + kvIndex: bc.kvIndex, + kvSize: bc.kvSize, + hash: bc.hash, + data: data, + }) } return res } func (c *BlobDiskCache) GetKeyValueByIndex(idx uint64, hash common.Hash) []byte { - blob := c.getBlobByIndex(idx) - if blob != nil && - bytes.Equal(blob.hash[0:ethstorage.HashSizeInContract], hash[0:ethstorage.HashSizeInContract]) { - return blob.data - } - return nil -} - -// Access without verification through a hash: only for miner sampling -func (c *BlobDiskCache) GetKeyValueByIndexUnchecked(idx uint64) []byte { - blob := c.getBlobByIndex(idx) - if blob != nil { - return blob.data + c.mu.RLock() + defer c.mu.RUnlock() + + for _, bb := range c.lookup { + 
for _, b := range bb.blobs { + if b.kvIndex.Uint64() == idx && + bytes.Equal(b.hash[0:ethstorage.HashSizeInContract], hash[0:ethstorage.HashSizeInContract]) { + data, err := c.store.Get(b.dataId) + if err != nil { + c.lg.Error("Failed to get kv from downloader cache", "kvIndex", idx, "id", b.dataId, "err", err) + return nil + } + return data + } + } } return nil } -func (c *BlobDiskCache) getBlobByIndex(idx uint64) *blob { +func (c *BlobDiskCache) GetSampleData(idx, sampleIdx uint64) []byte { c.mu.RLock() id, ok := c.index[idx] c.mu.RUnlock() if !ok { return nil } - block, err := c.getBlockBlobsById(id) - if err != nil || block == nil { + + off := sampleIdx << ethstorage.SampleSizeBits + size := uint64(1 << ethstorage.SampleSizeBits) + data, err := c.store.GetSample(id, off, size) + if err != nil { return nil } - for _, blob := range block.blobs { - if blob != nil && blob.kvIndex.Uint64() == idx { - return blob - } - } - return nil + return data } func (c *BlobDiskCache) Cleanup(finalized uint64) { c.mu.Lock() defer c.mu.Unlock() - for hash, id := range c.lookup { - block, err := c.getBlockBlobsById(id) - if err != nil { - c.lg.Error("Failed to get block from id", "id", id, "err", err) - continue - } - if block != nil && block.number <= finalized { - if err := c.store.Delete(id); err != nil { - c.lg.Error("Failed to delete block from id", "id", id, "err", err) - } - delete(c.lookup, hash) + for number, block := range c.lookup { + if number <= finalized { + delete(c.lookup, number) for _, blob := range block.blobs { - if blob != nil && blob.kvIndex != nil { + if blob.kvIndex != nil { delete(c.index, blob.kvIndex.Uint64()) } + if err := c.store.Delete(blob.dataId); err != nil { + c.lg.Error("Failed to delete block from id", "id", blob.dataId, "err", err) + } } - c.lg.Info("Cleanup deleted", "finalized", finalized, "block", block.number, "id", id) + c.lg.Info("Cleanup deleted", "finalized", finalized, "block", block.number) } } } -func (c *BlobDiskCache) getBlockBlobsById(id uint64) (*blockBlobs, error) { - data, err := c.store.Get(id) - if err != nil { - c.lg.Error("Failed to get block from id", "id", id, "err", err) - return nil, err - } - if len(data) == 0 { - c.lg.Warn("BlockBlobs not found", "id", id) - return nil, fmt.Errorf("not found: id=%d", id) - } - item := new(blockBlobs) - if err := rlp.DecodeBytes(data, item); err != nil { - c.lg.Error("Failed to decode block", "id", id, "err", err) - return nil, err - } - return item, nil -} - func (c *BlobDiskCache) Close() error { c.lg.Warn("Closing BlobDiskCache") c.mu.Lock() defer c.mu.Unlock() - for _, id := range c.lookup { + for _, id := range c.index { if err := c.store.Delete(id); err != nil { - c.lg.Warn("Failed to delete block from id", "id", id, "err", err) + c.lg.Warn("Failed to delete blob from id", "id", id, "err", err) } } c.lookup = nil @@ -188,29 +191,8 @@ func (c *BlobDiskCache) Close() error { return c.store.Close() } -// newSlotter creates a helper method for the Billy datastore that returns the -// individual shelf sizes used to store blobs in. 
- -// | blobs | shelf size | data size| -// |--|--|--| -// | 1 | 131128 |131125| -// | 2 | 262244 |262242| -// | 3 | 393360 |393358| -// | 4 | 524476 |524472| -// | 5 | 655592 |655585| -// | 6 | 786708 |786701| - func newSlotter() func() (uint32, bool) { - var ( - slotsize uint32 = 12 - blobCount uint32 = 1 - ) - return func() (size uint32, done bool) { - slotsize += blobSize + 44 - size = slotsize - done = blobCount == maxBlobsPerTransaction - blobCount++ - return + return blobSize + 4, true } } diff --git a/ethstorage/downloader/downloader.go b/ethstorage/downloader/downloader.go index 68350a2e..c95d9b78 100644 --- a/ethstorage/downloader/downloader.go +++ b/ethstorage/downloader/downloader.go @@ -9,7 +9,6 @@ import ( "encoding/binary" "encoding/hex" "fmt" - "io" "math/big" "os" "path/filepath" @@ -20,7 +19,6 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rpc" "github.com/ethstorage/go-ethstorage/ethstorage" @@ -44,7 +42,7 @@ type BlobCache interface { SetBlockBlobs(block *blockBlobs) error Blobs(number uint64) []blob GetKeyValueByIndex(idx uint64, hash common.Hash) []byte - GetKeyValueByIndexUnchecked(idx uint64) []byte + GetSampleData(idx uint64, sampleIdx uint64) []byte Cleanup(finalized uint64) Close() error } @@ -87,35 +85,6 @@ func (b *blob) String() string { return fmt.Sprintf("blob{kvIndex: %d, hash: %x, data: %s}", b.kvIndex, b.hash, b.data) } -func (b *blob) EncodeRLP(w io.Writer) error { - return rlp.Encode(w, []interface{}{ - b.kvIndex, - b.kvSize, - b.hash, - b.data, - }) -} - -func (b *blob) DecodeRLP(s *rlp.Stream) error { - var decodedData struct { - KvIndex *big.Int - KvSize *big.Int - Hash common.Hash - Data []byte - } - - if err := s.Decode(&decodedData); err != nil { - return err - } - - b.kvIndex = decodedData.KvIndex - b.kvSize = decodedData.KvSize - b.hash = decodedData.Hash - b.data = decodedData.Data - - return nil -} - type blockBlobs struct { timestamp uint64 number uint64 @@ -126,32 +95,6 @@ func (b *blockBlobs) String() string { return fmt.Sprintf("blockBlobs{number: %d, timestamp: %d, blobs: %d}", b.number, b.timestamp, len(b.blobs)) } -func (bb *blockBlobs) EncodeRLP(w io.Writer) error { - return rlp.Encode(w, []interface{}{ - bb.timestamp, - bb.number, - bb.blobs, - }) -} - -func (bb *blockBlobs) DecodeRLP(s *rlp.Stream) error { - var decodedData struct { - Timestamp uint64 - Number uint64 - Blobs []*blob - } - - if err := s.Decode(&decodedData); err != nil { - return err - } - - bb.timestamp = decodedData.Timestamp - bb.number = decodedData.Number - bb.blobs = decodedData.Blobs - - return nil -} - func NewDownloader( l1Source *eth.PollingClient, l1Beacon *eth.BeaconClient, diff --git a/go.mod b/go.mod index 3444e81a..59270f63 100644 --- a/go.mod +++ b/go.mod @@ -58,7 +58,7 @@ require ( github.com/hashicorp/go-bexpr v0.1.11 // indirect github.com/hashicorp/golang-lru/arc/v2 v2.0.5 // indirect github.com/herumi/bls-eth-go-binary v1.28.1 // indirect - github.com/holiman/billy v0.0.0-20240322075458-72a4e81ec6da // indirect + github.com/ethstorage/billy v0.0.0-20240726111512-742ba0782293 github.com/huin/goupnp v1.3.0 // indirect github.com/jackpal/go-nat-pmp v1.0.2 // indirect github.com/kilic/bls12-381 v0.1.1-0.20220929213557-ca162e8a70f4 // indirect diff --git a/go.sum b/go.sum index dec203a3..dfcbf7c5 100644 --- a/go.sum +++ b/go.sum @@ -162,6 +162,8 @@ github.com/ethereum/c-kzg-4844 v0.4.0 
h1:3MS1s4JtA868KpJxroZoepdV0ZKBp3u/O5HcZ7R github.com/ethereum/c-kzg-4844 v0.4.0/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0= github.com/ethereum/go-ethereum v1.13.5 h1:U6TCRciCqZRe4FPXmy1sMGxTfuk8P7u2UoinF3VbaFk= github.com/ethereum/go-ethereum v1.13.5/go.mod h1:yMTu38GSuyxaYzQMViqNmQ1s3cE84abZexQmTgenWk0= +github.com/ethstorage/billy v0.0.0-20240726111512-742ba0782293 h1:yh/FiW8cnh5SraRSHPQ/qsoT7fonF4mwX1Nl/Bw4VwU= +github.com/ethstorage/billy v0.0.0-20240726111512-742ba0782293/go.mod h1:y+M5PibhfdCZkQbwwFRIRIgGVc1txfqc3qvBeoQGoGw= github.com/ethstorage/go-iden3-crypto v0.0.0-20230406080944-d89aec086425 h1:dKQu1oXrt6ndFl4XtAZsfBubU8c5W59T85L8MGtWawE= github.com/ethstorage/go-iden3-crypto v0.0.0-20230406080944-d89aec086425/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E= github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8= @@ -298,10 +300,6 @@ github.com/hashicorp/golang-lru/v2 v2.0.5/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyf github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/herumi/bls-eth-go-binary v1.28.1 h1:fcIZ48y5EE9973k05XjE8+P3YiQgjZz4JI/YabAm8KA= github.com/herumi/bls-eth-go-binary v1.28.1/go.mod h1:luAnRm3OsMQeokhGzpYmc0ZKwawY7o87PUEP11Z7r7U= -github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7 h1:3JQNjnMRil1yD0IfZKHF9GxxWKDJGj8I0IqOUol//sw= -github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7/go.mod h1:5GuXa7vkL8u9FkFuWdVvfR5ix8hRB7DbOAaYULamFpc= -github.com/holiman/billy v0.0.0-20240322075458-72a4e81ec6da h1:8qEhdMGSUx67L2s5aGQinJhOwLfIRKLRBHPQq8m6WxE= -github.com/holiman/billy v0.0.0-20240322075458-72a4e81ec6da/go.mod h1:5GuXa7vkL8u9FkFuWdVvfR5ix8hRB7DbOAaYULamFpc= github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao= github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA= github.com/holiman/uint256 v1.2.3 h1:K8UWO1HUJpRMXBxbmaY1Y8IAMZC/RsKB+ArEnnK4l5o= From b05b8829c0d8d1c1777980c814fa09aa7db2765d Mon Sep 17 00:00:00 2001 From: syntrust Date: Sat, 27 Jul 2024 10:32:05 +0800 Subject: [PATCH 52/61] fix test --- ethstorage/downloader/blob_mem_cache.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/ethstorage/downloader/blob_mem_cache.go b/ethstorage/downloader/blob_mem_cache.go index 285ca018..f6254b31 100644 --- a/ethstorage/downloader/blob_mem_cache.go +++ b/ethstorage/downloader/blob_mem_cache.go @@ -59,14 +59,17 @@ func (c *BlobMemCache) GetKeyValueByIndex(idx uint64, hash common.Hash) []byte { return nil } -func (c *BlobMemCache) GetKeyValueByIndexUnchecked(idx uint64) []byte { +func (c *BlobMemCache) GetSampleData(idx, sampleIdxInKv uint64) []byte { c.mu.RLock() defer c.mu.RUnlock() for _, block := range c.blocks { for _, blob := range block.blobs { if blob.kvIndex.Uint64() == idx { - return blob.data + sampleSize := uint64(1 << ethstorage.SampleSizeBits) + sampleIdxByte := sampleIdxInKv << ethstorage.SampleSizeBits + sample := blob.data[sampleIdxByte : sampleIdxByte+sampleSize] + return sample } } } From 2de81142f8c0d9a174176ccedb20cf50e5d31859 Mon Sep 17 00:00:00 2001 From: syntrust Date: Sat, 27 Jul 2024 10:35:38 +0800 Subject: [PATCH 53/61] fix test --- ethstorage/downloader/blob_cache_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ethstorage/downloader/blob_cache_test.go b/ethstorage/downloader/blob_cache_test.go index e356486d..893c6cf8 100644 --- a/ethstorage/downloader/blob_cache_test.go 
+++ b/ethstorage/downloader/blob_cache_test.go @@ -149,7 +149,7 @@ func TestEncoding(t *testing.T) { for i, kvHash := range kvHashes { kvIndex := uint64(i) t.Run(fmt.Sprintf("test kv: %d", i), func(t *testing.T) { - blobEncoded := cache.GetKeyValueByIndexUnchecked(kvIndex) + blobEncoded := cache.GetKeyValueByIndex(kvIndex, kvHash) blobDecoded := sm.DecodeBlob(blobEncoded, kvHash, kvIndex, kvSize) bytesWant := []byte(fmt.Sprintf(blobData, kvIndex)) if !bytes.Equal(blobDecoded[:len(bytesWant)], bytesWant) { From 60d3c22906cd79d961695c0794d885c9dad6c720 Mon Sep 17 00:00:00 2001 From: syntrust Date: Sat, 27 Jul 2024 11:29:47 +0800 Subject: [PATCH 54/61] fix test --- ethstorage/downloader/blob_cache_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ethstorage/downloader/blob_cache_test.go b/ethstorage/downloader/blob_cache_test.go index 893c6cf8..4070c3a3 100644 --- a/ethstorage/downloader/blob_cache_test.go +++ b/ethstorage/downloader/blob_cache_test.go @@ -35,8 +35,8 @@ func TestNewSlotter(t *testing.T) { var lastSize uint32 for i := 0; i < 10; i++ { size, done := slotter() - // shelf0 is for block with 1 blob, and so on. - if !(size > uint32((i+1)*blobSize) && size < uint32((i+2)*blobSize)) { + // shelf0 is for block with 1 blob. + if !(size > blobSize && size < 2*blobSize) { t.Errorf("Slotter returned incorrect size at shelf %d", i) } if done { @@ -44,7 +44,7 @@ func TestNewSlotter(t *testing.T) { break } } - if lastSize/blobSize != maxBlobsPerTransaction { + if !(lastSize > blobSize && lastSize < 2*blobSize) { t.Errorf("Slotter did not return correct last size") } } From 58bcd200901b2e3e7c008ac7b5ea029425ca78e9 Mon Sep 17 00:00:00 2001 From: syntrust Date: Tue, 30 Jul 2024 09:54:29 +0800 Subject: [PATCH 55/61] test sample --- ethstorage/downloader/blob_cache_test.go | 96 +++++++++++++++++++----- ethstorage/downloader/blob_disk_cache.go | 15 ++-- 2 files changed, 84 insertions(+), 27 deletions(-) diff --git a/ethstorage/downloader/blob_cache_test.go b/ethstorage/downloader/blob_cache_test.go index 4070c3a3..49772e97 100644 --- a/ethstorage/downloader/blob_cache_test.go +++ b/ethstorage/downloader/blob_cache_test.go @@ -7,9 +7,11 @@ import ( "bytes" "fmt" "math/big" + "math/rand" "os" "path/filepath" "testing" + "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto/kzg4844" @@ -24,31 +26,13 @@ var ( datadir string fileName = "test_shard_0.dat" blobData = "blob data of kvIndex %d" + sampleLen = blobSize / sampleSize minerAddr = common.BigToAddress(common.Big1) kvSize uint64 = 1 << 17 kvEntries uint64 = 16 shardID = uint64(0) ) -func TestNewSlotter(t *testing.T) { - slotter := newSlotter() - var lastSize uint32 - for i := 0; i < 10; i++ { - size, done := slotter() - // shelf0 is for block with 1 blob. 
- if !(size > blobSize && size < 2*blobSize) { - t.Errorf("Slotter returned incorrect size at shelf %d", i) - } - if done { - lastSize = size - break - } - } - if !(lastSize > blobSize && lastSize < 2*blobSize) { - t.Errorf("Slotter did not return correct last size") - } -} - func TestDiskBlobCache(t *testing.T) { setup(t) t.Cleanup(func() { @@ -159,6 +143,80 @@ func TestEncoding(t *testing.T) { } } +func TestBlobDiskCache_GetSampleData(t *testing.T) { + setup(t) + t.Cleanup(func() { + teardown(t) + }) + + const blockStart = 10000000 + rand.New(rand.NewSource(time.Now().UnixNano())) + kvIndex2BlockNumber := map[uint64]uint64{} + kvIndex2BlobIndex := map[uint64]uint64{} + + newBlockBlobsFilled := func(blockNumber, blobLen uint64) (*blockBlobs, error) { + block := &blockBlobs{ + number: blockNumber, + blobs: make([]*blob, blobLen), + } + for i := uint64(0); i < blobLen; i++ { + kvIndex := uint64(len(kvHashes)) + blob := &blob{ + kvIndex: new(big.Int).SetUint64(kvIndex), + data: fill(blockNumber, i), + } + kzgBlob := kzg4844.Blob{} + copy(kzgBlob[:], blob.data) + commitment, err := kzg4844.BlobToCommitment(kzgBlob) + if err != nil { + return nil, fmt.Errorf( + "failed to create commitment for blob %d: %w", kvIndex, err) + } + blob.hash = common.Hash(eth.KZGToVersionedHash(eth.KZGCommitment(commitment))) + block.blobs[i] = blob + kvHashes = append(kvHashes, blob.hash) + kvIndex2BlockNumber[kvIndex] = blockNumber + kvIndex2BlobIndex[kvIndex] = i + } + t.Log("Block created", "number", block.number, "blobs", blobLen) + return block, nil + } + for i := 0; i < 10; i++ { + blockn, blobn := blockStart+i, rand.Intn(6)+1 + block, err := newBlockBlobsFilled(uint64(blockn), uint64(blobn)) + if err != nil { + t.Fatalf("Failed to create new block blobs: %v", err) + } + if err := cache.SetBlockBlobs(block); err != nil { + t.Fatalf("Failed to set block blobs: %v", err) + } + } + + for kvi := range kvHashes { + kvIndex := uint64(kvi) + sampleIndex := rand.Intn(int(sampleLen)) + sample := cache.GetSampleData(kvIndex, uint64(sampleIndex)) + sampleWant := make([]byte, sampleSize) + copy(sampleWant, fmt.Sprintf("%d_%d_%d", kvIndex2BlockNumber[kvIndex], kvIndex2BlobIndex[kvIndex], sampleIndex)) + t.Run(fmt.Sprintf("test sample: kvIndex=%d, sampleIndex=%d", kvIndex, sampleIndex), func(t *testing.T) { + if !bytes.Equal(sample, sampleWant) { + t.Errorf("GetSampleData got %x, want %x", sample, sampleWant) + } + }) + } + +} + +func fill(blockNumber, blobIndex uint64) []byte { + var content []byte + for i := uint64(0); i < sampleLen; i++ { + sample := make([]byte, sampleSize) + copy(sample, fmt.Sprintf("%d_%d_%d", blockNumber, blobIndex, i)) + content = append(content, sample...) 
+ } + return content +} + func newBlockBlobs(blockNumber, blobLen uint64) (*blockBlobs, error) { block := &blockBlobs{ number: blockNumber, diff --git a/ethstorage/downloader/blob_disk_cache.go b/ethstorage/downloader/blob_disk_cache.go index 0a88c3d2..b504fdd2 100644 --- a/ethstorage/downloader/blob_disk_cache.go +++ b/ethstorage/downloader/blob_disk_cache.go @@ -18,9 +18,10 @@ import ( ) const ( - blobSize = params.BlobTxFieldElementsPerBlob * params.BlobTxBytesPerFieldElement - maxBlobsPerTransaction = params.MaxBlobGasPerBlock / params.BlobTxBlobGasPerBlob - blobCacheDir = "cached_blobs" + itemHeaderSize = 4 // size of the per-item header of billy + sampleSize = uint64(1 << ethstorage.SampleSizeBits) + blobSize = params.BlobTxFieldElementsPerBlob * params.BlobTxBytesPerFieldElement + blobCacheDir = "cached_blobs" ) type blockBlobsCached struct { @@ -69,7 +70,6 @@ func (c *BlobDiskCache) SetBlockBlobs(block *blockBlobs) error { c.mu.Lock() defer c.mu.Unlock() - var blobIds []uint64 var bcs []*blobCached for _, b := range block.blobs { id, err := c.store.Put(b.data) @@ -77,7 +77,6 @@ func (c *BlobDiskCache) SetBlockBlobs(block *blockBlobs) error { c.lg.Error("Failed to write blockBlobs into storage", "block", block.number, "err", err) return err } - blobIds = append(blobIds, id) c.index[b.kvIndex.Uint64()] = id bcs = append(bcs, &blobCached{ kvIndex: b.kvIndex, @@ -149,9 +148,9 @@ func (c *BlobDiskCache) GetSampleData(idx, sampleIdx uint64) []byte { } off := sampleIdx << ethstorage.SampleSizeBits - size := uint64(1 << ethstorage.SampleSizeBits) - data, err := c.store.GetSample(id, off, size) + data, err := c.store.GetSample(id, off, sampleSize) if err != nil { + c.lg.Error("Failed to get sample from downloader cache", "kvIndex", idx, "id", id, "err", err) return nil } return data @@ -193,6 +192,6 @@ func (c *BlobDiskCache) Close() error { func newSlotter() func() (uint32, bool) { return func() (size uint32, done bool) { - return blobSize + 4, true + return blobSize + itemHeaderSize, true } } From 7ed64e03d9f3b8dea9497b72dbb113fbf910b2e5 Mon Sep 17 00:00:00 2001 From: syntrust Date: Tue, 30 Jul 2024 11:12:05 +0800 Subject: [PATCH 56/61] tidy --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 59270f63..dcd741ab 100644 --- a/go.mod +++ b/go.mod @@ -46,6 +46,7 @@ require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/deckarep/golang-set/v2 v2.1.0 // indirect github.com/ethereum/c-kzg-4844 v0.4.0 // indirect + github.com/ethstorage/billy v0.0.0-20240726111512-742ba0782293 github.com/fjl/memsize v0.0.1 // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/go-stack/stack v1.8.1 // indirect @@ -58,7 +59,6 @@ require ( github.com/hashicorp/go-bexpr v0.1.11 // indirect github.com/hashicorp/golang-lru/arc/v2 v2.0.5 // indirect github.com/herumi/bls-eth-go-binary v1.28.1 // indirect - github.com/ethstorage/billy v0.0.0-20240726111512-742ba0782293 github.com/huin/goupnp v1.3.0 // indirect github.com/jackpal/go-nat-pmp v1.0.2 // indirect github.com/kilic/bls12-381 v0.1.1-0.20220929213557-ca162e8a70f4 // indirect diff --git a/go.sum b/go.sum index dfcbf7c5..323ca9ad 100644 --- a/go.sum +++ b/go.sum @@ -300,6 +300,8 @@ github.com/hashicorp/golang-lru/v2 v2.0.5/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyf github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/herumi/bls-eth-go-binary v1.28.1 h1:fcIZ48y5EE9973k05XjE8+P3YiQgjZz4JI/YabAm8KA= github.com/herumi/bls-eth-go-binary 
v1.28.1/go.mod h1:luAnRm3OsMQeokhGzpYmc0ZKwawY7o87PUEP11Z7r7U= +github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7 h1:3JQNjnMRil1yD0IfZKHF9GxxWKDJGj8I0IqOUol//sw= +github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7/go.mod h1:5GuXa7vkL8u9FkFuWdVvfR5ix8hRB7DbOAaYULamFpc= github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao= github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA= github.com/holiman/uint256 v1.2.3 h1:K8UWO1HUJpRMXBxbmaY1Y8IAMZC/RsKB+ArEnnK4l5o= From cb1db88378ecbe1d10acf64534874e8d72dbcf7b Mon Sep 17 00:00:00 2001 From: syntrust Date: Tue, 30 Jul 2024 11:18:28 +0800 Subject: [PATCH 57/61] tidy --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index dcd741ab..ab5c9a8b 100644 --- a/go.mod +++ b/go.mod @@ -46,7 +46,7 @@ require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/deckarep/golang-set/v2 v2.1.0 // indirect github.com/ethereum/c-kzg-4844 v0.4.0 // indirect - github.com/ethstorage/billy v0.0.0-20240726111512-742ba0782293 + github.com/ethstorage/billy v0.0.0-20240730021803-ca24378685e7 github.com/fjl/memsize v0.0.1 // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/go-stack/stack v1.8.1 // indirect diff --git a/go.sum b/go.sum index 323ca9ad..9c9531ad 100644 --- a/go.sum +++ b/go.sum @@ -164,6 +164,8 @@ github.com/ethereum/go-ethereum v1.13.5 h1:U6TCRciCqZRe4FPXmy1sMGxTfuk8P7u2UoinF github.com/ethereum/go-ethereum v1.13.5/go.mod h1:yMTu38GSuyxaYzQMViqNmQ1s3cE84abZexQmTgenWk0= github.com/ethstorage/billy v0.0.0-20240726111512-742ba0782293 h1:yh/FiW8cnh5SraRSHPQ/qsoT7fonF4mwX1Nl/Bw4VwU= github.com/ethstorage/billy v0.0.0-20240726111512-742ba0782293/go.mod h1:y+M5PibhfdCZkQbwwFRIRIgGVc1txfqc3qvBeoQGoGw= +github.com/ethstorage/billy v0.0.0-20240730021803-ca24378685e7 h1:wOhbhs4WO8Mf0LT9c2qRxUbspAU9Tg4olLiastQTO5A= +github.com/ethstorage/billy v0.0.0-20240730021803-ca24378685e7/go.mod h1:y+M5PibhfdCZkQbwwFRIRIgGVc1txfqc3qvBeoQGoGw= github.com/ethstorage/go-iden3-crypto v0.0.0-20230406080944-d89aec086425 h1:dKQu1oXrt6ndFl4XtAZsfBubU8c5W59T85L8MGtWawE= github.com/ethstorage/go-iden3-crypto v0.0.0-20230406080944-d89aec086425/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E= github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8= From b9ebd75a357dcd53928ef4efd2ae99564014dce6 Mon Sep 17 00:00:00 2001 From: syntrust Date: Tue, 30 Jul 2024 11:38:33 +0800 Subject: [PATCH 58/61] refactor --- ethstorage/downloader/blob_disk_cache.go | 68 ++++++++++-------------- ethstorage/downloader/downloader.go | 1 + 2 files changed, 28 insertions(+), 41 deletions(-) diff --git a/ethstorage/downloader/blob_disk_cache.go b/ethstorage/downloader/blob_disk_cache.go index b504fdd2..cffd2b92 100644 --- a/ethstorage/downloader/blob_disk_cache.go +++ b/ethstorage/downloader/blob_disk_cache.go @@ -5,7 +5,6 @@ package downloader import ( "bytes" - "math/big" "os" "path/filepath" "sync" @@ -24,25 +23,12 @@ const ( blobCacheDir = "cached_blobs" ) -type blockBlobsCached struct { - timestamp uint64 - number uint64 - blobs []*blobCached -} - -type blobCached struct { - kvIndex *big.Int - kvSize *big.Int - hash common.Hash - dataId uint64 -} - type BlobDiskCache struct { - store billy.Database - lookup map[uint64]*blockBlobsCached // Lookup table mapping block number to blockBlob - index map[uint64]uint64 // Lookup table mapping kvIndex to blob billy entries id 
- mu sync.RWMutex // protects lookup and index maps - lg log.Logger + store billy.Database + blockLookup map[uint64]*blockBlobs // Lookup table mapping block number to blockBlob + kvIndexLookup map[uint64]uint64 // Lookup table mapping kvIndex to blob billy entries id + mu sync.RWMutex // protects lookup and index maps + lg log.Logger } func NewBlobDiskCache(datadir string, lg log.Logger) *BlobDiskCache { @@ -51,9 +37,9 @@ func NewBlobDiskCache(datadir string, lg log.Logger) *BlobDiskCache { lg.Crit("Failed to create cache directory", "dir", cbdir, "err", err) } c := &BlobDiskCache{ - lookup: make(map[uint64]*blockBlobsCached), - index: make(map[uint64]uint64), - lg: lg, + blockLookup: make(map[uint64]*blockBlobs), + kvIndexLookup: make(map[uint64]uint64), + lg: lg, } store, err := billy.Open(billy.Options{Path: cbdir, Repair: true}, newSlotter(), nil) @@ -70,25 +56,25 @@ func (c *BlobDiskCache) SetBlockBlobs(block *blockBlobs) error { c.mu.Lock() defer c.mu.Unlock() - var bcs []*blobCached + var blbs []*blob for _, b := range block.blobs { id, err := c.store.Put(b.data) if err != nil { c.lg.Error("Failed to write blockBlobs into storage", "block", block.number, "err", err) return err } - c.index[b.kvIndex.Uint64()] = id - bcs = append(bcs, &blobCached{ + c.kvIndexLookup[b.kvIndex.Uint64()] = id + blbs = append(blbs, &blob{ kvIndex: b.kvIndex, kvSize: b.kvSize, hash: b.hash, dataId: id, }) } - c.lookup[block.number] = &blockBlobsCached{ + c.blockLookup[block.number] = &blockBlobs{ timestamp: block.timestamp, number: block.number, - blobs: bcs, + blobs: blbs, } c.lg.Info("Set blockBlobs to cache", "block", block.number) return nil @@ -96,23 +82,23 @@ func (c *BlobDiskCache) SetBlockBlobs(block *blockBlobs) error { func (c *BlobDiskCache) Blobs(number uint64) []blob { c.mu.RLock() - bb, ok := c.lookup[number] + bb, ok := c.blockLookup[number] c.mu.RUnlock() if !ok { return nil } c.lg.Info("Blobs from cache", "block", bb.number) res := []blob{} - for _, bc := range bb.blobs { - data, err := c.store.Get(bc.dataId) + for _, blb := range bb.blobs { + data, err := c.store.Get(blb.dataId) if err != nil { c.lg.Error("Failed to get blockBlobs from storage", "block", number, "err", err) return nil } res = append(res, blob{ - kvIndex: bc.kvIndex, - kvSize: bc.kvSize, - hash: bc.hash, + kvIndex: blb.kvIndex, + kvSize: blb.kvSize, + hash: blb.hash, data: data, }) } @@ -123,7 +109,7 @@ func (c *BlobDiskCache) GetKeyValueByIndex(idx uint64, hash common.Hash) []byte c.mu.RLock() defer c.mu.RUnlock() - for _, bb := range c.lookup { + for _, bb := range c.blockLookup { for _, b := range bb.blobs { if b.kvIndex.Uint64() == idx && bytes.Equal(b.hash[0:ethstorage.HashSizeInContract], hash[0:ethstorage.HashSizeInContract]) { @@ -141,7 +127,7 @@ func (c *BlobDiskCache) GetKeyValueByIndex(idx uint64, hash common.Hash) []byte func (c *BlobDiskCache) GetSampleData(idx, sampleIdx uint64) []byte { c.mu.RLock() - id, ok := c.index[idx] + id, ok := c.kvIndexLookup[idx] c.mu.RUnlock() if !ok { return nil @@ -160,12 +146,12 @@ func (c *BlobDiskCache) Cleanup(finalized uint64) { c.mu.Lock() defer c.mu.Unlock() - for number, block := range c.lookup { + for number, block := range c.blockLookup { if number <= finalized { - delete(c.lookup, number) + delete(c.blockLookup, number) for _, blob := range block.blobs { if blob.kvIndex != nil { - delete(c.index, blob.kvIndex.Uint64()) + delete(c.kvIndexLookup, blob.kvIndex.Uint64()) } if err := c.store.Delete(blob.dataId); err != nil { c.lg.Error("Failed to delete block from id", 
"id", blob.dataId, "err", err) @@ -180,13 +166,13 @@ func (c *BlobDiskCache) Close() error { c.lg.Warn("Closing BlobDiskCache") c.mu.Lock() defer c.mu.Unlock() - for _, id := range c.index { + for _, id := range c.kvIndexLookup { if err := c.store.Delete(id); err != nil { c.lg.Warn("Failed to delete blob from id", "id", id, "err", err) } } - c.lookup = nil - c.index = nil + c.blockLookup = nil + c.kvIndexLookup = nil return c.store.Close() } diff --git a/ethstorage/downloader/downloader.go b/ethstorage/downloader/downloader.go index c95d9b78..454b74fb 100644 --- a/ethstorage/downloader/downloader.go +++ b/ethstorage/downloader/downloader.go @@ -79,6 +79,7 @@ type blob struct { kvSize *big.Int hash common.Hash data []byte + dataId uint64 } func (b *blob) String() string { From e5d22c89a36264fa2dff3aefa281718de8754c7a Mon Sep 17 00:00:00 2001 From: syntrust Date: Wed, 31 Jul 2024 10:48:54 +0800 Subject: [PATCH 59/61] fix comments --- ethstorage/downloader/blob_disk_cache.go | 38 +++++++++++++++++------- go.mod | 2 +- go.sum | 2 -- 3 files changed, 28 insertions(+), 14 deletions(-) diff --git a/ethstorage/downloader/blob_disk_cache.go b/ethstorage/downloader/blob_disk_cache.go index cffd2b92..713c98cb 100644 --- a/ethstorage/downloader/blob_disk_cache.go +++ b/ethstorage/downloader/blob_disk_cache.go @@ -5,6 +5,7 @@ package downloader import ( "bytes" + "errors" "os" "path/filepath" "sync" @@ -25,6 +26,7 @@ const ( type BlobDiskCache struct { store billy.Database + storePath string blockLookup map[uint64]*blockBlobs // Lookup table mapping block number to blockBlob kvIndexLookup map[uint64]uint64 // Lookup table mapping kvIndex to blob billy entries id mu sync.RWMutex // protects lookup and index maps @@ -39,6 +41,7 @@ func NewBlobDiskCache(datadir string, lg log.Logger) *BlobDiskCache { c := &BlobDiskCache{ blockLookup: make(map[uint64]*blockBlobs), kvIndexLookup: make(map[uint64]uint64), + storePath: cbdir, lg: lg, } @@ -58,12 +61,18 @@ func (c *BlobDiskCache) SetBlockBlobs(block *blockBlobs) error { var blbs []*blob for _, b := range block.blobs { + kvi := b.kvIndex.Uint64() + if id, ok := c.kvIndexLookup[kvi]; ok { + if err := c.store.Delete(id); err != nil { + c.lg.Warn("Failed to delete blob from cache", "kvIndex", kvi, "id", id, "err", err) + } + } id, err := c.store.Put(b.data) if err != nil { - c.lg.Error("Failed to write blockBlobs into storage", "block", block.number, "err", err) + c.lg.Error("Failed to put blob into cache", "block", block.number, "kvIndex", kvi, "err", err) return err } - c.kvIndexLookup[b.kvIndex.Uint64()] = id + c.kvIndexLookup[kvi] = id blbs = append(blbs, &blob{ kvIndex: b.kvIndex, kvSize: b.kvSize, @@ -163,17 +172,24 @@ func (c *BlobDiskCache) Cleanup(finalized uint64) { } func (c *BlobDiskCache) Close() error { - c.lg.Warn("Closing BlobDiskCache") - c.mu.Lock() - defer c.mu.Unlock() - for _, id := range c.kvIndexLookup { - if err := c.store.Delete(id); err != nil { - c.lg.Warn("Failed to delete blob from id", "id", id, "err", err) + var er error + if err := c.store.Close(); err != nil { + c.lg.Error("Failed to close cache", "err", err) + er = err + } + if err := os.RemoveAll(c.storePath); err != nil { + c.lg.Error("Failed to remove cache dir", "err", err) + if er == nil { + er = err + } else { + er = errors.New(er.Error() + "; " + err.Error()) } } - c.blockLookup = nil - c.kvIndexLookup = nil - return c.store.Close() + if er != nil { + return er + } + c.lg.Info("BlobDiskCache closed.") + return nil } func newSlotter() func() (uint32, bool) { diff --git 
a/go.mod b/go.mod index ab5c9a8b..6cec0395 100644 --- a/go.mod +++ b/go.mod @@ -11,6 +11,7 @@ require ( github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3 github.com/ethereum-optimism/optimism v1.2.0 github.com/ethereum/go-ethereum v1.13.5 + github.com/ethstorage/billy v0.0.0-20240730021803-ca24378685e7 github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb github.com/gorilla/mux v1.8.1 github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d @@ -46,7 +47,6 @@ require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/deckarep/golang-set/v2 v2.1.0 // indirect github.com/ethereum/c-kzg-4844 v0.4.0 // indirect - github.com/ethstorage/billy v0.0.0-20240730021803-ca24378685e7 github.com/fjl/memsize v0.0.1 // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/go-stack/stack v1.8.1 // indirect diff --git a/go.sum b/go.sum index 9c9531ad..06d1e979 100644 --- a/go.sum +++ b/go.sum @@ -162,8 +162,6 @@ github.com/ethereum/c-kzg-4844 v0.4.0 h1:3MS1s4JtA868KpJxroZoepdV0ZKBp3u/O5HcZ7R github.com/ethereum/c-kzg-4844 v0.4.0/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0= github.com/ethereum/go-ethereum v1.13.5 h1:U6TCRciCqZRe4FPXmy1sMGxTfuk8P7u2UoinF3VbaFk= github.com/ethereum/go-ethereum v1.13.5/go.mod h1:yMTu38GSuyxaYzQMViqNmQ1s3cE84abZexQmTgenWk0= -github.com/ethstorage/billy v0.0.0-20240726111512-742ba0782293 h1:yh/FiW8cnh5SraRSHPQ/qsoT7fonF4mwX1Nl/Bw4VwU= -github.com/ethstorage/billy v0.0.0-20240726111512-742ba0782293/go.mod h1:y+M5PibhfdCZkQbwwFRIRIgGVc1txfqc3qvBeoQGoGw= github.com/ethstorage/billy v0.0.0-20240730021803-ca24378685e7 h1:wOhbhs4WO8Mf0LT9c2qRxUbspAU9Tg4olLiastQTO5A= github.com/ethstorage/billy v0.0.0-20240730021803-ca24378685e7/go.mod h1:y+M5PibhfdCZkQbwwFRIRIgGVc1txfqc3qvBeoQGoGw= github.com/ethstorage/go-iden3-crypto v0.0.0-20230406080944-d89aec086425 h1:dKQu1oXrt6ndFl4XtAZsfBubU8c5W59T85L8MGtWawE= From 8ba6628cf6ee3c14a0562361825e40f2b05da2f1 Mon Sep 17 00:00:00 2001 From: syntrust Date: Wed, 31 Jul 2024 14:09:49 +0800 Subject: [PATCH 60/61] stats --- ethstorage/downloader/blob_disk_cache.go | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/ethstorage/downloader/blob_disk_cache.go b/ethstorage/downloader/blob_disk_cache.go index 713c98cb..1dbce17d 100644 --- a/ethstorage/downloader/blob_disk_cache.go +++ b/ethstorage/downloader/blob_disk_cache.go @@ -9,6 +9,7 @@ import ( "os" "path/filepath" "sync" + "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" @@ -145,16 +146,21 @@ func (c *BlobDiskCache) GetSampleData(idx, sampleIdx uint64) []byte { off := sampleIdx << ethstorage.SampleSizeBits data, err := c.store.GetSample(id, off, sampleSize) if err != nil { - c.lg.Error("Failed to get sample from downloader cache", "kvIndex", idx, "id", id, "err", err) + c.lg.Error("Failed to get sample from downloader cache", "kvIndex", idx, "sampleIndex", sampleIdx, "id", id, "err", err) return nil } return data } func (c *BlobDiskCache) Cleanup(finalized uint64) { + start := time.Now() + defer func() { + c.lg.Info("BlobDiskCache cleanup done", "took", time.Since(start)) + }() c.mu.Lock() defer c.mu.Unlock() + var blocksCleaned, blobsCleaned int for number, block := range c.blockLookup { if number <= finalized { delete(c.blockLookup, number) @@ -165,10 +171,12 @@ func (c *BlobDiskCache) Cleanup(finalized uint64) { if err := c.store.Delete(blob.dataId); err != nil { c.lg.Error("Failed to delete block from id", "id", blob.dataId, "err", err) } + blobsCleaned++ } - c.lg.Info("Cleanup 
deleted", "finalized", finalized, "block", block.number) + blocksCleaned++ } } + c.lg.Info("Cleanup done", "blockFinalized", finalized, "blocksCleaned", blocksCleaned, "blobsCleaned", blobsCleaned) } func (c *BlobDiskCache) Close() error { From 808d9af903c2e2072cc7998538c4c9801c3ee5ba Mon Sep 17 00:00:00 2001 From: syntrust Date: Wed, 31 Jul 2024 16:53:12 +0800 Subject: [PATCH 61/61] fix comments --- ethstorage/downloader/blob_disk_cache.go | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/ethstorage/downloader/blob_disk_cache.go b/ethstorage/downloader/blob_disk_cache.go index 1dbce17d..df0f3ff4 100644 --- a/ethstorage/downloader/blob_disk_cache.go +++ b/ethstorage/downloader/blob_disk_cache.go @@ -60,14 +60,16 @@ func (c *BlobDiskCache) SetBlockBlobs(block *blockBlobs) error { c.mu.Lock() defer c.mu.Unlock() + if blockOld, ok := c.blockLookup[block.number]; ok { + for _, b := range blockOld.blobs { + if err := c.store.Delete(b.dataId); err != nil { + c.lg.Warn("Failed to delete blob from cache", "kvIndex", b.kvIndex, "id", b.dataId, "err", err) + } + } + } var blbs []*blob for _, b := range block.blobs { kvi := b.kvIndex.Uint64() - if id, ok := c.kvIndexLookup[kvi]; ok { - if err := c.store.Delete(id); err != nil { - c.lg.Warn("Failed to delete blob from cache", "kvIndex", kvi, "id", id, "err", err) - } - } id, err := c.store.Put(b.data) if err != nil { c.lg.Error("Failed to put blob into cache", "block", block.number, "kvIndex", kvi, "err", err)
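
Note on the cache layout the series converges on: after patches 51 through 61, each blob is stored as its own billy entry (shelf size blobSize + itemHeaderSize), BlobDiskCache keys entries by block number and by kvIndex, and GetSampleData serves the miner by reading sampleSize bytes at the in-blob sample offset via store.GetSample. The sketch below is not part of the series; it only illustrates the index arithmetic that BlobReader.ReadSample and GetSampleData share, and the bit widths are assumptions chosen to match the test constant kvSize = 1 << 17 rather than values taken from the ethstorage package.

package main

import "fmt"

// Sketch of the sample addressing shared by BlobReader.ReadSample and
// BlobDiskCache.GetSampleData. The bit widths below are assumptions for
// illustration only; the node derives them from the storage manager's
// MaxKvSizeBits and from ethstorage.SampleSizeBits.
const (
	maxKvSizeBits  = 17 // assumed: 128 KiB KV, matching kvSize = 1 << 17 in the tests
	sampleSizeBits = 5  // assumed: 32-byte samples
	sampleLenBits  = maxKvSizeBits - sampleSizeBits
)

// locateSample maps a shard-wide sample index to the KV that holds it, the
// sample's position inside that KV, and the byte range that GetSampleData
// reads from the cached blob entry via store.GetSample.
func locateSample(sampleIdx uint64) (kvIdx, sampleIdxInKv, byteOff, byteLen uint64) {
	kvIdx = sampleIdx >> sampleLenBits               // which blob/KV the sample lives in
	sampleIdxInKv = sampleIdx % (1 << sampleLenBits) // sample index within that blob
	byteOff = sampleIdxInKv << sampleSizeBits        // offset handed to the billy store
	byteLen = 1 << sampleSizeBits                    // sample size in bytes
	return
}

func main() {
	kvIdx, inKv, off, n := locateSample(12345)
	fmt.Printf("kvIdx=%d sampleIdxInKv=%d offset=%d size=%d\n", kvIdx, inKv, off, n)
}

With these assumed widths, sample index 12345 resolves to kvIdx 3, sample 57 within that blob, and a 32-byte read at offset 1824 of the corresponding billy entry, which is the same mapping the miner path performs before handing the offsets to the cache.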