refactor: use new da-codec interfaces #1068

Merged 14 commits on Oct 21, 2024

Changes from all commits
41 changes: 0 additions & 41 deletions core/rawdb/accessors_rollup_event.go
@@ -58,47 +58,6 @@ func ReadRollupEventSyncedL1BlockNumber(db ethdb.Reader) *uint64 {
return &rollupEventSyncedL1BlockNumber
}

// WriteBatchChunkRanges writes the block ranges for each chunk within a batch to the database.
// It serializes the chunk ranges using RLP and stores them under a key derived from the batch index.
// for backward compatibility, new info is also stored in CommittedBatchMeta.
func WriteBatchChunkRanges(db ethdb.KeyValueWriter, batchIndex uint64, chunkBlockRanges []*ChunkBlockRange) {
value, err := rlp.EncodeToBytes(chunkBlockRanges)
if err != nil {
log.Crit("failed to RLP encode batch chunk ranges", "batch index", batchIndex, "err", err)
}
if err := db.Put(batchChunkRangesKey(batchIndex), value); err != nil {
log.Crit("failed to store batch chunk ranges", "batch index", batchIndex, "value", value, "err", err)
}
}

// DeleteBatchChunkRanges removes the block ranges of all chunks associated with a specific batch from the database.
// Note: Only non-finalized batches can be reverted.
// for backward compatibility, new info is also stored in CommittedBatchMeta.
func DeleteBatchChunkRanges(db ethdb.KeyValueWriter, batchIndex uint64) {
if err := db.Delete(batchChunkRangesKey(batchIndex)); err != nil {
log.Crit("failed to delete batch chunk ranges", "batch index", batchIndex, "err", err)
}
}

// ReadBatchChunkRanges retrieves the block ranges of all chunks associated with a specific batch from the database.
// It returns a list of ChunkBlockRange pointers, or nil if no chunk ranges are found for the given batch index.
// for backward compatibility, new info is also stored in CommittedBatchMeta.
func ReadBatchChunkRanges(db ethdb.Reader, batchIndex uint64) []*ChunkBlockRange {
data, err := db.Get(batchChunkRangesKey(batchIndex))
if err != nil && isNotFoundErr(err) {
return nil
}
if err != nil {
log.Crit("failed to read batch chunk ranges from database", "err", err)
}

cr := new([]*ChunkBlockRange)
if err := rlp.Decode(bytes.NewReader(data), cr); err != nil {
log.Crit("Invalid ChunkBlockRange RLP", "batch index", batchIndex, "data", data, "err", err)
}
return *cr
}

// WriteFinalizedBatchMeta stores the metadata of a finalized batch in the database.
func WriteFinalizedBatchMeta(db ethdb.KeyValueWriter, batchIndex uint64, finalizedBatchMeta *FinalizedBatchMeta) {
value, err := rlp.EncodeToBytes(finalizedBatchMeta)
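The removed accessors above kept per-batch chunk ranges under their own `R-bcr` key; the deleted comments note that the same information is also stored in `CommittedBatchMeta`. A minimal sketch of reading it from there instead, assuming `ReadCommittedBatchMeta` follows the same accessor pattern and `CommittedBatchMeta` exposes a `ChunkBlockRanges` field (neither is shown in this diff):

```go
package example

import (
	"github.com/scroll-tech/go-ethereum/core/rawdb"
	"github.com/scroll-tech/go-ethereum/ethdb"
)

// chunkRangesForBatch reads the per-chunk block ranges from CommittedBatchMeta.
// Assumptions: ReadCommittedBatchMeta returns nil for unknown batches and the
// struct carries a ChunkBlockRanges field; both are inferred from the removed
// comments and the tests below, not from this diff.
func chunkRangesForBatch(db ethdb.Reader, batchIndex uint64) []*rawdb.ChunkBlockRange {
	meta := rawdb.ReadCommittedBatchMeta(db, batchIndex)
	if meta == nil {
		return nil
	}
	return meta.ChunkBlockRanges
}
```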
64 changes: 0 additions & 64 deletions core/rawdb/accessors_rollup_event_test.go
@@ -147,70 +147,6 @@ func TestFinalizedBatchMeta(t *testing.T) {
}
}

func TestBatchChunkRanges(t *testing.T) {
chunks := [][]*ChunkBlockRange{
{
{StartBlockNumber: 1, EndBlockNumber: 100},
{StartBlockNumber: 101, EndBlockNumber: 200},
},
{
{StartBlockNumber: 201, EndBlockNumber: 300},
{StartBlockNumber: 301, EndBlockNumber: 400},
},
{
{StartBlockNumber: 401, EndBlockNumber: 500},
},
}

db := NewMemoryDatabase()

for i, chunkRange := range chunks {
batchIndex := uint64(i)
WriteBatchChunkRanges(db, batchIndex, chunkRange)
}

for i, chunkRange := range chunks {
batchIndex := uint64(i)
readChunkRange := ReadBatchChunkRanges(db, batchIndex)
if len(readChunkRange) != len(chunkRange) {
t.Fatal("Mismatch in number of chunk ranges", "expected", len(chunkRange), "got", len(readChunkRange))
}

for j, cr := range readChunkRange {
if cr.StartBlockNumber != chunkRange[j].StartBlockNumber || cr.EndBlockNumber != chunkRange[j].EndBlockNumber {
t.Fatal("Mismatch in chunk range", "batch index", batchIndex, "expected", chunkRange[j], "got", cr)
}
}
}

// over-write
newRange := []*ChunkBlockRange{{StartBlockNumber: 1001, EndBlockNumber: 1100}}
WriteBatchChunkRanges(db, 0, newRange)
readChunkRange := ReadBatchChunkRanges(db, 0)
if len(readChunkRange) != 1 || readChunkRange[0].StartBlockNumber != 1001 || readChunkRange[0].EndBlockNumber != 1100 {
t.Fatal("Over-write failed for chunk range", "expected", newRange, "got", readChunkRange)
}

// read non-existing value
if readChunkRange = ReadBatchChunkRanges(db, uint64(len(chunks)+1)); readChunkRange != nil {
t.Fatal("Expected nil for non-existing value", "got", readChunkRange)
}

// delete: revert batch
for i := range chunks {
batchIndex := uint64(i)
DeleteBatchChunkRanges(db, batchIndex)

readChunkRange := ReadBatchChunkRanges(db, batchIndex)
if readChunkRange != nil {
t.Fatal("Chunk range was not deleted", "batch index", batchIndex)
}
}

// delete non-existing value: ensure the delete operation handles non-existing values without errors.
DeleteBatchChunkRanges(db, uint64(len(chunks)+1))
}

func TestWriteReadDeleteCommittedBatchMeta(t *testing.T) {
db := NewMemoryDatabase()

6 changes: 0 additions & 6 deletions core/rawdb/schema.go
@@ -149,7 +149,6 @@ var (

// Scroll rollup event store
rollupEventSyncedL1BlockNumberKey = []byte("R-LastRollupEventSyncedL1BlockNumber")
batchChunkRangesPrefix = []byte("R-bcr")
batchMetaPrefix = []byte("R-bm")
finalizedL2BlockNumberKey = []byte("R-finalized")
lastFinalizedBatchIndexKey = []byte("R-finalizedBatchIndex")
@@ -410,11 +409,6 @@ func SkippedTransactionHashKey(index uint64) []byte {
return append(skippedTransactionHashPrefix, encodeBigEndian(index)...)
}

// batchChunkRangesKey = batchChunkRangesPrefix + batch index (uint64 big endian)
func batchChunkRangesKey(batchIndex uint64) []byte {
return append(batchChunkRangesPrefix, encodeBigEndian(batchIndex)...)
}

// batchMetaKey = batchMetaPrefix + batch index (uint64 big endian)
func batchMetaKey(batchIndex uint64) []byte {
return append(batchMetaPrefix, encodeBigEndian(batchIndex)...)
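With the `R-bcr` prefix gone, batch-scoped data remains keyed by a prefix plus the big-endian batch index, as `batchMetaKey` shows. A self-contained illustration of that layout; `exampleBatchMetaKey` is a hypothetical helper written for this note, not code from the diff:

```go
package example

import "encoding/binary"

// exampleBatchMetaKey reproduces the key layout used by batchMetaKey:
// "R-bm" (batchMetaPrefix) followed by the batch index as 8 big-endian bytes.
func exampleBatchMetaKey(batchIndex uint64) []byte {
	prefix := []byte("R-bm")
	idx := make([]byte, 8)
	binary.BigEndian.PutUint64(idx, batchIndex)
	return append(prefix, idx...)
}
```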
4 changes: 2 additions & 2 deletions go.mod
@@ -5,7 +5,7 @@ go 1.21
require (
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0
github.com/Microsoft/go-winio v0.6.1
github.com/VictoriaMetrics/fastcache v1.12.1
github.com/VictoriaMetrics/fastcache v1.12.2
github.com/aws/aws-sdk-go-v2 v1.21.2
github.com/aws/aws-sdk-go-v2/config v1.18.45
github.com/aws/aws-sdk-go-v2/credentials v1.13.43
@@ -57,7 +57,7 @@ require (
github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7
github.com/protolambda/bls12-381-util v0.0.0-20220416220906-d8552aa452c7
github.com/rs/cors v1.7.0
github.com/scroll-tech/da-codec v0.1.1-0.20240902151734-41c648646967
github.com/scroll-tech/da-codec v0.1.2
github.com/scroll-tech/zktrie v0.8.4
github.com/shirou/gopsutil v3.21.11+incompatible
github.com/sourcegraph/conc v0.3.0
12 changes: 7 additions & 5 deletions go.sum
@@ -51,8 +51,10 @@ github.com/DataDog/zstd v1.4.5 h1:EndNeuB0l9syBZhut0wns3gV1hL8zX8LIu6ZiVHWLIQ=
github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
github.com/VictoriaMetrics/fastcache v1.12.1 h1:i0mICQuojGDL3KblA7wUNlY5lOK6a4bwt3uRKnkZU40=
github.com/VictoriaMetrics/fastcache v1.12.1/go.mod h1:tX04vaqcNoQeGLD+ra5pU5sWkuxnzWhEzLwhP9w653o=
github.com/VictoriaMetrics/fastcache v1.12.2 h1:N0y9ASrJ0F6h0QaC3o6uJb3NIZ9VKLjCM7NQbSmF7WI=
github.com/VictoriaMetrics/fastcache v1.12.2/go.mod h1:AmC+Nzz1+3G2eCPapF6UcsnkThDcMsQicp4xDukwJYI=
github.com/agiledragon/gomonkey/v2 v2.12.0 h1:ek0dYu9K1rSV+TgkW5LvNNPRWyDZVIxGMCFI6Pz9o38=
github.com/agiledragon/gomonkey/v2 v2.12.0/go.mod h1:ap1AmDzcVOAz1YpeJ3TCzIgstoaWLA6jbbgxfB4w2iY=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
@@ -472,8 +474,8 @@ github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/da-codec v0.1.1-0.20240902151734-41c648646967 h1:FSM0l1n5KszBjPFOnMbSa4pg3zv07DYIU2VnH6BUH34=
github.com/scroll-tech/da-codec v0.1.1-0.20240902151734-41c648646967/go.mod h1:O9jsbQGNnTEfyfZg7idevq6jGGSQshX70elX+TRH8vU=
github.com/scroll-tech/da-codec v0.1.2 h1:QyJ+dQ4zWVVJwuqxNt4MiKyrymVc6rHe4YPtURkjiRc=
github.com/scroll-tech/da-codec v0.1.2/go.mod h1:odz1ck3umvYccCG03osaQBISAYGinZktZYbpk94fYRE=
github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
@@ -693,9 +695,9 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
2 changes: 1 addition & 1 deletion rollup/da_syncer/batch_queue.go
@@ -41,7 +41,7 @@ func (bq *BatchQueue) NextBatch(ctx context.Context) (da.Entry, error) {
return nil, err
}
switch daEntry.Type() {
case da.CommitBatchV0Type, da.CommitBatchV1Type, da.CommitBatchV2Type:
case da.CommitBatchV0Type, da.CommitBatchWithBlobType:
bq.addBatch(daEntry)
case da.RevertBatchType:
bq.deleteBatch(daEntry)
27 changes: 16 additions & 11 deletions rollup/da_syncer/da/calldata_blob_source.go
@@ -5,6 +5,7 @@ import (
"errors"
"fmt"

"github.com/scroll-tech/da-codec/encoding"
"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
@@ -205,19 +206,21 @@ func (ds *CalldataBlobSource) getCommitBatchDA(batchIndex uint64, vLog *types.Lo
if err != nil {
return nil, fmt.Errorf("failed to unpack transaction data using ABI, tx data: %v, err: %w", txData, err)
}

if method.Name == commitBatchMethodName {
args, err := newCommitBatchArgs(method, values)
if err != nil {
return nil, fmt.Errorf("failed to decode calldata into commitBatch args, values: %+v, err: %w", values, err)
}
codecVersion := encoding.CodecVersion(args.Version)
codec, err := encoding.CodecFromVersion(codecVersion)
if err != nil {
return nil, fmt.Errorf("unsupported codec version: %v, batch index: %v, err: %w", codecVersion, batchIndex, err)
}
switch args.Version {
case 0:
return NewCommitBatchDAV0(ds.db, args.Version, batchIndex, args.ParentBatchHeader, args.Chunks, args.SkippedL1MessageBitmap, vLog.BlockNumber)
case 1:
return NewCommitBatchDAV1(ds.ctx, ds.db, ds.l1Client, ds.blobClient, vLog, args.Version, batchIndex, args.ParentBatchHeader, args.Chunks, args.SkippedL1MessageBitmap)
case 2:
return NewCommitBatchDAV2(ds.ctx, ds.db, ds.l1Client, ds.blobClient, vLog, args.Version, batchIndex, args.ParentBatchHeader, args.Chunks, args.SkippedL1MessageBitmap)
return NewCommitBatchDAV0(ds.db, codec, args.Version, batchIndex, args.ParentBatchHeader, args.Chunks, args.SkippedL1MessageBitmap, vLog.BlockNumber)
case 1, 2:
return NewCommitBatchDAWithBlob(ds.ctx, ds.db, codec, ds.l1Client, ds.blobClient, vLog, args.Version, batchIndex, args.ParentBatchHeader, args.Chunks, args.SkippedL1MessageBitmap)
default:
return nil, fmt.Errorf("failed to decode DA, codec version is unknown: codec version: %d", args.Version)
}
@@ -226,12 +229,14 @@ func (ds *CalldataBlobSource) getCommitBatchDA(batchIndex uint64, vLog *types.Lo
if err != nil {
return nil, fmt.Errorf("failed to decode calldata into commitBatch args, values: %+v, err: %w", values, err)
}
codecVersion := encoding.CodecVersion(args.Version)
codec, err := encoding.CodecFromVersion(codecVersion)
if err != nil {
return nil, fmt.Errorf("unsupported codec version: %v, batch index: %v, err: %w", codecVersion, batchIndex, err)
}
switch args.Version {
case 3:
// we can use V2 for version 3, because it's same
return NewCommitBatchDAV2(ds.ctx, ds.db, ds.l1Client, ds.blobClient, vLog, args.Version, batchIndex, args.ParentBatchHeader, args.Chunks, args.SkippedL1MessageBitmap)
case 4:
return NewCommitBatchDAV4(ds.ctx, ds.db, ds.l1Client, ds.blobClient, vLog, args.Version, batchIndex, args.ParentBatchHeader, args.Chunks, args.SkippedL1MessageBitmap)
case 3, 4:
return NewCommitBatchDAWithBlob(ds.ctx, ds.db, codec, ds.l1Client, ds.blobClient, vLog, args.Version, batchIndex, args.ParentBatchHeader, args.Chunks, args.SkippedL1MessageBitmap)
default:
return nil, fmt.Errorf("failed to decode DA, codec version is unknown: codec version: %d", args.Version)
}
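Both commitBatch paths now resolve the version byte to a codec through the new da-codec interface before constructing the DA entry. A minimal sketch of that resolution step on its own; `codecForVersion` is a hypothetical helper, the PR inlines this logic in `getCommitBatchDA`:

```go
package example

import (
	"fmt"

	"github.com/scroll-tech/da-codec/encoding"
)

// codecForVersion maps the batch version byte decoded from calldata to an
// encoding.Codec, mirroring the calls to encoding.CodecFromVersion added above.
func codecForVersion(version uint8) (encoding.Codec, error) {
	codec, err := encoding.CodecFromVersion(encoding.CodecVersion(version))
	if err != nil {
		return nil, fmt.Errorf("unsupported codec version %d: %w", version, err)
	}
	return codec, nil
}
```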
27 changes: 13 additions & 14 deletions rollup/da_syncer/da/commitV0.go
@@ -5,7 +5,6 @@ import (
"fmt"

"github.com/scroll-tech/da-codec/encoding"
"github.com/scroll-tech/da-codec/encoding/codecv0"

"github.com/scroll-tech/go-ethereum/core/rawdb"
"github.com/scroll-tech/go-ethereum/core/types"
@@ -18,21 +17,22 @@ type CommitBatchDAV0 struct {
batchIndex uint64
parentTotalL1MessagePopped uint64
skippedL1MessageBitmap []byte
chunks []*codecv0.DAChunkRawTx
chunks []*encoding.DAChunkRawTx
l1Txs []*types.L1MessageTx

l1BlockNumber uint64
}

func NewCommitBatchDAV0(db ethdb.Database,
codec encoding.Codec,
version uint8,
batchIndex uint64,
parentBatchHeader []byte,
chunks [][]byte,
skippedL1MessageBitmap []byte,
l1BlockNumber uint64,
) (*CommitBatchDAV0, error) {
decodedChunks, err := codecv0.DecodeDAChunksRawTx(chunks)
decodedChunks, err := codec.DecodeDAChunksRawTx(chunks)
if err != nil {
return nil, fmt.Errorf("failed to unpack chunks: %d, err: %w", batchIndex, err)
}
@@ -44,7 +44,7 @@ func NewCommitBatchDAV0WithChunks(db ethdb.Database,
version uint8,
batchIndex uint64,
parentBatchHeader []byte,
decodedChunks []*codecv0.DAChunkRawTx,
decodedChunks []*encoding.DAChunkRawTx,
skippedL1MessageBitmap []byte,
l1BlockNumber uint64,
) (*CommitBatchDAV0, error) {
@@ -100,24 +100,24 @@ func (c *CommitBatchDAV0) Blocks() []*PartialBlock {
for _, chunk := range c.chunks {
for blockId, daBlock := range chunk.Blocks {
// create txs
txs := make(types.Transactions, 0, daBlock.NumTransactions)
txs := make(types.Transactions, 0, daBlock.NumTransactions())
// insert l1 msgs
for l1TxPointer < len(c.l1Txs) && c.l1Txs[l1TxPointer].QueueIndex < curL1TxIndex+uint64(daBlock.NumL1Messages) {
for l1TxPointer < len(c.l1Txs) && c.l1Txs[l1TxPointer].QueueIndex < curL1TxIndex+uint64(daBlock.NumL1Messages()) {
l1Tx := types.NewTx(c.l1Txs[l1TxPointer])
txs = append(txs, l1Tx)
l1TxPointer++
}
curL1TxIndex += uint64(daBlock.NumL1Messages)
curL1TxIndex += uint64(daBlock.NumL1Messages())

// insert l2 txs
txs = append(txs, chunk.Transactions[blockId]...)

block := NewPartialBlock(
&PartialHeader{
Number: daBlock.BlockNumber,
Time: daBlock.Timestamp,
BaseFee: daBlock.BaseFee,
GasLimit: daBlock.GasLimit,
Number: daBlock.Number(),
Time: daBlock.Timestamp(),
BaseFee: daBlock.BaseFee(),
GasLimit: daBlock.GasLimit(),
Difficulty: 10, // TODO: replace with real difficulty
ExtraData: []byte{1, 2, 3, 4, 5, 6, 7, 8}, // TODO: replace with real extra data
},
@@ -129,19 +129,18 @@ func (c *CommitBatchDAV0) Blocks() []*PartialBlock {
return blocks
}

func getTotalMessagesPoppedFromChunks(decodedChunks []*codecv0.DAChunkRawTx) int {
func getTotalMessagesPoppedFromChunks(decodedChunks []*encoding.DAChunkRawTx) int {
totalL1MessagePopped := 0
for _, chunk := range decodedChunks {
for _, block := range chunk.Blocks {
totalL1MessagePopped += int(block.NumL1Messages)
totalL1MessagePopped += int(block.NumL1Messages())
}
}
return totalL1MessagePopped
}

func getL1Messages(db ethdb.Database, parentTotalL1MessagePopped uint64, skippedBitmap []byte, totalL1MessagePopped int) ([]*types.L1MessageTx, error) {
var txs []*types.L1MessageTx

decodedSkippedBitmap, err := encoding.DecodeBitmap(skippedBitmap, totalL1MessagePopped)
if err != nil {
return nil, fmt.Errorf("failed to decode skipped message bitmap: err: %w", err)
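The commitV0 changes swap the codecv0-specific types and exported fields for the shared `encoding.DAChunkRawTx` type and its accessor methods (`Number()`, `Timestamp()`, `NumL1Messages()`, and so on). A short sketch of iterating decoded chunks in that style; `countL1Messages` is a hypothetical helper, equivalent in spirit to `getTotalMessagesPoppedFromChunks` above:

```go
package example

import "github.com/scroll-tech/da-codec/encoding"

// countL1Messages totals the L1 messages across all blocks of the decoded
// chunks, using the accessor methods this PR switches to.
func countL1Messages(chunks []*encoding.DAChunkRawTx) uint64 {
	var total uint64
	for _, chunk := range chunks {
		for _, block := range chunk.Blocks {
			total += uint64(block.NumL1Messages())
		}
	}
	return total
}
```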