From 4c558435b1280e6fe9bbb62070a039effeeeed53 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Wed, 25 Oct 2023 10:59:40 -0600 Subject: [PATCH 01/22] Backport geth GetEVM nil BlockContext fix --- go-ethereum | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go-ethereum b/go-ethereum index 921163a16b..4a5108752c 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit 921163a16b537c08b1a383663ce2c4f3bd84a3a0 +Subproject commit 4a5108752c2b34fc83cb0d0c8447cded82921e59 From f64d4a2fd46f55496bd17e06406f94734597ac11 Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Tue, 31 Oct 2023 19:55:43 -0600 Subject: [PATCH 02/22] arbnode: init broadcast server before sequencing sequencing attempts to send broadcast messages this gets stuck if broadcast server isn't started and deadlocked in case of ForceSequenceDelayed --- arbnode/node.go | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/arbnode/node.go b/arbnode/node.go index be3380954f..e671a191fb 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -1056,6 +1056,13 @@ func (n *Node) Start(ctx context.Context) error { return fmt.Errorf("error starting inbox reader: %w", err) } } + // broadcastServer must be started befopre sequencing any transactions + if n.BroadcastServer != nil { + err = n.BroadcastServer.Start(ctx) + if err != nil { + return fmt.Errorf("error starting feed broadcast server: %w", err) + } + } if n.DelayedSequencer != nil && n.SeqCoordinator == nil { err = n.DelayedSequencer.ForceSequenceDelayed(ctx) if err != nil { @@ -1114,12 +1121,6 @@ func (n *Node) Start(ctx context.Context) error { if n.L1Reader != nil { n.L1Reader.Start(ctx) } - if n.BroadcastServer != nil { - err = n.BroadcastServer.Start(ctx) - if err != nil { - return fmt.Errorf("error starting feed broadcast server: %w", err) - } - } if n.BroadcastClients != nil { go func() { if n.InboxReader != nil { From 1ad4c7a6b7f8b95cb2a53070ff82058ad0b50e96 Mon Sep 17 00:00:00 2001 From: rishabhagrawalzra Date: Tue, 28 Nov 2023 17:22:22 +0530 Subject: [PATCH 03/22] feat: enable das with integration of avail da --- Dockerfile | 7 +- arbnode/batch_poster.go | 37 ++-- arbnode/delayed_seq_reorg_test.go | 2 +- arbnode/inbox_tracker.go | 25 +-- arbnode/node.go | 16 +- arbstate/inbox.go | 71 +++++++- arbstate/inbox_fuzz_test.go | 2 +- cmd/nitro/init.go | 2 +- cmd/replay/main.go | 2 +- das/avail/avail.go | 213 +++++++++++++++++++++++ das/avail/blob.go | 31 ++++ das/avail/config.go | 17 ++ das/avail/da_interface.go | 13 ++ das/avail/utils.go | 12 ++ go.mod | 41 +++-- go.sum | 89 +++++++--- staker/stateless_block_validator.go | 42 +++-- system_tests/batch_poster_test.go | 2 +- system_tests/full_challenge_impl_test.go | 4 +- system_tests/staker_test.go | 2 + system_tests/state_fuzz_test.go | 2 +- 21 files changed, 538 insertions(+), 94 deletions(-) create mode 100644 das/avail/avail.go create mode 100644 das/avail/blob.go create mode 100644 das/avail/config.go create mode 100644 das/avail/da_interface.go create mode 100644 das/avail/utils.go diff --git a/Dockerfile b/Dockerfile index 96bcb22952..da82f85964 100644 --- a/Dockerfile +++ b/Dockerfile @@ -39,9 +39,9 @@ WORKDIR /workspace RUN apt-get update && apt-get install -y curl build-essential=12.9 FROM wasm-base as wasm-libs-builder - # clang / lld used by soft-float wasm +# clang / lld used by soft-float wasm RUN apt-get install -y clang=1:11.0-51+nmu5 lld=1:11.0-51+nmu5 - # pinned rust 1.65.0 +# pinned rust 1.65.0 RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs 
| sh -s -- -y --default-toolchain 1.68.2 --target x86_64-unknown-linux-gnu wasm32-unknown-unknown wasm32-wasi COPY ./Makefile ./ COPY arbitrator/arbutil arbitrator/arbutil @@ -53,7 +53,7 @@ FROM scratch as wasm-libs-export COPY --from=wasm-libs-builder /workspace/ / FROM wasm-base as wasm-bin-builder - # pinned go version +# pinned go version RUN curl -L https://golang.org/dl/go1.20.linux-`dpkg --print-architecture`.tar.gz | tar -C /usr/local -xzf - COPY ./Makefile ./go.mod ./go.sum ./ COPY ./arbcompress ./arbcompress @@ -65,6 +65,7 @@ COPY ./blsSignatures ./blsSignatures COPY ./cmd/chaininfo ./cmd/chaininfo COPY ./cmd/replay ./cmd/replay COPY ./das/dastree ./das/dastree +COPY ./das/avail ./das/avail COPY ./precompiles ./precompiles COPY ./statetransfer ./statetransfer COPY ./util ./util diff --git a/arbnode/batch_poster.go b/arbnode/batch_poster.go index c848099513..b4424307d4 100644 --- a/arbnode/batch_poster.go +++ b/arbnode/batch_poster.go @@ -37,6 +37,7 @@ import ( "github.com/offchainlabs/nitro/cmd/chaininfo" "github.com/offchainlabs/nitro/cmd/genericconf" "github.com/offchainlabs/nitro/das" + "github.com/offchainlabs/nitro/das/avail" "github.com/offchainlabs/nitro/solgen/go/bridgegen" "github.com/offchainlabs/nitro/util/arbmath" "github.com/offchainlabs/nitro/util/headerreader" @@ -68,6 +69,7 @@ type BatchPoster struct { seqInboxAddr common.Address building *buildingBatch daWriter das.DataAvailabilityServiceWriter + availDAWriter avail.DataAvailabilityWriter dataPoster *dataposter.DataPoster redisLock *redislock.Simple firstEphemeralError time.Time // first time a continuous error suspected to be ephemeral occurred @@ -210,7 +212,7 @@ var TestBatchPosterConfig = BatchPosterConfig{ L1BlockBoundBypass: time.Hour, } -func NewBatchPoster(ctx context.Context, dataPosterDB ethdb.Database, l1Reader *headerreader.HeaderReader, inbox *InboxTracker, streamer *TransactionStreamer, syncMonitor *SyncMonitor, config BatchPosterConfigFetcher, deployInfo *chaininfo.RollupAddresses, transactOpts *bind.TransactOpts, daWriter das.DataAvailabilityServiceWriter) (*BatchPoster, error) { +func NewBatchPoster(ctx context.Context, dataPosterDB ethdb.Database, l1Reader *headerreader.HeaderReader, inbox *InboxTracker, streamer *TransactionStreamer, syncMonitor *SyncMonitor, config BatchPosterConfigFetcher, deployInfo *chaininfo.RollupAddresses, transactOpts *bind.TransactOpts, daWriter das.DataAvailabilityServiceWriter, availDAWriter avail.DataAvailabilityWriter) (*BatchPoster, error) { seqInbox, err := bridgegen.NewSequencerInbox(deployInfo.SequencerInbox, l1Reader.Client()) if err != nil { return nil, err @@ -238,17 +240,18 @@ func NewBatchPoster(ctx context.Context, dataPosterDB ethdb.Database, l1Reader * return nil, err } b := &BatchPoster{ - l1Reader: l1Reader, - inbox: inbox, - streamer: streamer, - syncMonitor: syncMonitor, - config: config, - bridge: bridge, - seqInbox: seqInbox, - seqInboxABI: seqInboxABI, - seqInboxAddr: deployInfo.SequencerInbox, - daWriter: daWriter, - redisLock: redisLock, + l1Reader: l1Reader, + inbox: inbox, + streamer: streamer, + syncMonitor: syncMonitor, + config: config, + bridge: bridge, + seqInbox: seqInbox, + seqInboxABI: seqInboxABI, + seqInboxAddr: deployInfo.SequencerInbox, + daWriter: daWriter, + availDAWriter: availDAWriter, + redisLock: redisLock, } dataPosterConfigFetcher := func() *dataposter.DataPosterConfig { return &config().DataPoster @@ -893,6 +896,16 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) } } + // ideally we make 
this part of the above statment by having everything under a single unified interface (soon TM) + if b.daWriter == nil && b.availDAWriter != nil { + // Store the data on Avail and return a marhsalled BlobPointer, which gets used as the sequencerMsg + // which is later used to retrieve the data from Avail + sequencerMsg, err = b.availDAWriter.Store(ctx, sequencerMsg) + if err != nil { + return false, err + } + } + gasLimit, err := b.estimateGas(ctx, sequencerMsg, b.building.segments.delayedMsg) if err != nil { return false, err diff --git a/arbnode/delayed_seq_reorg_test.go b/arbnode/delayed_seq_reorg_test.go index a28eebb5dc..262d9a5eb7 100644 --- a/arbnode/delayed_seq_reorg_test.go +++ b/arbnode/delayed_seq_reorg_test.go @@ -19,7 +19,7 @@ func TestSequencerReorgFromDelayed(t *testing.T) { defer cancel() exec, streamer, db, _ := NewTransactionStreamerForTest(t, common.Address{}) - tracker, err := NewInboxTracker(db, streamer, nil) + tracker, err := NewInboxTracker(db, streamer, nil, nil) Require(t, err) err = streamer.Start(ctx) diff --git a/arbnode/inbox_tracker.go b/arbnode/inbox_tracker.go index c82e45fbee..74f787161f 100644 --- a/arbnode/inbox_tracker.go +++ b/arbnode/inbox_tracker.go @@ -20,31 +20,34 @@ import ( "github.com/offchainlabs/nitro/arbstate" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/broadcaster" + "github.com/offchainlabs/nitro/das/avail" "github.com/offchainlabs/nitro/staker" "github.com/offchainlabs/nitro/util/containers" ) type InboxTracker struct { - db ethdb.Database - txStreamer *TransactionStreamer - mutex sync.Mutex - validator *staker.BlockValidator - das arbstate.DataAvailabilityReader + db ethdb.Database + txStreamer *TransactionStreamer + mutex sync.Mutex + validator *staker.BlockValidator + das arbstate.DataAvailabilityReader + availDAReader avail.DataAvailabilityReader batchMetaMutex sync.Mutex batchMeta *containers.LruCache[uint64, BatchMetadata] } -func NewInboxTracker(db ethdb.Database, txStreamer *TransactionStreamer, das arbstate.DataAvailabilityReader) (*InboxTracker, error) { +func NewInboxTracker(db ethdb.Database, txStreamer *TransactionStreamer, das arbstate.DataAvailabilityReader, availDAReader avail.DataAvailabilityReader) (*InboxTracker, error) { // We support a nil txStreamer for the pruning code if txStreamer != nil && txStreamer.chainConfig.ArbitrumChainParams.DataAvailabilityCommittee && das == nil { return nil, errors.New("data availability service required but unconfigured") } tracker := &InboxTracker{ - db: db, - txStreamer: txStreamer, - das: das, - batchMeta: containers.NewLruCache[uint64, BatchMetadata](1000), + db: db, + txStreamer: txStreamer, + das: das, + availDAReader: availDAReader, + batchMeta: containers.NewLruCache[uint64, BatchMetadata](1000), } return tracker, nil } @@ -595,7 +598,7 @@ func (t *InboxTracker) AddSequencerBatches(ctx context.Context, client arbutil.L ctx: ctx, client: client, } - multiplexer := arbstate.NewInboxMultiplexer(backend, prevbatchmeta.DelayedMessageCount, t.das, arbstate.KeysetValidate) + multiplexer := arbstate.NewInboxMultiplexer(backend, prevbatchmeta.DelayedMessageCount, t.das, t.availDAReader, arbstate.KeysetValidate) batchMessageCounts := make(map[uint64]arbutil.MessageIndex) currentpos := prevbatchmeta.MessageCount + 1 for { diff --git a/arbnode/node.go b/arbnode/node.go index e671a191fb..582884e08a 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -38,6 +38,7 @@ import ( "github.com/offchainlabs/nitro/broadcaster" "github.com/offchainlabs/nitro/cmd/chaininfo" 
"github.com/offchainlabs/nitro/das" + "github.com/offchainlabs/nitro/das/avail" "github.com/offchainlabs/nitro/solgen/go/bridgegen" "github.com/offchainlabs/nitro/solgen/go/challengegen" "github.com/offchainlabs/nitro/solgen/go/ospgen" @@ -298,6 +299,7 @@ type Config struct { Staker staker.L1ValidatorConfig `koanf:"staker" reload:"hot"` SeqCoordinator SeqCoordinatorConfig `koanf:"seq-coordinator"` DataAvailability das.DataAvailabilityConfig `koanf:"data-availability"` + Avail avail.DAConfig `koanf:"avail-cfg"` SyncMonitor SyncMonitorConfig `koanf:"sync-monitor"` Dangerous DangerousConfig `koanf:"dangerous"` Caching execution.CachingConfig `koanf:"caching"` @@ -741,6 +743,8 @@ func createNodeImpl( var daWriter das.DataAvailabilityServiceWriter var daReader das.DataAvailabilityServiceReader var dasLifecycleManager *das.LifecycleManager + var availDAWriter avail.DataAvailabilityWriter + var availDAReader avail.DataAvailabilityReader if config.DataAvailability.Enable { if config.BatchPoster.Enable { daWriter, daReader, dasLifecycleManager, err = das.CreateBatchPosterDAS(ctx, &config.DataAvailability, dataSigner, l1client, deployInfo.SequencerInbox) @@ -764,9 +768,16 @@ func createNodeImpl( } } else if l2BlockChain.Config().ArbitrumChainParams.DataAvailabilityCommittee { return nil, errors.New("a data availability service is required for this chain, but it was not configured") + } else if config.Avail.Enable { + availService, err := avail.NewAvailDA(config.Avail) + if err != nil { + return nil, err + } + availDAWriter = availService + availDAReader = availService } - inboxTracker, err := NewInboxTracker(arbDb, txStreamer, daReader) + inboxTracker, err := NewInboxTracker(arbDb, txStreamer, daReader, availDAReader) if err != nil { return nil, err } @@ -785,6 +796,7 @@ func createNodeImpl( exec.Recorder, rawdb.NewTable(arbDb, storage.BlockValidatorPrefix), daReader, + availDAReader, func() *staker.BlockValidatorConfig { return &configFetcher.Get().BlockValidator }, stack, ) @@ -890,7 +902,7 @@ func createNodeImpl( if txOptsBatchPoster == nil { return nil, errors.New("batchposter, but no TxOpts") } - batchPoster, err = NewBatchPoster(ctx, rawdb.NewTable(arbDb, storage.BatchPosterPrefix), l1Reader, inboxTracker, txStreamer, syncMonitor, func() *BatchPosterConfig { return &configFetcher.Get().BatchPoster }, deployInfo, txOptsBatchPoster, daWriter) + batchPoster, err = NewBatchPoster(ctx, rawdb.NewTable(arbDb, storage.BatchPosterPrefix), l1Reader, inboxTracker, txStreamer, syncMonitor, func() *BatchPosterConfig { return &configFetcher.Get().BatchPoster }, deployInfo, txOptsBatchPoster, daWriter, availDAWriter) if err != nil { return nil, err } diff --git a/arbstate/inbox.go b/arbstate/inbox.go index 3995bcf308..f6488bda23 100644 --- a/arbstate/inbox.go +++ b/arbstate/inbox.go @@ -6,6 +6,7 @@ package arbstate import ( "bytes" "context" + "crypto/sha256" "encoding/binary" "errors" "io" @@ -20,6 +21,7 @@ import ( "github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/arbos/l1pricing" "github.com/offchainlabs/nitro/arbutil" + "github.com/offchainlabs/nitro/das/avail" "github.com/offchainlabs/nitro/das/dastree" "github.com/offchainlabs/nitro/zeroheavy" ) @@ -50,7 +52,7 @@ const maxZeroheavyDecompressedLen = 101*MaxDecompressedLen/100 + 64 const MaxSegmentsPerSequencerMessage = 100 * 1024 const MinLifetimeSecondsForDataAvailabilityCert = 7 * 24 * 60 * 60 // one week -func parseSequencerMessage(ctx context.Context, batchNum uint64, data []byte, dasReader DataAvailabilityReader, 
keysetValidationMode KeysetValidationMode) (*sequencerMessage, error) { +func parseSequencerMessage(ctx context.Context, batchNum uint64, data []byte, dasReader DataAvailabilityReader, availDAReader avail.DataAvailabilityReader, keysetValidationMode KeysetValidationMode) (*sequencerMessage, error) { if len(data) < 40 { return nil, errors.New("sequencer message missing L1 header") } @@ -79,6 +81,21 @@ func parseSequencerMessage(ctx context.Context, batchNum uint64, data []byte, da } } + if len(payload) > 0 && avail.IsAvailMessageHeaderByte(payload[0]) { + if availDAReader == nil { + log.Error("No Avail Reader configured, but sequencer message found with Avail header") + } else { + var err error + payload, err = RecoverPayloadFromAvailBatch(ctx, batchNum, data, availDAReader, nil) + if err != nil { + return nil, err + } + if payload == nil { + return parsedMsg, nil + } + } + } + if len(payload) > 0 && IsZeroheavyEncodedHeaderByte(payload[0]) { pl, err := io.ReadAll(io.LimitReader(zeroheavy.NewZeroheavyDecoder(bytes.NewReader(payload[1:])), int64(maxZeroheavyDecompressedLen))) if err != nil { @@ -124,6 +141,52 @@ func parseSequencerMessage(ctx context.Context, batchNum uint64, data []byte, da return parsedMsg, nil } +func RecoverPayloadFromAvailBatch(ctx context.Context, batchNum uint64, sequencerMsg []byte, availDAReader avail.DataAvailabilityReader, preimages map[arbutil.PreimageType]map[common.Hash][]byte) ([]byte, error) { + var shaPreimages map[common.Hash][]byte + if preimages != nil { + if preimages[arbutil.Sha2_256PreimageType] == nil { + preimages[arbutil.Sha2_256PreimageType] = make(map[common.Hash][]byte) + } + shaPreimages = preimages[arbutil.Sha2_256PreimageType] + } + + buf := bytes.NewBuffer(sequencerMsg[40:]) + + header, err := buf.ReadByte() + if err != nil { + log.Error("Couldn't deserialize Avail header byte", "err", err) + return nil, err + } + if !avail.IsAvailMessageHeaderByte(header) { + return nil, errors.New("tried to deserialize a message that doesn't have the Avail header") + } + + blobPointer := avail.BlobPointer{} + blobPointer.UnmarshalFromBinary(buf.Bytes()) + if err != nil { + log.Error("Couldn't unmarshal Avail blob pointer", "err", err) + return nil, err + } + + log.Info("Attempting to fetch data for", "batchNum", batchNum, "availBlockHash", blobPointer.BlockHash) + payload, err := availDAReader.Read(blobPointer) + if err != nil { + log.Error("Failed to resolve blob pointer from avail", "err", err) + return nil, err + } + + log.Info("Succesfully fetched payload from Avail", "batchNum", batchNum, "availBlockHash", blobPointer.BlockHash) + + log.Info("Recording Sha256 preimage for Avail data") + + shaDataHash := sha256.New() + shaDataHash.Write(payload) + dataHash := shaDataHash.Sum([]byte{}) + shaPreimages[common.BytesToHash(dataHash)] = payload + + return payload, nil +} + func RecoverPayloadFromDasBatch( ctx context.Context, batchNum uint64, @@ -242,6 +305,7 @@ type inboxMultiplexer struct { backend InboxBackend delayedMessagesRead uint64 dasReader DataAvailabilityReader + availDAReader avail.DataAvailabilityReader cachedSequencerMessage *sequencerMessage cachedSequencerMessageNum uint64 cachedSegmentNum uint64 @@ -251,11 +315,12 @@ type inboxMultiplexer struct { keysetValidationMode KeysetValidationMode } -func NewInboxMultiplexer(backend InboxBackend, delayedMessagesRead uint64, dasReader DataAvailabilityReader, keysetValidationMode KeysetValidationMode) arbostypes.InboxMultiplexer { +func NewInboxMultiplexer(backend InboxBackend, delayedMessagesRead 
uint64, dasReader DataAvailabilityReader, availDAReader avail.DataAvailabilityReader, keysetValidationMode KeysetValidationMode) arbostypes.InboxMultiplexer { return &inboxMultiplexer{ backend: backend, delayedMessagesRead: delayedMessagesRead, dasReader: dasReader, + availDAReader: availDAReader, keysetValidationMode: keysetValidationMode, } } @@ -276,7 +341,7 @@ func (r *inboxMultiplexer) Pop(ctx context.Context) (*arbostypes.MessageWithMeta } r.cachedSequencerMessageNum = r.backend.GetSequencerInboxPosition() var err error - r.cachedSequencerMessage, err = parseSequencerMessage(ctx, r.cachedSequencerMessageNum, bytes, r.dasReader, r.keysetValidationMode) + r.cachedSequencerMessage, err = parseSequencerMessage(ctx, r.cachedSequencerMessageNum, bytes, r.dasReader, r.availDAReader, r.keysetValidationMode) if err != nil { return nil, err } diff --git a/arbstate/inbox_fuzz_test.go b/arbstate/inbox_fuzz_test.go index fcb80cbd73..2859e3280e 100644 --- a/arbstate/inbox_fuzz_test.go +++ b/arbstate/inbox_fuzz_test.go @@ -66,7 +66,7 @@ func FuzzInboxMultiplexer(f *testing.F) { delayedMessage: delayedMsg, positionWithinMessage: 0, } - multiplexer := NewInboxMultiplexer(backend, 0, nil, KeysetValidate) + multiplexer := NewInboxMultiplexer(backend, 0, nil, nil, KeysetValidate) _, err := multiplexer.Pop(context.TODO()) if err != nil { panic(err) diff --git a/cmd/nitro/init.go b/cmd/nitro/init.go index bdba7c1210..227a7751b1 100644 --- a/cmd/nitro/init.go +++ b/cmd/nitro/init.go @@ -376,7 +376,7 @@ func findImportantRoots(ctx context.Context, chainDb ethdb.Database, stack *node return nil, fmt.Errorf("failed to get finalized block: %w", err) } l1BlockNum := l1Block.NumberU64() - tracker, err := arbnode.NewInboxTracker(arbDb, nil, nil) + tracker, err := arbnode.NewInboxTracker(arbDb, nil, nil, nil) if err != nil { return nil, err } diff --git a/cmd/replay/main.go b/cmd/replay/main.go index 2fb13ceed8..2fdbd0ccbe 100644 --- a/cmd/replay/main.go +++ b/cmd/replay/main.go @@ -180,7 +180,7 @@ func main() { if backend.GetPositionWithinMessage() > 0 { keysetValidationMode = arbstate.KeysetDontValidate } - inboxMultiplexer := arbstate.NewInboxMultiplexer(backend, delayedMessagesRead, dasReader, keysetValidationMode) + inboxMultiplexer := arbstate.NewInboxMultiplexer(backend, delayedMessagesRead, dasReader, nil, keysetValidationMode) ctx := context.Background() message, err := inboxMultiplexer.Pop(ctx) if err != nil { diff --git a/das/avail/avail.go b/das/avail/avail.go new file mode 100644 index 0000000000..84045a0eee --- /dev/null +++ b/das/avail/avail.go @@ -0,0 +1,213 @@ +package avail + +import ( + "bytes" + "context" + "encoding/binary" + "errors" + "fmt" + + "time" + + gsrpc "github.com/centrifuge/go-substrate-rpc-client/v4" + "github.com/centrifuge/go-substrate-rpc-client/v4/signature" + gsrpc_types "github.com/centrifuge/go-substrate-rpc-client/v4/types" + "github.com/centrifuge/go-substrate-rpc-client/v4/types/codec" + "github.com/ethereum/go-ethereum/log" + "github.com/vedhavyas/go-subkey" +) + +// AvailMessageHeaderFlag indicates that this data is a Blob Pointer +// which will be used to retrieve data from Avail +const AvailMessageHeaderFlag byte = 0x0a + +func IsAvailMessageHeaderByte(header byte) bool { + return (AvailMessageHeaderFlag & header) > 0 +} + +type AvailDA struct { + cfg DAConfig + api *gsrpc.SubstrateAPI +} + +func NewAvailDA(cfg DAConfig) (*AvailDA, error) { + //Creating new substrate api + api, err := gsrpc.NewSubstrateAPI(cfg.ApiURL) + if err != nil { + return nil, err + } + + 
return &AvailDA{ + cfg: cfg, + api: api, + }, nil +} + +func (a *AvailDA) Store(ctx context.Context, message []byte) ([]byte, error) { + + Seed := a.cfg.Seed + AppID := a.cfg.AppID + + meta, err := a.api.RPC.State.GetMetadataLatest() + if err != nil { + log.Warn("cannot get metadata: error:%v", err) + return nil, err + } + + appID := 0 + // if app id is greater than 0 then it must be created before submitting data + if AppID != 0 { + appID = AppID + } + + c, err := gsrpc_types.NewCall(meta, "DataAvailability.submit_data", gsrpc_types.NewBytes(message)) + if err != nil { + log.Warn("cannot create new call: error:%v", err) + return nil, err + } + + // Create the extrinsic + ext := gsrpc_types.NewExtrinsic(c) + + genesisHash, err := a.api.RPC.Chain.GetBlockHash(0) + if err != nil { + log.Warn("cannot get block hash: error:%v", err) + return nil, err + } + + rv, err := a.api.RPC.State.GetRuntimeVersionLatest() + if err != nil { + log.Warn("cannot get runtime version: error:%v", err) + return nil, err + } + + keyringPair, err := signature.KeyringPairFromSecret(Seed, 42) + if err != nil { + log.Warn("cannot create LeyPair: error:%v", err) + return nil, err + } + + key, err := gsrpc_types.CreateStorageKey(meta, "System", "Account", keyringPair.PublicKey) + if err != nil { + log.Warn("cannot create storage key: error:%v", err) + return nil, err + } + + var accountInfo gsrpc_types.AccountInfo + ok, err := a.api.RPC.State.GetStorageLatest(key, &accountInfo) + if err != nil || !ok { + log.Warn("cannot get latest storage: error:%v", err) + return nil, err + } + + nonce := GetAccountNonce(uint32(accountInfo.Nonce)) + //fmt.Println("Nonce from localDatabase:", nonce, " :::::::: from acountInfo:", accountInfo.Nonce) + o := gsrpc_types.SignatureOptions{ + BlockHash: genesisHash, + Era: gsrpc_types.ExtrinsicEra{IsMortalEra: false}, + GenesisHash: genesisHash, + Nonce: gsrpc_types.NewUCompactFromUInt(uint64(nonce)), + SpecVersion: rv.SpecVersion, + Tip: gsrpc_types.NewUCompactFromUInt(0), + AppID: gsrpc_types.NewUCompactFromUInt(uint64(appID)), + TransactionVersion: rv.TransactionVersion, + } + + // Sign the transaction using Alice's default account + err = ext.Sign(keyringPair, o) + if err != nil { + log.Warn("cannot sign: error:%v", err) + return nil, err + } + + // Send the extrinsic + sub, err := a.api.RPC.Author.SubmitAndWatchExtrinsic(ext) + if err != nil { + log.Warn("cannot submit extrinsic: error:%v", err) + return nil, err + } + + log.Info("Tx batch is submitted to Avail", "length", len(message), "address", keyringPair.Address, "appID", appID) + + defer sub.Unsubscribe() + timeout := time.After(100 * time.Second) + var blobPointer BlobPointer + for { + select { + case status := <-sub.Chan(): + if status.IsFinalized { + blobPointer = BlobPointer{BlockHash: string(status.AsFinalized.Hex()), Sender: keyringPair.Address, Nonce: o.Nonce.Int64()} + break + } + case <-timeout: + return nil, errors.New("Timitout before getting finalized status") + } + } + + log.Info("Sucesfully included in block data to Avail", "BlobPointer:", blobPointer) + + blobPointerData, err := blobPointer.MarshalToBinary() + if err != nil { + log.Warn("BlobPointer MashalBinary error", "err", err) + return nil, err + } + + buf := new(bytes.Buffer) + err = binary.Write(buf, binary.BigEndian, AvailMessageHeaderFlag) + if err != nil { + log.Warn("batch type byte serialization failed", "err", err) + return nil, err + } + + err = binary.Write(buf, binary.BigEndian, blobPointerData) + if err != nil { + log.Warn("blob pointer data 
serialization failed", "err", err) + return nil, err + } + + serializedBlobPointerData := buf.Bytes() + + return serializedBlobPointerData, nil +} + +func (a *AvailDA) Read(blobPointer BlobPointer) ([]byte, error) { + log.Info("Requesting data from Avail", "BlobPointer", blobPointer) + + //Intitializing variables + Hash := blobPointer.BlockHash + Address := blobPointer.Sender + Nonce := blobPointer.Nonce + + // Converting this string type into gsrpc_types.hash type + blk_hash, err := gsrpc_types.NewHashFromHexString(Hash) + if err != nil { + return nil, fmt.Errorf("unable to convert string hash into types.hash, error:%v", err) + } + + // Fetching block based on block hash + avail_blk, err := a.api.RPC.Chain.GetBlock(blk_hash) + if err != nil { + return []byte{}, fmt.Errorf("cannot get block for hash:%v and getting error:%v", Hash, err) + } + + //Extracting the required extrinsic according to the reference + for _, ext := range avail_blk.Block.Extrinsics { + //Extracting sender address for extrinsic + ext_Addr, err := subkey.SS58Address(ext.Signature.Signer.AsID.ToBytes(), 42) + if err != nil { + log.Error("unable to get sender address from extrinsic", "err", err) + } + if ext_Addr == Address && ext.Signature.Nonce.Int64() == Nonce { + args := ext.Method.Args + var data []byte + err = codec.Decode(args, &data) + if err != nil { + return []byte{}, fmt.Errorf("unable to decode the extrinsic data by address: %v with nonce: %v", Address, Nonce) + } + return data, nil + } + } + + log.Info("Succesfully fetched data from Avail") + return nil, fmt.Errorf("unable to find any extrinsic for this blobPointer:%+v", blobPointer) +} diff --git a/das/avail/blob.go b/das/avail/blob.go new file mode 100644 index 0000000000..69790ad4c3 --- /dev/null +++ b/das/avail/blob.go @@ -0,0 +1,31 @@ +package avail + +import ( + "encoding/json" + "fmt" +) + +// BlobPointer contains the reference to the data blob on Avail +type BlobPointer struct { + BlockHash string // Hash for block on avail chain + Sender string // sender address to filter extrinsic out sepecifically for this address + Nonce int64 // nonce to filter specific extrinsic +} + +// MarshalBinary encodes the BlobPointer to binary +func (b *BlobPointer) MarshalToBinary() ([]byte, error) { + blobPointerData, err := json.Marshal(b) + if err != nil { + return []byte{}, fmt.Errorf("unable to covert the avail block referece into array of bytes and getting error:%v", err) + } + return blobPointerData, nil +} + +// UnmarshalBinary decodes the binary to BlobPointer +func (b *BlobPointer) UnmarshalFromBinary(blobPointerData []byte) error { + err := json.Unmarshal(blobPointerData, b) + if err != nil { + return fmt.Errorf("unable to convert avail_Blk_Ref bytes to AvailBlockRef Struct and getting error:%v", err) + } + return nil +} diff --git a/das/avail/config.go b/das/avail/config.go new file mode 100644 index 0000000000..378206ecfc --- /dev/null +++ b/das/avail/config.go @@ -0,0 +1,17 @@ +package avail + +type DAConfig struct { + Enable bool `koanf:"enable"` + ApiURL string `koanf:"api_url"` + Seed string `koanf:"seed"` + AppID int `koanf:"app_id"` +} + +func NewDAConfig(api_url string, seed string, app_id int) (*DAConfig, error) { + return &DAConfig{ + Enable: true, + ApiURL: api_url, + Seed: seed, + AppID: app_id, + }, nil +} diff --git a/das/avail/da_interface.go b/das/avail/da_interface.go new file mode 100644 index 0000000000..75452dbddc --- /dev/null +++ b/das/avail/da_interface.go @@ -0,0 +1,13 @@ +package avail + +import ( + "context" +) + +type 
DataAvailabilityWriter interface { + Store(context.Context, []byte) ([]byte, error) +} + +type DataAvailabilityReader interface { + Read(BlobPointer) ([]byte, error) +} diff --git a/das/avail/utils.go b/das/avail/utils.go new file mode 100644 index 0000000000..d54447eb72 --- /dev/null +++ b/das/avail/utils.go @@ -0,0 +1,12 @@ +package avail + +var localNonce uint32 = 0 + +func GetAccountNonce(accountNonce uint32) uint32 { + if accountNonce > localNonce { + localNonce = accountNonce + return accountNonce + } + localNonce++ + return localNonce +} diff --git a/go.mod b/go.mod index 4bc28b950c..6c79b475f1 100644 --- a/go.mod +++ b/go.mod @@ -6,6 +6,8 @@ replace github.com/VictoriaMetrics/fastcache => ./fastcache replace github.com/ethereum/go-ethereum => ./go-ethereum +replace github.com/centrifuge/go-substrate-rpc-client/v4 => github.com/availproject/go-substrate-rpc-client/v4 v4.0.12-avail-1.4.0-rc1-5e286e3 + require ( github.com/alicebob/miniredis/v2 v2.21.0 github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 @@ -16,6 +18,8 @@ require ( github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.10 github.com/aws/aws-sdk-go-v2/service/s3 v1.26.9 github.com/cavaliergopher/grab/v3 v3.0.1 + github.com/centrifuge/go-substrate-rpc-client/v4 v4.1.0 + github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811 github.com/codeclysm/extract/v3 v3.0.2 github.com/dgraph-io/badger/v3 v3.2103.2 github.com/enescakir/emoji v1.0.0 @@ -34,9 +38,10 @@ require ( github.com/multiformats/go-multihash v0.2.1 github.com/rivo/tview v0.0.0-20230814110005-ccc2c8119703 github.com/spf13/pflag v1.0.5 + github.com/vedhavyas/go-subkey v1.0.3 github.com/wealdtech/go-merkletree v1.0.0 - golang.org/x/term v0.6.0 - golang.org/x/tools v0.7.0 + golang.org/x/term v0.8.0 + golang.org/x/tools v0.9.1 gopkg.in/natefinch/lumberjack.v2 v2.0.0 ) @@ -45,6 +50,7 @@ require github.com/gofrs/flock v0.8.1 // indirect require ( bazil.org/fuse v0.0.0-20200117225306-7b5117fecadc // indirect github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 // indirect + github.com/ChainSafe/go-schnorrkel v1.0.0 // indirect github.com/DataDog/zstd v1.5.2 // indirect github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect github.com/alexbrainman/goissue34681 v0.0.0-20191006012335-3fc7a47baff5 // indirect @@ -73,21 +79,24 @@ require ( github.com/cespare/xxhash v1.1.0 // indirect github.com/cockroachdb/errors v1.9.1 // indirect github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect - github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811 // indirect github.com/cockroachdb/redact v1.1.3 // indirect github.com/containerd/cgroups v1.1.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect + github.com/cosmos/go-bip39 v1.0.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3 // indirect github.com/cskr/pubsub v1.0.2 // indirect github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect + github.com/deckarep/golang-set v1.8.0 // indirect github.com/deckarep/golang-set/v2 v2.1.0 // indirect + github.com/decred/base58 v1.0.4 // indirect + github.com/decred/dcrd/crypto/blake256 v1.0.0 // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect github.com/dgraph-io/badger v1.6.2 // indirect github.com/dgraph-io/ristretto v0.1.0 // indirect github.com/dlclark/regexp2 v1.7.0 // indirect github.com/docker/go-units v0.5.0 // indirect - github.com/dop251/goja 
v0.0.0-20230122112309-96b1610dd4f7 // indirect + github.com/dop251/goja v0.0.0-20230605162241-28ee0ee714f3 // indirect github.com/dustin/go-humanize v1.0.0 // indirect github.com/elastic/gosigar v0.14.2 // indirect github.com/emirpasic/gods v1.18.1 // indirect @@ -115,6 +124,8 @@ require ( github.com/gorilla/mux v1.8.0 // indirect github.com/graph-gophers/graphql-go v1.3.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect + github.com/gtank/merlin v0.1.1 // indirect + github.com/gtank/ristretto255 v0.1.2 // indirect github.com/h2non/filetype v1.0.6 // indirect github.com/hannahhoward/go-pubsub v0.0.0-20200423002714-8d62886cc36e // indirect github.com/hashicorp/errwrap v1.1.0 // indirect @@ -200,6 +211,7 @@ require ( github.com/miekg/dns v1.1.53 // indirect github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect + github.com/mimoo/StrobeGo v0.0.0-20220103164710-9a04d6ca976b // indirect github.com/minio/sha256-simd v1.0.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect @@ -218,6 +230,7 @@ require ( github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/openzipkin/zipkin-go v0.4.0 // indirect github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect + github.com/pierrec/xxHash v0.1.5 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/polydawn/refmt v0.89.0 // indirect github.com/prometheus/client_golang v1.14.0 // indirect @@ -236,7 +249,7 @@ require ( github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/samber/lo v1.36.0 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect - github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa // indirect + github.com/urfave/cli/v2 v2.24.1 // indirect github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc // indirect github.com/whyrusleeping/cbor-gen v0.0.0-20230126041949-52956bd4c9aa // indirect github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f // indirect @@ -262,7 +275,7 @@ require ( go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.24.0 // indirect go4.org v0.0.0-20200411211856-f5505b9728dd // indirect - golang.org/x/exp v0.0.0-20230321023759-10a507213a29 // indirect + golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc // indirect golang.org/x/mod v0.10.0 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f // indirect @@ -298,7 +311,7 @@ require ( github.com/hashicorp/go-bexpr v0.1.10 // indirect github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect github.com/holiman/bloomfilter/v2 v2.0.3 // indirect - github.com/holiman/uint256 v1.2.2-0.20230321075855-87b91420868c + github.com/holiman/uint256 v1.2.3 github.com/huin/goupnp v1.1.0 // indirect github.com/jackpal/go-nat-pmp v1.0.2 // indirect github.com/mattn/go-colorable v0.1.13 // indirect @@ -307,18 +320,18 @@ require ( github.com/mitchellh/mapstructure v1.4.2 github.com/mitchellh/pointerstructure v1.2.0 // indirect github.com/olekukonko/tablewriter v0.0.5 // indirect - github.com/rs/cors v1.7.0 // indirect + github.com/rs/cors v1.8.2 // indirect github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible // indirect github.com/status-im/keycard-go v0.2.0 // indirect github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 github.com/tklauser/go-sysconf v0.3.5 // indirect 
github.com/tklauser/numcpus v0.2.2 // indirect github.com/tyler-smith/go-bip39 v1.1.0 // indirect - golang.org/x/crypto v0.7.0 - golang.org/x/net v0.8.0 // indirect - golang.org/x/sync v0.1.0 // indirect - golang.org/x/sys v0.7.0 - golang.org/x/text v0.8.0 // indirect - golang.org/x/time v0.0.0-20220922220347-f3bd1da661af // indirect + golang.org/x/crypto v0.9.0 + golang.org/x/net v0.10.0 // indirect + golang.org/x/sync v0.3.0 // indirect + golang.org/x/sys v0.9.0 + golang.org/x/text v0.9.0 // indirect + golang.org/x/time v0.3.0 // indirect gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect ) diff --git a/go.sum b/go.sum index db3935001a..8e20e5a324 100644 --- a/go.sum +++ b/go.sum @@ -45,8 +45,10 @@ github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOv github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 h1:cTp8I5+VIoKjsnZuH8vjyaysT/ses3EvZeaV/1UkF2M= github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v1.2.0 h1:Rt8g24XnyGTyglgET/PRUNlrUeu9F5L+7FilkXfZgs0= +github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/ChainSafe/go-schnorrkel v1.0.0 h1:3aDA67lAykLaG1y3AOjs88dMxC88PgUuHRrLeDnvGIM= +github.com/ChainSafe/go-schnorrkel v1.0.0/go.mod h1:dpzHYVxLZcp8pjlV+O+UR8K0Hp/z7vcchBSbMBEhCw4= github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53/go.mod h1:+3IMCy2vIlbG1XG/0ggNQv0SvxCAIpPM5b1nCz56Xno= github.com/CloudyKit/jet/v3 v3.0.0/go.mod h1:HKQPgSJmdK8hdoAbKUUWajkHyHo4RaU5rMdUywE7VMo= github.com/DataDog/zstd v1.5.2 h1:vUG4lAyuPCXO0TLbXvPv7EB7cNK1QV/luu55UHLrrn8= @@ -97,6 +99,8 @@ github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5 github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= +github.com/availproject/go-substrate-rpc-client/v4 v4.0.12-avail-1.4.0-rc1-5e286e3 h1:9bPK0/Vd+uOQul3vEBSemRXO+rwqi+UXDAvFzNUlG8A= +github.com/availproject/go-substrate-rpc-client/v4 v4.0.12-avail-1.4.0-rc1-5e286e3/go.mod h1:5g1oM4Zu3BOaLpsKQ+O8PAv2kNuq+kPcA1VzFbsSqxE= github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= @@ -175,6 +179,7 @@ github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufo github.com/btcsuite/btcutil v0.0.0-20190207003914-4c204d697803/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= github.com/btcsuite/btcutil v1.0.2/go.mod h1:j9HUFwoQRsZL3V4n+qG+CUnEGHOarIxfC3Le2Yhbcts= +github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce h1:YtWJF7RHm2pYCvA5t0RPmAaLUhREsKuKd+SLhxFbFeQ= github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod 
h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= github.com/btcsuite/goleveldb v1.0.0/go.mod h1:QiK9vBlgftBg6rWQIj6wFzbPfRjiykIEhBH4obrXJ/I= @@ -202,8 +207,11 @@ github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cheekybits/genny v1.0.0/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/logex v1.2.0/go.mod h1:9+9sk7u7pGNWYMkh0hdiL++6OeibzJccyQU4p4MedaY= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/readline v1.5.0/go.mod h1:x22KAscuvRqlLoK9CsoYsmxoXZMMFVyOl86cAH8qUic= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/chzyer/test v0.0.0-20210722231415-061457976a23/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= @@ -244,6 +252,9 @@ github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+ github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cosmos/go-bip39 v0.0.0-20180819234021-555e2067c45d/go.mod h1:tSxLoYXyBmiFeKpvmq4dzayMdCjCnu8uqmCysIGBT2Y= +github.com/cosmos/go-bip39 v1.0.0 h1:pcomnQdrdH22njcAatO0yWojsUnCO3y2tNoV1cb6hHY= +github.com/cosmos/go-bip39 v1.0.0/go.mod h1:RNJv0H/pOIVgxw6KS7QeX2a0Uo0aKUlfhZ4xuwvCdJw= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= @@ -262,9 +273,14 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davidlazar/go-crypto v0.0.0-20170701192655-dcfb0a7ac018/go.mod h1:rQYf4tfk5sSwFsnDg3qYaBxSjsD9S8+59vW0dKUgme4= github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU= github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U= +github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsPppp4= +github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS383rP6+o6qqo= github.com/deckarep/golang-set/v2 v2.1.0 h1:g47V4Or+DUdzbs8FxCCmgb6VYd+ptPAngjM6dtGktsI= github.com/deckarep/golang-set/v2 v2.1.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4= +github.com/decred/base58 v1.0.4 h1:QJC6B0E0rXOPA8U/kw2rP+qiRJsUaE2Er+pYb3siUeA= +github.com/decred/base58 v1.0.4/go.mod h1:jJswKPEdvpFpvf7dsDvFZyLT22xZ9lWqEByX38oGd9E= github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0= 
+github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 h1:HbphB4TFFXpv7MNrT52FGrrgVXF1owhMVTHFZIlnvd4= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0/go.mod h1:DZGJHZMqrU4JJqFAWUS2UO1+lbSKsdiOoYi9Zzey7Fc= github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218= @@ -292,8 +308,8 @@ github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDD github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/dop251/goja v0.0.0-20211022113120-dc8c55024d06/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk= -github.com/dop251/goja v0.0.0-20230122112309-96b1610dd4f7 h1:kgvzE5wLsLa7XKfV85VZl40QXaMCaeFtHpPwJ8fhotY= -github.com/dop251/goja v0.0.0-20230122112309-96b1610dd4f7/go.mod h1:yRkwfj0CBpOGre+TwBsqPV0IH0Pk73e4PXJOeNDboGs= +github.com/dop251/goja v0.0.0-20230605162241-28ee0ee714f3 h1:+3HCtB74++ClLy8GgjUQYeC8R4ILzVcIe8+5edAJJnE= +github.com/dop251/goja v0.0.0-20230605162241-28ee0ee714f3/go.mod h1:QMWlm50DNe14hD7t24KEqZuUdC9sOTy8W6XbCU1mlw4= github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7/go.mod h1:hn7BA7c8pLvoGndExHudxTDKZ84Pyvv+90pbBjbTz0Y= github.com/dop251/goja_nodejs v0.0.0-20211022123610-8dd9abb0616d/go.mod h1:DngW8aVqWbuLRMHItjPUyqdj+HWPvnQe8V8y1nDpIbM= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= @@ -508,6 +524,7 @@ github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa h1:Q75Upo5UN4JbPFURXZ8nLKYUvF85dyFRop/vQ0Rv+64= github.com/google/gopacket v1.1.17/go.mod h1:UdDNZ1OO62aGYVnPhxT1U6aI7ukYtA/kB8vaU0diBUM= github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= @@ -520,6 +537,7 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20230207041349-798e818bf904/go.mod h1:uglQLonpP8qtYCYyzA+8c/9qtqgA3qsXGYqCPKARAFg= github.com/google/pprof v0.0.0-20230405160723-4a4c7d95572b h1:Qcx5LM0fSiks9uCyFZwDBUasd3lxd1RM0GYpL+Li5o4= github.com/google/pprof v0.0.0-20230405160723-4a4c7d95572b/go.mod h1:79YE0hCXdHag9sBkw2o+N/YnZtTkXi0UT9Nnixa5eYk= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= @@ -559,6 +577,11 @@ github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 h1:BZHcxBETFHIdVyhyEfOvn/RdU/QGdLI4y34qQGjGWO0= github.com/grpc-ecosystem/grpc-gateway/v2 
v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= +github.com/gtank/merlin v0.1.1-0.20191105220539-8318aed1a79f/go.mod h1:T86dnYJhcGOh5BjZFCJWTDeTK7XW8uE+E21Cy/bIQ+s= +github.com/gtank/merlin v0.1.1 h1:eQ90iG7K9pOhtereWsmyRJ6RAwcP4tHTDBHXNg+u5is= +github.com/gtank/merlin v0.1.1/go.mod h1:T86dnYJhcGOh5BjZFCJWTDeTK7XW8uE+E21Cy/bIQ+s= +github.com/gtank/ristretto255 v0.1.2 h1:JEqUCPA1NvLq5DwYtuzigd7ss8fwbYay9fi4/5uMzcc= +github.com/gtank/ristretto255 v0.1.2/go.mod h1:Ph5OpO6c7xKUGROZfWVLiJf9icMDwUeIvY4OmlYW69o= github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU= github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48= github.com/h2non/filetype v1.0.6 h1:g84/+gdkAT1hnYO+tHpCLoikm13Ju55OkN4KCb1uGEQ= @@ -613,8 +636,8 @@ github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKe github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao= github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA= -github.com/holiman/uint256 v1.2.2-0.20230321075855-87b91420868c h1:DZfsyhDK1hnSS5lH8l+JggqzEleHteTYfutAiVlSUM8= -github.com/holiman/uint256 v1.2.2-0.20230321075855-87b91420868c/go.mod h1:SC8Ryt4n+UBbPbIBKaG9zbbDlp4jOru9xFZmPzLUTxw= +github.com/holiman/uint256 v1.2.3 h1:K8UWO1HUJpRMXBxbmaY1Y8IAMZC/RsKB+ArEnnK4l5o= +github.com/holiman/uint256 v1.2.3/go.mod h1:SC8Ryt4n+UBbPbIBKaG9zbbDlp4jOru9xFZmPzLUTxw= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc= @@ -623,6 +646,7 @@ github.com/huin/goupnp v1.1.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFck github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= github.com/hydrogen18/memlistener v0.0.0-20200120041712-dcc25e7acd91/go.mod h1:qEIFzExnS6016fRpRfxrExeVn2gbClQA99gQhnIcdhE= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20220319035150-800ac71e25c2/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= github.com/imkira/go-interpol v1.1.0/go.mod h1:z0h2/2T3XF8kyEPpRgJ3kmNv+C43p+I/CoI+jC3w2iA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= @@ -1208,6 +1232,9 @@ github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUM github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b/go.mod h1:lxPUiZwKoFL8DUUmalo2yJJUCxbPKtm8OKfqr2/FTNU= github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc h1:PTfri+PuQmWDqERdnNMiD9ZejrlswWrCpBEZgWOiTrc= github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc/go.mod h1:cGKTAVKx4SxOuR/czcZ/E2RSJ3sfHs8FpHhQ5CWMf9s= +github.com/mimoo/StrobeGo v0.0.0-20181016162300-f8f6d4d2b643/go.mod h1:43+3pMjjKimDBf5Kr4ZFNGbLql1zKkbImw+fZbw3geM= +github.com/mimoo/StrobeGo v0.0.0-20220103164710-9a04d6ca976b h1:QrHweqAtyJ9EwCaGHBu1fghwxIPiopAHV06JlXrMHjk= +github.com/mimoo/StrobeGo v0.0.0-20220103164710-9a04d6ca976b/go.mod 
h1:xxLb2ip6sSUts3g1irPVHyk/DGslwQsNOo9I7smJfNU= github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= github.com/minio/sha256-simd v0.0.0-20190328051042-05b4dd3047e5/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= @@ -1388,6 +1415,8 @@ github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9 h1:1/WtZae0yGtPq+TI6+ github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pierrec/xxHash v0.1.5 h1:n/jBpwTHiER4xYvK3/CdPVnLDPchj8eTJFFLUb4QHBo= +github.com/pierrec/xxHash v0.1.5/go.mod h1:w2waW5Zoa/Wc4Yqe0wgrIYAGKqRMf7czn2HNKXmuL+I= github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= @@ -1468,8 +1497,8 @@ github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTE github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o= github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik= -github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= +github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= +github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= @@ -1589,14 +1618,16 @@ github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtX github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI= -github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa h1:5SqCsI/2Qya2bCzK15ozrqo2sZxkh0FHynJZOTVoV6Q= -github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa/go.mod h1:1CNUng3PtjQMtRzJO4FMXBQvkGtuYRxxiR9xMa7jMwI= +github.com/urfave/cli/v2 v2.24.1 h1:/QYYr7g0EhwXEML8jO+8OYt5trPnLHS0p3mrgExJ5NU= +github.com/urfave/cli/v2 v2.24.1/go.mod h1:GHupkWPMM0M/sj1a2b4wUrWBPzazNrIjouW6fmdJLxc= github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/fasthttp v1.6.0/go.mod h1:FstJa9V+Pj9vQ7OJie2qMHdwemEDaDiSdBnvPM1Su9w= github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod 
h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= +github.com/vedhavyas/go-subkey v1.0.3 h1:iKR33BB/akKmcR2PMlXPBeeODjWLM90EL98OrOGs8CA= +github.com/vedhavyas/go-subkey v1.0.3/go.mod h1:CloUaFQSSTdWnINfBRFjVMkWXZANW+nd8+TI5jYcl6Y= github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= github.com/wangjia184/sortedset v0.0.0-20160527075905-f5d03557ba30/go.mod h1:YkocrP2K2tcw938x9gCOmT5G5eCD6jsTz0SZuyAqwIE= @@ -1742,12 +1773,14 @@ golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190618222545-ea8f1a30c443/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20191227163750-53104e6ec876/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201112155050-0c6587e931a9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= @@ -1755,8 +1788,8 @@ golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm golang.org/x/crypto v0.0.0-20210506145944-38f3c27a63bf/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= golang.org/x/crypto v0.0.0-20210920023735-84f357641f63/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A= -golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= +golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g= +golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1767,8 +1800,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod 
h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20230321023759-10a507213a29 h1:ooxPy7fPvB4kwsA2h+iBNHkAbp/4JxTSwCmvdjEYmug= -golang.org/x/exp v0.0.0-20230321023759-10a507213a29/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc h1:mCRnTeVUjcrhlRmO0VK8a6k6Rrf6TF9htwo2pJVSjIU= +golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -1854,8 +1887,8 @@ golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT golang.org/x/net v0.0.0-20210917221730-978cfadd31cf/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211008194852-3b03d305991f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= -golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1877,8 +1910,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1968,6 +2001,7 @@ golang.org/x/sys v0.0.0-20210910150752-751e447fb3d0/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1975,14 +2009,14 @@ golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= -golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s= +golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw= -golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= +golang.org/x/term v0.8.0 h1:n5xxQn2i3PC0yLAbjTpNT85q/Kgzcr2gIoX9OrJUols= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1992,16 +2026,17 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= -golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220922220347-f3bd1da661af h1:Yx9k8YCG3dvF87UAn2tu2HQLf2dt/eR1bXxpLMWeH+Y= -golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -2062,8 +2097,8 @@ golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4= -golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= +golang.org/x/tools v0.9.1 h1:8WMNJAz3zrtPmnYC7ISf5dEn3MT0gY7jBJfw27yrrLo= +golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/staker/stateless_block_validator.go b/staker/stateless_block_validator.go index e290ffad67..b88f546f3a 100644 --- a/staker/stateless_block_validator.go +++ b/staker/stateless_block_validator.go @@ -12,6 +12,7 @@ import ( "testing" "github.com/offchainlabs/nitro/arbnode/execution" + "github.com/offchainlabs/nitro/das/avail" "github.com/offchainlabs/nitro/util/rpcclient" "github.com/offchainlabs/nitro/validator/server_api" @@ -35,11 +36,12 @@ type StatelessBlockValidator struct { recorder BlockRecorder - inboxReader InboxReaderInterface - inboxTracker InboxTrackerInterface - streamer TransactionStreamerInterface - db ethdb.Database - daService arbstate.DataAvailabilityReader + inboxReader InboxReaderInterface + inboxTracker InboxTrackerInterface + streamer TransactionStreamerInterface + db ethdb.Database + daService arbstate.DataAvailabilityReader + availDAReader avail.DataAvailabilityReader moduleMutex sync.Mutex currentWasmModuleRoot common.Hash @@ -237,6 +239,7 @@ func NewStatelessBlockValidator( recorder BlockRecorder, arbdb ethdb.Database, das arbstate.DataAvailabilityReader, + availDAReader avail.DataAvailabilityReader, config func() *BlockValidatorConfig, stack *node.Node, ) (*StatelessBlockValidator, error) { @@ -253,6 +256,7 @@ func NewStatelessBlockValidator( streamer: streamer, db: arbdb, daService: das, + availDAReader: availDAReader, } return validator, nil } @@ -303,16 +307,26 @@ func (v *StatelessBlockValidator) ValidationEntryRecord(ctx context.Context, e * continue } if !arbstate.IsDASMessageHeaderByte(batch.Data[40]) { - continue + if v.daService == nil { + log.Warn("No DAS configured, but sequencer message found with DAS header") + } else { + _, err := arbstate.RecoverPayloadFromDasBatch( + ctx, batch.Number, batch.Data, v.daService, e.Preimages, arbstate.KeysetValidate, + ) + if err != nil { + return err + } + } } - if v.daService == nil { - log.Warn("No DAS configured, but sequencer message found with DAS header") - } else { - _, err := 
arbstate.RecoverPayloadFromDasBatch( - ctx, batch.Number, batch.Data, v.daService, e.Preimages, arbstate.KeysetValidate, - ) - if err != nil { - return err + + if avail.IsAvailMessageHeaderByte(batch.Data[40]) { + if v.availDAReader == nil { + log.Warn("No avail DA configured, but sequencer message found with availDA header") + } else { + _, err := arbstate.RecoverPayloadFromAvailBatch(ctx, batch.Number, batch.Data, v.availDAReader, e.Preimages) + if err != nil { + return err + } } } } diff --git a/system_tests/batch_poster_test.go b/system_tests/batch_poster_test.go index 8b0811c223..ec67758618 100644 --- a/system_tests/batch_poster_test.go +++ b/system_tests/batch_poster_test.go @@ -82,7 +82,7 @@ func testBatchPosterParallel(t *testing.T, useRedis bool) { for i := 0; i < parallelBatchPosters; i++ { // Make a copy of the batch poster config so NewBatchPoster calling Validate() on it doesn't race batchPosterConfig := conf.BatchPoster - batchPoster, err := arbnode.NewBatchPoster(ctx, nil, nodeA.L1Reader, nodeA.InboxTracker, nodeA.TxStreamer, nodeA.SyncMonitor, func() *arbnode.BatchPosterConfig { return &batchPosterConfig }, nodeA.DeployInfo, &seqTxOpts, nil) + batchPoster, err := arbnode.NewBatchPoster(ctx, nil, nodeA.L1Reader, nodeA.InboxTracker, nodeA.TxStreamer, nodeA.SyncMonitor, func() *arbnode.BatchPosterConfig { return &batchPosterConfig }, nodeA.DeployInfo, &seqTxOpts, nil, nil) Require(t, err) batchPoster.Start(ctx) defer batchPoster.StopAndWait() diff --git a/system_tests/full_challenge_impl_test.go b/system_tests/full_challenge_impl_test.go index b64a655c3e..be888f0563 100644 --- a/system_tests/full_challenge_impl_test.go +++ b/system_tests/full_challenge_impl_test.go @@ -368,7 +368,7 @@ func RunChallengeTest(t *testing.T, asserterIsCorrect bool, useStubs bool, chall confirmLatestBlock(ctx, t, l1Info, l1Backend) - asserterValidator, err := staker.NewStatelessBlockValidator(asserterL2.InboxReader, asserterL2.InboxTracker, asserterL2.TxStreamer, asserterL2.Execution.Recorder, asserterL2ArbDb, nil, StaticFetcherFrom(t, &conf.BlockValidator), valStack) + asserterValidator, err := staker.NewStatelessBlockValidator(asserterL2.InboxReader, asserterL2.InboxTracker, asserterL2.TxStreamer, asserterL2.Execution.Recorder, asserterL2ArbDb, nil, nil, StaticFetcherFrom(t, &conf.BlockValidator), valStack) if err != nil { Fatal(t, err) } @@ -385,7 +385,7 @@ func RunChallengeTest(t *testing.T, asserterIsCorrect bool, useStubs bool, chall if err != nil { Fatal(t, err) } - challengerValidator, err := staker.NewStatelessBlockValidator(challengerL2.InboxReader, challengerL2.InboxTracker, challengerL2.TxStreamer, challengerL2.Execution.Recorder, challengerL2ArbDb, nil, StaticFetcherFrom(t, &conf.BlockValidator), valStack) + challengerValidator, err := staker.NewStatelessBlockValidator(challengerL2.InboxReader, challengerL2.InboxTracker, challengerL2.TxStreamer, challengerL2.Execution.Recorder, challengerL2ArbDb, nil, nil, StaticFetcherFrom(t, &conf.BlockValidator), valStack) if err != nil { Fatal(t, err) } diff --git a/system_tests/staker_test.go b/system_tests/staker_test.go index 36b112d03a..e40e0e83ef 100644 --- a/system_tests/staker_test.go +++ b/system_tests/staker_test.go @@ -153,6 +153,7 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) l2nodeA.Execution.Recorder, l2nodeA.ArbDB, nil, + nil, StaticFetcherFrom(t, &blockValidatorConfig), valStack, ) @@ -193,6 +194,7 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) 
l2nodeB.Execution.Recorder, l2nodeB.ArbDB, nil, + nil, StaticFetcherFrom(t, &blockValidatorConfig), valStack, ) diff --git a/system_tests/state_fuzz_test.go b/system_tests/state_fuzz_test.go index b14215fbf0..0981b49442 100644 --- a/system_tests/state_fuzz_test.go +++ b/system_tests/state_fuzz_test.go @@ -41,7 +41,7 @@ func BuildBlock( if lastBlockHeader != nil { delayedMessagesRead = lastBlockHeader.Nonce.Uint64() } - inboxMultiplexer := arbstate.NewInboxMultiplexer(inbox, delayedMessagesRead, nil, arbstate.KeysetValidate) + inboxMultiplexer := arbstate.NewInboxMultiplexer(inbox, delayedMessagesRead, nil, nil, arbstate.KeysetValidate) ctx := context.Background() message, err := inboxMultiplexer.Pop(ctx) From d81bc537f219ff2759bf6a399fe130beb92114b0 Mon Sep 17 00:00:00 2001 From: rishabhagrawalzra Date: Sat, 20 Jan 2024 09:39:53 +0530 Subject: [PATCH 04/22] fix: validation of recovered batch from avail --- arbstate/inbox.go | 2 +- das/avail/avail.go | 48 ++++++++++++++--------------- staker/stateless_block_validator.go | 2 +- 3 files changed, 26 insertions(+), 26 deletions(-) diff --git a/arbstate/inbox.go b/arbstate/inbox.go index f6488bda23..720bfb4b09 100644 --- a/arbstate/inbox.go +++ b/arbstate/inbox.go @@ -142,7 +142,7 @@ func parseSequencerMessage(ctx context.Context, batchNum uint64, data []byte, da } func RecoverPayloadFromAvailBatch(ctx context.Context, batchNum uint64, sequencerMsg []byte, availDAReader avail.DataAvailabilityReader, preimages map[arbutil.PreimageType]map[common.Hash][]byte) ([]byte, error) { - var shaPreimages map[common.Hash][]byte + var shaPreimages map[common.Hash][]byte = make(map[common.Hash][]byte) if preimages != nil { if preimages[arbutil.Sha2_256PreimageType] == nil { preimages[arbutil.Sha2_256PreimageType] = make(map[common.Hash][]byte) diff --git a/das/avail/avail.go b/das/avail/avail.go index 84045a0eee..4baf408200 100644 --- a/das/avail/avail.go +++ b/das/avail/avail.go @@ -137,37 +137,37 @@ func (a *AvailDA) Store(ctx context.Context, message []byte) ([]byte, error) { case status := <-sub.Chan(): if status.IsFinalized { blobPointer = BlobPointer{BlockHash: string(status.AsFinalized.Hex()), Sender: keyringPair.Address, Nonce: o.Nonce.Int64()} - break + log.Info("Sucesfully included in block data to Avail", "BlobPointer:", blobPointer) + blobPointerData, err := blobPointer.MarshalToBinary() + if err != nil { + log.Warn("BlobPointer MashalBinary error", "err", err) + return nil, err + } + + buf := new(bytes.Buffer) + err = binary.Write(buf, binary.BigEndian, AvailMessageHeaderFlag) + if err != nil { + log.Warn("batch type byte serialization failed", "err", err) + return nil, err + } + + err = binary.Write(buf, binary.BigEndian, blobPointerData) + if err != nil { + log.Warn("blob pointer data serialization failed", "err", err) + return nil, err + } + + serializedBlobPointerData := buf.Bytes() + + return serializedBlobPointerData, nil } case <-timeout: return nil, errors.New("Timitout before getting finalized status") } } - log.Info("Sucesfully included in block data to Avail", "BlobPointer:", blobPointer) + //log.Info("Sucesfully included in block data to Avail", "BlobPointer:", blobPointer) - blobPointerData, err := blobPointer.MarshalToBinary() - if err != nil { - log.Warn("BlobPointer MashalBinary error", "err", err) - return nil, err - } - - buf := new(bytes.Buffer) - err = binary.Write(buf, binary.BigEndian, AvailMessageHeaderFlag) - if err != nil { - log.Warn("batch type byte serialization failed", "err", err) - return nil, err - } - - err = 
binary.Write(buf, binary.BigEndian, blobPointerData) - if err != nil { - log.Warn("blob pointer data serialization failed", "err", err) - return nil, err - } - - serializedBlobPointerData := buf.Bytes() - - return serializedBlobPointerData, nil } func (a *AvailDA) Read(blobPointer BlobPointer) ([]byte, error) { diff --git a/staker/stateless_block_validator.go b/staker/stateless_block_validator.go index b88f546f3a..81c07764b1 100644 --- a/staker/stateless_block_validator.go +++ b/staker/stateless_block_validator.go @@ -306,7 +306,7 @@ func (v *StatelessBlockValidator) ValidationEntryRecord(ctx context.Context, e * if len(batch.Data) <= 40 { continue } - if !arbstate.IsDASMessageHeaderByte(batch.Data[40]) { + if arbstate.IsDASMessageHeaderByte(batch.Data[40]) { if v.daService == nil { log.Warn("No DAS configured, but sequencer message found with DAS header") } else { From ae3b104388e829329ad12ed5462f4a804debb0d3 Mon Sep 17 00:00:00 2001 From: rishabhagrawalzra Date: Tue, 30 Jan 2024 11:21:20 +0530 Subject: [PATCH 05/22] feat: enable fraud proof, resolved validation error --- arbstate/das_reader.go | 5 +++++ arbstate/inbox.go | 23 ++++++++++++----------- cmd/replay/main.go | 22 ++++++++++++++++++---- das/avail/avail.go | 5 +++-- das/avail/blob.go | 9 ++++++--- das/avail/da_interface.go | 2 +- 6 files changed, 45 insertions(+), 21 deletions(-) diff --git a/arbstate/das_reader.go b/arbstate/das_reader.go index a6d351b49e..0f503196e9 100644 --- a/arbstate/das_reader.go +++ b/arbstate/das_reader.go @@ -16,6 +16,7 @@ import ( "github.com/offchainlabs/nitro/arbos/util" "github.com/offchainlabs/nitro/blsSignatures" + "github.com/offchainlabs/nitro/das/avail" "github.com/offchainlabs/nitro/das/dastree" ) @@ -24,6 +25,10 @@ type DataAvailabilityReader interface { ExpirationPolicy(ctx context.Context) (ExpirationPolicy, error) } +type AvailDataAvailibilityReader interface { + avail.DataAvailabilityReader +} + var ErrHashMismatch = errors.New("result does not match expected hash") // DASMessageHeaderFlag indicates that this data is a certificate for the data availability service, diff --git a/arbstate/inbox.go b/arbstate/inbox.go index 720bfb4b09..3cf7e7d7f3 100644 --- a/arbstate/inbox.go +++ b/arbstate/inbox.go @@ -6,7 +6,6 @@ package arbstate import ( "bytes" "context" - "crypto/sha256" "encoding/binary" "errors" "io" @@ -142,12 +141,12 @@ func parseSequencerMessage(ctx context.Context, batchNum uint64, data []byte, da } func RecoverPayloadFromAvailBatch(ctx context.Context, batchNum uint64, sequencerMsg []byte, availDAReader avail.DataAvailabilityReader, preimages map[arbutil.PreimageType]map[common.Hash][]byte) ([]byte, error) { - var shaPreimages map[common.Hash][]byte = make(map[common.Hash][]byte) + var keccakPreimages map[common.Hash][]byte if preimages != nil { - if preimages[arbutil.Sha2_256PreimageType] == nil { - preimages[arbutil.Sha2_256PreimageType] = make(map[common.Hash][]byte) + if preimages[arbutil.Keccak256PreimageType] == nil { + preimages[arbutil.Keccak256PreimageType] = make(map[common.Hash][]byte) } - shaPreimages = preimages[arbutil.Sha2_256PreimageType] + keccakPreimages = preimages[arbutil.Keccak256PreimageType] } buf := bytes.NewBuffer(sequencerMsg[40:]) @@ -161,6 +160,10 @@ func RecoverPayloadFromAvailBatch(ctx context.Context, batchNum uint64, sequence return nil, errors.New("tried to deserialize a message that doesn't have the Avail header") } + recordPreimage := func(key common.Hash, value []byte) { + keccakPreimages[key] = value + } + blobPointer := avail.BlobPointer{} 
blobPointer.UnmarshalFromBinary(buf.Bytes()) if err != nil { @@ -169,7 +172,7 @@ func RecoverPayloadFromAvailBatch(ctx context.Context, batchNum uint64, sequence } log.Info("Attempting to fetch data for", "batchNum", batchNum, "availBlockHash", blobPointer.BlockHash) - payload, err := availDAReader.Read(blobPointer) + payload, err := availDAReader.Read(ctx, blobPointer) if err != nil { log.Error("Failed to resolve blob pointer from avail", "err", err) return nil, err @@ -179,11 +182,9 @@ func RecoverPayloadFromAvailBatch(ctx context.Context, batchNum uint64, sequence log.Info("Recording Sha256 preimage for Avail data") - shaDataHash := sha256.New() - shaDataHash.Write(payload) - dataHash := shaDataHash.Sum([]byte{}) - shaPreimages[common.BytesToHash(dataHash)] = payload - + if keccakPreimages != nil { + dastree.RecordHash(recordPreimage, payload) + } return payload, nil } diff --git a/cmd/replay/main.go b/cmd/replay/main.go index 2fdbd0ccbe..c14f27eb17 100644 --- a/cmd/replay/main.go +++ b/cmd/replay/main.go @@ -28,6 +28,7 @@ import ( "github.com/offchainlabs/nitro/arbstate" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/cmd/chaininfo" + "github.com/offchainlabs/nitro/das/avail" "github.com/offchainlabs/nitro/das/dastree" "github.com/offchainlabs/nitro/gethhook" "github.com/offchainlabs/nitro/wavmio" @@ -117,6 +118,15 @@ func (dasReader *PreimageDASReader) ExpirationPolicy(ctx context.Context) (arbst return arbstate.DiscardImmediately, nil } +type PreimageAvailDAReader struct{} + +func (availDAReader *PreimageAvailDAReader) Read(ctx context.Context, blobPointer avail.BlobPointer) ([]byte, error) { + oracle := func(hash common.Hash) ([]byte, error) { + return wavmio.ResolveTypedPreimage(arbutil.Keccak256PreimageType, hash) + } + return dastree.Content(blobPointer.DasTreeRootHash, oracle) +} + // To generate: // key, _ := crypto.HexToECDSA("0000000000000000000000000000000000000000000000000000000000000001") // sig, _ := crypto.Sign(make([]byte, 32), key) @@ -166,7 +176,7 @@ func main() { panic(fmt.Sprintf("Error opening state db: %v", err.Error())) } - readMessage := func(dasEnabled bool) *arbostypes.MessageWithMetadata { + readMessage := func(dasEnabled bool, availDAEnabled bool) *arbostypes.MessageWithMetadata { var delayedMessagesRead uint64 if lastBlockHeader != nil { delayedMessagesRead = lastBlockHeader.Nonce.Uint64() @@ -175,12 +185,16 @@ func main() { if dasEnabled { dasReader = &PreimageDASReader{} } + var availDAReader arbstate.AvailDataAvailibilityReader + if availDAEnabled { + availDAReader = &PreimageAvailDAReader{} + } backend := WavmInbox{} var keysetValidationMode = arbstate.KeysetPanicIfInvalid if backend.GetPositionWithinMessage() > 0 { keysetValidationMode = arbstate.KeysetDontValidate } - inboxMultiplexer := arbstate.NewInboxMultiplexer(backend, delayedMessagesRead, dasReader, nil, keysetValidationMode) + inboxMultiplexer := arbstate.NewInboxMultiplexer(backend, delayedMessagesRead, dasReader, availDAReader, keysetValidationMode) ctx := context.Background() message, err := inboxMultiplexer.Pop(ctx) if err != nil { @@ -232,7 +246,7 @@ func main() { } } - message := readMessage(chainConfig.ArbitrumChainParams.DataAvailabilityCommittee) + message := readMessage(chainConfig.ArbitrumChainParams.DataAvailabilityCommittee, true) chainContext := WavmChainContext{} batchFetcher := func(batchNum uint64) ([]byte, error) { @@ -246,7 +260,7 @@ func main() { } else { // Initialize ArbOS with this init message and create the genesis block. 
- message := readMessage(false) + message := readMessage(false, false) initMessage, err := message.Message.ParseInitMessage() if err != nil { diff --git a/das/avail/avail.go b/das/avail/avail.go index 4baf408200..887f12089e 100644 --- a/das/avail/avail.go +++ b/das/avail/avail.go @@ -14,6 +14,7 @@ import ( gsrpc_types "github.com/centrifuge/go-substrate-rpc-client/v4/types" "github.com/centrifuge/go-substrate-rpc-client/v4/types/codec" "github.com/ethereum/go-ethereum/log" + "github.com/offchainlabs/nitro/das/dastree" "github.com/vedhavyas/go-subkey" ) @@ -136,7 +137,7 @@ func (a *AvailDA) Store(ctx context.Context, message []byte) ([]byte, error) { select { case status := <-sub.Chan(): if status.IsFinalized { - blobPointer = BlobPointer{BlockHash: string(status.AsFinalized.Hex()), Sender: keyringPair.Address, Nonce: o.Nonce.Int64()} + blobPointer = BlobPointer{BlockHash: string(status.AsFinalized.Hex()), Sender: keyringPair.Address, Nonce: o.Nonce.Int64(), DasTreeRootHash: dastree.Hash(message)} log.Info("Sucesfully included in block data to Avail", "BlobPointer:", blobPointer) blobPointerData, err := blobPointer.MarshalToBinary() if err != nil { @@ -170,7 +171,7 @@ func (a *AvailDA) Store(ctx context.Context, message []byte) ([]byte, error) { } -func (a *AvailDA) Read(blobPointer BlobPointer) ([]byte, error) { +func (a *AvailDA) Read(ctx context.Context, blobPointer BlobPointer) ([]byte, error) { log.Info("Requesting data from Avail", "BlobPointer", blobPointer) //Intitializing variables diff --git a/das/avail/blob.go b/das/avail/blob.go index 69790ad4c3..51a05082a6 100644 --- a/das/avail/blob.go +++ b/das/avail/blob.go @@ -3,13 +3,16 @@ package avail import ( "encoding/json" "fmt" + + "github.com/ethereum/go-ethereum/common" ) // BlobPointer contains the reference to the data blob on Avail type BlobPointer struct { - BlockHash string // Hash for block on avail chain - Sender string // sender address to filter extrinsic out sepecifically for this address - Nonce int64 // nonce to filter specific extrinsic + BlockHash string // Hash for block on avail chain + Sender string // sender address to filter extrinsic out sepecifically for this address + Nonce int64 // nonce to filter specific extrinsic + DasTreeRootHash common.Hash } // MarshalBinary encodes the BlobPointer to binary diff --git a/das/avail/da_interface.go b/das/avail/da_interface.go index 75452dbddc..6b29fda055 100644 --- a/das/avail/da_interface.go +++ b/das/avail/da_interface.go @@ -9,5 +9,5 @@ type DataAvailabilityWriter interface { } type DataAvailabilityReader interface { - Read(BlobPointer) ([]byte, error) + Read(context.Context, BlobPointer) ([]byte, error) } From 33c4dadf64a84c3a71771ae94492667206a5a708 Mon Sep 17 00:00:00 2001 From: rishabhagrawalzra Date: Thu, 1 Feb 2024 00:42:04 +0530 Subject: [PATCH 06/22] chore: added logs for better understanding --- arbstate/inbox.go | 1 + cmd/replay/main.go | 7 ++++++- staker/block_validator.go | 1 + 3 files changed, 8 insertions(+), 1 deletion(-) diff --git a/arbstate/inbox.go b/arbstate/inbox.go index 3cf7e7d7f3..0110c770fd 100644 --- a/arbstate/inbox.go +++ b/arbstate/inbox.go @@ -183,6 +183,7 @@ func RecoverPayloadFromAvailBatch(ctx context.Context, batchNum uint64, sequence log.Info("Recording Sha256 preimage for Avail data") if keccakPreimages != nil { + log.Info("Data is being recorded into the orcale", "length", len(payload)) dastree.RecordHash(recordPreimage, payload) } return payload, nil diff --git a/cmd/replay/main.go b/cmd/replay/main.go index c14f27eb17..b54162d8f2 
100644 --- a/cmd/replay/main.go +++ b/cmd/replay/main.go @@ -124,7 +124,9 @@ func (availDAReader *PreimageAvailDAReader) Read(ctx context.Context, blobPointe oracle := func(hash common.Hash) ([]byte, error) { return wavmio.ResolveTypedPreimage(arbutil.Keccak256PreimageType, hash) } - return dastree.Content(blobPointer.DasTreeRootHash, oracle) + data, err := dastree.Content(blobPointer.DasTreeRootHash, oracle) + log.Info("Data is being retrieved from oracle", len(data)) + return data, err } // To generate: @@ -194,6 +196,7 @@ func main() { if backend.GetPositionWithinMessage() > 0 { keysetValidationMode = arbstate.KeysetDontValidate } + log.Info("Params passed on readMessage func", "dasEnabled", dasEnabled, "availDAEnabled", availDAEnabled) inboxMultiplexer := arbstate.NewInboxMultiplexer(backend, delayedMessagesRead, dasReader, availDAReader, keysetValidationMode) ctx := context.Background() message, err := inboxMultiplexer.Pop(ctx) @@ -206,6 +209,8 @@ func main() { var newBlock *types.Block if lastBlockStateRoot != (common.Hash{}) { + log.Info("Running main func of replay binary", "lastBlockStateRoot", lastBlockStateRoot) + // ArbOS has already been initialized. // Load the chain config and then produce a block normally. diff --git a/staker/block_validator.go b/staker/block_validator.go index 94bc2a0806..7c081d14aa 100644 --- a/staker/block_validator.go +++ b/staker/block_validator.go @@ -678,6 +678,7 @@ validationsLoop: } validatorValidValidationsCounter.Inc(1) } + log.Info("Latest Validated Info:", "Start", validationStatus.Entry.Start, "End", validationStatus.Entry.End) err := v.writeLastValidated(validationStatus.Entry.End, wasmRoots) if err != nil { log.Error("failed writing new validated to database", "pos", pos, "err", err) From 6871d4dbfbae0f7a74318912ee8088cec4ef1ca8 Mon Sep 17 00:00:00 2001 From: rishabhagrawalzra Date: Fri, 16 Feb 2024 14:16:56 +0530 Subject: [PATCH 07/22] chore: optimized avail package --- das/avail/avail.go | 162 ++++++++++++++++++++++++++------------------ das/avail/config.go | 22 +++--- 2 files changed, 109 insertions(+), 75 deletions(-) diff --git a/das/avail/avail.go b/das/avail/avail.go index 887f12089e..6aaf0a3fe0 100644 --- a/das/avail/avail.go +++ b/das/avail/avail.go @@ -4,7 +4,6 @@ import ( "bytes" "context" "encoding/binary" - "errors" "fmt" "time" @@ -27,33 +26,21 @@ func IsAvailMessageHeaderByte(header byte) bool { } type AvailDA struct { - cfg DAConfig - api *gsrpc.SubstrateAPI + enable bool + timeout time.Duration + appID int + api *gsrpc.SubstrateAPI + meta *gsrpc_types.Metadata + genesisHash gsrpc_types.Hash + rv *gsrpc_types.RuntimeVersion + keyringPair signature.KeyringPair + key gsrpc_types.StorageKey } func NewAvailDA(cfg DAConfig) (*AvailDA, error) { - //Creating new substrate api - api, err := gsrpc.NewSubstrateAPI(cfg.ApiURL) - if err != nil { - return nil, err - } - - return &AvailDA{ - cfg: cfg, - api: api, - }, nil -} - -func (a *AvailDA) Store(ctx context.Context, message []byte) ([]byte, error) { - Seed := a.cfg.Seed - AppID := a.cfg.AppID - - meta, err := a.api.RPC.State.GetMetadataLatest() - if err != nil { - log.Warn("cannot get metadata: error:%v", err) - return nil, err - } + Seed := cfg.Seed + AppID := cfg.AppID appID := 0 // if app id is greater than 0 then it must be created before submitting data @@ -61,22 +48,25 @@ func (a *AvailDA) Store(ctx context.Context, message []byte) ([]byte, error) { appID = AppID } - c, err := gsrpc_types.NewCall(meta, "DataAvailability.submit_data", gsrpc_types.NewBytes(message)) + //Creating 
new substrate api + api, err := gsrpc.NewSubstrateAPI(cfg.ApiURL) if err != nil { - log.Warn("cannot create new call: error:%v", err) return nil, err } - // Create the extrinsic - ext := gsrpc_types.NewExtrinsic(c) + meta, err := api.RPC.State.GetMetadataLatest() + if err != nil { + log.Warn("cannot get metadata: error:%v", err) + return nil, err + } - genesisHash, err := a.api.RPC.Chain.GetBlockHash(0) + genesisHash, err := api.RPC.Chain.GetBlockHash(0) if err != nil { log.Warn("cannot get block hash: error:%v", err) return nil, err } - rv, err := a.api.RPC.State.GetRuntimeVersionLatest() + rv, err := api.RPC.State.GetRuntimeVersionLatest() if err != nil { log.Warn("cannot get runtime version: error:%v", err) return nil, err @@ -94,8 +84,32 @@ func (a *AvailDA) Store(ctx context.Context, message []byte) ([]byte, error) { return nil, err } + return &AvailDA{ + enable: cfg.Enable, + timeout: cfg.Timeout, + appID: appID, + api: api, + meta: meta, + genesisHash: genesisHash, + rv: rv, + keyringPair: keyringPair, + key: key, + }, nil +} + +func (a *AvailDA) Store(ctx context.Context, message []byte) ([]byte, error) { + + c, err := gsrpc_types.NewCall(a.meta, "DataAvailability.submit_data", gsrpc_types.NewBytes(message)) + if err != nil { + log.Warn("cannot create new call: error:%v", err) + return nil, err + } + + // Create the extrinsic + ext := gsrpc_types.NewExtrinsic(c) + var accountInfo gsrpc_types.AccountInfo - ok, err := a.api.RPC.State.GetStorageLatest(key, &accountInfo) + ok, err := a.api.RPC.State.GetStorageLatest(a.key, &accountInfo) if err != nil || !ok { log.Warn("cannot get latest storage: error:%v", err) return nil, err @@ -104,18 +118,18 @@ func (a *AvailDA) Store(ctx context.Context, message []byte) ([]byte, error) { nonce := GetAccountNonce(uint32(accountInfo.Nonce)) //fmt.Println("Nonce from localDatabase:", nonce, " :::::::: from acountInfo:", accountInfo.Nonce) o := gsrpc_types.SignatureOptions{ - BlockHash: genesisHash, + BlockHash: a.genesisHash, Era: gsrpc_types.ExtrinsicEra{IsMortalEra: false}, - GenesisHash: genesisHash, + GenesisHash: a.genesisHash, Nonce: gsrpc_types.NewUCompactFromUInt(uint64(nonce)), - SpecVersion: rv.SpecVersion, + SpecVersion: a.rv.SpecVersion, Tip: gsrpc_types.NewUCompactFromUInt(0), - AppID: gsrpc_types.NewUCompactFromUInt(uint64(appID)), - TransactionVersion: rv.TransactionVersion, + AppID: gsrpc_types.NewUCompactFromUInt(uint64(a.appID)), + TransactionVersion: a.rv.TransactionVersion, } // Sign the transaction using Alice's default account - err = ext.Sign(keyringPair, o) + err = ext.Sign(a.keyringPair, o) if err != nil { log.Warn("cannot sign: error:%v", err) return nil, err @@ -128,46 +142,62 @@ func (a *AvailDA) Store(ctx context.Context, message []byte) ([]byte, error) { return nil, err } - log.Info("Tx batch is submitted to Avail", "length", len(message), "address", keyringPair.Address, "appID", appID) + log.Info("✅ Tx batch is submitted to Avail", "length", len(message), "address", a.keyringPair.Address, "appID", a.appID) defer sub.Unsubscribe() - timeout := time.After(100 * time.Second) - var blobPointer BlobPointer + timeout := time.After(time.Duration(a.timeout) * time.Second) + var finalizedblockHash gsrpc_types.Hash + +outer: for { select { case status := <-sub.Chan(): + if status.IsInBlock { + log.Info("đŸ“Ĩ Submit data extrinsic included in block %v", status.AsInBlock.Hex()) + } if status.IsFinalized { - blobPointer = BlobPointer{BlockHash: string(status.AsFinalized.Hex()), Sender: keyringPair.Address, Nonce: o.Nonce.Int64(), 
DasTreeRootHash: dastree.Hash(message)} - log.Info("Sucesfully included in block data to Avail", "BlobPointer:", blobPointer) - blobPointerData, err := blobPointer.MarshalToBinary() - if err != nil { - log.Warn("BlobPointer MashalBinary error", "err", err) - return nil, err - } - - buf := new(bytes.Buffer) - err = binary.Write(buf, binary.BigEndian, AvailMessageHeaderFlag) - if err != nil { - log.Warn("batch type byte serialization failed", "err", err) - return nil, err - } - - err = binary.Write(buf, binary.BigEndian, blobPointerData) - if err != nil { - log.Warn("blob pointer data serialization failed", "err", err) - return nil, err - } - - serializedBlobPointerData := buf.Bytes() - - return serializedBlobPointerData, nil + finalizedblockHash = status.AsFinalized + break outer + } else if status.IsDropped { + return nil, fmt.Errorf("❌ Extrinsic dropped") + } else if status.IsUsurped { + return nil, fmt.Errorf("❌ Extrinsic usurped") + } else if status.IsRetracted { + return nil, fmt.Errorf("❌ Extrinsic retracted") + } else if status.IsInvalid { + return nil, fmt.Errorf("❌ Extrinsic invalid") } case <-timeout: - return nil, errors.New("Timitout before getting finalized status") + return nil, fmt.Errorf("⌛ī¸ Timeout of %d seconds reached without getting finalized status for extrinsic", a.timeout) } } - //log.Info("Sucesfully included in block data to Avail", "BlobPointer:", blobPointer) + blobPointer := BlobPointer{BlockHash: finalizedblockHash.Hex(), Sender: a.keyringPair.Address, Nonce: o.Nonce.Int64(), DasTreeRootHash: dastree.Hash(message)} + + log.Info("✅ Sucesfully included in block data to Avail", "BlobPointer:", blobPointer) + + blobPointerData, err := blobPointer.MarshalToBinary() + if err != nil { + log.Warn("BlobPointer MashalBinary error", "err", err) + return nil, err + } + + buf := new(bytes.Buffer) + err = binary.Write(buf, binary.BigEndian, AvailMessageHeaderFlag) + if err != nil { + log.Warn("batch type byte serialization failed", "err", err) + return nil, err + } + + err = binary.Write(buf, binary.BigEndian, blobPointerData) + if err != nil { + log.Warn("blob pointer data serialization failed", "err", err) + return nil, err + } + + serializedBlobPointerData := buf.Bytes() + + return serializedBlobPointerData, nil } diff --git a/das/avail/config.go b/das/avail/config.go index 378206ecfc..342ba6680c 100644 --- a/das/avail/config.go +++ b/das/avail/config.go @@ -1,17 +1,21 @@ package avail +import "time" + type DAConfig struct { - Enable bool `koanf:"enable"` - ApiURL string `koanf:"api_url"` - Seed string `koanf:"seed"` - AppID int `koanf:"app_id"` + Enable bool `koanf:"enable"` + ApiURL string `koanf:"api_url"` + Seed string `koanf:"seed"` + AppID int `koanf:"app_id"` + Timeout time.Duration `koanf:"timeout"` } -func NewDAConfig(api_url string, seed string, app_id int) (*DAConfig, error) { +func NewDAConfig(api_url string, seed string, app_id int, timeout time.Duration) (*DAConfig, error) { return &DAConfig{ - Enable: true, - ApiURL: api_url, - Seed: seed, - AppID: app_id, + Enable: true, + ApiURL: api_url, + Seed: seed, + AppID: app_id, + Timeout: timeout, }, nil } From a915e5ad698df69478e99c8675ca127ef8bc0fee Mon Sep 17 00:00:00 2001 From: rishabhagrawalzra Date: Sat, 17 Feb 2024 22:01:56 +0530 Subject: [PATCH 08/22] chore: fix linter errors --- arbnode/node.go | 2 +- arbstate/inbox.go | 2 +- das/avail/avail.go | 16 ++++++++-------- das/avail/blob.go | 4 ++-- das/avail/config.go | 4 ++-- 5 files changed, 14 insertions(+), 14 deletions(-) diff --git a/arbnode/node.go 
b/arbnode/node.go index 582884e08a..6413a9a9ab 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -299,7 +299,7 @@ type Config struct { Staker staker.L1ValidatorConfig `koanf:"staker" reload:"hot"` SeqCoordinator SeqCoordinatorConfig `koanf:"seq-coordinator"` DataAvailability das.DataAvailabilityConfig `koanf:"data-availability"` - Avail avail.DAConfig `koanf:"avail-cfg"` + Avail avail.DAConfig `koanf:"avail"` SyncMonitor SyncMonitorConfig `koanf:"sync-monitor"` Dangerous DangerousConfig `koanf:"dangerous"` Caching execution.CachingConfig `koanf:"caching"` diff --git a/arbstate/inbox.go b/arbstate/inbox.go index 0110c770fd..8b362e24e1 100644 --- a/arbstate/inbox.go +++ b/arbstate/inbox.go @@ -165,7 +165,7 @@ func RecoverPayloadFromAvailBatch(ctx context.Context, batchNum uint64, sequence } blobPointer := avail.BlobPointer{} - blobPointer.UnmarshalFromBinary(buf.Bytes()) + err = blobPointer.UnmarshalFromBinary(buf.Bytes()) if err != nil { log.Error("Couldn't unmarshal Avail blob pointer", "err", err) return nil, err diff --git a/das/avail/avail.go b/das/avail/avail.go index 887f12089e..e7be41b77d 100644 --- a/das/avail/avail.go +++ b/das/avail/avail.go @@ -32,7 +32,7 @@ type AvailDA struct { } func NewAvailDA(cfg DAConfig) (*AvailDA, error) { - //Creating new substrate api + // Creating new substrate api api, err := gsrpc.NewSubstrateAPI(cfg.ApiURL) if err != nil { return nil, err @@ -102,7 +102,7 @@ func (a *AvailDA) Store(ctx context.Context, message []byte) ([]byte, error) { } nonce := GetAccountNonce(uint32(accountInfo.Nonce)) - //fmt.Println("Nonce from localDatabase:", nonce, " :::::::: from acountInfo:", accountInfo.Nonce) + // fmt.Println("Nonce from localDatabase:", nonce, " :::::::: from acountInfo:", accountInfo.Nonce) o := gsrpc_types.SignatureOptions{ BlockHash: genesisHash, Era: gsrpc_types.ExtrinsicEra{IsMortalEra: false}, @@ -167,14 +167,14 @@ func (a *AvailDA) Store(ctx context.Context, message []byte) ([]byte, error) { } } - //log.Info("Sucesfully included in block data to Avail", "BlobPointer:", blobPointer) + // log.Info("Sucesfully included in block data to Avail", "BlobPointer:", blobPointer) } func (a *AvailDA) Read(ctx context.Context, blobPointer BlobPointer) ([]byte, error) { log.Info("Requesting data from Avail", "BlobPointer", blobPointer) - //Intitializing variables + // Intitializing variables Hash := blobPointer.BlockHash Address := blobPointer.Sender Nonce := blobPointer.Nonce @@ -182,18 +182,18 @@ func (a *AvailDA) Read(ctx context.Context, blobPointer BlobPointer) ([]byte, er // Converting this string type into gsrpc_types.hash type blk_hash, err := gsrpc_types.NewHashFromHexString(Hash) if err != nil { - return nil, fmt.Errorf("unable to convert string hash into types.hash, error:%v", err) + return nil, fmt.Errorf("unable to convert string hash into types.hash, error:%w", err) } // Fetching block based on block hash avail_blk, err := a.api.RPC.Chain.GetBlock(blk_hash) if err != nil { - return []byte{}, fmt.Errorf("cannot get block for hash:%v and getting error:%v", Hash, err) + return []byte{}, fmt.Errorf("cannot get block for hash:%v and getting error:%w", Hash, err) } - //Extracting the required extrinsic according to the reference + // Extracting the required extrinsic according to the reference for _, ext := range avail_blk.Block.Extrinsics { - //Extracting sender address for extrinsic + // Extracting sender address for extrinsic ext_Addr, err := subkey.SS58Address(ext.Signature.Signer.AsID.ToBytes(), 42) if err != nil { log.Error("unable to get 
sender address from extrinsic", "err", err) diff --git a/das/avail/blob.go b/das/avail/blob.go index 51a05082a6..41d014d5fe 100644 --- a/das/avail/blob.go +++ b/das/avail/blob.go @@ -19,7 +19,7 @@ type BlobPointer struct { func (b *BlobPointer) MarshalToBinary() ([]byte, error) { blobPointerData, err := json.Marshal(b) if err != nil { - return []byte{}, fmt.Errorf("unable to covert the avail block referece into array of bytes and getting error:%v", err) + return []byte{}, fmt.Errorf("unable to covert the avail block referece into array of bytes and getting error:%w", err) } return blobPointerData, nil } @@ -28,7 +28,7 @@ func (b *BlobPointer) MarshalToBinary() ([]byte, error) { func (b *BlobPointer) UnmarshalFromBinary(blobPointerData []byte) error { err := json.Unmarshal(blobPointerData, b) if err != nil { - return fmt.Errorf("unable to convert avail_Blk_Ref bytes to AvailBlockRef Struct and getting error:%v", err) + return fmt.Errorf("unable to convert avail_Blk_Ref bytes to AvailBlockRef Struct and getting error:%w", err) } return nil } diff --git a/das/avail/config.go b/das/avail/config.go index 378206ecfc..ea5fd3e585 100644 --- a/das/avail/config.go +++ b/das/avail/config.go @@ -2,9 +2,9 @@ package avail type DAConfig struct { Enable bool `koanf:"enable"` - ApiURL string `koanf:"api_url"` + ApiURL string `koanf:"api-url"` Seed string `koanf:"seed"` - AppID int `koanf:"app_id"` + AppID int `koanf:"app-id"` } func NewDAConfig(api_url string, seed string, app_id int) (*DAConfig, error) { From c37b82b8469ad99c28df8350888af9c10fc9a8cd Mon Sep 17 00:00:00 2001 From: rishabhagrawalzra Date: Thu, 22 Feb 2024 10:09:17 +0530 Subject: [PATCH 09/22] feat: queryDataProofV2 --- das/avail/avail.go | 67 +++++++++++++++++++++++++++++++++++++++++++--- nitro-testnode | 2 +- 2 files changed, 64 insertions(+), 5 deletions(-) diff --git a/das/avail/avail.go b/das/avail/avail.go index 6aaf0a3fe0..c38598f318 100644 --- a/das/avail/avail.go +++ b/das/avail/avail.go @@ -15,6 +15,7 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/das/dastree" "github.com/vedhavyas/go-subkey" + "golang.org/x/crypto/sha3" ) // AvailMessageHeaderFlag indicates that this data is a Blob Pointer @@ -25,6 +26,22 @@ func IsAvailMessageHeaderByte(header byte) bool { return (AvailMessageHeaderFlag & header) > 0 } +type ProofResponse struct { + DataProof DataProof `koanf:"dataProof"` + message []byte `koanf:"message"` +} + +// HeaderF struct represents response from queryDataProof +type DataProof struct { + DataRoot gsrpc_types.Hash `koanf:"dataRoot"` + BlobRoot gsrpc_types.Hash `koanf:"blobRoot"` + BridgeRoot gsrpc_types.Hash `koanf:"bridgeRoot"` + Proof []gsrpc_types.Hash `koanf:"proof"` + NumberOfLeaves int `koanf:"numberOfLeaves"` + LeafIndex int `koanf:"leafIndex"` + Leaf gsrpc_types.Hash `koanf:"leaf"` +} + type AvailDA struct { enable bool timeout time.Duration @@ -142,7 +159,7 @@ func (a *AvailDA) Store(ctx context.Context, message []byte) ([]byte, error) { return nil, err } - log.Info("✅ Tx batch is submitted to Avail", "length", len(message), "address", a.keyringPair.Address, "appID", a.appID) + log.Info("✅ Tx batch is submitted to Avail", "length", len(message), "address", a.keyringPair.Address, "appID", a.appID) defer sub.Unsubscribe() timeout := time.After(time.Duration(a.timeout) * time.Second) @@ -153,7 +170,7 @@ outer: select { case status := <-sub.Chan(): if status.IsInBlock { - log.Info("đŸ“Ĩ Submit data extrinsic included in block %v", status.AsInBlock.Hex()) + log.Info("đŸ“Ĩ Submit data 
extrinsic included in block", "blockHash", status.AsInBlock.Hex()) } if status.IsFinalized { finalizedblockHash = status.AsFinalized @@ -168,13 +185,55 @@ outer: return nil, fmt.Errorf("❌ Extrinsic invalid") } case <-timeout: - return nil, fmt.Errorf("⌛ī¸ Timeout of %d seconds reached without getting finalized status for extrinsic", a.timeout) + return nil, fmt.Errorf("⌛ī¸ Timeout of %d seconds reached without getting finalized status for extrinsic", a.timeout) + } + } + + var batchHash [32]byte + + h := sha3.NewLegacyKeccak256() + h.Write(message) + h.Sum(batchHash[:0]) + + block, err := a.api.RPC.Chain.GetBlock(finalizedblockHash) + if err != nil { + log.Warn("cannot get block: error:%v", err) + return nil, err + } + + var dataProof DataProof + for i := 1; i <= len(block.Block.Extrinsics); i++ { + // query proof + var data ProofResponse + err = a.api.Client.Call(&data, "kate_queryDataProofV2", i, finalizedblockHash) + if err != nil { + log.Warn("unable to query data proof:%v", err) + return nil, err + } + + if data.DataProof.Leaf.Hex() == fmt.Sprintf("%#x", batchHash) { + dataProof = data.DataProof + break } } + fmt.Printf("Root:%v\n", dataProof.DataRoot.Hex()) + fmt.Printf("Bridge Root:%v\n", dataProof.BridgeRoot.Hex()) + fmt.Printf("Blob Root:%v\n", dataProof.BlobRoot.Hex()) + + // print array of proof + fmt.Printf("Proof:\n") + for _, p := range dataProof.Proof { + fmt.Printf("%v\n", p.Hex()) + } + + fmt.Printf("Number of leaves: %v\n", dataProof.NumberOfLeaves) + fmt.Printf("Leaf index: %v\n", dataProof.LeafIndex) + fmt.Printf("Leaf: %v\n", dataProof.Leaf.Hex()) + blobPointer := BlobPointer{BlockHash: finalizedblockHash.Hex(), Sender: a.keyringPair.Address, Nonce: o.Nonce.Int64(), DasTreeRootHash: dastree.Hash(message)} - log.Info("✅ Sucesfully included in block data to Avail", "BlobPointer:", blobPointer) + log.Info("✅ Sucesfully included in block data to Avail", "BlobPointer:", blobPointer) blobPointerData, err := blobPointer.MarshalToBinary() if err != nil { diff --git a/nitro-testnode b/nitro-testnode index 7ad12c0f1b..c47cb8c643 160000 --- a/nitro-testnode +++ b/nitro-testnode @@ -1 +1 @@ -Subproject commit 7ad12c0f1be75a72c7360d5258e0090f8225594e +Subproject commit c47cb8c643bc8e63ff096f7f88f9152064d1532a From 894e1c167c19d3172b9ebc71a4a6c46b9e26eb3a Mon Sep 17 00:00:00 2001 From: rishabhagrawalzra Date: Tue, 5 Mar 2024 18:15:23 +0530 Subject: [PATCH 10/22] feat: avail merkle proof verification --- contracts | 2 +- das/avail/avail.go | 166 +++++++++++++++------------------- das/avail/avail_test.go | 62 +++++++++++++ das/avail/blob.go | 91 +++++++++++++++++-- das/avail/merkleProofInput.go | 153 +++++++++++++++++++++++++++++++ nitro-testnode | 2 +- 6 files changed, 373 insertions(+), 103 deletions(-) create mode 100644 das/avail/avail_test.go create mode 100644 das/avail/merkleProofInput.go diff --git a/contracts b/contracts index b16bf0b737..59463b71cb 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit b16bf0b737468382854dac28346fec8b65b55989 +Subproject commit 59463b71cb723940b5eec111e46fde28596a13a0 diff --git a/das/avail/avail.go b/das/avail/avail.go index c38598f318..4d21641605 100644 --- a/das/avail/avail.go +++ b/das/avail/avail.go @@ -1,10 +1,12 @@ package avail import ( - "bytes" "context" - "encoding/binary" + "encoding/json" "fmt" + "io" + "net/http" + "net/url" "time" @@ -26,20 +28,18 @@ func IsAvailMessageHeaderByte(header byte) bool { return (AvailMessageHeaderFlag & header) > 0 } -type ProofResponse struct { - DataProof DataProof 
`koanf:"dataProof"` - message []byte `koanf:"message"` -} - -// HeaderF struct represents response from queryDataProof -type DataProof struct { - DataRoot gsrpc_types.Hash `koanf:"dataRoot"` - BlobRoot gsrpc_types.Hash `koanf:"blobRoot"` - BridgeRoot gsrpc_types.Hash `koanf:"bridgeRoot"` - Proof []gsrpc_types.Hash `koanf:"proof"` - NumberOfLeaves int `koanf:"numberOfLeaves"` - LeafIndex int `koanf:"leafIndex"` - Leaf gsrpc_types.Hash `koanf:"leaf"` +type BridgdeApiResponse struct { + BlobRoot gsrpc_types.Hash `json:"blobRoot"` + BlockHash gsrpc_types.Hash `json:"blockHash"` + BridgeRoot gsrpc_types.Hash `json:"bridgeRoot"` + DataRoot gsrpc_types.Hash `json:"dataRoot"` + DataRootCommitment gsrpc_types.Hash `json:"dataRootCommitment"` + DataRootIndex uint64 `json:"dataRootIndex"` + DataRootProof []gsrpc_types.Hash `json:"dataRootProof"` + Leaf gsrpc_types.Hash `json:"leaf"` + LeafIndex uint64 `json:"leafIndex"` + LeafProof []gsrpc_types.Hash `json:"leafProof"` + RangeHash gsrpc_types.Hash `json:"rangeHash"` } type AvailDA struct { @@ -73,31 +73,31 @@ func NewAvailDA(cfg DAConfig) (*AvailDA, error) { meta, err := api.RPC.State.GetMetadataLatest() if err != nil { - log.Warn("cannot get metadata: error:%v", err) + log.Warn("⚠ī¸ cannot get metadata: error:%v", err) return nil, err } genesisHash, err := api.RPC.Chain.GetBlockHash(0) if err != nil { - log.Warn("cannot get block hash: error:%v", err) + log.Warn("⚠ī¸ cannot get block hash: error:%v", err) return nil, err } rv, err := api.RPC.State.GetRuntimeVersionLatest() if err != nil { - log.Warn("cannot get runtime version: error:%v", err) + log.Warn("⚠ī¸ cannot get runtime version: error:%v", err) return nil, err } keyringPair, err := signature.KeyringPairFromSecret(Seed, 42) if err != nil { - log.Warn("cannot create LeyPair: error:%v", err) + log.Warn("⚠ī¸ cannot create LeyPair: error:%v", err) return nil, err } key, err := gsrpc_types.CreateStorageKey(meta, "System", "Account", keyringPair.PublicKey) if err != nil { - log.Warn("cannot create storage key: error:%v", err) + log.Warn("⚠ī¸ cannot create storage key: error:%v", err) return nil, err } @@ -118,7 +118,7 @@ func (a *AvailDA) Store(ctx context.Context, message []byte) ([]byte, error) { c, err := gsrpc_types.NewCall(a.meta, "DataAvailability.submit_data", gsrpc_types.NewBytes(message)) if err != nil { - log.Warn("cannot create new call: error:%v", err) + log.Warn("⚠ī¸ cannot create new call: error:%v", err) return nil, err } @@ -128,7 +128,7 @@ func (a *AvailDA) Store(ctx context.Context, message []byte) ([]byte, error) { var accountInfo gsrpc_types.AccountInfo ok, err := a.api.RPC.State.GetStorageLatest(a.key, &accountInfo) if err != nil || !ok { - log.Warn("cannot get latest storage: error:%v", err) + log.Warn("⚠ī¸ cannot get latest storage: error:%v", err) return nil, err } @@ -148,14 +148,14 @@ func (a *AvailDA) Store(ctx context.Context, message []byte) ([]byte, error) { // Sign the transaction using Alice's default account err = ext.Sign(a.keyringPair, o) if err != nil { - log.Warn("cannot sign: error:%v", err) + log.Warn("⚠ī¸ cannot sign: error:%v", err) return nil, err } // Send the extrinsic sub, err := a.api.RPC.Author.SubmitAndWatchExtrinsic(ext) if err != nil { - log.Warn("cannot submit extrinsic: error:%v", err) + log.Warn("⚠ī¸ cannot submit extrinsic: error:%v", err) return nil, err } @@ -170,7 +170,7 @@ outer: select { case status := <-sub.Chan(): if status.IsInBlock { - log.Info("đŸ“Ĩ Submit data extrinsic included in block", "blockHash", status.AsInBlock.Hex()) 
+ log.Info("đŸ“Ĩ Submit data extrinsic included in block", "blockHash", status.AsInBlock.Hex()) } if status.IsFinalized { finalizedblockHash = status.AsFinalized @@ -189,95 +189,78 @@ outer: } } + // Calculated batch hash for batch commitment var batchHash [32]byte - h := sha3.NewLegacyKeccak256() h.Write(message) h.Sum(batchHash[:0]) - block, err := a.api.RPC.Chain.GetBlock(finalizedblockHash) - if err != nil { - log.Warn("cannot get block: error:%v", err) - return nil, err - } + extrinsicIndex := 1 + // Quering for merkle proof from Bridge Api + bridgeApiBaseURL := "https://bridge-api.sandbox.avail.tools" + blockHashPath := "/eth/proof/" + "0xf53613fa06b6b7f9dc5e4cf5f2849affc94e19d8a9e8999207ece01175c988ed" //+ finalizedblockHash.Hex() + params := url.Values{} + params.Add("index", fmt.Sprint(extrinsicIndex)) - var dataProof DataProof - for i := 1; i <= len(block.Block.Extrinsics); i++ { - // query proof - var data ProofResponse - err = a.api.Client.Call(&data, "kate_queryDataProofV2", i, finalizedblockHash) - if err != nil { - log.Warn("unable to query data proof:%v", err) - return nil, err - } + u, _ := url.ParseRequestURI(bridgeApiBaseURL) + u.Path = blockHashPath + u.RawQuery = params.Encode() + urlStr := fmt.Sprintf("%v", u) - if data.DataProof.Leaf.Hex() == fmt.Sprintf("%#x", batchHash) { - dataProof = data.DataProof - break - } + // TODO: Add time difference between batch submission and querying merkle proof + resp, err := http.Get(urlStr) + if err != nil { + return nil, fmt.Errorf("bridge Api request not successfull, err=%v", err) } - - fmt.Printf("Root:%v\n", dataProof.DataRoot.Hex()) - fmt.Printf("Bridge Root:%v\n", dataProof.BridgeRoot.Hex()) - fmt.Printf("Blob Root:%v\n", dataProof.BlobRoot.Hex()) - - // print array of proof - fmt.Printf("Proof:\n") - for _, p := range dataProof.Proof { - fmt.Printf("%v\n", p.Hex()) + defer resp.Body.Close() + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err } + var bridgdeApiResponse BridgdeApiResponse + json.Unmarshal(body, &bridgdeApiResponse) + var merkleProofInput MerklePoofInput = MerklePoofInput{bridgdeApiResponse.DataRootProof, bridgdeApiResponse.LeafProof, bridgdeApiResponse.RangeHash, bridgdeApiResponse.DataRootIndex, bridgdeApiResponse.BlobRoot, bridgdeApiResponse.BridgeRoot, bridgdeApiResponse.Leaf, bridgdeApiResponse.LeafIndex} - fmt.Printf("Number of leaves: %v\n", dataProof.NumberOfLeaves) - fmt.Printf("Leaf index: %v\n", dataProof.LeafIndex) - fmt.Printf("Leaf: %v\n", dataProof.Leaf.Hex()) - - blobPointer := BlobPointer{BlockHash: finalizedblockHash.Hex(), Sender: a.keyringPair.Address, Nonce: o.Nonce.Int64(), DasTreeRootHash: dastree.Hash(message)} - + //Creating BlobPointer to submit over settlement layer + blobPointer := BlobPointer{BlockHash: finalizedblockHash, Sender: a.keyringPair.Address, Nonce: nonce, DasTreeRootHash: dastree.Hash(message), MerklePoofInput: merkleProofInput} log.Info("✅ Sucesfully included in block data to Avail", "BlobPointer:", blobPointer) - blobPointerData, err := blobPointer.MarshalToBinary() if err != nil { - log.Warn("BlobPointer MashalBinary error", "err", err) + log.Warn("⚠ī¸ BlobPointer MashalBinary error", "err", err) return nil, err } - buf := new(bytes.Buffer) - err = binary.Write(buf, binary.BigEndian, AvailMessageHeaderFlag) - if err != nil { - log.Warn("batch type byte serialization failed", "err", err) - return nil, err - } + // buf := new(bytes.Buffer) + // err = binary.Write(buf, binary.BigEndian, AvailMessageHeaderFlag) + // if err != nil { + // 
log.Warn("⚠ī¸ batch type byte serialization failed", "err", err) + // return nil, err + // } - err = binary.Write(buf, binary.BigEndian, blobPointerData) - if err != nil { - log.Warn("blob pointer data serialization failed", "err", err) - return nil, err - } + // err = binary.Write(buf, binary.BigEndian, blobPointerData) + // if err != nil { + // log.Warn("⚠ī¸ blob pointer data serialization failed", "err", err) + // return nil, err + // } - serializedBlobPointerData := buf.Bytes() + // serializedBlobPointerData := buf.Bytes() - return serializedBlobPointerData, nil + return blobPointerData, nil } func (a *AvailDA) Read(ctx context.Context, blobPointer BlobPointer) ([]byte, error) { - log.Info("Requesting data from Avail", "BlobPointer", blobPointer) + log.Info("ℹī¸ Requesting data from Avail", "BlobPointer", blobPointer) //Intitializing variables - Hash := blobPointer.BlockHash + BlockHash := blobPointer.BlockHash Address := blobPointer.Sender - Nonce := blobPointer.Nonce - - // Converting this string type into gsrpc_types.hash type - blk_hash, err := gsrpc_types.NewHashFromHexString(Hash) - if err != nil { - return nil, fmt.Errorf("unable to convert string hash into types.hash, error:%v", err) - } + Nonce := gsrpc_types.NewUCompactFromUInt(uint64(blobPointer.Nonce)) // Fetching block based on block hash - avail_blk, err := a.api.RPC.Chain.GetBlock(blk_hash) + avail_blk, err := a.api.RPC.Chain.GetBlock(BlockHash) if err != nil { - return []byte{}, fmt.Errorf("cannot get block for hash:%v and getting error:%v", Hash, err) + return []byte{}, fmt.Errorf("❌ cannot get block for hash:%v and getting error:%v", BlockHash.Hex(), err) } //Extracting the required extrinsic according to the reference @@ -285,19 +268,20 @@ func (a *AvailDA) Read(ctx context.Context, blobPointer BlobPointer) ([]byte, er //Extracting sender address for extrinsic ext_Addr, err := subkey.SS58Address(ext.Signature.Signer.AsID.ToBytes(), 42) if err != nil { - log.Error("unable to get sender address from extrinsic", "err", err) + log.Error("❌ unable to get sender address from extrinsic", "err", err) } - if ext_Addr == Address && ext.Signature.Nonce.Int64() == Nonce { + + if ext_Addr == Address && ext.Signature.Nonce.Int64() == Nonce.Int64() { args := ext.Method.Args var data []byte err = codec.Decode(args, &data) if err != nil { - return []byte{}, fmt.Errorf("unable to decode the extrinsic data by address: %v with nonce: %v", Address, Nonce) + return []byte{}, fmt.Errorf("❌ unable to decode the extrinsic data by address: %v with nonce: %v", Address, Nonce) } return data, nil } } - log.Info("Succesfully fetched data from Avail") - return nil, fmt.Errorf("unable to find any extrinsic for this blobPointer:%+v", blobPointer) + log.Info("✅ Succesfully fetched data from Avail") + return nil, fmt.Errorf("❌ unable to find any extrinsic for this blobPointer:%+v", blobPointer) } diff --git a/das/avail/avail_test.go b/das/avail/avail_test.go new file mode 100644 index 0000000000..4a4e867942 --- /dev/null +++ b/das/avail/avail_test.go @@ -0,0 +1,62 @@ +package avail + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "testing" + + gsrpc_types "github.com/centrifuge/go-substrate-rpc-client/v4/types" + "github.com/ethereum/go-ethereum/common" +) + +func TestMarshallingAndUnmarshalingBlobPointer(t *testing.T) { + extrinsicIndex := 1 + bridgeApiBaseURL := "https://bridge-api.sandbox.avail.tools" + blockHashPath := "/eth/proof/" + "0xf53613fa06b6b7f9dc5e4cf5f2849affc94e19d8a9e8999207ece01175c988ed" //+ 
finalizedblockHash.Hex() + params := url.Values{} + params.Add("index", fmt.Sprint(extrinsicIndex)) + + u, _ := url.ParseRequestURI(bridgeApiBaseURL) + u.Path = blockHashPath + u.RawQuery = params.Encode() + urlStr := fmt.Sprintf("%v", u) + t.Log(urlStr) + // TODO: Add time difference between batch submission and querying merkle proof + resp, err := http.Get(urlStr) + if err != nil { + t.Fatalf("Bridge Api request not successfull, err=%v", err) + } + defer resp.Body.Close() + body, err := io.ReadAll(resp.Body) + if err != nil { + t.Fatalf("unable to read data from response body, err=%v", err) + } + var bridgdeApiResponse BridgdeApiResponse + json.Unmarshal(body, &bridgdeApiResponse) + t.Logf("%+v", bridgdeApiResponse) + var merkleProofInput MerklePoofInput = MerklePoofInput{bridgdeApiResponse.DataRootProof, bridgdeApiResponse.LeafProof, bridgdeApiResponse.RangeHash, bridgdeApiResponse.DataRootIndex, bridgdeApiResponse.BlobRoot, bridgdeApiResponse.BridgeRoot, bridgdeApiResponse.Leaf, bridgdeApiResponse.LeafIndex} + t.Logf("%+v", merkleProofInput) + + var blobPointer BlobPointer = BlobPointer{gsrpc_types.NewHash([]byte{245, 54, 19, 250, 6, 182, 183, 249, 220, 94, 76, 245, 242, 132, 154, 255, 201, 78, 25, 216, 169, 232, 153, 146, 7, 236, 224, 17, 117, 201, 136, 237}), + "5EFLq4DT8M2TpSqU3gYRf38SAn7x8Vsbiuhp72E9Ri3FQxn7", + 100, + common.HexToHash("0xf53613fa06b6b7f9dc5e4cf5f2849affc94e19d8a9e8999207ece01175c988ed"), + merkleProofInput, + } + + data, err := blobPointer.MarshalToBinary() + if err != nil { + t.Fatalf("unable to marshal blobPointer to binary, err=%v", err) + } + //t.Logf("%+v", data) + + var newBlobPointer = BlobPointer{} + if err := newBlobPointer.UnmarshalFromBinary(data[1:]); err != nil { + t.Fatalf("unable to unmarhal blobPoiter from binary, err=%v", err) + } + + t.Logf("%+v", newBlobPointer) +} diff --git a/das/avail/blob.go b/das/avail/blob.go index 51a05082a6..0c92df7539 100644 --- a/das/avail/blob.go +++ b/das/avail/blob.go @@ -1,34 +1,105 @@ package avail import ( - "encoding/json" + "bytes" + "encoding/binary" "fmt" + gsrpc_types "github.com/centrifuge/go-substrate-rpc-client/v4/types" "github.com/ethereum/go-ethereum/common" ) // BlobPointer contains the reference to the data blob on Avail type BlobPointer struct { - BlockHash string // Hash for block on avail chain - Sender string // sender address to filter extrinsic out sepecifically for this address - Nonce int64 // nonce to filter specific extrinsic - DasTreeRootHash common.Hash + BlockHash gsrpc_types.Hash // Hash for block on avail chain + Sender string // sender address to filter extrinsic out sepecifically for this address + Nonce uint32 // nonce to filter specific extrinsic + DasTreeRootHash common.Hash // Das tree root hash created when preimage is stored on das tree + MerklePoofInput MerklePoofInput // Merkle proof of the blob submission } // MarshalBinary encodes the BlobPointer to binary +// serialization format: AvailMessageHeaderFlag + MerkleProofInput + BlockHash + Sender + Nonce + DasTreeRootHash +// +// minimum size = 330 bytes +// ------------------------------------------------------------------------------------------------------------------------------------------------------------- +// +// | 1 byte | minimum bytes size = 210 | 32 byte | 48 byte | 8 byte | 32 byte | +// +// ------------------------------------------------------------------------------------------------------------------------------------------------------------- +// +// |<-- AvailMessageHeaderFlag -->|<----- MerkleProofInput 
----->|<----- BlockHash ----->|<----- Sender ----->|<----- Nonce ----->|<----- DasTreeRootHash ----->| +// +// ------------------------------------------------------------------------------------------------------------------------------------------------------------- func (b *BlobPointer) MarshalToBinary() ([]byte, error) { - blobPointerData, err := json.Marshal(b) + + buf := new(bytes.Buffer) + + //Encoding at first the avail message header flag + if err := binary.Write(buf, binary.BigEndian, AvailMessageHeaderFlag); err != nil { + fmt.Println("binary.Write failed:", err) + return []byte{}, fmt.Errorf("unable to covert the avail block referece into array of bytes and getting error:%v", err) + } + + //Marshaling in between: The Merkle proof input, which will be required for DA verification + merkleProofInput, err := b.MerklePoofInput.MarshalToBinary() if err != nil { return []byte{}, fmt.Errorf("unable to covert the avail block referece into array of bytes and getting error:%v", err) } - return blobPointerData, nil + buf.Write(merkleProofInput) + + //Encoding at last: blockHash, sender address, nonce and DASTreeRootHash which will not be required for DA verification + if err := binary.Write(buf, binary.BigEndian, b.BlockHash); err != nil { + fmt.Println("binary.Write failed:", err) + return []byte{}, fmt.Errorf("unable to covert the avail block referece into array of bytes and getting error:%v", err) + } + var senderBytes = []byte(b.Sender) + if err = binary.Write(buf, binary.BigEndian, uint8(len(senderBytes))); err != nil { + fmt.Println("binary.Write failed:", err) + return []byte{}, fmt.Errorf("unable to covert the avail block referece into array of bytes and getting error:%v", err) + } + if err = binary.Write(buf, binary.BigEndian, senderBytes); err != nil { + fmt.Println("binary.Write failed:", err) + return []byte{}, fmt.Errorf("unable to covert the avail block referece into array of bytes and getting error:%v", err) + } + if err = binary.Write(buf, binary.BigEndian, b.Nonce); err != nil { + fmt.Println("binary.Write failed:", err) + return []byte{}, fmt.Errorf("unable to covert the avail block referece into array of bytes and getting error:%v", err) + } + if err = binary.Write(buf, binary.BigEndian, b.DasTreeRootHash); err != nil { + fmt.Println("binary.Write failed:", err) + } + + return buf.Bytes(), nil } // UnmarshalBinary decodes the binary to BlobPointer func (b *BlobPointer) UnmarshalFromBinary(blobPointerData []byte) error { - err := json.Unmarshal(blobPointerData, b) - if err != nil { - return fmt.Errorf("unable to convert avail_Blk_Ref bytes to AvailBlockRef Struct and getting error:%v", err) + buf := bytes.NewReader(blobPointerData) + + if err := b.MerklePoofInput.UnmarshalFromBinary(buf); err != nil { + return err + } + + if err := binary.Read(buf, binary.BigEndian, &b.BlockHash); err != nil { + return err } + + var len uint8 + if err := binary.Read(buf, binary.BigEndian, &len); err != nil { + return err + } + var senderBytes = make([]byte, len) + if err := binary.Read(buf, binary.BigEndian, &senderBytes); err != nil { + return err + } + b.Sender = string(senderBytes) + if err := binary.Read(buf, binary.BigEndian, &b.Nonce); err != nil { + return err + } + if err := binary.Read(buf, binary.BigEndian, &b.DasTreeRootHash); err != nil { + return err + } + return nil } diff --git a/das/avail/merkleProofInput.go b/das/avail/merkleProofInput.go new file mode 100644 index 0000000000..6ddd1c6fbd --- /dev/null +++ b/das/avail/merkleProofInput.go @@ -0,0 +1,153 @@ +package avail + 
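As an aside on the blob.go encoding above: MarshalToBinary writes the one-byte AvailMessageHeaderFlag first, so a consumer has to strip that byte before calling UnmarshalFromBinary (the new avail_test.go does exactly this with data[1:]). A minimal, illustrative round trip, assuming the BlobPointer type and the IsAvailMessageHeaderByte helper added in this patch series; the function name and sample values are for demonstration only:

    package avail

    import (
        "fmt"

        gsrpc_types "github.com/centrifuge/go-substrate-rpc-client/v4/types"
        "github.com/ethereum/go-ethereum/common"
    )

    // roundTripBlobPointer encodes a BlobPointer, strips the leading
    // AvailMessageHeaderFlag byte, and decodes the remainder back.
    func roundTripBlobPointer() (*BlobPointer, error) {
        bp := BlobPointer{
            BlockHash:       gsrpc_types.NewHash(make([]byte, 32)),
            Sender:          "5EFLq4DT8M2TpSqU3gYRf38SAn7x8Vsbiuhp72E9Ri3FQxn7", // sample address from avail_test.go
            Nonce:           100,
            DasTreeRootHash: common.HexToHash("0x01"),
        }
        data, err := bp.MarshalToBinary()
        if err != nil {
            return nil, err
        }
        if len(data) == 0 || !IsAvailMessageHeaderByte(data[0]) {
            return nil, fmt.Errorf("expected Avail message header flag as first byte")
        }
        var decoded BlobPointer
        // The header flag is not a struct field, so it is skipped before decoding.
        if err := decoded.UnmarshalFromBinary(data[1:]); err != nil {
            return nil, err
        }
        return &decoded, nil
    }

Leaving the proof input at its zero value is enough for the round trip, since its DataRootProof and LeafProof slices are length-prefixed in the binary layout.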
+import ( + "bytes" + "encoding/binary" + "fmt" + + gsrpc_types "github.com/centrifuge/go-substrate-rpc-client/v4/types" +) + +type MerklePoofInput struct { + + // proof of inclusion for the data root + DataRootProof []gsrpc_types.Hash + // proof of inclusion of leaf within blob/bridge root + LeafProof []gsrpc_types.Hash + // abi.encodePacked(startBlock, endBlock) of header range commitment on vectorx + RangeHash gsrpc_types.Hash + // index of the data root in the commitment tree + DataRootIndex uint64 + // blob root to check proof against, or reconstruct the data root + BlobRoot gsrpc_types.Hash + // bridge root to check proof against, or reconstruct the data root + BridgeRoot gsrpc_types.Hash + // leaf being proven + Leaf gsrpc_types.Hash + // index of the leaf in the blob/bridge root tree + LeafIndex uint64 +} + +// MarshalBinary encodes the MerkleProofInput to binary +// serialization format: Len(DataRootProof)+ + MerkleProofInput +// minimum size = 210 bytes +// ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ +// +// | 1 byte uint8 : DataRootProof length | 32*(len) byte : DataRootProof | 1 byte uint8 : LeafProof length | 32*(len) byte : LeafProof | 32 byte : RangeHash | 8 byte uint64 : DataRootIndex | 32 byte : BlobRoot | 32 byte : BridgeRoot | 32 byte : Leaf | 8 byte uint64 : LeafIndex | +// +// <-------- len(DataRootProof) -------->|<------- DataRootProof ------->|<------- len(LeafProof) -------->|<------- LeafProof ------->|<---- RangeHash ---->|<------- DataRootIndex ------->|<---- BlobRoot ---->|<---- BridgeRoot ---->|<---- Leaf ---->|<------- LeafIndex ------->| +// +// ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ +func (i *MerklePoofInput) MarshalToBinary() ([]byte, error) { + buf := new(bytes.Buffer) + err := binary.Write(buf, binary.BigEndian, uint8(len(i.DataRootProof))) + if err != nil { + return []byte{}, fmt.Errorf("unable to covert the merkle proof input into array of bytes and getting error:%v", err) + } + + err = binary.Write(buf, binary.BigEndian, i.DataRootProof) + if err != nil { + fmt.Println("binary.Write failed:", err) + return []byte{}, fmt.Errorf("unable to covert the merkle proof input into array of bytes and getting error:%v", err) + } + + err = binary.Write(buf, binary.BigEndian, uint8(len(i.LeafProof))) + if err != nil { + fmt.Println("binary.Write failed:", err) + return []byte{}, fmt.Errorf("unable to covert the merkle proof input into array of bytes and getting error:%v", err) + } + + err = binary.Write(buf, binary.BigEndian, i.LeafProof) + if err != nil { + fmt.Println("binary.Write failed:", err) + return []byte{}, fmt.Errorf("unable to covert the merkle proof input into array of bytes and getting error:%v", err) + } + + err = binary.Write(buf, binary.BigEndian, i.RangeHash) + if err != nil { + fmt.Println("binary.Write failed:", err) + return []byte{}, fmt.Errorf("unable to covert the merkle proof input into array of bytes and getting error:%v", err) + } + + err = binary.Write(buf, binary.BigEndian, i.DataRootIndex) + if err != nil { + fmt.Println("binary.Write failed:", err) + return []byte{}, 
fmt.Errorf("unable to covert the merkle proof input into array of bytes and getting error:%v", err) + } + + err = binary.Write(buf, binary.BigEndian, i.BlobRoot) + if err != nil { + fmt.Println("binary.Write failed:", err) + return []byte{}, fmt.Errorf("unable to covert the merkle proof input into array of bytes and getting error:%v", err) + } + + err = binary.Write(buf, binary.BigEndian, i.BridgeRoot) + if err != nil { + fmt.Println("binary.Write failed:", err) + return []byte{}, fmt.Errorf("unable to covert the merkle proof input into array of bytes and getting error:%v", err) + } + + err = binary.Write(buf, binary.BigEndian, i.Leaf) + if err != nil { + fmt.Println("binary.Write failed:", err) + return []byte{}, fmt.Errorf("unable to covert the merkle proof input into array of bytes and getting error:%v", err) + } + + err = binary.Write(buf, binary.BigEndian, i.LeafIndex) + if err != nil { + fmt.Println("binary.Write failed:", err) + return []byte{}, fmt.Errorf("unable to covert the merkle proof input into array of bytes and getting error:%v", err) + } + + return buf.Bytes(), nil +} + +func (m *MerklePoofInput) UnmarshalFromBinary(buf *bytes.Reader) error { + var len uint8 + if err := binary.Read(buf, binary.BigEndian, &len); err != nil { + return err + } + + m.DataRootProof = make([]gsrpc_types.Hash, len) + for i := uint8(0); i < len; i++ { + if err := binary.Read(buf, binary.BigEndian, &m.DataRootProof[i]); err != nil { + return err + } + } + + if err := binary.Read(buf, binary.BigEndian, &len); err != nil { + return err + } + m.LeafProof = make([]gsrpc_types.Hash, len) + for i := uint8(0); i < len; i++ { + if err := binary.Read(buf, binary.BigEndian, &m.LeafProof[i]); err != nil { + return err + } + } + + if err := binary.Read(buf, binary.BigEndian, &m.RangeHash); err != nil { + return err + } + + if err := binary.Read(buf, binary.BigEndian, &m.DataRootIndex); err != nil { + return err + } + + if err := binary.Read(buf, binary.BigEndian, &m.BlobRoot); err != nil { + return err + } + + if err := binary.Read(buf, binary.BigEndian, &m.BridgeRoot); err != nil { + return err + } + + if err := binary.Read(buf, binary.BigEndian, &m.Leaf); err != nil { + return err + } + + if err := binary.Read(buf, binary.BigEndian, &m.LeafIndex); err != nil { + return err + } + + return nil +} diff --git a/nitro-testnode b/nitro-testnode index c47cb8c643..05587bc616 160000 --- a/nitro-testnode +++ b/nitro-testnode @@ -1 +1 @@ -Subproject commit c47cb8c643bc8e63ff096f7f88f9152064d1532a +Subproject commit 05587bc6169ef9b5ba2a800ac41c578777941bad From 1fd2a4f8fa8c36cdfee6824a85631bfaa7f308e8 Mon Sep 17 00:00:00 2001 From: rishabhagrawalzra Date: Wed, 6 Mar 2024 21:56:18 +0530 Subject: [PATCH 11/22] fix: avail logs --- contracts | 2 +- das/avail/avail.go | 6 +++--- nitro-testnode | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/contracts b/contracts index 59463b71cb..b16bf0b737 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit 59463b71cb723940b5eec111e46fde28596a13a0 +Subproject commit b16bf0b737468382854dac28346fec8b65b55989 diff --git a/das/avail/avail.go b/das/avail/avail.go index 4d21641605..4854f9a50a 100644 --- a/das/avail/avail.go +++ b/das/avail/avail.go @@ -159,7 +159,7 @@ func (a *AvailDA) Store(ctx context.Context, message []byte) ([]byte, error) { return nil, err } - log.Info("✅ Tx batch is submitted to Avail", "length", len(message), "address", a.keyringPair.Address, "appID", a.appID) + log.Info("✅ Tx batch is submitted to Avail", "length", len(message), 
"address", a.keyringPair.Address, "appID", a.appID) defer sub.Unsubscribe() timeout := time.After(time.Duration(a.timeout) * time.Second) @@ -223,7 +223,7 @@ outer: //Creating BlobPointer to submit over settlement layer blobPointer := BlobPointer{BlockHash: finalizedblockHash, Sender: a.keyringPair.Address, Nonce: nonce, DasTreeRootHash: dastree.Hash(message), MerklePoofInput: merkleProofInput} - log.Info("✅ Sucesfully included in block data to Avail", "BlobPointer:", blobPointer) + log.Info("✅ Sucesfully included in block data to Avail", "BlobPointer:", blobPointer) blobPointerData, err := blobPointer.MarshalToBinary() if err != nil { log.Warn("⚠ī¸ BlobPointer MashalBinary error", "err", err) @@ -282,6 +282,6 @@ func (a *AvailDA) Read(ctx context.Context, blobPointer BlobPointer) ([]byte, er } } - log.Info("✅ Succesfully fetched data from Avail") + log.Info("✅ Succesfully fetched data from Avail") return nil, fmt.Errorf("❌ unable to find any extrinsic for this blobPointer:%+v", blobPointer) } diff --git a/nitro-testnode b/nitro-testnode index 05587bc616..7ad12c0f1b 160000 --- a/nitro-testnode +++ b/nitro-testnode @@ -1 +1 @@ -Subproject commit 05587bc6169ef9b5ba2a800ac41c578777941bad +Subproject commit 7ad12c0f1be75a72c7360d5258e0090f8225594e From ee1429133f20cb2d8888d97fbd4b61f0c9cd3ded Mon Sep 17 00:00:00 2001 From: rishabhagrawalzra Date: Sun, 10 Mar 2024 23:52:30 +0530 Subject: [PATCH 12/22] chore: go dependencies --- go.mod | 3 ++- go.sum | 7 ++----- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 8000fe4853..7e84b118df 100644 --- a/go.mod +++ b/go.mod @@ -20,7 +20,7 @@ require ( github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.10 github.com/aws/aws-sdk-go-v2/service/s3 v1.26.9 github.com/cavaliergopher/grab/v3 v3.0.1 - github.com/centrifuge/go-substrate-rpc-client/v4 v4.1.0 + github.com/centrifuge/go-substrate-rpc-client/v4 v4.2.1 github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593 github.com/codeclysm/extract/v3 v3.0.2 github.com/dgraph-io/badger/v3 v3.2103.2 @@ -226,6 +226,7 @@ require ( github.com/miekg/dns v1.1.53 // indirect github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect + github.com/mimoo/StrobeGo v0.0.0-20210601165009-122bf33a46e0 // indirect github.com/minio/sha256-simd v1.0.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect diff --git a/go.sum b/go.sum index 9a62bc8da0..a200b0d27a 100644 --- a/go.sum +++ b/go.sum @@ -214,7 +214,6 @@ github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XL github.com/cheekybits/genny v1.0.0/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/logex v1.2.0/go.mod h1:9+9sk7u7pGNWYMkh0hdiL++6OeibzJccyQU4p4MedaY= -github.com/chzyer/logex v1.2.0/go.mod h1:9+9sk7u7pGNWYMkh0hdiL++6OeibzJccyQU4p4MedaY= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/readline v1.5.0/go.mod h1:x22KAscuvRqlLoK9CsoYsmxoXZMMFVyOl86cAH8qUic= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= @@ -1255,6 +1254,8 @@ github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b/go.mod h1:lxPUiZwKo github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc 
h1:PTfri+PuQmWDqERdnNMiD9ZejrlswWrCpBEZgWOiTrc= github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc/go.mod h1:cGKTAVKx4SxOuR/czcZ/E2RSJ3sfHs8FpHhQ5CWMf9s= github.com/mimoo/StrobeGo v0.0.0-20181016162300-f8f6d4d2b643/go.mod h1:43+3pMjjKimDBf5Kr4ZFNGbLql1zKkbImw+fZbw3geM= +github.com/mimoo/StrobeGo v0.0.0-20210601165009-122bf33a46e0 h1:QRUSJEgZn2Snx0EmT/QLXibWjSUDjKWvXIT19NBVp94= +github.com/mimoo/StrobeGo v0.0.0-20210601165009-122bf33a46e0/go.mod h1:43+3pMjjKimDBf5Kr4ZFNGbLql1zKkbImw+fZbw3geM= github.com/mimoo/StrobeGo v0.0.0-20220103164710-9a04d6ca976b h1:QrHweqAtyJ9EwCaGHBu1fghwxIPiopAHV06JlXrMHjk= github.com/mimoo/StrobeGo v0.0.0-20220103164710-9a04d6ca976b/go.mod h1:xxLb2ip6sSUts3g1irPVHyk/DGslwQsNOo9I7smJfNU= github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= @@ -2036,7 +2037,6 @@ golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -2064,7 +2064,6 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= @@ -2075,8 +2074,6 @@ golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= -golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= From 00d3915f778e98be93b0d8d66b12ea4a488f09c0 Mon Sep 17 00:00:00 2001 From: rishabhagrawalzra Date: Fri, 22 Mar 2024 12:04:16 +0530 Subject: [PATCH 13/22] fix: misspell of merkleProofInput --- contracts | 2 +- das/avail/avail.go | 4 ++-- das/avail/avail_test.go | 4 ++-- das/avail/blob.go | 14 +++++++------- das/avail/merkleProofInput.go | 6 
+++--- 5 files changed, 15 insertions(+), 15 deletions(-) diff --git a/contracts b/contracts index 7c46876077..6939c0b149 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit 7c46876077c6353c7ebdf9cd364710d357fa3914 +Subproject commit 6939c0b149255339e9ebacfc709025a82a209165 diff --git a/das/avail/avail.go b/das/avail/avail.go index 55e46c3753..4bf5f6c6e0 100644 --- a/das/avail/avail.go +++ b/das/avail/avail.go @@ -222,10 +222,10 @@ outer: if err != nil { return nil, err } - var merkleProofInput MerklePoofInput = MerklePoofInput{bridgdeApiResponse.DataRootProof, bridgdeApiResponse.LeafProof, bridgdeApiResponse.RangeHash, bridgdeApiResponse.DataRootIndex, bridgdeApiResponse.BlobRoot, bridgdeApiResponse.BridgeRoot, bridgdeApiResponse.Leaf, bridgdeApiResponse.LeafIndex} + var merkleProofInput MerkleProofInput = MerkleProofInput{bridgdeApiResponse.DataRootProof, bridgdeApiResponse.LeafProof, bridgdeApiResponse.RangeHash, bridgdeApiResponse.DataRootIndex, bridgdeApiResponse.BlobRoot, bridgdeApiResponse.BridgeRoot, bridgdeApiResponse.Leaf, bridgdeApiResponse.LeafIndex} // Creating BlobPointer to submit over settlement layer - blobPointer := BlobPointer{BlockHash: finalizedblockHash, Sender: a.keyringPair.Address, Nonce: nonce, DasTreeRootHash: dastree.Hash(message), MerklePoofInput: merkleProofInput} + blobPointer := BlobPointer{BlockHash: finalizedblockHash, Sender: a.keyringPair.Address, Nonce: nonce, DasTreeRootHash: dastree.Hash(message), MerkleProofInput: merkleProofInput} log.Info("✅ Sucesfully included in block data to Avail", "BlobPointer:", blobPointer) blobPointerData, err := blobPointer.MarshalToBinary() if err != nil { diff --git a/das/avail/avail_test.go b/das/avail/avail_test.go index 3e39b829ca..5027ef7e91 100644 --- a/das/avail/avail_test.go +++ b/das/avail/avail_test.go @@ -40,7 +40,7 @@ func TestMarshallingAndUnmarshalingBlobPointer(t *testing.T) { t.Fatalf("unable to unmarshal bridge api response, err=%v", err) } t.Logf("%+v", bridgdeApiResponse) - var merkleProofInput MerklePoofInput = MerklePoofInput{bridgdeApiResponse.DataRootProof, bridgdeApiResponse.LeafProof, bridgdeApiResponse.RangeHash, bridgdeApiResponse.DataRootIndex, bridgdeApiResponse.BlobRoot, bridgdeApiResponse.BridgeRoot, bridgdeApiResponse.Leaf, bridgdeApiResponse.LeafIndex} + var merkleProofInput MerkleProofInput = MerkleProofInput{bridgdeApiResponse.DataRootProof, bridgdeApiResponse.LeafProof, bridgdeApiResponse.RangeHash, bridgdeApiResponse.DataRootIndex, bridgdeApiResponse.BlobRoot, bridgdeApiResponse.BridgeRoot, bridgdeApiResponse.Leaf, bridgdeApiResponse.LeafIndex} t.Logf("%+v", merkleProofInput) var blobPointer BlobPointer = BlobPointer{gsrpc_types.NewHash([]byte{245, 54, 19, 250, 6, 182, 183, 249, 220, 94, 76, 245, 242, 132, 154, 255, 201, 78, 25, 216, 169, 232, 153, 146, 7, 236, 224, 17, 117, 201, 136, 237}), @@ -54,7 +54,7 @@ func TestMarshallingAndUnmarshalingBlobPointer(t *testing.T) { if err != nil { t.Fatalf("unable to marshal blobPointer to binary, err=%v", err) } - // t.Logf("%+v", data) + t.Logf("%x", data) var newBlobPointer = BlobPointer{} if err := newBlobPointer.UnmarshalFromBinary(data[1:]); err != nil { diff --git a/das/avail/blob.go b/das/avail/blob.go index d80397e587..1e3fa90713 100644 --- a/das/avail/blob.go +++ b/das/avail/blob.go @@ -11,11 +11,11 @@ import ( // BlobPointer contains the reference to the data blob on Avail type BlobPointer struct { - BlockHash gsrpc_types.Hash // Hash for block on avail chain - Sender string // sender address to filter extrinsic out 
sepecifically for this address - Nonce uint32 // nonce to filter specific extrinsic - DasTreeRootHash common.Hash // Das tree root hash created when preimage is stored on das tree - MerklePoofInput MerklePoofInput // Merkle proof of the blob submission + BlockHash gsrpc_types.Hash // Hash for block on avail chain + Sender string // sender address to filter extrinsic out sepecifically for this address + Nonce uint32 // nonce to filter specific extrinsic + DasTreeRootHash common.Hash // Das tree root hash created when preimage is stored on das tree + MerkleProofInput MerkleProofInput // Merkle proof of the blob submission } // MarshalBinary encodes the BlobPointer to binary @@ -42,7 +42,7 @@ func (b *BlobPointer) MarshalToBinary() ([]byte, error) { } // Marshaling in between: The Merkle proof input, which will be required for DA verification - merkleProofInput, err := b.MerklePoofInput.MarshalToBinary() + merkleProofInput, err := b.MerkleProofInput.MarshalToBinary() if err != nil { return []byte{}, fmt.Errorf("unable to covert the avail block referece into array of bytes and getting error:%w", err) } @@ -77,7 +77,7 @@ func (b *BlobPointer) MarshalToBinary() ([]byte, error) { func (b *BlobPointer) UnmarshalFromBinary(blobPointerData []byte) error { buf := bytes.NewReader(blobPointerData) - if err := b.MerklePoofInput.UnmarshalFromBinary(buf); err != nil { + if err := b.MerkleProofInput.UnmarshalFromBinary(buf); err != nil { return err } diff --git a/das/avail/merkleProofInput.go b/das/avail/merkleProofInput.go index 82f1d83e02..d664c53dad 100644 --- a/das/avail/merkleProofInput.go +++ b/das/avail/merkleProofInput.go @@ -8,7 +8,7 @@ import ( gsrpc_types "github.com/centrifuge/go-substrate-rpc-client/v4/types" ) -type MerklePoofInput struct { +type MerkleProofInput struct { // proof of inclusion for the data root DataRootProof []gsrpc_types.Hash @@ -38,7 +38,7 @@ type MerklePoofInput struct { // <-------- len(DataRootProof) -------->|<------- DataRootProof ------->|<------- len(LeafProof) -------->|<------- LeafProof ------->|<---- RangeHash ---->|<------- DataRootIndex ------->|<---- BlobRoot ---->|<---- BridgeRoot ---->|<---- Leaf ---->|<------- LeafIndex ------->| // // ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ -func (i *MerklePoofInput) MarshalToBinary() ([]byte, error) { +func (i *MerkleProofInput) MarshalToBinary() ([]byte, error) { buf := new(bytes.Buffer) err := binary.Write(buf, binary.BigEndian, uint8(len(i.DataRootProof))) if err != nil { @@ -102,7 +102,7 @@ func (i *MerklePoofInput) MarshalToBinary() ([]byte, error) { return buf.Bytes(), nil } -func (m *MerklePoofInput) UnmarshalFromBinary(buf *bytes.Reader) error { +func (m *MerkleProofInput) UnmarshalFromBinary(buf *bytes.Reader) error { var len uint8 if err := binary.Read(buf, binary.BigEndian, &len); err != nil { return err From 02aa05057efb1238ad83009ff4730ecca432719a Mon Sep 17 00:00:00 2001 From: rishabhagrawalzra Date: Tue, 2 Apr 2024 09:59:37 +0530 Subject: [PATCH 14/22] chore: refactor avail package --- arbnode/node.go | 2 +- contracts | 2 +- das/avail/avail.go | 71 ++++------------------------------------ das/avail/utils.go | 80 ++++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 89 insertions(+), 66 deletions(-) diff --git a/arbnode/node.go b/arbnode/node.go index 
541450a320..40fc3719ad 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -538,7 +538,7 @@ func createNodeImpl( } else if l2Config.ArbitrumChainParams.DataAvailabilityCommittee { return nil, errors.New("a data availability service is required for this chain, but it was not configured") } else if config.Avail.Enable { - availService, err := avail.NewAvailDA(config.Avail) + availService, err := avail.NewAvailDA(config.Avail, l1client) if err != nil { return nil, err } diff --git a/contracts b/contracts index 6939c0b149..ba35ff3220 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit 6939c0b149255339e9ebacfc709025a82a209165 +Subproject commit ba35ff32209a677314f462580bc3abe999919aa1 diff --git a/das/avail/avail.go b/das/avail/avail.go index 4bf5f6c6e0..6ce4544747 100644 --- a/das/avail/avail.go +++ b/das/avail/avail.go @@ -2,11 +2,7 @@ package avail import ( "context" - "encoding/json" "fmt" - "io" - "net/http" - "net/url" "time" @@ -15,9 +11,9 @@ import ( gsrpc_types "github.com/centrifuge/go-substrate-rpc-client/v4/types" "github.com/centrifuge/go-substrate-rpc-client/v4/types/codec" "github.com/ethereum/go-ethereum/log" + "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/das/dastree" "github.com/vedhavyas/go-subkey" - "golang.org/x/crypto/sha3" ) // AvailMessageHeaderFlag indicates that this data is a Blob Pointer @@ -28,22 +24,9 @@ func IsAvailMessageHeaderByte(header byte) bool { return (AvailMessageHeaderFlag & header) > 0 } -type BridgdeApiResponse struct { - BlobRoot gsrpc_types.Hash `json:"blobRoot"` - BlockHash gsrpc_types.Hash `json:"blockHash"` - BridgeRoot gsrpc_types.Hash `json:"bridgeRoot"` - DataRoot gsrpc_types.Hash `json:"dataRoot"` - DataRootCommitment gsrpc_types.Hash `json:"dataRootCommitment"` - DataRootIndex uint64 `json:"dataRootIndex"` - DataRootProof []gsrpc_types.Hash `json:"dataRootProof"` - Leaf gsrpc_types.Hash `json:"leaf"` - LeafIndex uint64 `json:"leafIndex"` - LeafProof []gsrpc_types.Hash `json:"leafProof"` - RangeHash gsrpc_types.Hash `json:"rangeHash"` -} - type AvailDA struct { enable bool + l1Client arbutil.L1Interface timeout time.Duration appID int api *gsrpc.SubstrateAPI @@ -54,7 +37,7 @@ type AvailDA struct { key gsrpc_types.StorageKey } -func NewAvailDA(cfg DAConfig) (*AvailDA, error) { +func NewAvailDA(cfg DAConfig, l1Client arbutil.L1Interface) (*AvailDA, error) { Seed := cfg.Seed AppID := cfg.AppID @@ -103,6 +86,7 @@ func NewAvailDA(cfg DAConfig) (*AvailDA, error) { return &AvailDA{ enable: cfg.Enable, + l1Client: l1Client, timeout: cfg.Timeout, appID: appID, api: api, @@ -189,40 +173,15 @@ outer: } } - // Calculated batch hash for batch commitment - var batchHash [32]byte - h := sha3.NewLegacyKeccak256() - h.Write(message) - h.Sum(batchHash[:0]) - - extrinsicIndex := 1 - // Quering for merkle proof from Bridge Api - bridgeApiBaseURL := "https://bridge-api.sandbox.avail.tools" - blockHashPath := "/eth/proof/" + "0xf53613fa06b6b7f9dc5e4cf5f2849affc94e19d8a9e8999207ece01175c988ed" //+ finalizedblockHash.Hex() - params := url.Values{} - params.Add("index", fmt.Sprint(extrinsicIndex)) - - u, _ := url.ParseRequestURI(bridgeApiBaseURL) - u.Path = blockHashPath - u.RawQuery = params.Encode() - urlStr := fmt.Sprintf("%v", u) - - // TODO: Add time difference between batch submission and querying merkle proof - resp, err := http.Get(urlStr) //nolint - if err != nil { - return nil, fmt.Errorf("bridge Api request not successfull, err=%w", err) - } - defer resp.Body.Close() - body, err := io.ReadAll(resp.Body) + 
extrinsicIndex, err := GetExtrinsicIndex(a.api, finalizedblockHash, a.keyringPair.Address, o.Nonce) if err != nil { return nil, err } - var bridgdeApiResponse BridgdeApiResponse - err = json.Unmarshal(body, &bridgdeApiResponse) + + merkleProofInput, err := QueryMerkleProofInput(finalizedblockHash.Hex(), extrinsicIndex) if err != nil { return nil, err } - var merkleProofInput MerkleProofInput = MerkleProofInput{bridgdeApiResponse.DataRootProof, bridgdeApiResponse.LeafProof, bridgdeApiResponse.RangeHash, bridgdeApiResponse.DataRootIndex, bridgdeApiResponse.BlobRoot, bridgdeApiResponse.BridgeRoot, bridgdeApiResponse.Leaf, bridgdeApiResponse.LeafIndex} // Creating BlobPointer to submit over settlement layer blobPointer := BlobPointer{BlockHash: finalizedblockHash, Sender: a.keyringPair.Address, Nonce: nonce, DasTreeRootHash: dastree.Hash(message), MerkleProofInput: merkleProofInput} @@ -233,23 +192,7 @@ outer: return nil, err } - // buf := new(bytes.Buffer) - // err = binary.Write(buf, binary.BigEndian, AvailMessageHeaderFlag) - // if err != nil { - // log.Warn("⚠ī¸ batch type byte serialization failed", "err", err) - // return nil, err - // } - - // err = binary.Write(buf, binary.BigEndian, blobPointerData) - // if err != nil { - // log.Warn("⚠ī¸ blob pointer data serialization failed", "err", err) - // return nil, err - // } - - // serializedBlobPointerData := buf.Bytes() - return blobPointerData, nil - } func (a *AvailDA) Read(ctx context.Context, blobPointer BlobPointer) ([]byte, error) { diff --git a/das/avail/utils.go b/das/avail/utils.go index d54447eb72..5bbc948ed3 100644 --- a/das/avail/utils.go +++ b/das/avail/utils.go @@ -1,5 +1,18 @@ package avail +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + + gsrpc "github.com/centrifuge/go-substrate-rpc-client/v4" + gsrpc_types "github.com/centrifuge/go-substrate-rpc-client/v4/types" + "github.com/ethereum/go-ethereum/log" + "github.com/vedhavyas/go-subkey" +) + var localNonce uint32 = 0 func GetAccountNonce(accountNonce uint32) uint32 { @@ -10,3 +23,70 @@ func GetAccountNonce(accountNonce uint32) uint32 { localNonce++ return localNonce } + +func GetExtrinsicIndex(api *gsrpc.SubstrateAPI, blockHash gsrpc_types.Hash, address string, nonce gsrpc_types.UCompact) (int, error) { + // Fetching block based on block hash + avail_blk, err := api.RPC.Chain.GetBlock(blockHash) + if err != nil { + return -1, fmt.Errorf("❌ cannot get block for hash:%v and getting error:%w", blockHash.Hex(), err) + } + + // Extracting the required extrinsic according to the reference + for i, ext := range avail_blk.Block.Extrinsics { + // Extracting sender address for extrinsic + ext_Addr, err := subkey.SS58Address(ext.Signature.Signer.AsID.ToBytes(), 42) + if err != nil { + log.Error("❌ unable to get sender address from extrinsic", "err", err) + } + + if ext_Addr == address && ext.Signature.Nonce.Int64() == nonce.Int64() { + return i, nil + } + } + return -1, fmt.Errorf("❌ unable to find any extrinsic in block %v, from address %v with nonce %v", blockHash, address, nonce) +} + +type BridgdeApiResponse struct { + BlobRoot gsrpc_types.Hash `json:"blobRoot"` + BlockHash gsrpc_types.Hash `json:"blockHash"` + BridgeRoot gsrpc_types.Hash `json:"bridgeRoot"` + DataRoot gsrpc_types.Hash `json:"dataRoot"` + DataRootCommitment gsrpc_types.Hash `json:"dataRootCommitment"` + DataRootIndex uint64 `json:"dataRootIndex"` + DataRootProof []gsrpc_types.Hash `json:"dataRootProof"` + Leaf gsrpc_types.Hash `json:"leaf"` + LeafIndex uint64 `json:"leafIndex"` + LeafProof 
[]gsrpc_types.Hash `json:"leafProof"` + RangeHash gsrpc_types.Hash `json:"rangeHash"` +} + +func QueryMerkleProofInput(blockHash string, extrinsicIndex int) (MerkleProofInput, error) { + // Quering for merkle proof from Bridge Api + bridgeApiBaseURL := "https://hex-bridge-api.sandbox.avail.tools/" + blockHashPath := "/eth/proof/" + blockHash + params := url.Values{} + params.Add("index", fmt.Sprint(extrinsicIndex)) + + u, _ := url.ParseRequestURI(bridgeApiBaseURL) + u.Path = blockHashPath + u.RawQuery = params.Encode() + urlStr := fmt.Sprintf("%v", u) + + // TODO: Add time difference between batch submission and querying merkle proof + resp, err := http.Get(urlStr) //nolint + if err != nil { + return MerkleProofInput{}, fmt.Errorf("bridge Api request not successfull, err=%w", err) + } + defer resp.Body.Close() + body, err := io.ReadAll(resp.Body) + if err != nil { + return MerkleProofInput{}, err + } + var bridgdeApiResponse BridgdeApiResponse + err = json.Unmarshal(body, &bridgdeApiResponse) + if err != nil { + return MerkleProofInput{}, err + } + var merkleProofInput MerkleProofInput = MerkleProofInput{bridgdeApiResponse.DataRootProof, bridgdeApiResponse.LeafProof, bridgdeApiResponse.RangeHash, bridgdeApiResponse.DataRootIndex, bridgdeApiResponse.BlobRoot, bridgdeApiResponse.BridgeRoot, bridgdeApiResponse.Leaf, bridgdeApiResponse.LeafIndex} + return merkleProofInput, nil +} From 961359c0459402b7476a99a0b579eb22583f74a9 Mon Sep 17 00:00:00 2001 From: rishabhagrawalzra Date: Thu, 4 Apr 2024 09:56:07 +0530 Subject: [PATCH 15/22] feat: vectorx in avail --- das/avail/avail.go | 53 +- das/avail/config.go | 1 + das/avail/vectorx/abi/VectorX.abi.json | 887 +++++++++++++++++++++++++ das/avail/vectorx/vectorx.go | 50 ++ 4 files changed, 989 insertions(+), 2 deletions(-) create mode 100644 das/avail/vectorx/abi/VectorX.abi.json create mode 100644 das/avail/vectorx/vectorx.go diff --git a/das/avail/avail.go b/das/avail/avail.go index 6ce4544747..4cf33a0f34 100644 --- a/das/avail/avail.go +++ b/das/avail/avail.go @@ -3,6 +3,8 @@ package avail import ( "context" "fmt" + "os" + "strings" "time" @@ -10,8 +12,13 @@ import ( "github.com/centrifuge/go-substrate-rpc-client/v4/signature" gsrpc_types "github.com/centrifuge/go-substrate-rpc-client/v4/types" "github.com/centrifuge/go-substrate-rpc-client/v4/types/codec" + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbutil" + "github.com/offchainlabs/nitro/das/avail/vectorx" "github.com/offchainlabs/nitro/das/dastree" "github.com/vedhavyas/go-subkey" ) @@ -26,7 +33,7 @@ func IsAvailMessageHeaderByte(header byte) bool { type AvailDA struct { enable bool - l1Client arbutil.L1Interface + vectorx vectorx.VectorX timeout time.Duration appID int api *gsrpc.SubstrateAPI @@ -84,9 +91,34 @@ func NewAvailDA(cfg DAConfig, l1Client arbutil.L1Interface) (*AvailDA, error) { return nil, err } + // Contract address + contractAddress := common.HexToAddress(cfg.VectorX) + + // Contract ABI (Application Binary Interface) + // Replace this with your contract's ABI + byteValue, err := os.ReadFile("./abi/vectorx.abi.json") + if err != nil { + log.Warn("⚠ī¸ cannot read abi for vectorX: error:%v", err) + return nil, err + } + vectorxABI := string(byteValue) + + // Parse the contract ABI + abi, err := abi.JSON(strings.NewReader(vectorxABI)) + if err != nil { + log.Warn("⚠ī¸ cannot create 
abi for vectorX: error:%v", err) + return nil, err + } + + // Create a filter query to listen for events + query := ethereum.FilterQuery{ + Addresses: []common.Address{contractAddress}, + Topics: [][]common.Hash{{abi.Events["HeadUpdate"].ID}}, + } + return &AvailDA{ enable: cfg.Enable, - l1Client: l1Client, + vectorx: vectorx.VectorX{Abi: abi, Client: *ethclient.NewClient(l1Client.Client()), Query: query}, timeout: cfg.Timeout, appID: appID, api: api, @@ -173,6 +205,23 @@ outer: } } + header, err := a.api.RPC.Chain.GetHeader(finalizedblockHash) + if err != nil { + return nil, fmt.Errorf("cannot get header:%+v", err) + } + + finalizedBlockNumber := header.Number +subs: + for { + blockNumber, err := a.vectorx.subscribeForHeaderUpdate() + if err != nil { + + } + if finalizedBlockNumber <= blockNumber { + break subs + } + } + extrinsicIndex, err := GetExtrinsicIndex(a.api, finalizedblockHash, a.keyringPair.Address, o.Nonce) if err != nil { return nil, err diff --git a/das/avail/config.go b/das/avail/config.go index ebd1071914..c56dbe1804 100644 --- a/das/avail/config.go +++ b/das/avail/config.go @@ -8,6 +8,7 @@ type DAConfig struct { Seed string `koanf:"seed"` AppID int `koanf:"app-id"` Timeout time.Duration `koanf:"timeout"` + VectorX string `koanf:"vectorx"` } func NewDAConfig(api_url string, seed string, app_id int, timeout time.Duration) (*DAConfig, error) { diff --git a/das/avail/vectorx/abi/VectorX.abi.json b/das/avail/vectorx/abi/VectorX.abi.json new file mode 100644 index 0000000000..c8667484e8 --- /dev/null +++ b/das/avail/vectorx/abi/VectorX.abi.json @@ -0,0 +1,887 @@ +[ + { + "type": "function", + "name": "DEFAULT_ADMIN_ROLE", + "inputs": [], + "outputs": [ + { + "name": "", + "type": "bytes32", + "internalType": "bytes32" + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "GUARDIAN_ROLE", + "inputs": [], + "outputs": [ + { + "name": "", + "type": "bytes32", + "internalType": "bytes32" + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "TIMELOCK_ROLE", + "inputs": [], + "outputs": [ + { + "name": "", + "type": "bytes32", + "internalType": "bytes32" + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "VERSION", + "inputs": [], + "outputs": [ + { + "name": "", + "type": "string", + "internalType": "string" + } + ], + "stateMutability": "pure" + }, + { + "type": "function", + "name": "authoritySetIdToHash", + "inputs": [ + { + "name": "", + "type": "uint64", + "internalType": "uint64" + } + ], + "outputs": [ + { + "name": "", + "type": "bytes32", + "internalType": "bytes32" + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "blockHeightToHeaderHash", + "inputs": [ + { + "name": "", + "type": "uint32", + "internalType": "uint32" + } + ], + "outputs": [ + { + "name": "", + "type": "bytes32", + "internalType": "bytes32" + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "commitHeaderRange", + "inputs": [ + { + "name": "_authoritySetId", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "_targetBlock", + "type": "uint32", + "internalType": "uint32" + } + ], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "dataRootCommitments", + "inputs": [ + { + "name": "", + "type": "bytes32", + "internalType": "bytes32" + } + ], + "outputs": [ + { + "name": "", + "type": "bytes32", + "internalType": "bytes32" + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "frozen", + "inputs": [], + 
"outputs": [ + { + "name": "", + "type": "bool", + "internalType": "bool" + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "gateway", + "inputs": [], + "outputs": [ + { + "name": "", + "type": "address", + "internalType": "address" + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "getRoleAdmin", + "inputs": [ + { + "name": "role", + "type": "bytes32", + "internalType": "bytes32" + } + ], + "outputs": [ + { + "name": "", + "type": "bytes32", + "internalType": "bytes32" + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "grantRole", + "inputs": [ + { + "name": "role", + "type": "bytes32", + "internalType": "bytes32" + }, + { + "name": "account", + "type": "address", + "internalType": "address" + } + ], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "hasRole", + "inputs": [ + { + "name": "role", + "type": "bytes32", + "internalType": "bytes32" + }, + { + "name": "account", + "type": "address", + "internalType": "address" + } + ], + "outputs": [ + { + "name": "", + "type": "bool", + "internalType": "bool" + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "headerRangeFunctionId", + "inputs": [], + "outputs": [ + { + "name": "", + "type": "bytes32", + "internalType": "bytes32" + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "initialize", + "inputs": [ + { + "name": "_params", + "type": "tuple", + "internalType": "struct VectorX.InitParameters", + "components": [ + { + "name": "guardian", + "type": "address", + "internalType": "address" + }, + { + "name": "gateway", + "type": "address", + "internalType": "address" + }, + { + "name": "height", + "type": "uint32", + "internalType": "uint32" + }, + { + "name": "header", + "type": "bytes32", + "internalType": "bytes32" + }, + { + "name": "authoritySetId", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "authoritySetHash", + "type": "bytes32", + "internalType": "bytes32" + }, + { + "name": "headerRangeFunctionId", + "type": "bytes32", + "internalType": "bytes32" + }, + { + "name": "rotateFunctionId", + "type": "bytes32", + "internalType": "bytes32" + } + ] + } + ], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "latestBlock", + "inputs": [], + "outputs": [ + { + "name": "", + "type": "uint32", + "internalType": "uint32" + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "proxiableUUID", + "inputs": [], + "outputs": [ + { + "name": "", + "type": "bytes32", + "internalType": "bytes32" + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "renounceRole", + "inputs": [ + { + "name": "role", + "type": "bytes32", + "internalType": "bytes32" + }, + { + "name": "account", + "type": "address", + "internalType": "address" + } + ], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "requestHeaderRange", + "inputs": [ + { + "name": "_authoritySetId", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "_requestedBlock", + "type": "uint32", + "internalType": "uint32" + } + ], + "outputs": [], + "stateMutability": "payable" + }, + { + "type": "function", + "name": "requestRotate", + "inputs": [ + { + "name": "_currentAuthoritySetId", + "type": "uint64", + "internalType": "uint64" + } + ], + "outputs": [], + "stateMutability": "payable" + }, + { + "type": "function", + "name": "revokeRole", + "inputs": [ + { + 
"name": "role", + "type": "bytes32", + "internalType": "bytes32" + }, + { + "name": "account", + "type": "address", + "internalType": "address" + } + ], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "rotate", + "inputs": [ + { + "name": "_currentAuthoritySetId", + "type": "uint64", + "internalType": "uint64" + } + ], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "rotateFunctionId", + "inputs": [], + "outputs": [ + { + "name": "", + "type": "bytes32", + "internalType": "bytes32" + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "stateRootCommitments", + "inputs": [ + { + "name": "", + "type": "bytes32", + "internalType": "bytes32" + } + ], + "outputs": [ + { + "name": "", + "type": "bytes32", + "internalType": "bytes32" + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "supportsInterface", + "inputs": [ + { + "name": "interfaceId", + "type": "bytes4", + "internalType": "bytes4" + } + ], + "outputs": [ + { + "name": "", + "type": "bool", + "internalType": "bool" + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "updateBlockRangeData", + "inputs": [ + { + "name": "_startBlocks", + "type": "uint32[]", + "internalType": "uint32[]" + }, + { + "name": "_endBlocks", + "type": "uint32[]", + "internalType": "uint32[]" + }, + { + "name": "_headerHashes", + "type": "bytes32[]", + "internalType": "bytes32[]" + }, + { + "name": "_dataRootCommitments", + "type": "bytes32[]", + "internalType": "bytes32[]" + }, + { + "name": "_stateRootCommitments", + "type": "bytes32[]", + "internalType": "bytes32[]" + }, + { + "name": "_endAuthoritySetId", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "_endAuthoritySetHash", + "type": "bytes32", + "internalType": "bytes32" + } + ], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "updateFreeze", + "inputs": [ + { + "name": "_freeze", + "type": "bool", + "internalType": "bool" + } + ], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "updateFunctionIds", + "inputs": [ + { + "name": "_headerRangeFunctionId", + "type": "bytes32", + "internalType": "bytes32" + }, + { + "name": "_rotateFunctionId", + "type": "bytes32", + "internalType": "bytes32" + } + ], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "updateGateway", + "inputs": [ + { + "name": "_gateway", + "type": "address", + "internalType": "address" + } + ], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "updateGenesisState", + "inputs": [ + { + "name": "_height", + "type": "uint32", + "internalType": "uint32" + }, + { + "name": "_header", + "type": "bytes32", + "internalType": "bytes32" + }, + { + "name": "_authoritySetId", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "_authoritySetHash", + "type": "bytes32", + "internalType": "bytes32" + } + ], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "upgradeTo", + "inputs": [ + { + "name": "newImplementation", + "type": "address", + "internalType": "address" + } + ], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "upgradeToAndCall", + "inputs": [ + { + "name": "newImplementation", + "type": "address", + "internalType": "address" + }, + { + "name": "data", + "type": "bytes", + "internalType": "bytes" + } 
+ ], + "outputs": [], + "stateMutability": "payable" + }, + { + "type": "event", + "name": "AdminChanged", + "inputs": [ + { + "name": "previousAdmin", + "type": "address", + "indexed": false, + "internalType": "address" + }, + { + "name": "newAdmin", + "type": "address", + "indexed": false, + "internalType": "address" + } + ], + "anonymous": false + }, + { + "type": "event", + "name": "AuthoritySetStored", + "inputs": [ + { + "name": "authoritySetId", + "type": "uint64", + "indexed": false, + "internalType": "uint64" + }, + { + "name": "authoritySetHash", + "type": "bytes32", + "indexed": false, + "internalType": "bytes32" + } + ], + "anonymous": false + }, + { + "type": "event", + "name": "BeaconUpgraded", + "inputs": [ + { + "name": "beacon", + "type": "address", + "indexed": true, + "internalType": "address" + } + ], + "anonymous": false + }, + { + "type": "event", + "name": "HeadUpdate", + "inputs": [ + { + "name": "blockNumber", + "type": "uint32", + "indexed": false, + "internalType": "uint32" + }, + { + "name": "headerHash", + "type": "bytes32", + "indexed": false, + "internalType": "bytes32" + } + ], + "anonymous": false + }, + { + "type": "event", + "name": "HeaderRangeCommitmentStored", + "inputs": [ + { + "name": "startBlock", + "type": "uint32", + "indexed": false, + "internalType": "uint32" + }, + { + "name": "endBlock", + "type": "uint32", + "indexed": false, + "internalType": "uint32" + }, + { + "name": "dataCommitment", + "type": "bytes32", + "indexed": false, + "internalType": "bytes32" + }, + { + "name": "stateCommitment", + "type": "bytes32", + "indexed": false, + "internalType": "bytes32" + } + ], + "anonymous": false + }, + { + "type": "event", + "name": "HeaderRangeRequested", + "inputs": [ + { + "name": "trustedBlock", + "type": "uint32", + "indexed": false, + "internalType": "uint32" + }, + { + "name": "trustedHeader", + "type": "bytes32", + "indexed": false, + "internalType": "bytes32" + }, + { + "name": "authoritySetId", + "type": "uint64", + "indexed": false, + "internalType": "uint64" + }, + { + "name": "authoritySetHash", + "type": "bytes32", + "indexed": false, + "internalType": "bytes32" + }, + { + "name": "targetBlock", + "type": "uint32", + "indexed": false, + "internalType": "uint32" + } + ], + "anonymous": false + }, + { + "type": "event", + "name": "Initialized", + "inputs": [ + { + "name": "version", + "type": "uint8", + "indexed": false, + "internalType": "uint8" + } + ], + "anonymous": false + }, + { + "type": "event", + "name": "RoleAdminChanged", + "inputs": [ + { + "name": "role", + "type": "bytes32", + "indexed": true, + "internalType": "bytes32" + }, + { + "name": "previousAdminRole", + "type": "bytes32", + "indexed": true, + "internalType": "bytes32" + }, + { + "name": "newAdminRole", + "type": "bytes32", + "indexed": true, + "internalType": "bytes32" + } + ], + "anonymous": false + }, + { + "type": "event", + "name": "RoleGranted", + "inputs": [ + { + "name": "role", + "type": "bytes32", + "indexed": true, + "internalType": "bytes32" + }, + { + "name": "account", + "type": "address", + "indexed": true, + "internalType": "address" + }, + { + "name": "sender", + "type": "address", + "indexed": true, + "internalType": "address" + } + ], + "anonymous": false + }, + { + "type": "event", + "name": "RoleRevoked", + "inputs": [ + { + "name": "role", + "type": "bytes32", + "indexed": true, + "internalType": "bytes32" + }, + { + "name": "account", + "type": "address", + "indexed": true, + "internalType": "address" + }, + { + "name": "sender", + "type": 
"address", + "indexed": true, + "internalType": "address" + } + ], + "anonymous": false + }, + { + "type": "event", + "name": "RotateRequested", + "inputs": [ + { + "name": "currentAuthoritySetId", + "type": "uint64", + "indexed": false, + "internalType": "uint64" + }, + { + "name": "currentAuthoritySetHash", + "type": "bytes32", + "indexed": false, + "internalType": "bytes32" + } + ], + "anonymous": false + }, + { + "type": "event", + "name": "Upgraded", + "inputs": [ + { + "name": "implementation", + "type": "address", + "indexed": true, + "internalType": "address" + } + ], + "anonymous": false + }, + { + "type": "error", + "name": "AuthoritySetNotFound", + "inputs": [] + }, + { + "type": "error", + "name": "ContractFrozen", + "inputs": [] + }, + { + "type": "error", + "name": "NextAuthoritySetExists", + "inputs": [] + }, + { + "type": "error", + "name": "OnlyGuardian", + "inputs": [ + { + "name": "sender", + "type": "address", + "internalType": "address" + } + ] + }, + { + "type": "error", + "name": "OnlyTimelock", + "inputs": [ + { + "name": "sender", + "type": "address", + "internalType": "address" + } + ] + }, + { + "type": "error", + "name": "TrustedHeaderNotFound", + "inputs": [] + } +] \ No newline at end of file diff --git a/das/avail/vectorx/vectorx.go b/das/avail/vectorx/vectorx.go new file mode 100644 index 0000000000..8b0618e3f7 --- /dev/null +++ b/das/avail/vectorx/vectorx.go @@ -0,0 +1,50 @@ +package vectorx + +import ( + "context" + + "github.com/ethereum/go-ethereum/log" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethclient" +) + +type VectorX struct { + Abi abi.ABI + Client ethclient.Client + Query ethereum.FilterQuery +} + +func (v *VectorX) subscribeForHeaderUpdate() (int, error) { + // Subscribe to the event stream + logs := make(chan types.Log) + sub, err := v.Client.SubscribeFilterLogs(context.Background(), v.Query, logs) + if err != nil { + return -1, err + } + defer sub.Unsubscribe() + + log.Info("🎧 Listening for vectorx HeadUpdate event") + + // Loop to process incoming events + for { + select { + case err := <-sub.Err(): + return -1, err + case vLog := <-logs: + // Decode the event log data + // event := struct { + // Message string + // }{} + event, err := v.Abi.Unpack("HeadUpdate", vLog.Data) + if err != nil { + return -1, err + } + + log.Info("Received message:", event) + return event[0], nil + } + } +} From c29a32b8fbb802fcfcfeaf502dce21dc9aaa29e5 Mon Sep 17 00:00:00 2001 From: rishabhagrawalzra Date: Thu, 4 Apr 2024 21:09:49 +0530 Subject: [PATCH 16/22] fix: vectorx for avail --- das/avail/avail.go | 16 ++++------------ das/avail/vectorx/vectorx.go | 25 ++++++++++++++----------- 2 files changed, 18 insertions(+), 23 deletions(-) diff --git a/das/avail/avail.go b/das/avail/avail.go index 4cf33a0f34..c0235cfd4b 100644 --- a/das/avail/avail.go +++ b/das/avail/avail.go @@ -207,19 +207,11 @@ outer: header, err := a.api.RPC.Chain.GetHeader(finalizedblockHash) if err != nil { - return nil, fmt.Errorf("cannot get header:%+v", err) + return nil, fmt.Errorf("cannot get header for finalized block:%+v", err) } - - finalizedBlockNumber := header.Number -subs: - for { - blockNumber, err := a.vectorx.subscribeForHeaderUpdate() - if err != nil { - - } - if finalizedBlockNumber <= blockNumber { - break subs - } + err = a.vectorx.SubscribeForHeaderUpdate(int(header.Number), 7200) + if err != nil { + return nil, fmt.Errorf("cannot get the event for 
header update on vectorx:%+v", err) } extrinsicIndex, err := GetExtrinsicIndex(a.api, finalizedblockHash, a.keyringPair.Address, o.Nonce) diff --git a/das/avail/vectorx/vectorx.go b/das/avail/vectorx/vectorx.go index 8b0618e3f7..690f6709ae 100644 --- a/das/avail/vectorx/vectorx.go +++ b/das/avail/vectorx/vectorx.go @@ -2,6 +2,8 @@ package vectorx import ( "context" + "fmt" + "time" "github.com/ethereum/go-ethereum/log" @@ -17,34 +19,35 @@ type VectorX struct { Query ethereum.FilterQuery } -func (v *VectorX) subscribeForHeaderUpdate() (int, error) { +func (v *VectorX) SubscribeForHeaderUpdate(finalizedBlockNumber int, t int64) error { // Subscribe to the event stream logs := make(chan types.Log) sub, err := v.Client.SubscribeFilterLogs(context.Background(), v.Query, logs) if err != nil { - return -1, err + return err } defer sub.Unsubscribe() log.Info("🎧 Listening for vectorx HeadUpdate event") - + timeout := time.After(time.Duration(t) * time.Second) // Loop to process incoming events for { select { case err := <-sub.Err(): - return -1, err + return err case vLog := <-logs: - // Decode the event log data - // event := struct { - // Message string - // }{} event, err := v.Abi.Unpack("HeadUpdate", vLog.Data) if err != nil { - return -1, err + return err } - log.Info("Received message:", event) - return event[0], nil + log.Info("🤝 New HeadUpdate event from vecotorx", event[0]) + val, _ := event[0].(uint32) + if val >= uint32(finalizedBlockNumber) { + return nil + } + case <-timeout: + return fmt.Errorf("⌛ī¸ Timeout of %d seconds reached without getting HeadUpdate event from vectorx for blockNumber %v", t, finalizedBlockNumber) } } } From 722d319cd93ec935261ec0f927478968bccc416c Mon Sep 17 00:00:00 2001 From: rishabhagrawalzra Date: Thu, 4 Apr 2024 22:41:43 +0530 Subject: [PATCH 17/22] feat: integrated with common da interface #2155 --- arbnode/inbox_tracker.go | 5 +++- arbstate/inbox.go | 48 +++++++++++++++++++-------------- arbstate/inbox_fuzz_test.go | 2 +- cmd/replay/main.go | 5 +++- system_tests/state_fuzz_test.go | 2 +- 5 files changed, 38 insertions(+), 24 deletions(-) diff --git a/arbnode/inbox_tracker.go b/arbnode/inbox_tracker.go index e072cca2d0..7016f3ae66 100644 --- a/arbnode/inbox_tracker.go +++ b/arbnode/inbox_tracker.go @@ -613,10 +613,13 @@ func (t *InboxTracker) AddSequencerBatches(ctx context.Context, client arbutil.L if t.das != nil { daProviders = append(daProviders, arbstate.NewDAProviderDAS(t.das)) } + if t.availDAReader != nil { + daProviders = append(daProviders, arbstate.NewDAProviderAvail(t.availDAReader)) + } if t.blobReader != nil { daProviders = append(daProviders, arbstate.NewDAProviderBlobReader(t.blobReader)) } - multiplexer := arbstate.NewInboxMultiplexer(backend, prevbatchmeta.DelayedMessageCount, daProviders, t.availDAReader, arbstate.KeysetValidate) + multiplexer := arbstate.NewInboxMultiplexer(backend, prevbatchmeta.DelayedMessageCount, daProviders, arbstate.KeysetValidate) batchMessageCounts := make(map[uint64]arbutil.MessageIndex) currentpos := prevbatchmeta.MessageCount + 1 for { diff --git a/arbstate/inbox.go b/arbstate/inbox.go index 486488c04b..e2874a871c 100644 --- a/arbstate/inbox.go +++ b/arbstate/inbox.go @@ -64,7 +64,7 @@ const maxZeroheavyDecompressedLen = 101*MaxDecompressedLen/100 + 64 const MaxSegmentsPerSequencerMessage = 100 * 1024 const MinLifetimeSecondsForDataAvailabilityCert = 7 * 24 * 60 * 60 // one week -func parseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash common.Hash, data []byte, daProviders 
[]DataAvailabilityProvider, availDAReader avail.DataAvailabilityReader, keysetValidationMode KeysetValidationMode) (*sequencerMessage, error) { +func parseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash common.Hash, data []byte, daProviders []DataAvailabilityProvider, keysetValidationMode KeysetValidationMode) (*sequencerMessage, error) { if len(data) < 40 { return nil, errors.New("sequencer message missing L1 header") } @@ -116,21 +116,6 @@ func parseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash } } - if len(payload) > 0 && avail.IsAvailMessageHeaderByte(payload[0]) { - if availDAReader == nil { - log.Error("No Avail Reader configured, but sequencer message found with Avail header") - } else { - var err error - payload, err = RecoverPayloadFromAvailBatch(ctx, batchNum, data, availDAReader, nil) - if err != nil { - return nil, err - } - if payload == nil { - return parsedMsg, nil - } - } - } - // At this point, `payload` has not been validated by the sequencer inbox at all. // It's not safe to trust any part of the payload from this point onwards. @@ -353,6 +338,31 @@ type DataAvailabilityProvider interface { ) ([]byte, error) } +func NewDAProviderAvail(availDA AvailDataAvailibilityReader) *dAProviderForAvail { + return &dAProviderForAvail{ + availDA: availDA, + } +} + +type dAProviderForAvail struct { + availDA AvailDataAvailibilityReader +} + +func (a *dAProviderForAvail) IsValidHeaderByte(headerByte byte) bool { + return avail.IsAvailMessageHeaderByte(headerByte) +} + +func (a *dAProviderForAvail) RecoverPayloadFromBatch( + ctx context.Context, + batchNum uint64, + batchBlockHash common.Hash, + sequencerMsg []byte, + preimages map[arbutil.PreimageType]map[common.Hash][]byte, + keysetValidationMode KeysetValidationMode, +) ([]byte, error) { + return RecoverPayloadFromAvailBatch(ctx, batchNum, sequencerMsg, a.availDA, preimages) +} + // NewDAProviderDAS is generally meant to be only used by nitro. 
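For context on how a provider list like this is consumed: each provider advertises which header bytes it understands, and the first match recovers the payload. The following is an illustrative sketch only, not the verbatim parseSequencerMessage logic, and it assumes the DataAvailabilityProvider interface and KeysetValidationMode type defined in this file:

    package arbstate

    import (
        "context"

        "github.com/ethereum/go-ethereum/common"

        "github.com/offchainlabs/nitro/arbutil"
    )

    // recoverWithProviders walks the configured DA providers and lets the first
    // one that recognises the payload's header byte recover the batch data.
    func recoverWithProviders(
        ctx context.Context,
        providers []DataAvailabilityProvider,
        batchNum uint64,
        batchBlockHash common.Hash,
        payload []byte,
        preimages map[arbutil.PreimageType]map[common.Hash][]byte,
        keysetValidationMode KeysetValidationMode,
    ) ([]byte, error) {
        if len(payload) == 0 {
            return payload, nil
        }
        for _, provider := range providers {
            if provider.IsValidHeaderByte(payload[0]) {
                return provider.RecoverPayloadFromBatch(ctx, batchNum, batchBlockHash, payload, preimages, keysetValidationMode)
            }
        }
        // No DA header byte matched: the payload is treated as ordinary batch data.
        return payload, nil
    }
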
// DA Providers should implement methods in the DataAvailabilityProvider interface independently func NewDAProviderDAS(das DataAvailabilityReader) *dAProviderForDAS { @@ -434,7 +444,6 @@ type inboxMultiplexer struct { backend InboxBackend delayedMessagesRead uint64 daProviders []DataAvailabilityProvider - availDAReader avail.DataAvailabilityReader cachedSequencerMessage *sequencerMessage cachedSequencerMessageNum uint64 cachedSegmentNum uint64 @@ -444,12 +453,11 @@ type inboxMultiplexer struct { keysetValidationMode KeysetValidationMode } -func NewInboxMultiplexer(backend InboxBackend, delayedMessagesRead uint64, daProviders []DataAvailabilityProvider, availDAReader avail.DataAvailabilityReader, keysetValidationMode KeysetValidationMode) arbostypes.InboxMultiplexer { +func NewInboxMultiplexer(backend InboxBackend, delayedMessagesRead uint64, daProviders []DataAvailabilityProvider, keysetValidationMode KeysetValidationMode) arbostypes.InboxMultiplexer { return &inboxMultiplexer{ backend: backend, delayedMessagesRead: delayedMessagesRead, daProviders: daProviders, - availDAReader: availDAReader, keysetValidationMode: keysetValidationMode, } } @@ -471,7 +479,7 @@ func (r *inboxMultiplexer) Pop(ctx context.Context) (*arbostypes.MessageWithMeta } r.cachedSequencerMessageNum = r.backend.GetSequencerInboxPosition() var err error - r.cachedSequencerMessage, err = parseSequencerMessage(ctx, r.cachedSequencerMessageNum, batchBlockHash, bytes, r.daProviders, r.availDAReader, r.keysetValidationMode) + r.cachedSequencerMessage, err = parseSequencerMessage(ctx, r.cachedSequencerMessageNum, batchBlockHash, bytes, r.daProviders, r.keysetValidationMode) if err != nil { return nil, err } diff --git a/arbstate/inbox_fuzz_test.go b/arbstate/inbox_fuzz_test.go index dcf43fd0da..b34c02534b 100644 --- a/arbstate/inbox_fuzz_test.go +++ b/arbstate/inbox_fuzz_test.go @@ -67,7 +67,7 @@ func FuzzInboxMultiplexer(f *testing.F) { delayedMessage: delayedMsg, positionWithinMessage: 0, } - multiplexer := NewInboxMultiplexer(backend, 0, nil, nil, KeysetValidate) + multiplexer := NewInboxMultiplexer(backend, 0, nil, KeysetValidate) _, err := multiplexer.Pop(context.TODO()) if err != nil { panic(err) diff --git a/cmd/replay/main.go b/cmd/replay/main.go index 34a306c836..e0b7f1eb7d 100644 --- a/cmd/replay/main.go +++ b/cmd/replay/main.go @@ -230,8 +230,11 @@ func main() { if dasReader != nil { daProviders = append(daProviders, arbstate.NewDAProviderDAS(dasReader)) } + if availDAReader != nil { + daProviders = append(daProviders, arbstate.NewDAProviderAvail(availDAReader)) + } daProviders = append(daProviders, arbstate.NewDAProviderBlobReader(&BlobPreimageReader{})) - inboxMultiplexer := arbstate.NewInboxMultiplexer(backend, delayedMessagesRead, daProviders, availDAReader, keysetValidationMode) + inboxMultiplexer := arbstate.NewInboxMultiplexer(backend, delayedMessagesRead, daProviders, keysetValidationMode) ctx := context.Background() message, err := inboxMultiplexer.Pop(ctx) if err != nil { diff --git a/system_tests/state_fuzz_test.go b/system_tests/state_fuzz_test.go index 28bcbec9b4..1b29dca4b9 100644 --- a/system_tests/state_fuzz_test.go +++ b/system_tests/state_fuzz_test.go @@ -41,7 +41,7 @@ func BuildBlock( if lastBlockHeader != nil { delayedMessagesRead = lastBlockHeader.Nonce.Uint64() } - inboxMultiplexer := arbstate.NewInboxMultiplexer(inbox, delayedMessagesRead, nil, nil, arbstate.KeysetValidate) + inboxMultiplexer := arbstate.NewInboxMultiplexer(inbox, delayedMessagesRead, nil, arbstate.KeysetValidate) ctx := 
context.Background() message, err := inboxMultiplexer.Pop(ctx) From 76cb8fb8fd576d7e8ea00a3b8b49266cf7e64f26 Mon Sep 17 00:00:00 2001 From: rishabhagrawalzra Date: Mon, 8 Apr 2024 17:39:13 +0530 Subject: [PATCH 18/22] fix: da config in avail --- das/avail/config.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/das/avail/config.go b/das/avail/config.go index c56dbe1804..2678c65716 100644 --- a/das/avail/config.go +++ b/das/avail/config.go @@ -1,6 +1,8 @@ package avail -import "time" +import ( + "time" +) type DAConfig struct { Enable bool `koanf:"enable"` @@ -11,12 +13,13 @@ type DAConfig struct { VectorX string `koanf:"vectorx"` } -func NewDAConfig(api_url string, seed string, app_id int, timeout time.Duration) (*DAConfig, error) { +func NewDAConfig(api_url string, seed string, app_id int, timeout time.Duration, vectorx string) (*DAConfig, error) { return &DAConfig{ Enable: true, ApiURL: api_url, Seed: seed, AppID: app_id, Timeout: timeout, + VectorX: vectorx, }, nil } From e45f349a83b5b273881fae047d57dae61bc33ea3 Mon Sep 17 00:00:00 2001 From: rishabhagrawalzra Date: Fri, 26 Apr 2024 20:55:43 +0530 Subject: [PATCH 19/22] fix: added abi encoding and decoding for blobPointer --- contracts | 2 +- das/avail/avail.go | 27 +- das/avail/avail_test.go | 2 +- das/avail/blob.go | 160 +++-- das/avail/config.go | 28 +- das/avail/merkleProofInput.go | 250 ++++--- das/avail/utils.go | 57 +- das/avail/vectorx/abi/VectorX.abi.json | 887 ------------------------- das/avail/vectorx/vectorx.go | 28 +- 9 files changed, 322 insertions(+), 1119 deletions(-) delete mode 100644 das/avail/vectorx/abi/VectorX.abi.json diff --git a/contracts b/contracts index ba35ff3220..ce79d6f472 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit ba35ff32209a677314f462580bc3abe999919aa1 +Subproject commit ce79d6f472ad99c93403a2f15960c62003bc4277 diff --git a/das/avail/avail.go b/das/avail/avail.go index c0235cfd4b..1b389974ec 100644 --- a/das/avail/avail.go +++ b/das/avail/avail.go @@ -56,7 +56,7 @@ func NewAvailDA(cfg DAConfig, l1Client arbutil.L1Interface) (*AvailDA, error) { } // Creating new substrate api - api, err := gsrpc.NewSubstrateAPI(cfg.ApiURL) + api, err := gsrpc.NewSubstrateAPI(cfg.AvailApiURL) if err != nil { return nil, err } @@ -95,18 +95,25 @@ func NewAvailDA(cfg DAConfig, l1Client arbutil.L1Interface) (*AvailDA, error) { contractAddress := common.HexToAddress(cfg.VectorX) // Contract ABI (Application Binary Interface) - // Replace this with your contract's ABI - byteValue, err := os.ReadFile("./abi/vectorx.abi.json") + pwd, _ := os.Getwd() + log.Info(pwd) + // byteValue, err := os.ReadFile(pwd + "/das/avail/vectorx/abi/Vectorx.abi.json") + // if err != nil { + // log.Warn("⚠ī¸ cannot read abi for vectorX: error:%v", err) + // return nil, err + // } + // vectorxABI := string(byteValue) + + // Parse the contract ABI + abi, err := abi.JSON(strings.NewReader(vectorx.VectorxABI)) if err != nil { - log.Warn("⚠ī¸ cannot read abi for vectorX: error:%v", err) + log.Warn("⚠ī¸ cannot create abi for vectorX: error:%v", err) return nil, err } - vectorxABI := string(byteValue) - // Parse the contract ABI - abi, err := abi.JSON(strings.NewReader(vectorxABI)) + //Connect to L1 node thru web socket + client, err := ethclient.Dial(cfg.ArbSepoliaRPC) if err != nil { - log.Warn("⚠ī¸ cannot create abi for vectorX: error:%v", err) return nil, err } @@ -118,7 +125,7 @@ func NewAvailDA(cfg DAConfig, l1Client arbutil.L1Interface) (*AvailDA, error) { return &AvailDA{ enable: cfg.Enable, 
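// Illustrative sketch, not taken from any patch in this series: the values
// assembled above (the parsed VectorxABI, the ethclient dialed with
// cfg.ArbSepoliaRPC, and the VectorX contract address) feed the vectorx field
// constructed just below. Assuming the FilterQuery only scopes log
// subscriptions to the VectorX contract, the wiring looks roughly like:
//
//	query := ethereum.FilterQuery{
//		Addresses: []common.Address{contractAddress},
//	}
//	v := vectorx.VectorX{Abi: abi, Client: client, Query: query}
//	// v.SubscribeForHeaderUpdate(blockNumber, timeoutSeconds) then blocks
//	// until a HeadUpdate event at or past blockNumber arrives, or times out.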
- vectorx: vectorx.VectorX{Abi: abi, Client: *ethclient.NewClient(l1Client.Client()), Query: query}, + vectorx: vectorx.VectorX{Abi: abi, Client: client, Query: query}, timeout: cfg.Timeout, appID: appID, api: api, @@ -209,6 +216,7 @@ outer: if err != nil { return nil, fmt.Errorf("cannot get header for finalized block:%+v", err) } + err = a.vectorx.SubscribeForHeaderUpdate(int(header.Number), 7200) if err != nil { return nil, fmt.Errorf("cannot get the event for header update on vectorx:%+v", err) @@ -218,6 +226,7 @@ outer: if err != nil { return nil, err } + log.Info("Finalized extrinsic", "extrinsicIndex", extrinsicIndex) merkleProofInput, err := QueryMerkleProofInput(finalizedblockHash.Hex(), extrinsicIndex) if err != nil { diff --git a/das/avail/avail_test.go b/das/avail/avail_test.go index 5027ef7e91..a05defd5ba 100644 --- a/das/avail/avail_test.go +++ b/das/avail/avail_test.go @@ -14,7 +14,7 @@ import ( func TestMarshallingAndUnmarshalingBlobPointer(t *testing.T) { extrinsicIndex := 1 - bridgeApiBaseURL := "https://bridge-api.sandbox.avail.tools" + bridgeApiBaseURL := "https://hex-bridge-api.sandbox.avail.tools" blockHashPath := "/eth/proof/" + "0xf53613fa06b6b7f9dc5e4cf5f2849affc94e19d8a9e8999207ece01175c988ed" //+ finalizedblockHash.Hex() params := url.Values{} params.Add("index", fmt.Sprint(extrinsicIndex)) diff --git a/das/avail/blob.go b/das/avail/blob.go index 1e3fa90713..91342d5f8f 100644 --- a/das/avail/blob.go +++ b/das/avail/blob.go @@ -4,8 +4,10 @@ import ( "bytes" "encoding/binary" "fmt" + "reflect" gsrpc_types "github.com/centrifuge/go-substrate-rpc-client/v4/types" + "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/common" ) @@ -18,88 +20,128 @@ type BlobPointer struct { MerkleProofInput MerkleProofInput // Merkle proof of the blob submission } +var byte32Type = abi.Type{T: abi.FixedBytesTy, Size: 32} +var uint32Type = abi.Type{Size: 32, T: abi.UintTy} +var stringType = abi.Type{T: abi.StringTy} +var byte32ArrayType = abi.Type{T: abi.SliceTy, Elem: &abi.Type{T: abi.FixedBytesTy, Size: 32}} +var uint64Type = abi.Type{Size: 64, T: abi.UintTy} +var merkleProofInputType = abi.Type{T: abi.TupleTy, TupleType: reflect.TypeOf(MerkleProofInput{}), TupleElems: []*abi.Type{&byte32ArrayType, &byte32ArrayType, &byte32Type, &uint64Type, &byte32Type, &byte32Type, &byte32Type, &uint64Type}, TupleRawNames: []string{"dataRootProof", "leafProof", "rangeHash", "dataRootIndex", "blobRoot", "bridgeRoot", "leaf", "leafIndex"}} + +var arguments = abi.Arguments{ + {Type: byte32Type}, {Type: stringType}, {Type: uint32Type}, {Type: byte32Type}, {Type: merkleProofInputType}, +} + // MarshalBinary encodes the BlobPointer to binary -// serialization format: AvailMessageHeaderFlag + MerkleProofInput + BlockHash + Sender + Nonce + DasTreeRootHash +// serialization format: AvailMessageHeaderFlag + BlockHash + Sender + Nonce + DasTreeRootHash + MerkleProofInput // -// minimum size = 330 bytes +// minimum size = 330 bytes // ------------------------------------------------------------------------------------------------------------------------------------------------------------- // -// | 1 byte | minimum bytes size = 210 | 32 byte | 48 byte | 8 byte | 32 byte | +// | 1 byte | 32 byte | 48 byte | 8 byte | 32 byte | minimum bytes size = 210 | // // ------------------------------------------------------------------------------------------------------------------------------------------------------------- // -// |<-- AvailMessageHeaderFlag -->|<----- MerkleProofInput 
----->|<----- BlockHash ----->|<----- Sender ----->|<----- Nonce ----->|<----- DasTreeRootHash ----->| +// |<-- AvailMessageHeaderFlag -->|<----- BlockHash ----->|<----- Sender ----->|<----- Nonce ----->|<----- DasTreeRootHash ----->|<----- MerkleProofInput ----->| // // ------------------------------------------------------------------------------------------------------------------------------------------------------------- func (b *BlobPointer) MarshalToBinary() ([]byte, error) { - - buf := new(bytes.Buffer) + packedData, err := arguments.PackValues([]interface{}{b.BlockHash, b.Sender, b.Nonce, b.DasTreeRootHash, b.MerkleProofInput}) + if err != nil { + return []byte{}, fmt.Errorf("unable to covert the blobPointer into array of bytes and getting error:%v", err) + } // Encoding at first the avail message header flag + buf := new(bytes.Buffer) if err := binary.Write(buf, binary.BigEndian, AvailMessageHeaderFlag); err != nil { fmt.Println("binary.Write failed:", err) return []byte{}, fmt.Errorf("unable to covert the avail block referece into array of bytes and getting error:%w", err) } + serializedBlobPointerData := append(buf.Bytes(), packedData...) + return serializedBlobPointerData, nil +} - // Marshaling in between: The Merkle proof input, which will be required for DA verification - merkleProofInput, err := b.MerkleProofInput.MarshalToBinary() +func (b *BlobPointer) UnmarshalFromBinary(data []byte) error { + unpackedData, err := arguments.UnpackValues(data[1:]) if err != nil { - return []byte{}, fmt.Errorf("unable to covert the avail block referece into array of bytes and getting error:%w", err) + return fmt.Errorf("unable to covert the data bytes into blobPointer and getting error:%v", err) } - buf.Write(merkleProofInput) + b.BlockHash = unpackedData[0].([32]uint8) + b.Sender = unpackedData[1].(string) + b.Nonce = unpackedData[2].(uint32) + b.DasTreeRootHash = unpackedData[3].([32]uint8) + b.MerkleProofInput = unpackedData[4].(MerkleProofInput) + return nil +} - // Encoding at last: blockHash, sender address, nonce and DASTreeRootHash which will not be required for DA verification - if err := binary.Write(buf, binary.BigEndian, b.BlockHash); err != nil { - fmt.Println("binary.Write failed:", err) - return []byte{}, fmt.Errorf("unable to covert the avail block referece into array of bytes and getting error:%w", err) - } - var senderBytes = []byte(b.Sender) - if err = binary.Write(buf, binary.BigEndian, uint8(len(senderBytes))); err != nil { - fmt.Println("binary.Write failed:", err) - return []byte{}, fmt.Errorf("unable to covert the avail block referece into array of bytes and getting error:%w", err) - } - if err = binary.Write(buf, binary.BigEndian, senderBytes); err != nil { - fmt.Println("binary.Write failed:", err) - return []byte{}, fmt.Errorf("unable to covert the avail block referece into array of bytes and getting error:%w", err) - } - if err = binary.Write(buf, binary.BigEndian, b.Nonce); err != nil { - fmt.Println("binary.Write failed:", err) - return []byte{}, fmt.Errorf("unable to covert the avail block referece into array of bytes and getting error:%w", err) - } - if err = binary.Write(buf, binary.BigEndian, b.DasTreeRootHash); err != nil { - fmt.Println("binary.Write failed:", err) - } +// func (b *BlobPointer) MarshalToBinary() ([]byte, error) { - return buf.Bytes(), nil -} +// buf := new(bytes.Buffer) -// UnmarshalBinary decodes the binary to BlobPointer -func (b *BlobPointer) UnmarshalFromBinary(blobPointerData []byte) error { - buf := 
bytes.NewReader(blobPointerData) +// // Encoding at first the avail message header flag +// if err := binary.Write(buf, binary.BigEndian, AvailMessageHeaderFlag); err != nil { +// fmt.Println("binary.Write failed:", err) +// return []byte{}, fmt.Errorf("unable to covert the avail block referece into array of bytes and getting error:%w", err) +// } - if err := b.MerkleProofInput.UnmarshalFromBinary(buf); err != nil { - return err - } +// // Marshaling in between: The Merkle proof input, which will be required for DA verification +// merkleProofInput, err := b.MerkleProofInput.MarshalToBinary() +// if err != nil { +// return []byte{}, fmt.Errorf("unable to covert the avail block referece into array of bytes and getting error:%w", err) +// } +// buf.Write(merkleProofInput) - if err := binary.Read(buf, binary.BigEndian, &b.BlockHash); err != nil { - return err - } +// // Encoding at last: blockHash, sender address, nonce and DASTreeRootHash which will not be required for DA verification +// if err := binary.Write(buf, binary.BigEndian, b.BlockHash); err != nil { +// fmt.Println("binary.Write failed:", err) +// return []byte{}, fmt.Errorf("unable to covert the avail block referece into array of bytes and getting error:%w", err) +// } +// var senderBytes = []byte(b.Sender) +// if err = binary.Write(buf, binary.BigEndian, uint8(len(senderBytes))); err != nil { +// fmt.Println("binary.Write failed:", err) +// return []byte{}, fmt.Errorf("unable to covert the avail block referece into array of bytes and getting error:%w", err) +// } +// if err = binary.Write(buf, binary.BigEndian, senderBytes); err != nil { +// fmt.Println("binary.Write failed:", err) +// return []byte{}, fmt.Errorf("unable to covert the avail block referece into array of bytes and getting error:%w", err) +// } +// if err = binary.Write(buf, binary.BigEndian, b.Nonce); err != nil { +// fmt.Println("binary.Write failed:", err) +// return []byte{}, fmt.Errorf("unable to covert the avail block referece into array of bytes and getting error:%w", err) +// } +// if err = binary.Write(buf, binary.BigEndian, b.DasTreeRootHash); err != nil { +// fmt.Println("binary.Write failed:", err) +// } - var len uint8 - if err := binary.Read(buf, binary.BigEndian, &len); err != nil { - return err - } - var senderBytes = make([]byte, len) - if err := binary.Read(buf, binary.BigEndian, &senderBytes); err != nil { - return err - } - b.Sender = string(senderBytes) - if err := binary.Read(buf, binary.BigEndian, &b.Nonce); err != nil { - return err - } - if err := binary.Read(buf, binary.BigEndian, &b.DasTreeRootHash); err != nil { - return err - } +// return buf.Bytes(), nil +// } - return nil -} +// // UnmarshalBinary decodes the binary to BlobPointer +// func (b *BlobPointer) UnmarshalFromBinary(blobPointerData []byte) error { +// buf := bytes.NewReader(blobPointerData) + +// if err := b.MerkleProofInput.UnmarshalFromBinary(buf); err != nil { +// return err +// } + +// if err := binary.Read(buf, binary.BigEndian, &b.BlockHash); err != nil { +// return err +// } + +// var len uint8 +// if err := binary.Read(buf, binary.BigEndian, &len); err != nil { +// return err +// } +// var senderBytes = make([]byte, len) +// if err := binary.Read(buf, binary.BigEndian, &senderBytes); err != nil { +// return err +// } +// b.Sender = string(senderBytes) +// if err := binary.Read(buf, binary.BigEndian, &b.Nonce); err != nil { +// return err +// } +// if err := binary.Read(buf, binary.BigEndian, &b.DasTreeRootHash); err != nil { +// return err +// } + +// return nil +// 
} diff --git a/das/avail/config.go b/das/avail/config.go index 2678c65716..2d9c861fa0 100644 --- a/das/avail/config.go +++ b/das/avail/config.go @@ -5,21 +5,23 @@ import ( ) type DAConfig struct { - Enable bool `koanf:"enable"` - ApiURL string `koanf:"api-url"` - Seed string `koanf:"seed"` - AppID int `koanf:"app-id"` - Timeout time.Duration `koanf:"timeout"` - VectorX string `koanf:"vectorx"` + Enable bool `koanf:"enable"` + AvailApiURL string `koanf:"avail-api-url"` + Seed string `koanf:"seed"` + AppID int `koanf:"app-id"` + Timeout time.Duration `koanf:"timeout"` + VectorX string `koanf:"vectorx"` + ArbSepoliaRPC string `koanf:"arbsepolia-rpc"` } -func NewDAConfig(api_url string, seed string, app_id int, timeout time.Duration, vectorx string) (*DAConfig, error) { +func NewDAConfig(avail_api_url string, seed string, app_id int, timeout time.Duration, vectorx string, arbSepolia_rpc string) (*DAConfig, error) { return &DAConfig{ - Enable: true, - ApiURL: api_url, - Seed: seed, - AppID: app_id, - Timeout: timeout, - VectorX: vectorx, + Enable: true, + AvailApiURL: avail_api_url, + Seed: seed, + AppID: app_id, + Timeout: timeout, + VectorX: vectorx, + ArbSepoliaRPC: arbSepolia_rpc, }, nil } diff --git a/das/avail/merkleProofInput.go b/das/avail/merkleProofInput.go index d664c53dad..fee245d833 100644 --- a/das/avail/merkleProofInput.go +++ b/das/avail/merkleProofInput.go @@ -1,31 +1,23 @@ package avail -import ( - "bytes" - "encoding/binary" - "fmt" - - gsrpc_types "github.com/centrifuge/go-substrate-rpc-client/v4/types" -) - type MerkleProofInput struct { // proof of inclusion for the data root - DataRootProof []gsrpc_types.Hash + DataRootProof [][32]byte `json:"dataRootProof"` // proof of inclusion of leaf within blob/bridge root - LeafProof []gsrpc_types.Hash + LeafProof [][32]byte `json:"leafProof"` // abi.encodePacked(startBlock, endBlock) of header range commitment on vectorx - RangeHash gsrpc_types.Hash + RangeHash [32]byte `json:"rangeHash"` // index of the data root in the commitment tree - DataRootIndex uint64 + DataRootIndex uint64 `json:"dataRootIndex"` // blob root to check proof against, or reconstruct the data root - BlobRoot gsrpc_types.Hash + BlobRoot [32]byte `json:"blobRoot"` // bridge root to check proof against, or reconstruct the data root - BridgeRoot gsrpc_types.Hash + BridgeRoot [32]byte `json:"bridgeRoot"` // leaf being proven - Leaf gsrpc_types.Hash + Leaf [32]byte `json:"leaf"` // index of the leaf in the blob/bridge root tree - LeafIndex uint64 + LeafIndex uint64 `json:"leafIndex"` } // MarshalBinary encodes the MerkleProofInput to binary @@ -38,116 +30,116 @@ type MerkleProofInput struct { // <-------- len(DataRootProof) -------->|<------- DataRootProof ------->|<------- len(LeafProof) -------->|<------- LeafProof ------->|<---- RangeHash ---->|<------- DataRootIndex ------->|<---- BlobRoot ---->|<---- BridgeRoot ---->|<---- Leaf ---->|<------- LeafIndex ------->| // // ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ -func (i *MerkleProofInput) MarshalToBinary() ([]byte, error) { - buf := new(bytes.Buffer) - err := binary.Write(buf, binary.BigEndian, uint8(len(i.DataRootProof))) - if err != nil { - return []byte{}, fmt.Errorf("unable to covert the merkle proof input into array of bytes and getting error:%w", err) - } - - err = 
binary.Write(buf, binary.BigEndian, i.DataRootProof) - if err != nil { - fmt.Println("binary.Write failed:", err) - return []byte{}, fmt.Errorf("unable to covert the merkle proof input into array of bytes and getting error:%w", err) - } - - err = binary.Write(buf, binary.BigEndian, uint8(len(i.LeafProof))) - if err != nil { - fmt.Println("binary.Write failed:", err) - return []byte{}, fmt.Errorf("unable to covert the merkle proof input into array of bytes and getting error:%w", err) - } - - err = binary.Write(buf, binary.BigEndian, i.LeafProof) - if err != nil { - fmt.Println("binary.Write failed:", err) - return []byte{}, fmt.Errorf("unable to covert the merkle proof input into array of bytes and getting error:%w", err) - } - - err = binary.Write(buf, binary.BigEndian, i.RangeHash) - if err != nil { - fmt.Println("binary.Write failed:", err) - return []byte{}, fmt.Errorf("unable to covert the merkle proof input into array of bytes and getting error:%w", err) - } - - err = binary.Write(buf, binary.BigEndian, i.DataRootIndex) - if err != nil { - fmt.Println("binary.Write failed:", err) - return []byte{}, fmt.Errorf("unable to covert the merkle proof input into array of bytes and getting error:%w", err) - } - - err = binary.Write(buf, binary.BigEndian, i.BlobRoot) - if err != nil { - fmt.Println("binary.Write failed:", err) - return []byte{}, fmt.Errorf("unable to covert the merkle proof input into array of bytes and getting error:%w", err) - } - - err = binary.Write(buf, binary.BigEndian, i.BridgeRoot) - if err != nil { - fmt.Println("binary.Write failed:", err) - return []byte{}, fmt.Errorf("unable to covert the merkle proof input into array of bytes and getting error:%w", err) - } - - err = binary.Write(buf, binary.BigEndian, i.Leaf) - if err != nil { - fmt.Println("binary.Write failed:", err) - return []byte{}, fmt.Errorf("unable to covert the merkle proof input into array of bytes and getting error:%w", err) - } - - err = binary.Write(buf, binary.BigEndian, i.LeafIndex) - if err != nil { - fmt.Println("binary.Write failed:", err) - return []byte{}, fmt.Errorf("unable to covert the merkle proof input into array of bytes and getting error:%w", err) - } - - return buf.Bytes(), nil -} - -func (m *MerkleProofInput) UnmarshalFromBinary(buf *bytes.Reader) error { - var len uint8 - if err := binary.Read(buf, binary.BigEndian, &len); err != nil { - return err - } - - m.DataRootProof = make([]gsrpc_types.Hash, len) - for i := uint8(0); i < len; i++ { - if err := binary.Read(buf, binary.BigEndian, &m.DataRootProof[i]); err != nil { - return err - } - } - - if err := binary.Read(buf, binary.BigEndian, &len); err != nil { - return err - } - m.LeafProof = make([]gsrpc_types.Hash, len) - for i := uint8(0); i < len; i++ { - if err := binary.Read(buf, binary.BigEndian, &m.LeafProof[i]); err != nil { - return err - } - } - - if err := binary.Read(buf, binary.BigEndian, &m.RangeHash); err != nil { - return err - } - - if err := binary.Read(buf, binary.BigEndian, &m.DataRootIndex); err != nil { - return err - } - - if err := binary.Read(buf, binary.BigEndian, &m.BlobRoot); err != nil { - return err - } - - if err := binary.Read(buf, binary.BigEndian, &m.BridgeRoot); err != nil { - return err - } - - if err := binary.Read(buf, binary.BigEndian, &m.Leaf); err != nil { - return err - } - - if err := binary.Read(buf, binary.BigEndian, &m.LeafIndex); err != nil { - return err - } - - return nil -} +// func (i *MerkleProofInput) MarshalToBinary() ([]byte, error) { +// buf := new(bytes.Buffer) +// err := 
binary.Write(buf, binary.BigEndian, uint8(len(i.DataRootProof))) +// if err != nil { +// return []byte{}, fmt.Errorf("unable to covert the merkle proof input into array of bytes and getting error:%w", err) +// } + +// err = binary.Write(buf, binary.BigEndian, i.DataRootProof) +// if err != nil { +// fmt.Println("binary.Write failed:", err) +// return []byte{}, fmt.Errorf("unable to covert the merkle proof input into array of bytes and getting error:%w", err) +// } + +// err = binary.Write(buf, binary.BigEndian, uint8(len(i.LeafProof))) +// if err != nil { +// fmt.Println("binary.Write failed:", err) +// return []byte{}, fmt.Errorf("unable to covert the merkle proof input into array of bytes and getting error:%w", err) +// } + +// err = binary.Write(buf, binary.BigEndian, i.LeafProof) +// if err != nil { +// fmt.Println("binary.Write failed:", err) +// return []byte{}, fmt.Errorf("unable to covert the merkle proof input into array of bytes and getting error:%w", err) +// } + +// err = binary.Write(buf, binary.BigEndian, i.RangeHash) +// if err != nil { +// fmt.Println("binary.Write failed:", err) +// return []byte{}, fmt.Errorf("unable to covert the merkle proof input into array of bytes and getting error:%w", err) +// } + +// err = binary.Write(buf, binary.BigEndian, i.DataRootIndex) +// if err != nil { +// fmt.Println("binary.Write failed:", err) +// return []byte{}, fmt.Errorf("unable to covert the merkle proof input into array of bytes and getting error:%w", err) +// } + +// err = binary.Write(buf, binary.BigEndian, i.BlobRoot) +// if err != nil { +// fmt.Println("binary.Write failed:", err) +// return []byte{}, fmt.Errorf("unable to covert the merkle proof input into array of bytes and getting error:%w", err) +// } + +// err = binary.Write(buf, binary.BigEndian, i.BridgeRoot) +// if err != nil { +// fmt.Println("binary.Write failed:", err) +// return []byte{}, fmt.Errorf("unable to covert the merkle proof input into array of bytes and getting error:%w", err) +// } + +// err = binary.Write(buf, binary.BigEndian, i.Leaf) +// if err != nil { +// fmt.Println("binary.Write failed:", err) +// return []byte{}, fmt.Errorf("unable to covert the merkle proof input into array of bytes and getting error:%w", err) +// } + +// err = binary.Write(buf, binary.BigEndian, i.LeafIndex) +// if err != nil { +// fmt.Println("binary.Write failed:", err) +// return []byte{}, fmt.Errorf("unable to covert the merkle proof input into array of bytes and getting error:%w", err) +// } + +// return buf.Bytes(), nil +// } + +// func (m *MerkleProofInput) UnmarshalFromBinary(buf *bytes.Reader) error { +// var len uint8 +// if err := binary.Read(buf, binary.BigEndian, &len); err != nil { +// return err +// } + +// m.DataRootProof = make([]gsrpc_types.Hash, len) +// for i := uint8(0); i < len; i++ { +// if err := binary.Read(buf, binary.BigEndian, &m.DataRootProof[i]); err != nil { +// return err +// } +// } + +// if err := binary.Read(buf, binary.BigEndian, &len); err != nil { +// return err +// } +// m.LeafProof = make([]gsrpc_types.Hash, len) +// for i := uint8(0); i < len; i++ { +// if err := binary.Read(buf, binary.BigEndian, &m.LeafProof[i]); err != nil { +// return err +// } +// } + +// if err := binary.Read(buf, binary.BigEndian, &m.RangeHash); err != nil { +// return err +// } + +// if err := binary.Read(buf, binary.BigEndian, &m.DataRootIndex); err != nil { +// return err +// } + +// if err := binary.Read(buf, binary.BigEndian, &m.BlobRoot); err != nil { +// return err +// } + +// if err := binary.Read(buf, 
binary.BigEndian, &m.BridgeRoot); err != nil { +// return err +// } + +// if err := binary.Read(buf, binary.BigEndian, &m.Leaf); err != nil { +// return err +// } + +// if err := binary.Read(buf, binary.BigEndian, &m.LeafIndex); err != nil { +// return err +// } + +// return nil +// } diff --git a/das/avail/utils.go b/das/avail/utils.go index 5bbc948ed3..382e858f18 100644 --- a/das/avail/utils.go +++ b/das/avail/utils.go @@ -6,6 +6,7 @@ import ( "io" "net/http" "net/url" + "time" gsrpc "github.com/centrifuge/go-substrate-rpc-client/v4" gsrpc_types "github.com/centrifuge/go-substrate-rpc-client/v4/types" @@ -46,7 +47,7 @@ func GetExtrinsicIndex(api *gsrpc.SubstrateAPI, blockHash gsrpc_types.Hash, addr return -1, fmt.Errorf("❌ unable to find any extrinsic in block %v, from address %v with nonce %v", blockHash, address, nonce) } -type BridgdeApiResponse struct { +type BridgeApiResponse struct { BlobRoot gsrpc_types.Hash `json:"blobRoot"` BlockHash gsrpc_types.Hash `json:"blockHash"` BridgeRoot gsrpc_types.Hash `json:"bridgeRoot"` @@ -72,21 +73,43 @@ func QueryMerkleProofInput(blockHash string, extrinsicIndex int) (MerkleProofInp u.RawQuery = params.Encode() urlStr := fmt.Sprintf("%v", u) - // TODO: Add time difference between batch submission and querying merkle proof - resp, err := http.Get(urlStr) //nolint - if err != nil { - return MerkleProofInput{}, fmt.Errorf("bridge Api request not successfull, err=%w", err) - } - defer resp.Body.Close() - body, err := io.ReadAll(resp.Body) - if err != nil { - return MerkleProofInput{}, err - } - var bridgdeApiResponse BridgdeApiResponse - err = json.Unmarshal(body, &bridgdeApiResponse) - if err != nil { - return MerkleProofInput{}, err + for { + resp, err := http.Get(urlStr) //nolint + if err != nil { + return MerkleProofInput{}, fmt.Errorf("bridge Api request not successfull, err=%w", err) + } + defer resp.Body.Close() + + if resp.StatusCode != 200 { + log.Info("MerkleProofInput is not yet available from bridge-api", "status", resp.Status) + time.Sleep(3 * time.Minute) + continue + } + + body, err := io.ReadAll(resp.Body) + if err != nil { + return MerkleProofInput{}, err + } + fmt.Println(string(body)) + var bridgeApiResponse BridgeApiResponse + err = json.Unmarshal(body, &bridgeApiResponse) + if err != nil { + return MerkleProofInput{}, err + } + + var byte32ArrayDataRootProof [][32]byte + for _, hash := range bridgeApiResponse.DataRootProof { + var byte32Array [32]byte + copy(byte32Array[:], hash[:]) + byte32ArrayDataRootProof = append(byte32ArrayDataRootProof, byte32Array) + } + var byte32ArrayLeafProof [][32]byte + for _, hash := range bridgeApiResponse.LeafProof { + var byte32Array [32]byte + copy(byte32Array[:], hash[:]) + byte32ArrayLeafProof = append(byte32ArrayLeafProof, byte32Array) + } + var merkleProofInput MerkleProofInput = MerkleProofInput{byte32ArrayDataRootProof, byte32ArrayLeafProof, bridgeApiResponse.RangeHash, bridgeApiResponse.DataRootIndex, bridgeApiResponse.BlobRoot, bridgeApiResponse.BridgeRoot, bridgeApiResponse.Leaf, bridgeApiResponse.LeafIndex} + return merkleProofInput, nil } - var merkleProofInput MerkleProofInput = MerkleProofInput{bridgdeApiResponse.DataRootProof, bridgdeApiResponse.LeafProof, bridgdeApiResponse.RangeHash, bridgdeApiResponse.DataRootIndex, bridgdeApiResponse.BlobRoot, bridgdeApiResponse.BridgeRoot, bridgdeApiResponse.Leaf, bridgdeApiResponse.LeafIndex} - return merkleProofInput, nil } diff --git a/das/avail/vectorx/abi/VectorX.abi.json b/das/avail/vectorx/abi/VectorX.abi.json deleted file mode 100644 
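// Illustrative sketch, not taken from any patch in this series: the ABI-based
// BlobPointer encoding introduced in this patch can be sanity-checked with a
// simple round trip. At this point MarshalToBinary prepends the one-byte Avail
// header flag and UnmarshalFromBinary skips it itself (data[1:]); patch 21
// later moves that skip to the caller. The field values below are arbitrary:
//
//	func TestBlobPointerABIRoundTrip(t *testing.T) {
//		original := BlobPointer{Sender: "5GrwvaEF...", Nonce: 7}
//		serialized, err := original.MarshalToBinary()
//		if err != nil {
//			t.Fatal(err)
//		}
//		var decoded BlobPointer
//		if err := decoded.UnmarshalFromBinary(serialized); err != nil {
//			t.Fatal(err)
//		}
//		if decoded.Sender != original.Sender || decoded.Nonce != original.Nonce {
//			t.Fatalf("round trip mismatch: got %+v", decoded)
//		}
//	}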
index c8667484e8..0000000000 --- a/das/avail/vectorx/abi/VectorX.abi.json +++ /dev/null @@ -1,887 +0,0 @@ -[ - { - "type": "function", - "name": "DEFAULT_ADMIN_ROLE", - "inputs": [], - "outputs": [ - { - "name": "", - "type": "bytes32", - "internalType": "bytes32" - } - ], - "stateMutability": "view" - }, - { - "type": "function", - "name": "GUARDIAN_ROLE", - "inputs": [], - "outputs": [ - { - "name": "", - "type": "bytes32", - "internalType": "bytes32" - } - ], - "stateMutability": "view" - }, - { - "type": "function", - "name": "TIMELOCK_ROLE", - "inputs": [], - "outputs": [ - { - "name": "", - "type": "bytes32", - "internalType": "bytes32" - } - ], - "stateMutability": "view" - }, - { - "type": "function", - "name": "VERSION", - "inputs": [], - "outputs": [ - { - "name": "", - "type": "string", - "internalType": "string" - } - ], - "stateMutability": "pure" - }, - { - "type": "function", - "name": "authoritySetIdToHash", - "inputs": [ - { - "name": "", - "type": "uint64", - "internalType": "uint64" - } - ], - "outputs": [ - { - "name": "", - "type": "bytes32", - "internalType": "bytes32" - } - ], - "stateMutability": "view" - }, - { - "type": "function", - "name": "blockHeightToHeaderHash", - "inputs": [ - { - "name": "", - "type": "uint32", - "internalType": "uint32" - } - ], - "outputs": [ - { - "name": "", - "type": "bytes32", - "internalType": "bytes32" - } - ], - "stateMutability": "view" - }, - { - "type": "function", - "name": "commitHeaderRange", - "inputs": [ - { - "name": "_authoritySetId", - "type": "uint64", - "internalType": "uint64" - }, - { - "name": "_targetBlock", - "type": "uint32", - "internalType": "uint32" - } - ], - "outputs": [], - "stateMutability": "nonpayable" - }, - { - "type": "function", - "name": "dataRootCommitments", - "inputs": [ - { - "name": "", - "type": "bytes32", - "internalType": "bytes32" - } - ], - "outputs": [ - { - "name": "", - "type": "bytes32", - "internalType": "bytes32" - } - ], - "stateMutability": "view" - }, - { - "type": "function", - "name": "frozen", - "inputs": [], - "outputs": [ - { - "name": "", - "type": "bool", - "internalType": "bool" - } - ], - "stateMutability": "view" - }, - { - "type": "function", - "name": "gateway", - "inputs": [], - "outputs": [ - { - "name": "", - "type": "address", - "internalType": "address" - } - ], - "stateMutability": "view" - }, - { - "type": "function", - "name": "getRoleAdmin", - "inputs": [ - { - "name": "role", - "type": "bytes32", - "internalType": "bytes32" - } - ], - "outputs": [ - { - "name": "", - "type": "bytes32", - "internalType": "bytes32" - } - ], - "stateMutability": "view" - }, - { - "type": "function", - "name": "grantRole", - "inputs": [ - { - "name": "role", - "type": "bytes32", - "internalType": "bytes32" - }, - { - "name": "account", - "type": "address", - "internalType": "address" - } - ], - "outputs": [], - "stateMutability": "nonpayable" - }, - { - "type": "function", - "name": "hasRole", - "inputs": [ - { - "name": "role", - "type": "bytes32", - "internalType": "bytes32" - }, - { - "name": "account", - "type": "address", - "internalType": "address" - } - ], - "outputs": [ - { - "name": "", - "type": "bool", - "internalType": "bool" - } - ], - "stateMutability": "view" - }, - { - "type": "function", - "name": "headerRangeFunctionId", - "inputs": [], - "outputs": [ - { - "name": "", - "type": "bytes32", - "internalType": "bytes32" - } - ], - "stateMutability": "view" - }, - { - "type": "function", - "name": "initialize", - "inputs": [ - { - "name": "_params", - "type": 
"tuple", - "internalType": "struct VectorX.InitParameters", - "components": [ - { - "name": "guardian", - "type": "address", - "internalType": "address" - }, - { - "name": "gateway", - "type": "address", - "internalType": "address" - }, - { - "name": "height", - "type": "uint32", - "internalType": "uint32" - }, - { - "name": "header", - "type": "bytes32", - "internalType": "bytes32" - }, - { - "name": "authoritySetId", - "type": "uint64", - "internalType": "uint64" - }, - { - "name": "authoritySetHash", - "type": "bytes32", - "internalType": "bytes32" - }, - { - "name": "headerRangeFunctionId", - "type": "bytes32", - "internalType": "bytes32" - }, - { - "name": "rotateFunctionId", - "type": "bytes32", - "internalType": "bytes32" - } - ] - } - ], - "outputs": [], - "stateMutability": "nonpayable" - }, - { - "type": "function", - "name": "latestBlock", - "inputs": [], - "outputs": [ - { - "name": "", - "type": "uint32", - "internalType": "uint32" - } - ], - "stateMutability": "view" - }, - { - "type": "function", - "name": "proxiableUUID", - "inputs": [], - "outputs": [ - { - "name": "", - "type": "bytes32", - "internalType": "bytes32" - } - ], - "stateMutability": "view" - }, - { - "type": "function", - "name": "renounceRole", - "inputs": [ - { - "name": "role", - "type": "bytes32", - "internalType": "bytes32" - }, - { - "name": "account", - "type": "address", - "internalType": "address" - } - ], - "outputs": [], - "stateMutability": "nonpayable" - }, - { - "type": "function", - "name": "requestHeaderRange", - "inputs": [ - { - "name": "_authoritySetId", - "type": "uint64", - "internalType": "uint64" - }, - { - "name": "_requestedBlock", - "type": "uint32", - "internalType": "uint32" - } - ], - "outputs": [], - "stateMutability": "payable" - }, - { - "type": "function", - "name": "requestRotate", - "inputs": [ - { - "name": "_currentAuthoritySetId", - "type": "uint64", - "internalType": "uint64" - } - ], - "outputs": [], - "stateMutability": "payable" - }, - { - "type": "function", - "name": "revokeRole", - "inputs": [ - { - "name": "role", - "type": "bytes32", - "internalType": "bytes32" - }, - { - "name": "account", - "type": "address", - "internalType": "address" - } - ], - "outputs": [], - "stateMutability": "nonpayable" - }, - { - "type": "function", - "name": "rotate", - "inputs": [ - { - "name": "_currentAuthoritySetId", - "type": "uint64", - "internalType": "uint64" - } - ], - "outputs": [], - "stateMutability": "nonpayable" - }, - { - "type": "function", - "name": "rotateFunctionId", - "inputs": [], - "outputs": [ - { - "name": "", - "type": "bytes32", - "internalType": "bytes32" - } - ], - "stateMutability": "view" - }, - { - "type": "function", - "name": "stateRootCommitments", - "inputs": [ - { - "name": "", - "type": "bytes32", - "internalType": "bytes32" - } - ], - "outputs": [ - { - "name": "", - "type": "bytes32", - "internalType": "bytes32" - } - ], - "stateMutability": "view" - }, - { - "type": "function", - "name": "supportsInterface", - "inputs": [ - { - "name": "interfaceId", - "type": "bytes4", - "internalType": "bytes4" - } - ], - "outputs": [ - { - "name": "", - "type": "bool", - "internalType": "bool" - } - ], - "stateMutability": "view" - }, - { - "type": "function", - "name": "updateBlockRangeData", - "inputs": [ - { - "name": "_startBlocks", - "type": "uint32[]", - "internalType": "uint32[]" - }, - { - "name": "_endBlocks", - "type": "uint32[]", - "internalType": "uint32[]" - }, - { - "name": "_headerHashes", - "type": "bytes32[]", - "internalType": "bytes32[]" - 
}, - { - "name": "_dataRootCommitments", - "type": "bytes32[]", - "internalType": "bytes32[]" - }, - { - "name": "_stateRootCommitments", - "type": "bytes32[]", - "internalType": "bytes32[]" - }, - { - "name": "_endAuthoritySetId", - "type": "uint64", - "internalType": "uint64" - }, - { - "name": "_endAuthoritySetHash", - "type": "bytes32", - "internalType": "bytes32" - } - ], - "outputs": [], - "stateMutability": "nonpayable" - }, - { - "type": "function", - "name": "updateFreeze", - "inputs": [ - { - "name": "_freeze", - "type": "bool", - "internalType": "bool" - } - ], - "outputs": [], - "stateMutability": "nonpayable" - }, - { - "type": "function", - "name": "updateFunctionIds", - "inputs": [ - { - "name": "_headerRangeFunctionId", - "type": "bytes32", - "internalType": "bytes32" - }, - { - "name": "_rotateFunctionId", - "type": "bytes32", - "internalType": "bytes32" - } - ], - "outputs": [], - "stateMutability": "nonpayable" - }, - { - "type": "function", - "name": "updateGateway", - "inputs": [ - { - "name": "_gateway", - "type": "address", - "internalType": "address" - } - ], - "outputs": [], - "stateMutability": "nonpayable" - }, - { - "type": "function", - "name": "updateGenesisState", - "inputs": [ - { - "name": "_height", - "type": "uint32", - "internalType": "uint32" - }, - { - "name": "_header", - "type": "bytes32", - "internalType": "bytes32" - }, - { - "name": "_authoritySetId", - "type": "uint64", - "internalType": "uint64" - }, - { - "name": "_authoritySetHash", - "type": "bytes32", - "internalType": "bytes32" - } - ], - "outputs": [], - "stateMutability": "nonpayable" - }, - { - "type": "function", - "name": "upgradeTo", - "inputs": [ - { - "name": "newImplementation", - "type": "address", - "internalType": "address" - } - ], - "outputs": [], - "stateMutability": "nonpayable" - }, - { - "type": "function", - "name": "upgradeToAndCall", - "inputs": [ - { - "name": "newImplementation", - "type": "address", - "internalType": "address" - }, - { - "name": "data", - "type": "bytes", - "internalType": "bytes" - } - ], - "outputs": [], - "stateMutability": "payable" - }, - { - "type": "event", - "name": "AdminChanged", - "inputs": [ - { - "name": "previousAdmin", - "type": "address", - "indexed": false, - "internalType": "address" - }, - { - "name": "newAdmin", - "type": "address", - "indexed": false, - "internalType": "address" - } - ], - "anonymous": false - }, - { - "type": "event", - "name": "AuthoritySetStored", - "inputs": [ - { - "name": "authoritySetId", - "type": "uint64", - "indexed": false, - "internalType": "uint64" - }, - { - "name": "authoritySetHash", - "type": "bytes32", - "indexed": false, - "internalType": "bytes32" - } - ], - "anonymous": false - }, - { - "type": "event", - "name": "BeaconUpgraded", - "inputs": [ - { - "name": "beacon", - "type": "address", - "indexed": true, - "internalType": "address" - } - ], - "anonymous": false - }, - { - "type": "event", - "name": "HeadUpdate", - "inputs": [ - { - "name": "blockNumber", - "type": "uint32", - "indexed": false, - "internalType": "uint32" - }, - { - "name": "headerHash", - "type": "bytes32", - "indexed": false, - "internalType": "bytes32" - } - ], - "anonymous": false - }, - { - "type": "event", - "name": "HeaderRangeCommitmentStored", - "inputs": [ - { - "name": "startBlock", - "type": "uint32", - "indexed": false, - "internalType": "uint32" - }, - { - "name": "endBlock", - "type": "uint32", - "indexed": false, - "internalType": "uint32" - }, - { - "name": "dataCommitment", - "type": "bytes32", - "indexed": 
false, - "internalType": "bytes32" - }, - { - "name": "stateCommitment", - "type": "bytes32", - "indexed": false, - "internalType": "bytes32" - } - ], - "anonymous": false - }, - { - "type": "event", - "name": "HeaderRangeRequested", - "inputs": [ - { - "name": "trustedBlock", - "type": "uint32", - "indexed": false, - "internalType": "uint32" - }, - { - "name": "trustedHeader", - "type": "bytes32", - "indexed": false, - "internalType": "bytes32" - }, - { - "name": "authoritySetId", - "type": "uint64", - "indexed": false, - "internalType": "uint64" - }, - { - "name": "authoritySetHash", - "type": "bytes32", - "indexed": false, - "internalType": "bytes32" - }, - { - "name": "targetBlock", - "type": "uint32", - "indexed": false, - "internalType": "uint32" - } - ], - "anonymous": false - }, - { - "type": "event", - "name": "Initialized", - "inputs": [ - { - "name": "version", - "type": "uint8", - "indexed": false, - "internalType": "uint8" - } - ], - "anonymous": false - }, - { - "type": "event", - "name": "RoleAdminChanged", - "inputs": [ - { - "name": "role", - "type": "bytes32", - "indexed": true, - "internalType": "bytes32" - }, - { - "name": "previousAdminRole", - "type": "bytes32", - "indexed": true, - "internalType": "bytes32" - }, - { - "name": "newAdminRole", - "type": "bytes32", - "indexed": true, - "internalType": "bytes32" - } - ], - "anonymous": false - }, - { - "type": "event", - "name": "RoleGranted", - "inputs": [ - { - "name": "role", - "type": "bytes32", - "indexed": true, - "internalType": "bytes32" - }, - { - "name": "account", - "type": "address", - "indexed": true, - "internalType": "address" - }, - { - "name": "sender", - "type": "address", - "indexed": true, - "internalType": "address" - } - ], - "anonymous": false - }, - { - "type": "event", - "name": "RoleRevoked", - "inputs": [ - { - "name": "role", - "type": "bytes32", - "indexed": true, - "internalType": "bytes32" - }, - { - "name": "account", - "type": "address", - "indexed": true, - "internalType": "address" - }, - { - "name": "sender", - "type": "address", - "indexed": true, - "internalType": "address" - } - ], - "anonymous": false - }, - { - "type": "event", - "name": "RotateRequested", - "inputs": [ - { - "name": "currentAuthoritySetId", - "type": "uint64", - "indexed": false, - "internalType": "uint64" - }, - { - "name": "currentAuthoritySetHash", - "type": "bytes32", - "indexed": false, - "internalType": "bytes32" - } - ], - "anonymous": false - }, - { - "type": "event", - "name": "Upgraded", - "inputs": [ - { - "name": "implementation", - "type": "address", - "indexed": true, - "internalType": "address" - } - ], - "anonymous": false - }, - { - "type": "error", - "name": "AuthoritySetNotFound", - "inputs": [] - }, - { - "type": "error", - "name": "ContractFrozen", - "inputs": [] - }, - { - "type": "error", - "name": "NextAuthoritySetExists", - "inputs": [] - }, - { - "type": "error", - "name": "OnlyGuardian", - "inputs": [ - { - "name": "sender", - "type": "address", - "internalType": "address" - } - ] - }, - { - "type": "error", - "name": "OnlyTimelock", - "inputs": [ - { - "name": "sender", - "type": "address", - "internalType": "address" - } - ] - }, - { - "type": "error", - "name": "TrustedHeaderNotFound", - "inputs": [] - } -] \ No newline at end of file diff --git a/das/avail/vectorx/vectorx.go b/das/avail/vectorx/vectorx.go index 690f6709ae..c5f895d14b 100644 --- a/das/avail/vectorx/vectorx.go +++ b/das/avail/vectorx/vectorx.go @@ -13,9 +13,31 @@ import ( "github.com/ethereum/go-ethereum/ethclient" ) 
+const VectorxABI = `[ + { + "type": "event", + "name": "HeadUpdate", + "inputs": [ + { + "name": "blockNumber", + "type": "uint32", + "indexed": false, + "internalType": "uint32" + }, + { + "name": "headerHash", + "type": "bytes32", + "indexed": false, + "internalType": "bytes32" + } + ], + "anonymous": false + } +]` + type VectorX struct { Abi abi.ABI - Client ethclient.Client + Client *ethclient.Client Query ethereum.FilterQuery } @@ -28,7 +50,7 @@ func (v *VectorX) SubscribeForHeaderUpdate(finalizedBlockNumber int, t int64) er } defer sub.Unsubscribe() - log.Info("🎧 Listening for vectorx HeadUpdate event") + log.Info("🎧 Listening for vectorx HeadUpdate event") timeout := time.After(time.Duration(t) * time.Second) // Loop to process incoming events for { @@ -41,7 +63,7 @@ func (v *VectorX) SubscribeForHeaderUpdate(finalizedBlockNumber int, t int64) er return err } - log.Info("🤝 New HeadUpdate event from vecotorx", event[0]) + log.Info("🤝 New HeadUpdate event from vecotorx", "blockNumber", event[0]) val, _ := event[0].(uint32) if val >= uint32(finalizedBlockNumber) { return nil From 8887858a62c6b075f2e7aa433f1e919e24d1bb09 Mon Sep 17 00:00:00 2001 From: rishabhagrawalzra Date: Fri, 26 Apr 2024 21:01:57 +0530 Subject: [PATCH 20/22] fix: avail test for blobPointer --- das/avail/avail_test.go | 21 +++++++++++++++++---- das/avail/utils.go | 10 +++++----- 2 files changed, 22 insertions(+), 9 deletions(-) diff --git a/das/avail/avail_test.go b/das/avail/avail_test.go index a05defd5ba..46968790f5 100644 --- a/das/avail/avail_test.go +++ b/das/avail/avail_test.go @@ -34,13 +34,26 @@ func TestMarshallingAndUnmarshalingBlobPointer(t *testing.T) { if err != nil { t.Fatalf("unable to read data from response body, err=%v", err) } - var bridgdeApiResponse BridgdeApiResponse - err = json.Unmarshal(body, &bridgdeApiResponse) + var bridgeApiResponse BridgeApiResponse + err = json.Unmarshal(body, &bridgeApiResponse) if err != nil { t.Fatalf("unable to unmarshal bridge api response, err=%v", err) } - t.Logf("%+v", bridgdeApiResponse) - var merkleProofInput MerkleProofInput = MerkleProofInput{bridgdeApiResponse.DataRootProof, bridgdeApiResponse.LeafProof, bridgdeApiResponse.RangeHash, bridgdeApiResponse.DataRootIndex, bridgdeApiResponse.BlobRoot, bridgdeApiResponse.BridgeRoot, bridgdeApiResponse.Leaf, bridgdeApiResponse.LeafIndex} + t.Logf("%+v", bridgeApiResponse) + + var dataRootProof [][32]byte + for _, hash := range bridgeApiResponse.DataRootProof { + var byte32Array [32]byte + copy(byte32Array[:], hash[:]) + dataRootProof = append(dataRootProof, byte32Array) + } + var leafProof [][32]byte + for _, hash := range bridgeApiResponse.LeafProof { + var byte32Array [32]byte + copy(byte32Array[:], hash[:]) + leafProof = append(leafProof, byte32Array) + } + var merkleProofInput MerkleProofInput = MerkleProofInput{dataRootProof, leafProof, bridgeApiResponse.RangeHash, bridgeApiResponse.DataRootIndex, bridgeApiResponse.BlobRoot, bridgeApiResponse.BridgeRoot, bridgeApiResponse.Leaf, bridgeApiResponse.LeafIndex} t.Logf("%+v", merkleProofInput) var blobPointer BlobPointer = BlobPointer{gsrpc_types.NewHash([]byte{245, 54, 19, 250, 6, 182, 183, 249, 220, 94, 76, 245, 242, 132, 154, 255, 201, 78, 25, 216, 169, 232, 153, 146, 7, 236, 224, 17, 117, 201, 136, 237}), diff --git a/das/avail/utils.go b/das/avail/utils.go index 382e858f18..746aefad3c 100644 --- a/das/avail/utils.go +++ b/das/avail/utils.go @@ -97,19 +97,19 @@ func QueryMerkleProofInput(blockHash string, extrinsicIndex int) (MerkleProofInp return 
MerkleProofInput{}, err } - var byte32ArrayDataRootProof [][32]byte + var dataRootProof [][32]byte for _, hash := range bridgeApiResponse.DataRootProof { var byte32Array [32]byte copy(byte32Array[:], hash[:]) - byte32ArrayDataRootProof = append(byte32ArrayDataRootProof, byte32Array) + dataRootProof = append(dataRootProof, byte32Array) } - var byte32ArrayLeafProof [][32]byte + var leafProof [][32]byte for _, hash := range bridgeApiResponse.LeafProof { var byte32Array [32]byte copy(byte32Array[:], hash[:]) - byte32ArrayLeafProof = append(byte32ArrayLeafProof, byte32Array) + leafProof = append(leafProof, byte32Array) } - var merkleProofInput MerkleProofInput = MerkleProofInput{byte32ArrayDataRootProof, byte32ArrayLeafProof, bridgeApiResponse.RangeHash, bridgeApiResponse.DataRootIndex, bridgeApiResponse.BlobRoot, bridgeApiResponse.BridgeRoot, bridgeApiResponse.Leaf, bridgeApiResponse.LeafIndex} + var merkleProofInput MerkleProofInput = MerkleProofInput{dataRootProof, leafProof, bridgeApiResponse.RangeHash, bridgeApiResponse.DataRootIndex, bridgeApiResponse.BlobRoot, bridgeApiResponse.BridgeRoot, bridgeApiResponse.Leaf, bridgeApiResponse.LeafIndex} return merkleProofInput, nil } } From da50864e2186126505c088e240d2908d4873536d Mon Sep 17 00:00:00 2001 From: rishabhagrawalzra Date: Thu, 2 May 2024 11:22:57 +0530 Subject: [PATCH 21/22] fix: resolved unmarhaling for blobPointer and queryMerkleProofInput timeout --- das/avail/avail.go | 14 +--- das/avail/avail_test.go | 2 +- das/avail/blob.go | 84 ++-------------------- das/avail/merkleProofInput.go | 127 ++-------------------------------- das/avail/utils.go | 79 ++++++++++++--------- das/avail/vectorx/vectorx.go | 2 +- 6 files changed, 61 insertions(+), 247 deletions(-) diff --git a/das/avail/avail.go b/das/avail/avail.go index 1b389974ec..9cf201eeb1 100644 --- a/das/avail/avail.go +++ b/das/avail/avail.go @@ -3,7 +3,6 @@ package avail import ( "context" "fmt" - "os" "strings" "time" @@ -94,16 +93,6 @@ func NewAvailDA(cfg DAConfig, l1Client arbutil.L1Interface) (*AvailDA, error) { // Contract address contractAddress := common.HexToAddress(cfg.VectorX) - // Contract ABI (Application Binary Interface) - pwd, _ := os.Getwd() - log.Info(pwd) - // byteValue, err := os.ReadFile(pwd + "/das/avail/vectorx/abi/Vectorx.abi.json") - // if err != nil { - // log.Warn("⚠ī¸ cannot read abi for vectorX: error:%v", err) - // return nil, err - // } - // vectorxABI := string(byteValue) - // Parse the contract ABI abi, err := abi.JSON(strings.NewReader(vectorx.VectorxABI)) if err != nil { @@ -156,7 +145,6 @@ func (a *AvailDA) Store(ctx context.Context, message []byte) ([]byte, error) { } nonce := GetAccountNonce(uint32(accountInfo.Nonce)) - // fmt.Println("Nonce from localDatabase:", nonce, " :::::::: from acountInfo:", accountInfo.Nonce) o := gsrpc_types.SignatureOptions{ BlockHash: a.genesisHash, Era: gsrpc_types.ExtrinsicEra{IsMortalEra: false}, @@ -228,7 +216,7 @@ outer: } log.Info("Finalized extrinsic", "extrinsicIndex", extrinsicIndex) - merkleProofInput, err := QueryMerkleProofInput(finalizedblockHash.Hex(), extrinsicIndex) + merkleProofInput, err := QueryMerkleProofInput(finalizedblockHash.Hex(), extrinsicIndex, 1200) if err != nil { return nil, err } diff --git a/das/avail/avail_test.go b/das/avail/avail_test.go index 46968790f5..a945728c83 100644 --- a/das/avail/avail_test.go +++ b/das/avail/avail_test.go @@ -15,7 +15,7 @@ import ( func TestMarshallingAndUnmarshalingBlobPointer(t *testing.T) { extrinsicIndex := 1 bridgeApiBaseURL := 
"https://hex-bridge-api.sandbox.avail.tools" - blockHashPath := "/eth/proof/" + "0xf53613fa06b6b7f9dc5e4cf5f2849affc94e19d8a9e8999207ece01175c988ed" //+ finalizedblockHash.Hex() + blockHashPath := "/eth/proof/" + "0x1672d81d105b9efb5689913ae3c608488419bc6e32a5f8cc7766d194e8865f30" //+ finalizedblockHash.Hex() params := url.Values{} params.Add("index", fmt.Sprint(extrinsicIndex)) diff --git a/das/avail/blob.go b/das/avail/blob.go index 91342d5f8f..205b81a3e1 100644 --- a/das/avail/blob.go +++ b/das/avail/blob.go @@ -34,16 +34,17 @@ var arguments = abi.Arguments{ // MarshalBinary encodes the BlobPointer to binary // serialization format: AvailMessageHeaderFlag + BlockHash + Sender + Nonce + DasTreeRootHash + MerkleProofInput // -// minimum size = 330 bytes -// ------------------------------------------------------------------------------------------------------------------------------------------------------------- +// minimum size = 330 bytes +// +// ------------------------------------------------------------------------------------------------------------------------------------------------------------- // // | 1 byte | 32 byte | 48 byte | 8 byte | 32 byte | minimum bytes size = 210 | // -// ------------------------------------------------------------------------------------------------------------------------------------------------------------- +// ------------------------------------------------------------------------------------------------------------------------------------------------------------- // // |<-- AvailMessageHeaderFlag -->|<----- BlockHash ----->|<----- Sender ----->|<----- Nonce ----->|<----- DasTreeRootHash ----->|<----- MerkleProofInput ----->| // -// ------------------------------------------------------------------------------------------------------------------------------------------------------------- +// ------------------------------------------------------------------------------------------------------------------------------------------------------------- func (b *BlobPointer) MarshalToBinary() ([]byte, error) { packedData, err := arguments.PackValues([]interface{}{b.BlockHash, b.Sender, b.Nonce, b.DasTreeRootHash, b.MerkleProofInput}) if err != nil { @@ -61,7 +62,7 @@ func (b *BlobPointer) MarshalToBinary() ([]byte, error) { } func (b *BlobPointer) UnmarshalFromBinary(data []byte) error { - unpackedData, err := arguments.UnpackValues(data[1:]) + unpackedData, err := arguments.UnpackValues(data) if err != nil { return fmt.Errorf("unable to covert the data bytes into blobPointer and getting error:%v", err) } @@ -72,76 +73,3 @@ func (b *BlobPointer) UnmarshalFromBinary(data []byte) error { b.MerkleProofInput = unpackedData[4].(MerkleProofInput) return nil } - -// func (b *BlobPointer) MarshalToBinary() ([]byte, error) { - -// buf := new(bytes.Buffer) - -// // Encoding at first the avail message header flag -// if err := binary.Write(buf, binary.BigEndian, AvailMessageHeaderFlag); err != nil { -// fmt.Println("binary.Write failed:", err) -// return []byte{}, fmt.Errorf("unable to covert the avail block referece into array of bytes and getting error:%w", err) -// } - -// // Marshaling in between: The Merkle proof input, which will be required for DA verification -// merkleProofInput, err := b.MerkleProofInput.MarshalToBinary() -// if err != nil { -// return []byte{}, fmt.Errorf("unable to covert the avail block referece into array of bytes and getting error:%w", err) -// } -// buf.Write(merkleProofInput) - -// // Encoding at last: blockHash, sender address, 
nonce and DASTreeRootHash which will not be required for DA verification -// if err := binary.Write(buf, binary.BigEndian, b.BlockHash); err != nil { -// fmt.Println("binary.Write failed:", err) -// return []byte{}, fmt.Errorf("unable to covert the avail block referece into array of bytes and getting error:%w", err) -// } -// var senderBytes = []byte(b.Sender) -// if err = binary.Write(buf, binary.BigEndian, uint8(len(senderBytes))); err != nil { -// fmt.Println("binary.Write failed:", err) -// return []byte{}, fmt.Errorf("unable to covert the avail block referece into array of bytes and getting error:%w", err) -// } -// if err = binary.Write(buf, binary.BigEndian, senderBytes); err != nil { -// fmt.Println("binary.Write failed:", err) -// return []byte{}, fmt.Errorf("unable to covert the avail block referece into array of bytes and getting error:%w", err) -// } -// if err = binary.Write(buf, binary.BigEndian, b.Nonce); err != nil { -// fmt.Println("binary.Write failed:", err) -// return []byte{}, fmt.Errorf("unable to covert the avail block referece into array of bytes and getting error:%w", err) -// } -// if err = binary.Write(buf, binary.BigEndian, b.DasTreeRootHash); err != nil { -// fmt.Println("binary.Write failed:", err) -// } - -// return buf.Bytes(), nil -// } - -// // UnmarshalBinary decodes the binary to BlobPointer -// func (b *BlobPointer) UnmarshalFromBinary(blobPointerData []byte) error { -// buf := bytes.NewReader(blobPointerData) - -// if err := b.MerkleProofInput.UnmarshalFromBinary(buf); err != nil { -// return err -// } - -// if err := binary.Read(buf, binary.BigEndian, &b.BlockHash); err != nil { -// return err -// } - -// var len uint8 -// if err := binary.Read(buf, binary.BigEndian, &len); err != nil { -// return err -// } -// var senderBytes = make([]byte, len) -// if err := binary.Read(buf, binary.BigEndian, &senderBytes); err != nil { -// return err -// } -// b.Sender = string(senderBytes) -// if err := binary.Read(buf, binary.BigEndian, &b.Nonce); err != nil { -// return err -// } -// if err := binary.Read(buf, binary.BigEndian, &b.DasTreeRootHash); err != nil { -// return err -// } - -// return nil -// } diff --git a/das/avail/merkleProofInput.go b/das/avail/merkleProofInput.go index fee245d833..72891411e5 100644 --- a/das/avail/merkleProofInput.go +++ b/das/avail/merkleProofInput.go @@ -20,126 +20,13 @@ type MerkleProofInput struct { LeafIndex uint64 `json:"leafIndex"` } -// MarshalBinary encodes the MerkleProofInput to binary -// serialization format: Len(DataRootProof)+ + MerkleProofInput -// minimum size = 210 bytes -// ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ +// MarshalBinary encodes the MerkleProofInput to binary +// serialization format: Len(DataRootProof)+ + MerkleProofInput +// minimum size = 210 bytes +// ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ // -// | 1 byte uint8 : DataRootProof length | 32*(len) byte : DataRootProof | 1 byte uint8 : LeafProof length | 32*(len) byte : LeafProof | 32 byte : RangeHash | 8 byte uint64 : DataRootIndex | 32 byte : BlobRoot | 32 byte : BridgeRoot | 32 byte 
: Leaf | 8 byte uint64 : LeafIndex | +// | 1 byte uint8 : DataRootProof length | 32*(len) byte : DataRootProof | 1 byte uint8 : LeafProof length | 32*(len) byte : LeafProof | 32 byte : RangeHash | 8 byte uint64 : DataRootIndex | 32 byte : BlobRoot | 32 byte : BridgeRoot | 32 byte : Leaf | 8 byte uint64 : LeafIndex | // -// <-------- len(DataRootProof) -------->|<------- DataRootProof ------->|<------- len(LeafProof) -------->|<------- LeafProof ------->|<---- RangeHash ---->|<------- DataRootIndex ------->|<---- BlobRoot ---->|<---- BridgeRoot ---->|<---- Leaf ---->|<------- LeafIndex ------->| +// <-------- len(DataRootProof) -------->|<------- DataRootProof ------->|<------- len(LeafProof) -------->|<------- LeafProof ------->|<---- RangeHash ---->|<------- DataRootIndex ------->|<---- BlobRoot ---->|<---- BridgeRoot ---->|<---- Leaf ---->|<------- LeafIndex ------->| // -// ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ -// func (i *MerkleProofInput) MarshalToBinary() ([]byte, error) { -// buf := new(bytes.Buffer) -// err := binary.Write(buf, binary.BigEndian, uint8(len(i.DataRootProof))) -// if err != nil { -// return []byte{}, fmt.Errorf("unable to covert the merkle proof input into array of bytes and getting error:%w", err) -// } - -// err = binary.Write(buf, binary.BigEndian, i.DataRootProof) -// if err != nil { -// fmt.Println("binary.Write failed:", err) -// return []byte{}, fmt.Errorf("unable to covert the merkle proof input into array of bytes and getting error:%w", err) -// } - -// err = binary.Write(buf, binary.BigEndian, uint8(len(i.LeafProof))) -// if err != nil { -// fmt.Println("binary.Write failed:", err) -// return []byte{}, fmt.Errorf("unable to covert the merkle proof input into array of bytes and getting error:%w", err) -// } - -// err = binary.Write(buf, binary.BigEndian, i.LeafProof) -// if err != nil { -// fmt.Println("binary.Write failed:", err) -// return []byte{}, fmt.Errorf("unable to covert the merkle proof input into array of bytes and getting error:%w", err) -// } - -// err = binary.Write(buf, binary.BigEndian, i.RangeHash) -// if err != nil { -// fmt.Println("binary.Write failed:", err) -// return []byte{}, fmt.Errorf("unable to covert the merkle proof input into array of bytes and getting error:%w", err) -// } - -// err = binary.Write(buf, binary.BigEndian, i.DataRootIndex) -// if err != nil { -// fmt.Println("binary.Write failed:", err) -// return []byte{}, fmt.Errorf("unable to covert the merkle proof input into array of bytes and getting error:%w", err) -// } - -// err = binary.Write(buf, binary.BigEndian, i.BlobRoot) -// if err != nil { -// fmt.Println("binary.Write failed:", err) -// return []byte{}, fmt.Errorf("unable to covert the merkle proof input into array of bytes and getting error:%w", err) -// } - -// err = binary.Write(buf, binary.BigEndian, i.BridgeRoot) -// if err != nil { -// fmt.Println("binary.Write failed:", err) -// return []byte{}, fmt.Errorf("unable to covert the merkle proof input into array of bytes and getting error:%w", err) -// } - -// err = binary.Write(buf, binary.BigEndian, i.Leaf) -// if err != nil { -// fmt.Println("binary.Write failed:", err) -// return []byte{}, fmt.Errorf("unable to covert the merkle proof input into array of bytes and getting error:%w", err) -// } - -// err = binary.Write(buf, 
binary.BigEndian, i.LeafIndex) -// if err != nil { -// fmt.Println("binary.Write failed:", err) -// return []byte{}, fmt.Errorf("unable to covert the merkle proof input into array of bytes and getting error:%w", err) -// } - -// return buf.Bytes(), nil -// } - -// func (m *MerkleProofInput) UnmarshalFromBinary(buf *bytes.Reader) error { -// var len uint8 -// if err := binary.Read(buf, binary.BigEndian, &len); err != nil { -// return err -// } - -// m.DataRootProof = make([]gsrpc_types.Hash, len) -// for i := uint8(0); i < len; i++ { -// if err := binary.Read(buf, binary.BigEndian, &m.DataRootProof[i]); err != nil { -// return err -// } -// } - -// if err := binary.Read(buf, binary.BigEndian, &len); err != nil { -// return err -// } -// m.LeafProof = make([]gsrpc_types.Hash, len) -// for i := uint8(0); i < len; i++ { -// if err := binary.Read(buf, binary.BigEndian, &m.LeafProof[i]); err != nil { -// return err -// } -// } - -// if err := binary.Read(buf, binary.BigEndian, &m.RangeHash); err != nil { -// return err -// } - -// if err := binary.Read(buf, binary.BigEndian, &m.DataRootIndex); err != nil { -// return err -// } - -// if err := binary.Read(buf, binary.BigEndian, &m.BlobRoot); err != nil { -// return err -// } - -// if err := binary.Read(buf, binary.BigEndian, &m.BridgeRoot); err != nil { -// return err -// } - -// if err := binary.Read(buf, binary.BigEndian, &m.Leaf); err != nil { -// return err -// } - -// if err := binary.Read(buf, binary.BigEndian, &m.LeafIndex); err != nil { -// return err -// } - -// return nil -// } +// ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ diff --git a/das/avail/utils.go b/das/avail/utils.go index 746aefad3c..ccd56cb14b 100644 --- a/das/avail/utils.go +++ b/das/avail/utils.go @@ -61,7 +61,7 @@ type BridgeApiResponse struct { RangeHash gsrpc_types.Hash `json:"rangeHash"` } -func QueryMerkleProofInput(blockHash string, extrinsicIndex int) (MerkleProofInput, error) { +func QueryMerkleProofInput(blockHash string, extrinsicIndex int, t int64) (MerkleProofInput, error) { // Quering for merkle proof from Bridge Api bridgeApiBaseURL := "https://hex-bridge-api.sandbox.avail.tools/" blockHashPath := "/eth/proof/" + blockHash @@ -73,43 +73,54 @@ func QueryMerkleProofInput(blockHash string, extrinsicIndex int) (MerkleProofInp u.RawQuery = params.Encode() urlStr := fmt.Sprintf("%v", u) + var resp *http.Response + timeout := time.After(time.Duration(t) * time.Second) + +outer: for { - resp, err := http.Get(urlStr) //nolint - if err != nil { - return MerkleProofInput{}, fmt.Errorf("bridge Api request not successfull, err=%w", err) - } - defer resp.Body.Close() + select { + case <-timeout: + return MerkleProofInput{}, fmt.Errorf("⌛ī¸ Timeout of %d seconds reached without merkleProofInput from bridge-api for blockHash %v and extrinsic index %v", t, blockHash, extrinsicIndex) - if resp.StatusCode != 200 { - log.Info("MerkleProofInput is not yet available from bridge-api", "status", resp.Status) - time.Sleep(3 * time.Minute) - continue + default: + var err error + resp, err = http.Get(urlStr) //nolint + if err != nil { + return MerkleProofInput{}, fmt.Errorf("bridge Api request not successfull, err=%w", err) + } + defer resp.Body.Close() + if resp.StatusCode != 200 { + log.Info("⚠ī¸đŸĨą MerkleProofInput is not yet available from 
bridge-api", "status", resp.Status) + time.Sleep(3 * time.Minute) + continue + } + break outer } + } - body, err := io.ReadAll(resp.Body) - if err != nil { - return MerkleProofInput{}, err - } - fmt.Println(string(body)) - var bridgeApiResponse BridgeApiResponse - err = json.Unmarshal(body, &bridgeApiResponse) - if err != nil { - return MerkleProofInput{}, err - } + body, err := io.ReadAll(resp.Body) + if err != nil { + return MerkleProofInput{}, err + } + fmt.Println(string(body)) + var bridgeApiResponse BridgeApiResponse + err = json.Unmarshal(body, &bridgeApiResponse) + if err != nil { + return MerkleProofInput{}, err + } - var dataRootProof [][32]byte - for _, hash := range bridgeApiResponse.DataRootProof { - var byte32Array [32]byte - copy(byte32Array[:], hash[:]) - dataRootProof = append(dataRootProof, byte32Array) - } - var leafProof [][32]byte - for _, hash := range bridgeApiResponse.LeafProof { - var byte32Array [32]byte - copy(byte32Array[:], hash[:]) - leafProof = append(leafProof, byte32Array) - } - var merkleProofInput MerkleProofInput = MerkleProofInput{dataRootProof, leafProof, bridgeApiResponse.RangeHash, bridgeApiResponse.DataRootIndex, bridgeApiResponse.BlobRoot, bridgeApiResponse.BridgeRoot, bridgeApiResponse.Leaf, bridgeApiResponse.LeafIndex} - return merkleProofInput, nil + var dataRootProof [][32]byte + for _, hash := range bridgeApiResponse.DataRootProof { + var byte32Array [32]byte + copy(byte32Array[:], hash[:]) + dataRootProof = append(dataRootProof, byte32Array) + } + var leafProof [][32]byte + for _, hash := range bridgeApiResponse.LeafProof { + var byte32Array [32]byte + copy(byte32Array[:], hash[:]) + leafProof = append(leafProof, byte32Array) } + var merkleProofInput MerkleProofInput = MerkleProofInput{dataRootProof, leafProof, bridgeApiResponse.RangeHash, bridgeApiResponse.DataRootIndex, bridgeApiResponse.BlobRoot, bridgeApiResponse.BridgeRoot, bridgeApiResponse.Leaf, bridgeApiResponse.LeafIndex} + return merkleProofInput, nil } diff --git a/das/avail/vectorx/vectorx.go b/das/avail/vectorx/vectorx.go index c5f895d14b..42291ef68c 100644 --- a/das/avail/vectorx/vectorx.go +++ b/das/avail/vectorx/vectorx.go @@ -50,7 +50,7 @@ func (v *VectorX) SubscribeForHeaderUpdate(finalizedBlockNumber int, t int64) er } defer sub.Unsubscribe() - log.Info("🎧 Listening for vectorx HeadUpdate event") + log.Info("🎧 Listening for vectorx HeadUpdate event with", "blockNumber", finalizedBlockNumber) timeout := time.After(time.Duration(t) * time.Second) // Loop to process incoming events for { From 154e91e6633633c9ab4fa3227dcfb293e6c8e37f Mon Sep 17 00:00:00 2001 From: rishabhagrawalzra Date: Fri, 10 May 2024 16:15:50 +0530 Subject: [PATCH 22/22] chore: avail package clean up --- contracts | 2 +- das/avail/avail.go | 203 +++++++++++++++++------------------ das/avail/avail_test.go | 29 +---- das/avail/utils.go | 87 +++++++++------ das/avail/vectorx/vectorx.go | 4 +- 5 files changed, 159 insertions(+), 166 deletions(-) diff --git a/contracts b/contracts index ce79d6f472..ba5a33f341 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit ce79d6f472ad99c93403a2f15960c62003bc4277 +Subproject commit ba5a33f34114ed66e2d78f3d44465a470ebadcc8 diff --git a/das/avail/avail.go b/das/avail/avail.go index 9cf201eeb1..215384f6cd 100644 --- a/das/avail/avail.go +++ b/das/avail/avail.go @@ -10,7 +10,6 @@ import ( gsrpc "github.com/centrifuge/go-substrate-rpc-client/v4" "github.com/centrifuge/go-substrate-rpc-client/v4/signature" gsrpc_types 
"github.com/centrifuge/go-substrate-rpc-client/v4/types" - "github.com/centrifuge/go-substrate-rpc-client/v4/types/codec" "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/common" @@ -19,7 +18,6 @@ import ( "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/das/avail/vectorx" "github.com/offchainlabs/nitro/das/dastree" - "github.com/vedhavyas/go-subkey" ) // AvailMessageHeaderFlag indicates that this data is a Blob Pointer @@ -31,16 +29,19 @@ func IsAvailMessageHeaderByte(header byte) bool { } type AvailDA struct { - enable bool - vectorx vectorx.VectorX - timeout time.Duration - appID int - api *gsrpc.SubstrateAPI - meta *gsrpc_types.Metadata - genesisHash gsrpc_types.Hash - rv *gsrpc_types.RuntimeVersion - keyringPair signature.KeyringPair - key gsrpc_types.StorageKey + enable bool + vectorx vectorx.VectorX + finalizationTimeout time.Duration + appID int + api *gsrpc.SubstrateAPI + meta *gsrpc_types.Metadata + genesisHash gsrpc_types.Hash + rv *gsrpc_types.RuntimeVersion + keyringPair signature.KeyringPair + key gsrpc_types.StorageKey + bridgeApiBaseURL string + bridgeApiTimeout time.Duration + vectorXTimeout time.Duration } func NewAvailDA(cfg DAConfig, l1Client arbutil.L1Interface) (*AvailDA, error) { @@ -113,25 +114,91 @@ func NewAvailDA(cfg DAConfig, l1Client arbutil.L1Interface) (*AvailDA, error) { } return &AvailDA{ - enable: cfg.Enable, - vectorx: vectorx.VectorX{Abi: abi, Client: client, Query: query}, - timeout: cfg.Timeout, - appID: appID, - api: api, - meta: meta, - genesisHash: genesisHash, - rv: rv, - keyringPair: keyringPair, - key: key, + enable: cfg.Enable, + vectorx: vectorx.VectorX{Abi: abi, Client: client, Query: query}, + finalizationTimeout: time.Duration(cfg.Timeout), + appID: appID, + api: api, + meta: meta, + genesisHash: genesisHash, + rv: rv, + keyringPair: keyringPair, + key: key, + bridgeApiBaseURL: "https://turing-bridge-api.fra.avail.so/", + bridgeApiTimeout: time.Duration(1200), + vectorXTimeout: time.Duration(10000), }, nil } func (a *AvailDA) Store(ctx context.Context, message []byte) ([]byte, error) { + finalizedblockHash, nonce, err := submitData(a, message) + if err != nil { + return nil, fmt.Errorf("cannot submit data to avail:%+v", err) + } + + header, err := a.api.RPC.Chain.GetHeader(finalizedblockHash) + if err != nil { + return nil, fmt.Errorf("cannot get header for finalized block:%+v", err) + } + + err = a.vectorx.SubscribeForHeaderUpdate(int(header.Number), a.vectorXTimeout) + if err != nil { + return nil, fmt.Errorf("cannot get the event for header update on vectorx:%+v", err) + } + + extrinsicIndex, err := GetExtrinsicIndex(a.api, finalizedblockHash, a.keyringPair.Address, nonce) + if err != nil { + return nil, err + } + log.Info("Finalized extrinsic", "extrinsicIndex", extrinsicIndex) + + merkleProofInput, err := QueryMerkleProofInput(a.bridgeApiBaseURL, finalizedblockHash.Hex(), extrinsicIndex, a.bridgeApiTimeout) + if err != nil { + return nil, err + } + + // Creating BlobPointer to submit over settlement layer + blobPointer := BlobPointer{BlockHash: finalizedblockHash, Sender: a.keyringPair.Address, Nonce: uint32(nonce.Int64()), DasTreeRootHash: dastree.Hash(message), MerkleProofInput: merkleProofInput} + log.Info("✅ Sucesfully included in block data to Avail", "BlobPointer:", blobPointer) + blobPointerData, err := blobPointer.MarshalToBinary() + if err != nil { + log.Warn("⚠ī¸ BlobPointer MashalBinary error", "err", err) + return nil, err + } + + return 
blobPointerData, nil +} + +func (a *AvailDA) Read(ctx context.Context, blobPointer BlobPointer) ([]byte, error) { + log.Info("ℹī¸ Requesting data from Avail", "BlobPointer", blobPointer) + + // Intitializing variables + BlockHash := blobPointer.BlockHash + Address := blobPointer.Sender + Nonce := gsrpc_types.NewUCompactFromUInt(uint64(blobPointer.Nonce)) + + // Fetching block based on block hash + avail_blk, err := a.api.RPC.Chain.GetBlock(BlockHash) + if err != nil { + return []byte{}, fmt.Errorf("❌ cannot get block for hash:%v and getting error:%w", BlockHash.Hex(), err) + } + + // Extracting the required extrinsic according to the reference + data, err := extractExtrinsic(Address, Nonce.Int64(), avail_blk) + if err != nil { + return nil, err + } + + log.Info("✅ Succesfully fetched data from Avail") + return data, nil +} + +func submitData(a *AvailDA, message []byte) (gsrpc_types.Hash, gsrpc_types.UCompact, error) { c, err := gsrpc_types.NewCall(a.meta, "DataAvailability.submit_data", gsrpc_types.NewBytes(message)) if err != nil { log.Warn("⚠ī¸ cannot create new call: error:%v", err) - return nil, err + return gsrpc_types.Hash{}, gsrpc_types.UCompact{}, err } // Create the extrinsic @@ -141,15 +208,14 @@ func (a *AvailDA) Store(ctx context.Context, message []byte) ([]byte, error) { ok, err := a.api.RPC.State.GetStorageLatest(a.key, &accountInfo) if err != nil || !ok { log.Warn("⚠ī¸ cannot get latest storage: error:%v", err) - return nil, err + return gsrpc_types.Hash{}, gsrpc_types.UCompact{}, err } - nonce := GetAccountNonce(uint32(accountInfo.Nonce)) o := gsrpc_types.SignatureOptions{ BlockHash: a.genesisHash, Era: gsrpc_types.ExtrinsicEra{IsMortalEra: false}, GenesisHash: a.genesisHash, - Nonce: gsrpc_types.NewUCompactFromUInt(uint64(nonce)), + Nonce: gsrpc_types.NewUCompactFromUInt(uint64(accountInfo.Nonce)), SpecVersion: a.rv.SpecVersion, Tip: gsrpc_types.NewUCompactFromUInt(0), AppID: gsrpc_types.NewUCompactFromUInt(uint64(a.appID)), @@ -160,20 +226,20 @@ func (a *AvailDA) Store(ctx context.Context, message []byte) ([]byte, error) { err = ext.Sign(a.keyringPair, o) if err != nil { log.Warn("⚠ī¸ cannot sign: error:%v", err) - return nil, err + return gsrpc_types.Hash{}, gsrpc_types.UCompact{}, err } // Send the extrinsic sub, err := a.api.RPC.Author.SubmitAndWatchExtrinsic(ext) if err != nil { log.Warn("⚠ī¸ cannot submit extrinsic: error:%v", err) - return nil, err + return gsrpc_types.Hash{}, gsrpc_types.UCompact{}, err } log.Info("✅ Tx batch is submitted to Avail", "length", len(message), "address", a.keyringPair.Address, "appID", a.appID) defer sub.Unsubscribe() - timeout := time.After(time.Duration(a.timeout) * time.Second) + timeout := time.After(a.finalizationTimeout * time.Second) var finalizedblockHash gsrpc_types.Hash outer: @@ -187,85 +253,18 @@ outer: finalizedblockHash = status.AsFinalized break outer } else if status.IsDropped { - return nil, fmt.Errorf("❌ Extrinsic dropped") + return gsrpc_types.Hash{}, gsrpc_types.UCompact{}, fmt.Errorf("❌ Extrinsic dropped") } else if status.IsUsurped { - return nil, fmt.Errorf("❌ Extrinsic usurped") + return gsrpc_types.Hash{}, gsrpc_types.UCompact{}, fmt.Errorf("❌ Extrinsic usurped") } else if status.IsRetracted { - return nil, fmt.Errorf("❌ Extrinsic retracted") + return gsrpc_types.Hash{}, gsrpc_types.UCompact{}, fmt.Errorf("❌ Extrinsic retracted") } else if status.IsInvalid { - return nil, fmt.Errorf("❌ Extrinsic invalid") + return gsrpc_types.Hash{}, gsrpc_types.UCompact{}, fmt.Errorf("❌ Extrinsic invalid") } case 
<-timeout: - return nil, fmt.Errorf("⌛ī¸ Timeout of %d seconds reached without getting finalized status for extrinsic", a.timeout) - } - } - - header, err := a.api.RPC.Chain.GetHeader(finalizedblockHash) - if err != nil { - return nil, fmt.Errorf("cannot get header for finalized block:%+v", err) - } - - err = a.vectorx.SubscribeForHeaderUpdate(int(header.Number), 7200) - if err != nil { - return nil, fmt.Errorf("cannot get the event for header update on vectorx:%+v", err) - } - - extrinsicIndex, err := GetExtrinsicIndex(a.api, finalizedblockHash, a.keyringPair.Address, o.Nonce) - if err != nil { - return nil, err - } - log.Info("Finalized extrinsic", "extrinsicIndex", extrinsicIndex) - - merkleProofInput, err := QueryMerkleProofInput(finalizedblockHash.Hex(), extrinsicIndex, 1200) - if err != nil { - return nil, err - } - - // Creating BlobPointer to submit over settlement layer - blobPointer := BlobPointer{BlockHash: finalizedblockHash, Sender: a.keyringPair.Address, Nonce: nonce, DasTreeRootHash: dastree.Hash(message), MerkleProofInput: merkleProofInput} - log.Info("✅ Sucesfully included in block data to Avail", "BlobPointer:", blobPointer) - blobPointerData, err := blobPointer.MarshalToBinary() - if err != nil { - log.Warn("⚠ī¸ BlobPointer MashalBinary error", "err", err) - return nil, err - } - - return blobPointerData, nil -} - -func (a *AvailDA) Read(ctx context.Context, blobPointer BlobPointer) ([]byte, error) { - log.Info("ℹī¸ Requesting data from Avail", "BlobPointer", blobPointer) - - // Intitializing variables - BlockHash := blobPointer.BlockHash - Address := blobPointer.Sender - Nonce := gsrpc_types.NewUCompactFromUInt(uint64(blobPointer.Nonce)) - - // Fetching block based on block hash - avail_blk, err := a.api.RPC.Chain.GetBlock(BlockHash) - if err != nil { - return []byte{}, fmt.Errorf("❌ cannot get block for hash:%v and getting error:%w", BlockHash.Hex(), err) - } - - // Extracting the required extrinsic according to the reference - for _, ext := range avail_blk.Block.Extrinsics { - // Extracting sender address for extrinsic - ext_Addr, err := subkey.SS58Address(ext.Signature.Signer.AsID.ToBytes(), 42) - if err != nil { - log.Error("❌ unable to get sender address from extrinsic", "err", err) - } - - if ext_Addr == Address && ext.Signature.Nonce.Int64() == Nonce.Int64() { - args := ext.Method.Args - var data []byte - err = codec.Decode(args, &data) - if err != nil { - return []byte{}, fmt.Errorf("❌ unable to decode the extrinsic data by address: %v with nonce: %v", Address, Nonce) - } - return data, nil + return gsrpc_types.Hash{}, gsrpc_types.UCompact{}, fmt.Errorf("⌛ī¸ Timeout of %d seconds reached without getting finalized status for extrinsic", a.finalizationTimeout) } } - log.Info("✅ Succesfully fetched data from Avail") - return nil, fmt.Errorf("❌ unable to find any extrinsic for this blobPointer:%+v", blobPointer) + return finalizedblockHash, o.Nonce, nil } diff --git a/das/avail/avail_test.go b/das/avail/avail_test.go index a945728c83..aeeec6c8f9 100644 --- a/das/avail/avail_test.go +++ b/das/avail/avail_test.go @@ -1,10 +1,7 @@ package avail import ( - "encoding/json" "fmt" - "io" - "net/http" "net/url" "testing" @@ -25,35 +22,13 @@ func TestMarshallingAndUnmarshalingBlobPointer(t *testing.T) { urlStr := fmt.Sprintf("%v", u) t.Log(urlStr) // TODO: Add time difference between batch submission and querying merkle proof - resp, err := http.Get(urlStr) //nolint + bridgeApiResponse, err := queryForBridgeApiRespose(600, urlStr) if err != nil { t.Fatalf("Bridge Api 
request not successfull, err=%v", err) } - defer resp.Body.Close() - body, err := io.ReadAll(resp.Body) - if err != nil { - t.Fatalf("unable to read data from response body, err=%v", err) - } - var bridgeApiResponse BridgeApiResponse - err = json.Unmarshal(body, &bridgeApiResponse) - if err != nil { - t.Fatalf("unable to unmarshal bridge api response, err=%v", err) - } t.Logf("%+v", bridgeApiResponse) - var dataRootProof [][32]byte - for _, hash := range bridgeApiResponse.DataRootProof { - var byte32Array [32]byte - copy(byte32Array[:], hash[:]) - dataRootProof = append(dataRootProof, byte32Array) - } - var leafProof [][32]byte - for _, hash := range bridgeApiResponse.LeafProof { - var byte32Array [32]byte - copy(byte32Array[:], hash[:]) - leafProof = append(leafProof, byte32Array) - } - var merkleProofInput MerkleProofInput = MerkleProofInput{dataRootProof, leafProof, bridgeApiResponse.RangeHash, bridgeApiResponse.DataRootIndex, bridgeApiResponse.BlobRoot, bridgeApiResponse.BridgeRoot, bridgeApiResponse.Leaf, bridgeApiResponse.LeafIndex} + merkleProofInput := createMerkleProofInput(bridgeApiResponse) t.Logf("%+v", merkleProofInput) var blobPointer BlobPointer = BlobPointer{gsrpc_types.NewHash([]byte{245, 54, 19, 250, 6, 182, 183, 249, 220, 94, 76, 245, 242, 132, 154, 255, 201, 78, 25, 216, 169, 232, 153, 146, 7, 236, 224, 17, 117, 201, 136, 237}), diff --git a/das/avail/utils.go b/das/avail/utils.go index ccd56cb14b..b6e7a1a3c2 100644 --- a/das/avail/utils.go +++ b/das/avail/utils.go @@ -10,21 +10,11 @@ import ( gsrpc "github.com/centrifuge/go-substrate-rpc-client/v4" gsrpc_types "github.com/centrifuge/go-substrate-rpc-client/v4/types" + "github.com/centrifuge/go-substrate-rpc-client/v4/types/codec" "github.com/ethereum/go-ethereum/log" "github.com/vedhavyas/go-subkey" ) -var localNonce uint32 = 0 - -func GetAccountNonce(accountNonce uint32) uint32 { - if accountNonce > localNonce { - localNonce = accountNonce - return accountNonce - } - localNonce++ - return localNonce -} - func GetExtrinsicIndex(api *gsrpc.SubstrateAPI, blockHash gsrpc_types.Hash, address string, nonce gsrpc_types.UCompact) (int, error) { // Fetching block based on block hash avail_blk, err := api.RPC.Chain.GetBlock(blockHash) @@ -47,6 +37,27 @@ func GetExtrinsicIndex(api *gsrpc.SubstrateAPI, blockHash gsrpc_types.Hash, addr return -1, fmt.Errorf("❌ unable to find any extrinsic in block %v, from address %v with nonce %v", blockHash, address, nonce) } +func extractExtrinsic(address string, nonce int64, avail_blk *gsrpc_types.SignedBlock) ([]byte, error) { + for _, ext := range avail_blk.Block.Extrinsics { + // Extracting sender address for extrinsic + ext_Addr, err := subkey.SS58Address(ext.Signature.Signer.AsID.ToBytes(), 42) + if err != nil { + log.Error("❌ unable to get sender address from extrinsic", "err", err) + } + + if ext_Addr == address && ext.Signature.Nonce.Int64() == nonce { + args := ext.Method.Args + var data []byte + err = codec.Decode(args, &data) + if err != nil { + return []byte{}, fmt.Errorf("❌ unable to decode the extrinsic data by address: %v with nonce: %v", address, nonce) + } + return data, nil + } + } + return nil, fmt.Errorf("❌ unable to find any extrinsic") +} + type BridgeApiResponse struct { BlobRoot gsrpc_types.Hash `json:"blobRoot"` BlockHash gsrpc_types.Hash `json:"blockHash"` @@ -61,9 +72,8 @@ type BridgeApiResponse struct { RangeHash gsrpc_types.Hash `json:"rangeHash"` } -func QueryMerkleProofInput(blockHash string, extrinsicIndex int, t int64) (MerkleProofInput, error) { +func 
QueryMerkleProofInput(bridgeApiBaseURL string, blockHash string, extrinsicIndex int, t time.Duration) (MerkleProofInput, error) { // Quering for merkle proof from Bridge Api - bridgeApiBaseURL := "https://hex-bridge-api.sandbox.avail.tools/" blockHashPath := "/eth/proof/" + blockHash params := url.Values{} params.Add("index", fmt.Sprint(extrinsicIndex)) @@ -73,20 +83,29 @@ func QueryMerkleProofInput(blockHash string, extrinsicIndex int, t int64) (Merkl u.RawQuery = params.Encode() urlStr := fmt.Sprintf("%v", u) - var resp *http.Response - timeout := time.After(time.Duration(t) * time.Second) + bridgeApiResponse, err := queryForBridgeApiRespose(t, urlStr) + if err != nil { + return MerkleProofInput{}, fmt.Errorf("failed querying bridgeApiResponse for blockHash:%v and extrinsicIndex:%v, error:%w", blockHash, extrinsicIndex, err) + } + + merkleProofInput := createMerkleProofInput(bridgeApiResponse) + + return merkleProofInput, nil +} -outer: +func queryForBridgeApiRespose(t time.Duration, urlStr string) (BridgeApiResponse, error) { + var resp *http.Response + timeout := time.After(t * time.Second) for { select { case <-timeout: - return MerkleProofInput{}, fmt.Errorf("⌛ī¸ Timeout of %d seconds reached without merkleProofInput from bridge-api for blockHash %v and extrinsic index %v", t, blockHash, extrinsicIndex) + return BridgeApiResponse{}, fmt.Errorf("⌛ī¸ Timeout of %f min reached without merkleProofInput from bridge-api", t.Minutes()) default: var err error resp, err = http.Get(urlStr) //nolint if err != nil { - return MerkleProofInput{}, fmt.Errorf("bridge Api request not successfull, err=%w", err) + return BridgeApiResponse{}, err } defer resp.Body.Close() if resp.StatusCode != 200 { @@ -94,33 +113,33 @@ outer: time.Sleep(3 * time.Minute) continue } - break outer + body, err := io.ReadAll(resp.Body) + if err != nil { + return BridgeApiResponse{}, err + } + var bridgeApiResponse BridgeApiResponse + err = json.Unmarshal(body, &bridgeApiResponse) + if err != nil { + return BridgeApiResponse{}, err + } + return bridgeApiResponse, nil } } +} - body, err := io.ReadAll(resp.Body) - if err != nil { - return MerkleProofInput{}, err - } - fmt.Println(string(body)) - var bridgeApiResponse BridgeApiResponse - err = json.Unmarshal(body, &bridgeApiResponse) - if err != nil { - return MerkleProofInput{}, err - } - +func createMerkleProofInput(b BridgeApiResponse) MerkleProofInput { var dataRootProof [][32]byte - for _, hash := range bridgeApiResponse.DataRootProof { + for _, hash := range b.DataRootProof { var byte32Array [32]byte copy(byte32Array[:], hash[:]) dataRootProof = append(dataRootProof, byte32Array) } var leafProof [][32]byte - for _, hash := range bridgeApiResponse.LeafProof { + for _, hash := range b.LeafProof { var byte32Array [32]byte copy(byte32Array[:], hash[:]) leafProof = append(leafProof, byte32Array) } - var merkleProofInput MerkleProofInput = MerkleProofInput{dataRootProof, leafProof, bridgeApiResponse.RangeHash, bridgeApiResponse.DataRootIndex, bridgeApiResponse.BlobRoot, bridgeApiResponse.BridgeRoot, bridgeApiResponse.Leaf, bridgeApiResponse.LeafIndex} - return merkleProofInput, nil + var merkleProofInput MerkleProofInput = MerkleProofInput{dataRootProof, leafProof, b.RangeHash, b.DataRootIndex, b.BlobRoot, b.BridgeRoot, b.Leaf, b.LeafIndex} + return merkleProofInput } diff --git a/das/avail/vectorx/vectorx.go b/das/avail/vectorx/vectorx.go index 42291ef68c..36c5f7bec6 100644 --- a/das/avail/vectorx/vectorx.go +++ b/das/avail/vectorx/vectorx.go @@ -41,7 +41,7 @@ type VectorX 
struct { Query ethereum.FilterQuery } -func (v *VectorX) SubscribeForHeaderUpdate(finalizedBlockNumber int, t int64) error { +func (v *VectorX) SubscribeForHeaderUpdate(finalizedBlockNumber int, t time.Duration) error { // Subscribe to the event stream logs := make(chan types.Log) sub, err := v.Client.SubscribeFilterLogs(context.Background(), v.Query, logs) @@ -51,7 +51,7 @@ func (v *VectorX) SubscribeForHeaderUpdate(finalizedBlockNumber int, t int64) er defer sub.Unsubscribe() log.Info("🎧 Listening for vectorx HeadUpdate event with", "blockNumber", finalizedBlockNumber) - timeout := time.After(time.Duration(t) * time.Second) + timeout := time.After(t * time.Second) // Loop to process incoming events for { select {
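For readers following the bridge-api flow in the patches above, the sketch below distills the polling pattern that QueryMerkleProofInput and queryForBridgeApiRespose implement: build /eth/proof/{blockHash}?index={extrinsicIndex} against the bridge API base URL, retry until the proof endpoint returns 200 or a deadline expires, then decode the JSON body. It is a minimal, self-contained illustration, not the code from this series: the response struct is trimmed to a few of the BridgeApiResponse fields, and the base URL, block hash, extrinsic index, retry interval, and deadline used in main are placeholder assumptions.

// poll_bridge_proof.go — hedged sketch of the Avail bridge-API polling pattern.
package main

import (
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"time"
)

// proofResponse keeps only a few illustrative fields; the real BridgeApiResponse
// in das/avail/utils.go carries the full Merkle proof payload.
type proofResponse struct {
	BlobRoot  string `json:"blobRoot"`
	RangeHash string `json:"rangeHash"`
	LeafIndex uint64 `json:"leafIndex"`
}

// pollBridgeProof queries {baseURL}/eth/proof/{blockHash}?index={extrinsicIndex}
// until the proof is served or the deadline passes.
func pollBridgeProof(baseURL, blockHash string, extrinsicIndex int, deadline, interval time.Duration) (proofResponse, error) {
	u, err := url.Parse(baseURL)
	if err != nil {
		return proofResponse{}, err
	}
	u.Path = "/eth/proof/" + blockHash
	q := url.Values{}
	q.Add("index", fmt.Sprint(extrinsicIndex))
	u.RawQuery = q.Encode()

	timeout := time.After(deadline)
	for {
		// Give up once the overall deadline fires.
		select {
		case <-timeout:
			return proofResponse{}, fmt.Errorf("no proof for block %s (index %d) within %s", blockHash, extrinsicIndex, deadline)
		default:
		}

		resp, err := http.Get(u.String())
		if err != nil {
			return proofResponse{}, err
		}
		body, readErr := io.ReadAll(resp.Body)
		resp.Body.Close()
		if readErr != nil {
			return proofResponse{}, readErr
		}
		if resp.StatusCode != http.StatusOK {
			// Proof not generated yet; wait and retry until the deadline fires.
			time.Sleep(interval)
			continue
		}
		var out proofResponse
		if err := json.Unmarshal(body, &out); err != nil {
			return proofResponse{}, err
		}
		return out, nil
	}
}

func main() {
	// Hypothetical block hash and extrinsic index, purely for illustration.
	proof, err := pollBridgeProof("https://turing-bridge-api.fra.avail.so", "0xabc123", 1, 20*time.Minute, 3*time.Minute)
	if err != nil {
		fmt.Println("proof not available:", err)
		return
	}
	fmt.Printf("blobRoot=%s rangeHash=%s leafIndex=%d\n", proof.BlobRoot, proof.RangeHash, proof.LeafIndex)
}

Two small design choices in the sketch: the response body is closed at the end of each iteration rather than deferred inside the loop, so a long poll does not accumulate open bodies, and the deadline and interval are passed as fully formed time.Duration values (for example 20*time.Minute), which sidesteps the seconds-versus-Duration scaling that the t*time.Second expressions in utils.go and vectorx.go otherwise have to keep consistent with their callers.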