diff --git a/Makefile b/Makefile
index 951eb6d93c..406be65c74 100644
--- a/Makefile
+++ b/Makefile
@@ -133,7 +133,9 @@ node1: juno-cached
 		--p2p-peers=/ip4/127.0.0.1/tcp/7777/p2p/12D3KooWLdURCjbp1D7hkXWk6ZVfcMDPtsNnPHuxoTcWXFtvrxGG \
 		--p2p-addr=/ip4/0.0.0.0/tcp/7778 \
 		--p2p-private-key="8aeffc26c3c371565dbe634c5248ae26f4fa5c33bc8f7328ac95e73fb94eaf263550f02449521f7cf64af17d248c5f170be46c06986a29803124c0819cb8fac3" \
-		--metrics-port=9091
+		--metrics-port=9091 \
+		--pprof \
+		--pprof-port=9096

 # --p2p-peers=/ip4/127.0.0.1/tcp/7778/p2p/12D3KooWDQVMmK6cQrfFcWUoFF8Ch5vYegfwiP5Do2SFC2NAXeBk \
diff --git a/adapters/p2p2core/felt.go b/adapters/p2p2core/felt.go
index cd4b3080b2..0d200fcf18 100644
--- a/adapters/p2p2core/felt.go
+++ b/adapters/p2p2core/felt.go
@@ -31,7 +31,6 @@ func adapt(v interface{ GetElements() []byte }) *felt.Felt {
 	if v == nil || reflect.ValueOf(v).IsNil() {
 		return nil
 	}
-
 	return new(felt.Felt).SetBytes(v.GetElements())
 }

diff --git a/blockchain/blockchain.go b/blockchain/blockchain.go
index 4aa6659b98..19fc26c6c4 100644
--- a/blockchain/blockchain.go
+++ b/blockchain/blockchain.go
@@ -74,6 +74,8 @@ type Blockchain struct {
 	network  *utils.Network
 	database db.DB

+	snapshots []*snapshotRecord
+
 	listener EventListener

 	cachedPending atomic.Pointer[Pending]
@@ -81,11 +83,20 @@
 func New(database db.DB, network *utils.Network) *Blockchain {
 	RegisterCoreTypesToEncoder()
-	return &Blockchain{
+	bc := &Blockchain{
 		database: database,
 		network:  network,
 		listener: &SelectiveListener{},
 	}
+
+	// TODO: seeding a snapshot here is used only for snap sync and its tests;
+	// uncomment when we need it again.
+	// err := bc.seedSnapshot()
+	// if err != nil {
+	// 	fmt.Printf("Error seeding snapshot %s", err)
+	// }
+
+	return bc
 }

 func (b *Blockchain) WithListener(listener EventListener) *Blockchain {
@@ -336,7 +347,7 @@ func (b *Blockchain) SetL1Head(update *core.L1Head) error {
 func (b *Blockchain) Store(block *core.Block, blockCommitments *core.BlockCommitments,
 	stateUpdate *core.StateUpdate, newClasses map[felt.Felt]core.Class,
 ) error {
-	return b.database.Update(func(txn db.Transaction) error {
+	err := b.database.Update(func(txn db.Transaction) error {
 		if err := verifyBlock(txn, block); err != nil {
 			return err
 		}
@@ -372,6 +383,50 @@
 		heightBin := core.MarshalBlockNumber(block.Number)
 		return txn.Set(db.ChainHeight.Key(), heightBin)
 	})
+	if err != nil {
+		return err
+	}
+
+	// TODO: the following is only used for snap sync, uncomment when we need it again
+	// err = b.seedSnapshot()
+	// if err != nil {
+	// 	return err
+	// }
+
+	return nil
+}
+
+func (b *Blockchain) StoreRaw(blockNumber uint64, stateDiff *core.StateDiff) error {
+	return b.database.Update(func(txn db.Transaction) error {
+		return core.NewState(txn).UpdateNoVerify(blockNumber, stateDiff, make(map[felt.Felt]core.Class))
+	})
+}
+
+func (b *Blockchain) PutClasses(blockNumber uint64, classHashes map[felt.Felt]*felt.Felt, newClasses map[felt.Felt]core.Class) error {
+	return b.database.Update(func(txn db.Transaction) error {
+		v1ClassHashes := map[felt.Felt]*felt.Felt{}
+		for ch, class := range newClasses {
+			if class.Version() == 1 {
+				v1ClassHashes[ch] = classHashes[ch]
+			}
+		}
+
+		return core.NewState(txn).UpdateNoVerify(blockNumber, &core.StateDiff{
+			DeclaredV1Classes: v1ClassHashes,
+		}, newClasses)
+	})
+}
+
+func (b *Blockchain) PutContracts(address, nonces, classHash []*felt.Felt) error {
+	return
b.database.Update(func(txn db.Transaction) error { + return core.NewState(txn).UpdateContractNoLog(address, nonces, classHash) + }) +} + +func (b *Blockchain) PutStorage(storage map[felt.Felt]map[felt.Felt]*felt.Felt) error { + return b.database.Update(func(txn db.Transaction) error { + return core.NewState(txn).UpdateContractStorages(storage) + }) } // VerifyBlock assumes the block has already been sanity-checked. @@ -769,6 +824,16 @@ func (b *Blockchain) HeadState() (core.StateReader, StateCloser, error) { return core.NewState(txn), txn.Discard, nil } +func (b *Blockchain) HeadStateFreakingState() (*core.State, StateCloser, error) { + b.listener.OnRead("HeadState") + txn, err := b.database.NewTransaction(false) + if err != nil { + return nil, nil, err + } + + return core.NewState(txn), txn.Discard, nil +} + // StateAtBlockNumber returns a StateReader that provides a stable view to the state at the given block number func (b *Blockchain) StateAtBlockNumber(blockNumber uint64) (core.StateReader, StateCloser, error) { b.listener.OnRead("StateAtBlockNumber") diff --git a/blockchain/blockchain_test.go b/blockchain/blockchain_test.go index c0c3666a91..abb70e01b7 100644 --- a/blockchain/blockchain_test.go +++ b/blockchain/blockchain_test.go @@ -27,6 +27,7 @@ func TestNew(t *testing.T) { gw := adaptfeeder.New(client) t.Run("empty blockchain's head is nil", func(t *testing.T) { chain := blockchain.New(pebble.NewMemTest(t), &utils.Mainnet) + defer chain.Close() assert.Equal(t, &utils.Mainnet, chain.Network()) b, err := chain.Head() assert.Nil(t, b) @@ -42,8 +43,10 @@ func TestNew(t *testing.T) { testDB := pebble.NewMemTest(t) chain := blockchain.New(testDB, &utils.Mainnet) assert.NoError(t, chain.Store(block0, &emptyCommitments, stateUpdate0, nil)) + chain.Close() chain = blockchain.New(testDB, &utils.Mainnet) + defer chain.Close() b, err := chain.Head() require.NoError(t, err) assert.Equal(t, block0, b) @@ -55,6 +58,7 @@ func TestHeight(t *testing.T) { gw := adaptfeeder.New(client) t.Run("return nil if blockchain is empty", func(t *testing.T) { chain := blockchain.New(pebble.NewMemTest(t), &utils.Sepolia) + defer chain.Close() _, err := chain.Height() assert.Error(t, err) }) @@ -68,16 +72,19 @@ func TestHeight(t *testing.T) { testDB := pebble.NewMemTest(t) chain := blockchain.New(testDB, &utils.Mainnet) assert.NoError(t, chain.Store(block0, &emptyCommitments, stateUpdate0, nil)) + chain.Close() chain = blockchain.New(testDB, &utils.Mainnet) height, err := chain.Height() require.NoError(t, err) assert.Equal(t, block0.Number, height) + chain.Close() }) } func TestBlockByNumberAndHash(t *testing.T) { chain := blockchain.New(pebble.NewMemTest(t), &utils.Sepolia) + defer chain.Close() t.Run("same block is returned for both GetBlockByNumber and GetBlockByHash", func(t *testing.T) { client := feeder.NewTestClient(t, &utils.Mainnet) gw := adaptfeeder.New(client) @@ -114,6 +121,7 @@ func TestVerifyBlock(t *testing.T) { require.NoError(t, err) chain := blockchain.New(pebble.NewMemTest(t), &utils.Mainnet) + defer chain.Close() t.Run("error if chain is empty and incoming block number is not 0", func(t *testing.T) { block := &core.Block{Header: &core.Header{Number: 10}} @@ -176,6 +184,7 @@ func TestSanityCheckNewHeight(t *testing.T) { require.NoError(t, err) chain := blockchain.New(pebble.NewMemTest(t), &utils.Mainnet) + defer chain.Close() client := feeder.NewTestClient(t, &utils.Mainnet) @@ -221,6 +230,7 @@ func TestStore(t *testing.T) { t.Run("add block to empty blockchain", func(t *testing.T) { chain := 
blockchain.New(pebble.NewMemTest(t), &utils.Mainnet) + defer chain.Close() require.NoError(t, chain.Store(block0, &emptyCommitments, stateUpdate0, nil)) headBlock, err := chain.Head() @@ -247,6 +257,7 @@ func TestStore(t *testing.T) { require.NoError(t, err) chain := blockchain.New(pebble.NewMemTest(t), &utils.Mainnet) + defer chain.Close() require.NoError(t, chain.Store(block0, &emptyCommitments, stateUpdate0, nil)) require.NoError(t, chain.Store(block1, &emptyCommitments, stateUpdate1, nil)) @@ -270,6 +281,7 @@ func TestStore(t *testing.T) { func TestBlockCommitments(t *testing.T) { chain := blockchain.New(pebble.NewMemTest(t), &utils.Mainnet) + defer chain.Close() client := feeder.NewTestClient(t, &utils.Mainnet) gw := adaptfeeder.New(client) @@ -295,6 +307,7 @@ func TestBlockCommitments(t *testing.T) { func TestTransactionAndReceipt(t *testing.T) { chain := blockchain.New(pebble.NewMemTest(t), &utils.Mainnet) + defer chain.Close() client := feeder.NewTestClient(t, &utils.Mainnet) gw := adaptfeeder.New(client) @@ -383,6 +396,7 @@ func TestTransactionAndReceipt(t *testing.T) { func TestState(t *testing.T) { testDB := pebble.NewMemTest(t) chain := blockchain.New(testDB, &utils.Mainnet) + defer chain.Close() client := feeder.NewTestClient(t, &utils.Mainnet) gw := adaptfeeder.New(client) @@ -446,6 +460,7 @@ func TestState(t *testing.T) { func TestEvents(t *testing.T) { testDB := pebble.NewMemTest(t) chain := blockchain.New(testDB, &utils.Goerli2) + defer chain.Close() client := feeder.NewTestClient(t, &utils.Goerli2) gw := adaptfeeder.New(client) @@ -565,6 +580,7 @@ func TestEvents(t *testing.T) { func TestRevert(t *testing.T) { testdb := pebble.NewMemTest(t) chain := blockchain.New(testdb, &utils.Mainnet) + defer chain.Close() client := feeder.NewTestClient(t, &utils.Mainnet) gw := adaptfeeder.New(client) @@ -653,6 +669,7 @@ func TestL1Update(t *testing.T) { got, err := chain.L1Head() require.NoError(t, err) assert.Equal(t, head, got) + chain.Close() }) } } @@ -660,6 +677,7 @@ func TestL1Update(t *testing.T) { func TestPending(t *testing.T) { testDB := pebble.NewMemTest(t) chain := blockchain.New(testDB, &utils.Mainnet) + defer chain.Close() client := feeder.NewTestClient(t, &utils.Mainnet) gw := adaptfeeder.New(client) @@ -801,6 +819,7 @@ func TestPending(t *testing.T) { func TestStorePendingIncludesNumber(t *testing.T) { network := utils.Mainnet chain := blockchain.New(pebble.NewMemTest(t), &network) + defer chain.Close() // Store pending genesis. 
require.NoError(t, chain.StorePending(&blockchain.Pending{ diff --git a/blockchain/snap_server_interface.go b/blockchain/snap_server_interface.go new file mode 100644 index 0000000000..ce530af9a9 --- /dev/null +++ b/blockchain/snap_server_interface.go @@ -0,0 +1,184 @@ +package blockchain + +import ( + "context" + "errors" + + "github.com/NethermindEth/juno/core" + "github.com/NethermindEth/juno/core/felt" + "github.com/NethermindEth/juno/db" + "github.com/NethermindEth/juno/service" + "github.com/NethermindEth/juno/utils" +) + +const MaxSnapshots = 128 + +//nolint:unused +type snapshotRecord struct { + stateRoot *felt.Felt + contractsRoot *felt.Felt + classRoot *felt.Felt + blockHash *felt.Felt + txn db.Transaction + closer func() error +} + +var ErrMissingSnapshot = errors.New("missing snapshot") + +func (b *Blockchain) GetStateForStateRoot(stateRoot *felt.Felt) (*core.State, error) { + snapshot, err := b.findSnapshotMatching(func(record *snapshotRecord) bool { + return record.stateRoot.Equal(stateRoot) + }) + if err != nil { + return nil, err + } + + s := core.NewState(snapshot.txn) + + return s, nil +} + +func (b *Blockchain) findSnapshotMatching(filter func(record *snapshotRecord) bool) (*snapshotRecord, error) { + var snapshot *snapshotRecord + for _, record := range b.snapshots { + if filter(record) { + snapshot = record + break + } + } + + if snapshot == nil { + return nil, ErrMissingSnapshot + } + + return snapshot, nil +} + +func (b *Blockchain) GetClasses(felts []*felt.Felt) ([]core.Class, error) { + classes := make([]core.Class, len(felts)) + err := b.database.View(func(txn db.Transaction) error { + state := core.NewState(txn) + for i, f := range felts { + d, err := state.Class(f) + if err != nil && !errors.Is(err, db.ErrKeyNotFound) { + return err + } else if errors.Is(err, db.ErrKeyNotFound) { + classes[i] = nil + } else { + classes[i] = d.Class + } + } + + return nil + }) + if err != nil { + return nil, err + } + + return classes, nil +} + +func (b *Blockchain) GetDClasses(felts []*felt.Felt) ([]*core.DeclaredClass, error) { + classes := make([]*core.DeclaredClass, len(felts)) + err := b.database.View(func(txn db.Transaction) error { + state := core.NewState(txn) + for i, f := range felts { + d, err := state.Class(f) + if err != nil && !errors.Is(err, db.ErrKeyNotFound) { + return err + } else if errors.Is(err, db.ErrKeyNotFound) { + classes[i] = nil + } else { + classes[i] = d + } + } + + return nil + }) + if err != nil { + return nil, err + } + + return classes, nil +} + +// func (b *Blockchain) seedSnapshot() error { +// headheader, err := b.HeadsHeader() +// if err != nil { +// return err +// } + +// stateR, srCloser, err := b.HeadState() +// if err != nil { +// return err +// } + +// defer func() { _ = srCloser() }() + +// state := stateR.(*core.State) +// contractsRoot, classRoot, err := state.StateAndClassRoot() +// if err != nil { +// return err +// } + +// stateRoot, err := state.Root() +// if err != nil { +// return err +// } + +// txn, closer, err := b.database.PersistedView() +// if err != nil { +// return err +// } + +// dbsnap := snapshotRecord{ +// stateRoot: stateRoot, +// contractsRoot: contractsRoot, +// classRoot: classRoot, +// blockHash: headheader.Hash, +// txn: txn, +// closer: closer, +// } + +// fmt.Printf("Snapshot %d %s %s\n", headheader.Number, headheader.GlobalStateRoot, stateRoot) + +// // TODO: Reorgs +// b.snapshots = append(b.snapshots, &dbsnap) +// if len(b.snapshots) > MaxSnapshots { +// toremove := b.snapshots[0] +// err = 
toremove.closer()
+// 		if err != nil {
+// 			return err
+// 		}

+// 		// TODO: I think append may keep the old backing array alive internally;
+// 		// whether it copies to a new array is not guaranteed.
+// 		b.snapshots = b.snapshots[1:]
+// 	}

+// 	return nil
+// }
+
+func (b *Blockchain) Close() {
+	for _, snapshot := range b.snapshots {
+		// ignore the errors here as it's most likely called on shutdown
+		_ = snapshot.closer()
+	}
+}
+
+type BlockchainCloser struct {
+	log utils.SimpleLogger
+	bc  *Blockchain
+}
+
+var _ service.Service = (*BlockchainCloser)(nil)
+
+func NewBlockchainCloser(bc *Blockchain, log utils.SimpleLogger) *BlockchainCloser {
+	return &BlockchainCloser{log, bc}
+}
+
+func (b *BlockchainCloser) Run(ctx context.Context) error {
+	<-ctx.Done()
+	b.bc.Close()
+	return nil
+}
diff --git a/cmd/juno/juno.go b/cmd/juno/juno.go
index 6739a63357..d5296e99b2 100644
--- a/cmd/juno/juno.go
+++ b/cmd/juno/juno.go
@@ -83,6 +83,7 @@ const (
 	corsEnableF             = "rpc-cors-enable"
 	versionedConstantsFileF = "versioned-constants-file"
 	pluginPathF             = "plugin-path"
+	p2pSyncModeF            = "p2p-sync-mode"

 	defaultConfig = ""
 	defaulHost    = "localhost"
@@ -121,6 +122,7 @@
 	defaultCorsEnable             = false
 	defaultVersionedConstantsFile = ""
 	defaultPluginPath             = ""
+	defaultP2pSyncMode            = "full"

 	configFlagUsage   = "The YAML configuration file."
 	logLevelFlagUsage = "Options: trace, debug, info, warn, error."
@@ -154,6 +156,7 @@
 	p2pFeederNodeUsage = "EXPERIMENTAL: Run juno as a feeder node which will only sync from feeder gateway and gossip the new" +
 		" blocks to the network."
 	p2pPrivateKeyUsage = "EXPERIMENTAL: Hexadecimal representation of a private key on the Ed25519 elliptic curve."
+	p2pSyncModeUsage   = "EXPERIMENTAL: Synchronisation mode: 'full' (default) or 'snap'."
 	metricsUsage       = "Enables the Prometheus metrics endpoint on the default port."
 	metricsHostUsage   = "The interface on which the Prometheus endpoint will listen for requests."
 	metricsPortUsage   = "The port on which the Prometheus endpoint will listen for requests."
@@ -338,6 +341,7 @@ func NewCmd(config *node.Config, run func(*cobra.Command, []string) error) *cobr
 	junoCmd.Flags().String(p2pPeersF, defaultP2pPeers, p2pPeersUsage)
 	junoCmd.Flags().Bool(p2pFeederNodeF, defaultP2pFeederNode, p2pFeederNodeUsage)
 	junoCmd.Flags().String(p2pPrivateKey, defaultP2pPrivateKey, p2pPrivateKeyUsage)
+	junoCmd.Flags().String(p2pSyncModeF, defaultP2pSyncMode, p2pSyncModeUsage)
 	junoCmd.Flags().Bool(metricsF, defaultMetrics, metricsUsage)
 	junoCmd.Flags().String(metricsHostF, defaulHost, metricsHostUsage)
 	junoCmd.Flags().Uint16(metricsPortF, defaultMetricsPort, metricsPortUsage)
diff --git a/core/contract.go b/core/contract.go
index 2af1fd8c4c..69f3e23e5f 100644
--- a/core/contract.go
+++ b/core/contract.go
@@ -10,7 +10,10 @@ import (
 )

 // contract storage has fixed height at 251
-const ContractStorageTrieHeight = 251
+const (
+	GlobalTrieHeight          = 251
+	ContractStorageTrieHeight = 251
+)

 var (
 	ErrContractNotDeployed = errors.New("contract not deployed")
@@ -168,6 +171,32 @@ func (c *ContractUpdater) UpdateStorage(diff map[felt.Felt]*felt.Felt, cb OnValu
 	return cStorage.Commit()
 }

+// UpdateStorageKV applies a change-set, given as key/value pairs, to the contract storage.
+func (c *ContractUpdater) UpdateStorageKV(diff []FeltKV, cb OnValueChanged) error {
+	cStorage, err := storage(c.Address, c.txn)
+	if err != nil {
+		return err
+	}
+
+	// apply the diff
+	for _, kv := range diff {
+		key := kv.Key
+		value := kv.Value
+		oldValue, pErr := cStorage.Put(key, value)
+		if pErr != nil {
+			return pErr
+		}
+
+		if oldValue != nil {
+			if err = cb(key, oldValue); err != nil {
+				return err
+			}
+		}
+	}
+
+	return cStorage.Commit()
+}
+
 func ContractStorage(addr, key *felt.Felt, txn db.Transaction) (*felt.Felt, error) {
 	cStorage, err := storage(addr, txn)
 	if err != nil {
diff --git a/core/state.go b/core/state.go
index effde8b518..fb7d8e8c19 100644
--- a/core/state.go
+++ b/core/state.go
@@ -124,6 +124,38 @@ func (s *State) Root() (*felt.Felt, error) {
 	return crypto.PoseidonArray(stateVersion, storageRoot, classesRoot), nil
 }

+func (s *State) StateAndClassRoot() (*felt.Felt, *felt.Felt, error) {
+	var storageRoot, classesRoot *felt.Felt
+
+	sStorage, closer, err := s.storage()
+	if err != nil {
+		return nil, nil, err
+	}
+
+	if storageRoot, err = sStorage.Root(); err != nil {
+		return nil, nil, err
+	}
+
+	if err = closer(); err != nil {
+		return nil, nil, err
+	}
+
+	classes, closer, err := s.classesTrie()
+	if err != nil {
+		return nil, nil, err
+	}
+
+	if classesRoot, err = classes.Root(); err != nil {
+		return nil, nil, err
+	}
+
+	if err = closer(); err != nil {
+		return nil, nil, err
+	}
+
+	return storageRoot, classesRoot, nil
+}
+
 // storage returns a [core.Trie] that represents the Starknet global state in the given Txn context.
 func (s *State) storage() (*trie.Trie, func() error, error) {
 	return s.globalTrie(db.StateTrie, trie.NewTriePedersen)
@@ -133,6 +165,19 @@
 	return s.globalTrie(db.ClassesTrie, trie.NewTriePoseidon)
 }

+// StorageTrie returns the global state [core.Trie] in the given Txn context.
+func (s *State) StorageTrie() (*trie.Trie, func() error, error) { + return s.storage() +} + +func (s *State) ClassTrie() (*trie.Trie, func() error, error) { + return s.classesTrie() +} + +func (s *State) StorageTrieForAddr(addr *felt.Felt) (*trie.Trie, error) { + return storage(addr, s.txn) +} + func (s *State) globalTrie(bucket db.Bucket, newTrie trie.NewTrieFunc) (*trie.Trie, func() error, error) { dbPrefix := bucket.Key() tTxn := trie.NewStorage(s.txn, dbPrefix) @@ -204,14 +249,22 @@ func (s *State) Update(blockNumber uint64, update *StateUpdate, declaredClasses return err } + if err = s.UpdateNoVerify(blockNumber, update.StateDiff, declaredClasses); err != nil { + return err + } + + return s.verifyStateUpdateRoot(update.NewRoot) +} + +func (s *State) UpdateNoVerify(blockNumber uint64, update *StateDiff, declaredClasses map[felt.Felt]Class) error { // register declared classes mentioned in stateDiff.deployedContracts and stateDiff.declaredClasses for cHash, class := range declaredClasses { - if err = s.putClass(&cHash, class, blockNumber); err != nil { + if err := s.putClass(&cHash, class, blockNumber); err != nil { return err } } - if err = s.updateDeclaredClassesTrie(update.StateDiff.DeclaredV1Classes, declaredClasses); err != nil { + if err := s.updateDeclaredClassesTrie(update.DeclaredV1Classes, declaredClasses); err != nil { return err } @@ -221,13 +274,13 @@ func (s *State) Update(blockNumber uint64, update *StateUpdate, declaredClasses } // register deployed contracts - for addr, classHash := range update.StateDiff.DeployedContracts { + for addr, classHash := range update.DeployedContracts { if err = s.putNewContract(stateTrie, &addr, classHash, blockNumber); err != nil { return err } } - if err = s.updateContracts(stateTrie, blockNumber, update.StateDiff, true); err != nil { + if err = s.updateContracts(stateTrie, blockNumber, update, true); err != nil { return err } @@ -235,7 +288,7 @@ func (s *State) Update(blockNumber uint64, update *StateUpdate, declaredClasses return err } - return s.verifyStateUpdateRoot(update.NewRoot) + return nil } var ( @@ -279,6 +332,77 @@ func (s *State) updateContracts(stateTrie *trie.Trie, blockNumber uint64, diff * return s.updateContractStorages(stateTrie, diff.StorageDiffs, blockNumber, logChanges) } +func (s *State) UpdateContractNoLog(paths, nonces, classes []*felt.Felt) error { + stateTrie, storageCloser, err := s.storage() + if err != nil { + return err + } + + for i, path := range paths { + nonce := nonces[i] + class := classes[i] + + contract, err := NewContractUpdater(path, s.txn) + if err != nil && !errors.Is(err, ErrContractNotDeployed) { + return err + } + if errors.Is(err, ErrContractNotDeployed) { + contract, err = DeployContract(path, class, s.txn) + if err != nil { + return err + } + } + + err = contract.Replace(class) + if err != nil { + return err + } + + err = contract.UpdateNonce(nonce) + if err != nil { + return err + } + + err = s.updateContractCommitment(stateTrie, contract) + if err != nil { + return err + } + } + + if err = storageCloser(); err != nil { + return err + } + return nil +} + +func (s *State) UpdateContractStorages(storages map[felt.Felt]map[felt.Felt]*felt.Felt) error { + stateTrie, storageCloser, err := s.storage() + if err != nil { + return err + } + + err = s.updateContractStorages(stateTrie, storages, 0, false) + if err != nil { + return err + } + + return storageCloser() +} + +func (s *State) UpdateContractStoragesKV(storages map[felt.Felt][]FeltKV) error { + stateTrie, storageCloser, err := s.storage() 
+	if err != nil {
+		return err
+	}
+
+	err = s.updateContractStoragesKV(stateTrie, storages, 0, false)
+	if err != nil {
+		return err
+	}
+
+	return storageCloser()
+}
+
 // replaceContract replaces the class that a contract at a given address instantiates
 func (s *State) replaceContract(stateTrie *trie.Trie, addr, classHash *felt.Felt) (*felt.Felt, error) {
 	contract, err := NewContractUpdater(addr, s.txn)
@@ -342,6 +466,65 @@ func (s *State) Class(classHash *felt.Felt) (*DeclaredClass, error) {
 	return &class, nil
 }

+// StartsWith checks if the byte slice 'a' starts with the byte slice 'b'
+func StartsWith(a, b []byte) bool {
+	// If b is longer than a, it can't be a prefix
+	if len(b) > len(a) {
+		return false
+	}
+
+	// Compare the elements of a and b
+	for i := range b {
+		if a[i] != b[i] {
+			return false
+		}
+	}
+
+	return true
+}
+
+// PrintIt is a debugging helper that dumps up to two small v0 classes; it always returns (nil, nil).
+func (s *State) PrintIt() (*DeclaredClass, error) {
+	classKey := db.Class.Key(nil)
+
+	it, err := s.txn.NewIterator()
+	if err != nil {
+		return nil, err
+	}
+
+	it.Seek(classKey)
+	idx := 0
+	printed := 0
+	for it.Valid() && StartsWith(it.Key(), classKey) {
+		value, err := it.Value()
+		if err != nil {
+			return nil, err
+		}
+
+		var class DeclaredClass
+		err = encoder.Unmarshal(value, &class)
+		if err != nil {
+			return nil, err
+		}
+
+		if class.Class.Version() == 0 {
+			fmt.Printf("%d %x %d %d\n", idx, it.Key(), class.Class.Version(), len(value))
+			if len(value) < 20000 {
+				fmt.Printf("%x\n", value)
+				printed++
+				if printed >= 2 {
+					return nil, nil
+				}
+			}
+		}
+
+		idx++
+		it.Next()
+	}
+
+	return nil, nil
+}
+
 func (s *State) updateStorageBuffered(contractAddr *felt.Felt, updateDiff map[felt.Felt]*felt.Felt, blockNumber uint64, logChanges bool) (
 	*db.BufferedTransaction, error,
 ) {
@@ -367,6 +549,106 @@
 	return bufferedTxn, nil
 }

+func (s *State) updateStorageBufferedKV(contractAddr *felt.Felt, updateDiff []FeltKV, blockNumber uint64, logChanges bool) (
+	*db.BufferedTransaction, error,
+) {
+	// to avoid multiple transactions writing to s.txn, create a buffered transaction and use that in the worker goroutine
+	bufferedTxn := db.NewBufferedTransaction(s.txn)
+	bufferedState := NewState(bufferedTxn)
+	bufferedContract, err := NewContractUpdater(contractAddr, bufferedTxn)
+	if err != nil {
+		return nil, err
+	}
+
+	onValueChanged := func(location, oldValue *felt.Felt) error {
+		if logChanges {
+			return bufferedState.LogContractStorage(contractAddr, location, oldValue, blockNumber)
+		}
+		return nil
+	}
+
+	if err = bufferedContract.UpdateStorageKV(updateDiff, onValueChanged); err != nil {
+		return nil, err
+	}
+
+	return bufferedTxn, nil
+}
+
+type FeltKV struct {
+	Key   *felt.Felt
+	Value *felt.Felt
+}
+
+// updateContractStoragesKV applies the diff set to the Trie of the
+// contract at the given address in the given Txn context.
+func (s *State) updateContractStoragesKV(stateTrie *trie.Trie, diffs map[felt.Felt][]FeltKV,
+	blockNumber uint64, logChanges bool,
+) error {
+	// make sure all noClassContracts are deployed
+	for addr := range diffs {
+		if _, ok := noClassContracts[addr]; !ok {
+			continue
+		}
+
+		_, err := NewContractUpdater(&addr, s.txn)
+		if err != nil {
+			if !errors.Is(err, ErrContractNotDeployed) {
+				return err
+			}
+			// Deploy noClassContract
+			err = s.putNewContract(stateTrie, &addr, noClassContractsClassHash, blockNumber)
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	// sort the contracts in descending diff size order
+	// so we start with the heaviest update first
+	keys := make([]felt.Felt, 0, len(diffs))
+	for key := range diffs {
+		keys = append(keys, key)
+	}
+	sort.SliceStable(keys, func(i, j int) bool {
+		return len(diffs[keys[i]]) > len(diffs[keys[j]])
+	})
+
+	// update per-contract storage Tries concurrently
+	contractUpdaters := pool.NewWithResults[*db.BufferedTransaction]().WithErrors().WithMaxGoroutines(runtime.GOMAXPROCS(0))
+	for _, key := range keys {
+		contractAddr := key
+		updateDiff := diffs[contractAddr]
+		contractUpdaters.Go(func() (*db.BufferedTransaction, error) {
+			return s.updateStorageBufferedKV(&contractAddr, updateDiff, blockNumber, logChanges)
+		})
+	}
+
+	bufferedTxns, err := contractUpdaters.Wait()
+	if err != nil {
+		return err
+	}
+
+	// flush buffered txns
+	for _, bufferedTxn := range bufferedTxns {
+		if err = bufferedTxn.Flush(); err != nil {
+			return err
+		}
+	}
+
+	for addr := range diffs {
+		contract, err := NewContractUpdater(&addr, s.txn)
+		if err != nil {
+			return err
+		}
+
+		if err = s.updateContractCommitment(stateTrie, contract); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
 // updateContractStorage applies the diff set to the Trie of the
 // contract at the given address in the given Txn context.
 func (s *State) updateContractStorages(stateTrie *trie.Trie, diffs map[felt.Felt]map[felt.Felt]*felt.Felt,
diff --git a/core/trie/key.go b/core/trie/key.go
index 7f0e6af609..a451cb25ee 100644
--- a/core/trie/key.go
+++ b/core/trie/key.go
@@ -153,3 +153,61 @@ func (k *Key) RemoveLastBit() {
 		inUseBytes[0] = (inUseBytes[0] << unusedBitsCount) >> unusedBitsCount
 	}
 }
+
+// CmpAligned compares the two keys as if both were big-endian values of the same bit length
+func (k Key) CmpAligned(other *Key) int {
+	// The keys are not stored aligned, so convert each to a big.Int and left-shift so both MSBs line up
+	height := k.len
+	if other.len > height {
+		height = other.len
+	}
+
+	b1i := k.alignedBitInt(height)
+	b2i := other.alignedBitInt(height)
+	return b1i.Cmp(b2i)
+}
+
+func (k Key) alignedBitInt(height uint8) *big.Int {
+	theint := &big.Int{}
+	theint = theint.SetBytes(k.bitset[:])
+	if k.len < height {
+		theint = theint.Lsh(theint, uint(height-k.len))
+	}
+
+	return theint
+}
+
+func (k *Key) AppendBitMut(flag bool) {
+	const LSB = uint8(0x1)
+	bit := k.len
+	byteIdx := bit / 8
+	byteAtIdx := k.bitset[len(k.bitset)-int(byteIdx)-1]
+	bitIdx := bit % 8
+
+	// I'm sure someone will make this nicer
+	if flag {
+		byteAtIdx |= LSB << bitIdx
+	} else {
+		byteAtIdx &= ^(LSB << bitIdx)
+	}
+
+	k.len++
+	k.bitset[len(k.bitset)-int(byteIdx)-1] = byteAtIdx
+}
+
+func (k Key) Append(otherKey *Key) Key {
+	result := NewKey(otherKey.len, otherKey.bitset[:])
+
+	// I'm sure someone will make this faster
+	for i := uint8(0); i < k.len; i++ {
+		result.AppendBitMut(k.Test(i))
+	}
+
+	return result
+}
+
+func (k Key) AppendBit(flag bool) Key {
+	result := NewKey(0, []byte{})
+	result.AppendBitMut(flag)
+	return k.Append(&result)
+}
diff --git a/core/trie/key_test.go b/core/trie/key_test.go
index 8d56a31e0c..a16299a019 100644
--- a/core/trie/key_test.go
+++ b/core/trie/key_test.go
@@ -2,6 +2,7 @@ package trie_test

 import (
 	"bytes"
+	"fmt"
 	"testing"

 	"github.com/NethermindEth/juno/core/felt"
@@ -153,3 +154,120 @@ func TestTruncate(t *testing.T) {
 		})
 	}
 }
+
+func Test_cmp(t *testing.T) {
+	tests := []struct {
+		n1       int
+		n2       int
+		isHigher bool
+	}{
+		{
+			n1:       10,
+			n2:       0,
+			isHigher: true,
+		},
+		{
+			n1:       5,
+			n2:       0,
+			isHigher: true,
+		},
+		{
+			n1:       5,
+			n2:       4,
+			isHigher: true,
+		},
+		{
+			n1:       5,
+			n2:       5,
+			isHigher: false,
+		},
+		{
+			n1:       4,
+			n2:       5,
+			isHigher: false,
+		},
+		{
+			n1:       0,
+			n2:       5,
+			isHigher: false,
+		},
+		{
+			n1:       300,
+			n2:       1,
+			isHigher: true,
+		},
+		{
+			n1:       1,
+			n2:       300,
+			isHigher: false,
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(fmt.Sprintf("%d %d %v", test.n1, test.n2, test.isHigher), func(t *testing.T) {
+			k1 := numToKey(test.n1)
+			k2 := numToKey(test.n2)
+
+			assert.Equal(t,
+				k1.CmpAligned(&k2) > 0,
+				test.isHigher)
+		})
+	}
+}
+
+func numToKey(num int) trie.Key {
+	return trie.NewKey(8, []byte{byte(num)})
+}
+
+func TestKeyAppend(t *testing.T) {
+	tests := map[string]struct {
+		Key1        trie.Key
+		Key2        trie.Key
+		ExpectedKey trie.Key
+	}{
+		"no append": {
+			Key1:        trie.NewKey(1, []byte{0x01}),
+			Key2:        trie.NewKey(0, []byte{0x00}),
+			ExpectedKey: trie.NewKey(1, []byte{0x01}),
+		},
+		"from zero append": {
+			Key1:        trie.NewKey(0, []byte{0x00}),
+			Key2:        trie.NewKey(1, []byte{0x01}),
+			ExpectedKey: trie.NewKey(1, []byte{0x01}),
+		},
+		"append shift": {
+			Key1:        trie.NewKey(1, []byte{0x01}),
+			Key2:        trie.NewKey(7, []byte{0x00}),
+			ExpectedKey: trie.NewKey(8, []byte{0x80}),
+		},
+		"append to a new byte": {
+			Key1:        trie.NewKey(8, []byte{0xff}),
+			Key2:        trie.NewKey(1, []byte{0x01}),
+			ExpectedKey:
trie.NewKey(9, []byte{0x01, 0xff}),
+		},
+		"append multi byte": {
+			Key1:        trie.NewKey(11, []byte{0x00, 0xff}),       // 000 1111 1111
+			Key2:        trie.NewKey(12, []byte{0x00, 0xff}),       // 0000 1111 1111
+			ExpectedKey: trie.NewKey(23, []byte{0x0f, 0xf0, 0xff}), // 000 1111 1111 0000 1111 1111
+		},
+	}
+
+	for desc, test := range tests {
+		t.Run(desc, func(t *testing.T) {
+			appended := test.Key1.Append(&test.Key2)
+			assert.Equal(t, test.ExpectedKey, appended)
+		})
+	}
+}
+
+func TestKeyAppendBit(t *testing.T) {
+	k1 := trie.NewKey(1, []byte{0x01})
+	k2 := k1.AppendBit(true)
+	expected := trie.NewKey(2, []byte{0x03})
+	assert.Equal(t, k2, expected)
+
+	k1 = trie.NewKey(1, []byte{0x00})
+	k2 = k1.AppendBit(true)
+	expected = trie.NewKey(2, []byte{0x01})
+	assert.Equal(t, k2, expected)
+}
diff --git a/core/trie/proof.go b/core/trie/proof.go
index 517ae60764..0744a9a119 100644
--- a/core/trie/proof.go
+++ b/core/trie/proof.go
@@ -674,3 +674,11 @@ func BuildTrie(leftProofPath, rightProofPath []StorageNode, keys, values []*felt
 	}
 	return tempTrie, nil
 }
+
+func (t *Trie) RangeProof(startPath, endPath *felt.Felt) ([]ProofNode, error) {
+	// TODO: Do this properly
+	const trieHeight = 251
+	bts := startPath.Bytes()
+	k := NewKey(trieHeight, bts[:])
+	return GetProof(&k, t)
+}
diff --git a/core/trie/snap_support.go b/core/trie/snap_support.go
new file mode 100644
index 0000000000..7c2ae76363
--- /dev/null
+++ b/core/trie/snap_support.go
@@ -0,0 +1,209 @@
+package trie
+
+import (
+	"github.com/NethermindEth/juno/core/felt"
+	"github.com/NethermindEth/juno/utils"
+)
+
+func (t *Trie) IterateAndGenerateProof(startValue *felt.Felt, consumer func(key, value *felt.Felt) (bool, error),
+) ([]ProofNode, bool, error) {
+	var lastKey *felt.Felt
+
+	finished, err := t.Iterate(startValue, func(key, value *felt.Felt) (bool, error) {
+		lastKey = key
+
+		return consumer(key, value)
+	})
+	if err != nil {
+		return nil, finished, err
+	}
+
+	proofset := map[felt.Felt]ProofNode{}
+
+	// If startValue is nil && finished, no proof needs to be provided at all
+	if !finished || startValue != nil {
+		startFelt := startValue
+		if startFelt == nil {
+			// iteration stopped early without an explicit start: prove from the zero key
+			startFelt = &felt.Zero
+		}
+		feltBts := startFelt.Bytes()
+		startKey := NewKey(t.height, feltBts[:])
+		// Note: the left proof is for the start query, not the first returned leaf, so that
+		// the verifier can check that the server did not skip any leaves.
+		leftProof, err := GetProof(&startKey, t)
+		if err != nil {
+			return nil, finished, err
+		}
+		for _, proof := range leftProof {
+			// Using the trie hash as the map key is slow, but it works for now.
+			proofset[*proof.Hash(t.hash)] = proof
+		}
+	}
+
+	if !finished && lastKey != nil {
+		feltBts := lastKey.Bytes()
+		lastKey := NewKey(t.height, feltBts[:])
+		rightProof, err := GetProof(&lastKey, t)
+		if err != nil {
+			return nil, finished, err
+		}
+
+		for _, proof := range rightProof {
+			proofset[*proof.Hash(t.hash)] = proof
+		}
+	}
+
+	proofs := make([]ProofNode, 0, len(proofset))
+	for _, node := range proofset {
+		proofs = append(proofs, node)
+	}
+
+	return proofs, finished, nil
+}
+
+func (t *Trie) IterateWithLimit(
+	startAddr *felt.Felt,
+	limitAddr *felt.Felt,
+	maxNodes uint32,
+	// TODO: remove the logger - and move to the tree
+	logger utils.SimpleLogger,
+	consumer func(key, value *felt.Felt) error,
+) ([]ProofNode, bool, error) {
+	paths := make([]*felt.Felt, 0)
+	hashes := make([]*felt.Felt, 0)
+
+	count := uint32(0)
+	proof, finished, err := t.IterateAndGenerateProof(startAddr, func(key *felt.Felt, value *felt.Felt) (bool, error) {
+		// limitAddr is inclusive: skip only keys strictly past it, and keep iterating so
+		// that `finished` still reports whether the end of the trie was reached.
+		if limitAddr != nil && key.Cmp(limitAddr) > 0 {
+			return true, nil
+		}
+
+		paths = append(paths, key)
+		hashes = append(hashes, value)
+
+		err := consumer(key, value)
+		if err != nil {
+			logger.Errorw("error from consumer function", "err", err)
+			return false, err
+		}
+
+		count++
+		if count >= maxNodes {
+			return false, nil
+		}
+		return true, nil
+	})
+	if err != nil {
+		logger.Errorw("IterateAndGenerateProof", "err", err, "finished", finished)
+		return nil, finished, err
+	}
+
+	return proof, finished, nil
+}
+
+func VerifyRange(root, startKey *felt.Felt, keys, values []*felt.Felt, proofs []ProofNode, hash hashFunc,
+	treeHeight uint8,
+) (hasMore, valid bool, oerr error) {
+	proofMap := map[felt.Felt]ProofNode{}
+	for _, proof := range proofs {
+		proofHash := proof.Hash(hash)
+		proofMap[*proofHash] = proof
+	}
+
+	if len(proofMap) == 0 && startKey == nil {
+		// Special case where the whole trie is sent in one go.
+		// We just need to completely reconstruct the trie.
+
+		tempTrie, err := newTrie(newMemStorage(), treeHeight, hash)
+		if err != nil {
+			return false, false, err
+		}
+
+		for i, key := range keys {
+			_, err = tempTrie.Put(key, values[i])
+			if err != nil {
+				return false, false, err
+			}
+		}
+
+		recalculatedRoot, err := tempTrie.Root()
+		if err != nil {
+			return false, false, err
+		}
+
+		if !root.Equal(recalculatedRoot) {
+			return false, false, nil
+		}
+
+		return false, true, nil
+	}
+
+	if _, ok := proofMap[*root]; !ok {
+		// Verification failure, root not included in proof.
+		return false, false, nil
+	}
+
+	proofKeys := map[felt.Felt]Key{}
+	err := buildKeys(NewKey(0, []byte{}), root, proofMap, proofKeys, 0)
+	if err != nil {
+		return false, false, err
+	}
+
+	// TODO: verify the proof paths themselves here
+
+	hasMoreKeyCheck := startKey
+	if len(keys) > 0 {
+		hasMoreKeyCheck = keys[len(keys)-1]
+	}
+
+	feltBytes := hasMoreKeyCheck.Bytes()
+	hasMoreKeyCheckKey := NewKey(treeHeight, feltBytes[:])
+
+	// TODO: does this actually work in all cases?
+ hasMore = false + for _, key := range proofKeys { + comparison := key.CmpAligned(&hasMoreKeyCheckKey) + if comparison > 0 { + hasMore = true + } + } + + return hasMore, true, nil +} + +func buildKeys(currentKey Key, currentNode *felt.Felt, proofMap map[felt.Felt]ProofNode, keys map[felt.Felt]Key, depth int) error { + keys[*currentNode] = currentKey + proofNode, ok := proofMap[*currentNode] + if !ok { + return nil + } + + switch node := proofNode.(type) { + case *Edge: + chKey := currentKey.Append(node.Path) + ch := node.Child + err := buildKeys(chKey, ch, proofMap, keys, depth+1) + if err != nil { + return err + } + case *Binary: + chKey := currentKey.AppendBit(false) + ch := node.LeftHash + err := buildKeys(chKey, ch, proofMap, keys, depth+1) + if err != nil { + return err + } + + chKey = currentKey.AppendBit(true) + ch = node.RightHash + err = buildKeys(chKey, ch, proofMap, keys, depth+1) + if err != nil { + return err + } + } + + return nil +} diff --git a/core/trie/snap_support_test.go b/core/trie/snap_support_test.go new file mode 100644 index 0000000000..dd1264c29c --- /dev/null +++ b/core/trie/snap_support_test.go @@ -0,0 +1,498 @@ +package trie_test + +import ( + "fmt" + "math" + "testing" + + "github.com/NethermindEth/juno/core/crypto" + "github.com/NethermindEth/juno/core/felt" + "github.com/NethermindEth/juno/core/trie" + "github.com/NethermindEth/juno/db" + "github.com/NethermindEth/juno/db/pebble" + "github.com/NethermindEth/juno/utils" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const trieHeight = 251 + +func TestRangeAndVerify(t *testing.T) { + scenarios := []struct { + name string + startQuery *felt.Felt + limitQuery *felt.Felt + maxNode int + expectedKeyCount int + hasMore bool + noProof bool + }{ + { + name: "all", + startQuery: numToFelt(0), + expectedKeyCount: 10, + hasMore: false, + }, + { + name: "all without start query", + expectedKeyCount: 10, + hasMore: false, + noProof: true, + }, + { + name: "start in the middle", + startQuery: numToFelt(500), + expectedKeyCount: 5, + hasMore: false, + }, + { + name: "start with limit query", + startQuery: numToFelt(100), + limitQuery: numToFelt(500), + expectedKeyCount: 5, + hasMore: true, + }, + { + name: "start with limit query and node count limit", + startQuery: numToFelt(100), + limitQuery: numToFelt(500), + maxNode: 3, + expectedKeyCount: 3, + hasMore: true, + }, + { + name: "finished before limit query", + startQuery: numToFelt(100), + limitQuery: numToFelt(20000), + expectedKeyCount: 9, + hasMore: false, + }, + { + name: "last one right after limit query", + startQuery: numToFelt(100), + limitQuery: numToFelt(900), + expectedKeyCount: 9, + hasMore: false, + }, + { + name: "two leaf after limit query, last leaf skipped", + startQuery: numToFelt(100), + limitQuery: numToFelt(800), + expectedKeyCount: 8, + hasMore: true, + }, + { + name: "no node between start and limit", + startQuery: numToFelt(450), + limitQuery: numToFelt(451), + expectedKeyCount: 1, + hasMore: true, + }, + { + name: "start query after last node", + startQuery: numToFelt(10000), + expectedKeyCount: 0, + hasMore: false, + }, + } + + for _, scenario := range scenarios { + t.Run(scenario.name, func(t *testing.T) { + storage := trie.NewStorage(db.NewMemTransaction(), []byte{}) + testTrie, err := trie.NewTriePedersen(storage, 251) + assert.NoError(t, err) + + for i := 0; i < 10; i++ { + _, err = testTrie.Put(numToFelt(i*100+1), numToFelt(i*100+2)) + assert.NoError(t, err) + } + + expectedRoot, err := testTrie.Root() 
+ assert.NoError(t, err) + + startQuery := scenario.startQuery + var keys []*felt.Felt + var values []*felt.Felt + + proofs, _, err := testTrie.IterateAndGenerateProof(startQuery, func(key, value *felt.Felt) (bool, error) { + keys = append(keys, key) + values = append(values, value) + if scenario.maxNode > 0 && len(keys) >= scenario.maxNode { + return false, nil + } + if scenario.limitQuery != nil && key.Cmp(scenario.limitQuery) > 0 { + // Last (one after limit) is included. + return false, nil + } + return true, nil + }) + assert.NoError(t, err) + + assert.Equal(t, scenario.expectedKeyCount, len(keys)) + if scenario.noProof { + assert.Empty(t, proofs) + } + + hasMore, valid, err := trie.VerifyRange(expectedRoot, startQuery, keys, values, proofs, crypto.Pedersen, trieHeight) + assert.NoError(t, err) + assert.True(t, valid) + + assert.Equal(t, scenario.hasMore, hasMore) + }) + } +} + +func TestRangeAndVerifyReject(t *testing.T) { + scenarios := []struct { + name string + startQuery *felt.Felt + skip bool + maxNode int + mutator func(keys, values []*felt.Felt, proofs []trie.ProofNode) ([]*felt.Felt, []*felt.Felt, []trie.ProofNode) + }{ + { + name: "missing proofs", + startQuery: numToFelt(500), + mutator: func(keys, values []*felt.Felt, proofs []trie.ProofNode) ([]*felt.Felt, []*felt.Felt, []trie.ProofNode) { + return keys, values, nil + }, + }, + { + name: "missing leaf when all node requested", + mutator: func(keys, values []*felt.Felt, proofs []trie.ProofNode) ([]*felt.Felt, []*felt.Felt, []trie.ProofNode) { + return keys[1:], values[1:], nil + }, + }, + { + skip: true, + name: "missing part of keys at start", + startQuery: numToFelt(500), + mutator: func(keys, values []*felt.Felt, proofs []trie.ProofNode) ([]*felt.Felt, []*felt.Felt, []trie.ProofNode) { + return keys[1:], values[1:], proofs + }, + }, + { + skip: true, + name: "missing part of keys at end", + startQuery: numToFelt(500), + mutator: func(keys, values []*felt.Felt, proofs []trie.ProofNode) ([]*felt.Felt, []*felt.Felt, []trie.ProofNode) { + return keys[:len(keys)-1], values[:len(keys)-1], proofs + }, + }, + { + skip: true, + name: "missing part of keys in the middle", + startQuery: numToFelt(500), + mutator: func(keys, values []*felt.Felt, proofs []trie.ProofNode) ([]*felt.Felt, []*felt.Felt, []trie.ProofNode) { + newkeys := []*felt.Felt{} + newvalues := []*felt.Felt{} + newkeys = append(newkeys, keys[:2]...) + newvalues = append(newvalues, values[:2]...) + newkeys = append(newkeys, keys[3:]...) + newvalues = append(newvalues, values[3:]...) + + return newkeys, newvalues, proofs + }, + }, + { + name: "missing part of keys in the middle when whole trie is sent", + mutator: func(keys, values []*felt.Felt, proofs []trie.ProofNode) ([]*felt.Felt, []*felt.Felt, []trie.ProofNode) { + newkeys := []*felt.Felt{} + newvalues := []*felt.Felt{} + newkeys = append(newkeys, keys[:2]...) + newvalues = append(newvalues, values[:2]...) + newkeys = append(newkeys, keys[3:]...) + newvalues = append(newvalues, values[3:]...) 
+ + return newkeys, newvalues, proofs + }, + }, + { + name: "value changed", + mutator: func(keys, values []*felt.Felt, proofs []trie.ProofNode) ([]*felt.Felt, []*felt.Felt, []trie.ProofNode) { + values[3] = numToFelt(10000) + return keys, values, proofs + }, + }, + { + skip: true, + startQuery: numToFelt(500), + name: "value changed when whole trie is sent", + mutator: func(keys, values []*felt.Felt, proofs []trie.ProofNode) ([]*felt.Felt, []*felt.Felt, []trie.ProofNode) { + values[3] = numToFelt(10000) + return keys, values, proofs + }, + }, + { + name: "key changed", + mutator: func(keys, values []*felt.Felt, proofs []trie.ProofNode) ([]*felt.Felt, []*felt.Felt, []trie.ProofNode) { + keys[3] = numToFelt(10000) + return keys, values, proofs + }, + }, + { + skip: true, + startQuery: numToFelt(500), + name: "key changed when whole trie is sent", + mutator: func(keys, values []*felt.Felt, proofs []trie.ProofNode) ([]*felt.Felt, []*felt.Felt, []trie.ProofNode) { + keys[3] = numToFelt(10000) + return keys, values, proofs + }, + }, + } + + for _, scenario := range scenarios { + t.Run(scenario.name, func(t *testing.T) { + if scenario.skip { + t.Skip() + } + + storage := trie.NewStorage(db.NewMemTransaction(), []byte{}) + testTrie, err := trie.NewTriePedersen(storage, 251) + assert.NoError(t, err) + + for i := 0; i < 10; i++ { + _, err = testTrie.Put(numToFelt(i*100+1), numToFelt(i*100+2)) + assert.NoError(t, err) + } + + expectedRoot, err := testTrie.Root() + assert.NoError(t, err) + + startQuery := scenario.startQuery + var keys []*felt.Felt + var values []*felt.Felt + + proofs, _, err := testTrie.IterateAndGenerateProof(startQuery, func(key, value *felt.Felt) (bool, error) { + keys = append(keys, key) + values = append(values, value) + if scenario.maxNode > 0 && len(keys) >= scenario.maxNode { + return false, nil + } + return true, nil + }) + assert.NoError(t, err) + + keys, values, proofs = scenario.mutator(keys, values, proofs) + + _, valid, err := trie.VerifyRange(expectedRoot, startQuery, keys, values, proofs, crypto.Pedersen, trieHeight) + assert.NoError(t, err) + assert.False(t, valid) + }) + } +} + +func TestIterateOverTrie(t *testing.T) { + memdb := pebble.NewMemTest(t) + txn, err := memdb.NewTransaction(true) + require.NoError(t, err) + logger := utils.NewNopZapLogger() + + tempTrie, err := trie.NewTriePedersen(trie.NewStorage(txn, []byte{0}), 251) + require.NoError(t, err) + + // key ranges + var ( + bigPrefix = uint64(1000 * 1000 * 1000 * 1000) + count = 100 + ranges = 5 + fstInt, lstInt uint64 + fstKey, lstKey *felt.Felt + ) + for i := range ranges { + for j := range count { + lstInt = bigPrefix*uint64(i) + uint64(count+j) + lstKey = new(felt.Felt).SetUint64(lstInt) + value := new(felt.Felt).SetUint64(uint64(10*count + j + i)) + + if fstKey == nil { + fstKey = lstKey + fstInt = lstInt + } + + _, err := tempTrie.Put(lstKey, value) + require.NoError(t, err) + } + } + + maxNodes := uint32(ranges*count + 1) + startZero := felt.Zero.Clone() + + visitor := func(start, limit *felt.Felt, max uint32) (int, bool, *felt.Felt, *felt.Felt) { + visited := 0 + var fst, lst *felt.Felt + _, finish, err := tempTrie.IterateWithLimit( + start, + limit, + max, + logger, + func(key, value *felt.Felt) error { + if fst == nil { + fst = key + } + lst = key + visited++ + return nil + }) + require.NoError(t, err) + return visited, finish, fst, lst + } + + t.Run("iterate without limit", func(t *testing.T) { + expectedLeaves := ranges * count + visited, finish, fst, lst := visitor(nil, nil, maxNodes) + 
require.Equal(t, expectedLeaves, visited)
+		require.True(t, finish)
+		require.Equal(t, fstKey, fst)
+		require.Equal(t, lstKey, lst)
+		fmt.Println("Visited:", visited, "\tFinish:", finish, "\tRange:", fst.Uint64(), "-", lst.Uint64())
+	})
+
+	t.Run("iterate over trie in chunks", func(t *testing.T) {
+		chunkSize := 77
+		lstChunkSize := int(math.Mod(float64(ranges*count), float64(chunkSize)))
+		startKey := startZero
+		for {
+			visited, finish, fst, lst := visitor(startKey, nil, uint32(chunkSize))
+			fmt.Println("Finish:", finish, "\tstart:", startKey.Uint64(), "\trange:", fst.Uint64(), "-", lst.Uint64())
+			if finish {
+				require.Equal(t, lstChunkSize, visited)
+				break
+			}
+			require.Equal(t, chunkSize, visited)
+			require.False(t, finish)
+			startKey = new(felt.Felt).SetUint64(lst.Uint64() + 1)
+		}
+	})
+
+	t.Run("iterate over trie in groups", func(t *testing.T) {
+		startKey := startZero
+		for {
+			visited, finish, fst, lst := visitor(startKey, nil, uint32(count))
+			if finish {
+				require.Equal(t, 0, visited)
+				fmt.Println("Finish:", finish, "\tstart:", startKey.Uint64(), "\trange: ")
+				break
+			}
+			fmt.Println("Finish:", finish, "\tstart:", startKey.Uint64(), "\trange:", fst.Uint64(), "-", lst.Uint64())
+			require.Equal(t, count, visited)
+			require.False(t, finish)
+			if lst != nil {
+				startKey = new(felt.Felt).SetUint64(lst.Uint64() + 1)
+			}
+		}
+	})
+
+	t.Run("stop before first key", func(t *testing.T) {
+		lowerBound := new(felt.Felt).SetUint64(fstInt - 1)
+		visited, finish, _, _ := visitor(startZero, lowerBound, maxNodes)
+		require.True(t, finish)
+		require.Equal(t, 0, visited)
+	})
+
+	t.Run("first key is a limit", func(t *testing.T) {
+		visited, finish, fst, lst := visitor(startZero, fstKey, maxNodes)
+		require.Equal(t, 1, visited)
+		require.True(t, finish)
+		require.Equal(t, fstKey, fst)
+		require.Equal(t, fstKey, lst)
+	})
+
+	t.Run("start is the last key", func(t *testing.T) {
+		visited, finish, fst, lst := visitor(lstKey, nil, maxNodes)
+		require.Equal(t, 1, visited)
+		require.True(t, finish)
+		require.Equal(t, lstKey, fst)
+		require.Equal(t, lstKey, lst)
+	})
+
+	t.Run("start and limit are the last key", func(t *testing.T) {
+		visited, finish, fst, lst := visitor(lstKey, lstKey, maxNodes)
+		require.Equal(t, 1, visited)
+		require.True(t, finish)
+		require.Equal(t, lstKey, fst)
+		require.Equal(t, lstKey, lst)
+	})
+
+	t.Run("iterate after last key yields no key", func(t *testing.T) {
+		upperBound := new(felt.Felt).SetUint64(lstInt + 1)
+		visited, finish, fst, _ := visitor(upperBound, nil, maxNodes)
+		require.Equal(t, 0, visited)
+		require.True(t, finish)
+		require.Nil(t, fst)
+	})
+
+	t.Run("iterate with reversed bounds yields no key", func(t *testing.T) {
+		visited, finish, fst, _ := visitor(lstKey, fstKey, maxNodes)
+		require.Equal(t, 0, visited)
+		require.True(t, finish)
+		require.Nil(t, fst)
+	})
+
+	t.Run("iterate over the first group", func(t *testing.T) {
+		fstGrpBound := new(felt.Felt).SetUint64(fstInt + uint64(count-1))
+		visited, finish, fst, lst := visitor(fstKey, fstGrpBound, maxNodes)
+		require.Equal(t, count, visited)
+		require.True(t, finish)
+		require.Equal(t, fstKey, fst)
+		require.Equal(t, fstGrpBound, lst)
+	})
+
+	t.Run("iterate over the first group no lower bound", func(t *testing.T) {
+		fstGrpBound := new(felt.Felt).SetUint64(fstInt + uint64(count-1))
+		visited, finish, fst, lst := visitor(nil, fstGrpBound, maxNodes)
+		require.Equal(t, count, visited)
+		require.True(t, finish)
+		require.Equal(t, fstKey, fst)
+		require.Equal(t, fstGrpBound, lst)
+	})
+
+	t.Run("iterate over
the first group by max nodes", func(t *testing.T) { + fstGrpBound := new(felt.Felt).SetUint64(fstInt + uint64(count-1)) + visited, finish, fst, lst := visitor(fstKey, nil, uint32(count)) + require.Equal(t, count, visited) + require.False(t, finish) + require.Equal(t, fstKey, fst) + require.Equal(t, fstGrpBound, lst) + }) + + t.Run("iterate over the last group, start before group bound", func(t *testing.T) { + lstGrpStartInt := lstInt - uint64(count-1) + lstGrpFstKey := new(felt.Felt).SetUint64(lstGrpStartInt) + startKey := new(felt.Felt).SetUint64(lstGrpStartInt - uint64(count)) + + visited, finish, fst, lst := visitor(startKey, nil, maxNodes) + require.Equal(t, count, visited) + require.True(t, finish) + require.Equal(t, lstGrpFstKey, fst) + require.Equal(t, lstKey, lst) + }) + + sndGrpFstKey := new(felt.Felt).SetUint64(bigPrefix + uint64(count)) + sndGrpLstKey := new(felt.Felt).SetUint64(bigPrefix + uint64(2*count-1)) + t.Run("second group key selection", func(t *testing.T) { + visited, _, _, lst := visitor(fstKey, nil, uint32(count+1)) + require.Equal(t, count+1, visited) + require.Equal(t, sndGrpFstKey, lst) + + visited, finish, fst, lst := visitor(sndGrpFstKey, sndGrpLstKey, maxNodes) + require.Equal(t, count, visited) + require.True(t, finish) + require.Equal(t, sndGrpFstKey, fst) + require.Equal(t, sndGrpLstKey, lst) + }) + + t.Run("second group key selection 2", func(t *testing.T) { + nodeAfterFstGrp := new(felt.Felt).SetUint64(fstInt + uint64(count+1)) + visited, _, fst, lst := visitor(nodeAfterFstGrp, nil, 1) + require.Equal(t, 1, visited) + require.Equal(t, sndGrpFstKey, fst) + require.Equal(t, fst, lst) + + visited, finish, fst, lst := visitor(sndGrpFstKey, nil, uint32(count)) + require.Equal(t, count, visited) + require.False(t, finish) + require.Equal(t, sndGrpFstKey, fst) + require.Equal(t, sndGrpLstKey, lst) + }) +} diff --git a/core/trie/trie.go b/core/trie/trie.go index c03357d3af..5ab0f2c481 100644 --- a/core/trie/trie.go +++ b/core/trie/trie.go @@ -11,10 +11,18 @@ import ( "github.com/NethermindEth/juno/core/crypto" "github.com/NethermindEth/juno/core/felt" "github.com/NethermindEth/juno/db" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" ) type hashFunc func(*felt.Felt, *felt.Felt) *felt.Felt +type IterableStorage interface { + IterateLeaf(startKey *Key, consumer func(key, value *felt.Felt) (bool, error)) (bool, error) +} + +type HashFunc func(*felt.Felt, *felt.Felt) *felt.Felt + // Trie is a dense Merkle Patricia Trie (i.e., all internal nodes have two children). 
 //
 // This implementation allows for a "flat" storage by keying nodes on their path rather than
@@ -43,6 +51,10 @@ type Trie struct {
 	rootKeyIsDirty bool
 }

+func (t *Trie) GetRootKey() *Key {
+	return t.rootKey
+}
+
 type NewTrieFunc func(*Storage, uint8) (*Trie, error)

 func NewTriePedersen(storage *Storage, height uint8) (*Trie, error) {
@@ -53,6 +65,10 @@
 	return newTrie(storage, height, crypto.Poseidon)
 }

+func NewTrie(storage *Storage, height uint8, hash HashFunc) (*Trie, error) {
+	return newTrie(storage, height, hashFunc(hash))
+}
+
 func newTrie(storage *Storage, height uint8, hash hashFunc) (*Trie, error) {
 	if height > felt.Bits {
 		return nil, fmt.Errorf("max trie height is %d, got: %d", felt.Bits, height)
@@ -321,8 +337,14 @@ func (t *Trie) insertOrUpdateValue(nodeKey *Key, node *Node, nodes []StorageNode
 	return nil
 }

+var triePut = promauto.NewCounter(prometheus.CounterOpts{
+	Name: "juno_trie_put",
+	Help: "trie put",
+})
+
 // Put updates the corresponding `value` for a `key`
 func (t *Trie) Put(key, value *felt.Felt) (*felt.Felt, error) {
+	triePut.Inc()
 	if key.Cmp(t.maxKey) > 0 {
 		return nil, fmt.Errorf("key %s exceeds trie height %d", key, t.height)
 	}
@@ -715,3 +737,50 @@ func (t *Trie) dump(level int, parentP *Key) {
 		storage: t.storage,
 	}).dump(level+1, t.rootKey)
 }
+
+// Iterate walks the trie from startValue in ascending order until the consumer returns false,
+// an error occurs, or the end of the trie is reached. It returns true if the end of the trie was reached.
+// TODO: it's much more efficient to iterate at the txn level. But even without that, if the leaves are
+// ordered correctly, the block cache should have a pretty good hit rate.
+func (t *Trie) Iterate(startValue *felt.Felt, consumer func(key, value *felt.Felt) (bool, error)) (bool, error) {
+	if startValue == nil {
+		startValue = &felt.Zero
+	}
+	startKey := t.feltToKey(startValue)
+
+	return t.doIterate(&startKey, t.rootKey, consumer)
+}
+
+// doIterate returns false if the consumer stopped the iteration, true once the subtree is fully traversed
+func (t *Trie) doIterate(startKey, key *Key, consumer func(key, value *felt.Felt) (bool, error)) (bool, error) {
+	if key == nil {
+		return false, nil
+	}
+
+	node, err := t.storage.Get(key)
+	if err != nil {
+		return false, err
+	}
+
+	if key.Len() == t.height {
+		if startKey.CmpAligned(key) > 0 {
+			return true, nil
+		}
+		keyAsFelt := key.Felt()
+		return consumer(&keyAsFelt, node.Value)
+	}
+
+	// Only descend into the left subtree if startKey is lower than the right child's key
+	if startKey.CmpAligned(node.Right) < 0 {
+		next, err := t.doIterate(startKey, node.Left, consumer)
+		if err != nil {
+			return false, err
+		}
+
+		if !next {
+			return false, nil
+		}
+	}
+
+	return t.doIterate(startKey, node.Right, consumer)
+}
diff --git a/core/trie/trie_test.go b/core/trie/trie_test.go
index fb5460739d..6ed50be639 100644
--- a/core/trie/trie_test.go
+++ b/core/trie/trie_test.go
@@ -1,6 +1,7 @@
 package trie_test

 import (
+	"math/big"
 	"strconv"
 	"testing"

@@ -375,3 +376,154 @@ func BenchmarkTriePut(b *testing.B) {
 		return t.Commit()
 	}))
 }
+
+func TestTrieIterate(t *testing.T) {
+	t.Run("iterate standard", func(t *testing.T) {
+		require.NoError(t, trie.RunOnTempTriePedersen(251, func(tempTrie *trie.Trie) error {
+			expectedKeys := []*felt.Felt{}
+			expectedValues := []*felt.Felt{}
+			for i := 0; i < 2; i++ {
+				key := new(felt.Felt).SetUint64(uint64(i*10 + 1))
+				val := new(felt.Felt).SetUint64(uint64(i + 1))
+
+				expectedKeys = append(expectedKeys, key)
+				expectedValues =
append(expectedValues, val) + + _, err := tempTrie.Put(key, val) + require.NoError(t, err) + } + + startAddr := new(felt.Felt).SetUint64(0) + keys := []*felt.Felt{} + values := []*felt.Felt{} + finished, err := tempTrie.Iterate(startAddr, func(key, value *felt.Felt) (bool, error) { + keys = append(keys, key) + values = append(values, value) + return true, nil + }) + + assert.Nil(t, err) + assert.True(t, finished) + + assert.Equal(t, expectedKeys, keys) + assert.Equal(t, expectedValues, values) + + return nil + })) + }) +} + +func numToFelt(num int) *felt.Felt { + return numToFeltBigInt(big.NewInt(int64(num))) +} + +func numToFeltBigInt(num *big.Int) *felt.Felt { + f := felt.Zero + return f.SetBigInt(num) +} + +func TestTrie_Iterate(t *testing.T) { + tr, err := trie.NewTriePedersen(trie.NewStorage(db.NewMemTransaction(), []byte{1}), 251) + assert.Nil(t, err) + + for i := 0; i < 10; i++ { + _, err = tr.Put(numToFelt(i*10), numToFelt(i+10)) + assert.Nil(t, err) + } + err = tr.Commit() + assert.Nil(t, err) + + tests := []struct { + name string + startKey *felt.Felt + count int + expectedKeys []*felt.Felt + expectedValues []*felt.Felt + }{ + { + name: "all", + startKey: numToFelt(0), + count: 10, + expectedKeys: []*felt.Felt{ + numToFelt(0), + numToFelt(10), + numToFelt(20), + numToFelt(30), + numToFelt(40), + numToFelt(50), + numToFelt(60), + numToFelt(70), + numToFelt(80), + numToFelt(90), + }, + expectedValues: []*felt.Felt{ + numToFelt(10), + numToFelt(11), + numToFelt(12), + numToFelt(13), + numToFelt(14), + numToFelt(15), + numToFelt(16), + numToFelt(17), + numToFelt(18), + numToFelt(19), + }, + }, + { + name: "limited", + startKey: numToFelt(0), + count: 2, + expectedKeys: []*felt.Felt{ + numToFelt(0), + numToFelt(10), + }, + expectedValues: []*felt.Felt{ + numToFelt(10), + numToFelt(11), + }, + }, + { + name: "limited with offset", + startKey: numToFelt(30), + count: 2, + expectedKeys: []*felt.Felt{ + numToFelt(30), + numToFelt(40), + }, + expectedValues: []*felt.Felt{ + numToFelt(13), + numToFelt(14), + }, + }, + { + name: "limited with offset that does not match a leaf", + startKey: numToFelt(25), + count: 2, + expectedKeys: []*felt.Felt{ + numToFelt(30), + numToFelt(40), + }, + expectedValues: []*felt.Felt{ + numToFelt(13), + numToFelt(14), + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + keys := make([]*felt.Felt, 0) + values := make([]*felt.Felt, 0) + + _, err := tr.Iterate(test.startKey, func(key *felt.Felt, value *felt.Felt) (bool, error) { + keys = append(keys, key) + values = append(values, value) + return len(keys) < test.count, nil + }) + assert.Nil(t, err) + + assert.Equal(t, test.expectedKeys, keys) + assert.Equal(t, test.expectedValues, values) + }) + } +} diff --git a/db/db.go b/db/db.go index 6475dc5e19..1ff7b101f7 100644 --- a/db/db.go +++ b/db/db.go @@ -33,6 +33,8 @@ type DB interface { // WithListener registers an EventListener WithListener(listener EventListener) DB + + PersistedView() (Transaction, func() error, error) } // Iterator is an iterator over a DB's key/value pairs. 
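Reviewer note (not part of the patch): the trie-side pieces above, Iterate/IterateAndGenerateProof on *Trie plus the package-level VerifyRange, are meant to compose into a snap-style range exchange: a server streams a bounded run of leaves with boundary proofs, and a client checks the run against an advertised state root. A minimal sketch of that flow, assuming an in-memory trie seeded with ten illustrative keys and a made-up chunk limit of 4 (every name below is for illustration only):

```go
package main

import (
	"fmt"

	"github.com/NethermindEth/juno/core/crypto"
	"github.com/NethermindEth/juno/core/felt"
	"github.com/NethermindEth/juno/core/trie"
	"github.com/NethermindEth/juno/db"
)

func main() {
	// Server side: a small contract-storage-like trie.
	tr, err := trie.NewTriePedersen(trie.NewStorage(db.NewMemTransaction(), []byte{}), 251)
	if err != nil {
		panic(err)
	}
	for i := uint64(1); i <= 10; i++ {
		if _, err = tr.Put(new(felt.Felt).SetUint64(i*100), new(felt.Felt).SetUint64(i)); err != nil {
			panic(err)
		}
	}
	root, err := tr.Root()
	if err != nil {
		panic(err)
	}

	// Serve a bounded chunk of leaves starting at key 300, together with
	// the boundary proofs that anchor the chunk to the root.
	start := new(felt.Felt).SetUint64(300)
	var keys, values []*felt.Felt
	proofs, finished, err := tr.IterateAndGenerateProof(start, func(key, value *felt.Felt) (bool, error) {
		keys = append(keys, key)
		values = append(values, value)
		return len(keys) < 4, nil // pretend the response size limit is 4 leaves
	})
	if err != nil {
		panic(err)
	}

	// Client side: verify the chunk against the advertised state root and
	// learn whether more leaves remain to the right.
	hasMore, valid, err := trie.VerifyRange(root, start, keys, values, proofs, crypto.Pedersen, 251)
	fmt.Println("finished:", finished, "hasMore:", hasMore, "valid:", valid, "err:", err)
}
```

VerifyRange accepts crypto.Pedersen directly for its hash parameter, exactly as the new snap_support_test.go does; a client would use hasMore to decide whether to request another chunk starting after the last verified key.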
diff --git a/db/pebble/batch.go b/db/pebble/batch.go
index 60747d6a86..5646f341c8 100644
--- a/db/pebble/batch.go
+++ b/db/pebble/batch.go
@@ -14,14 +14,15 @@ var _ db.Transaction = (*batch)(nil)

 type batch struct {
 	batch *pebble.Batch
-	lock     *sync.Mutex
+	dbLock   *sync.Mutex
+	rwlock   sync.RWMutex
 	listener db.EventListener
 }

-func NewBatch(dbBatch *pebble.Batch, lock *sync.Mutex, listener db.EventListener) *batch {
+func NewBatch(dbBatch *pebble.Batch, dbLock *sync.Mutex, listener db.EventListener) *batch {
 	return &batch{
 		batch: dbBatch,
-		lock:     lock,
+		dbLock:   dbLock,
 		listener: listener,
 	}
 }
@@ -34,8 +35,8 @@ func (b *batch) Discard() error {
 	err := b.batch.Close()
 	b.batch = nil
-	b.lock.Unlock()
-	b.lock = nil
+	b.dbLock.Unlock()
+	b.dbLock = nil
 	return err
 }
@@ -53,6 +54,9 @@ func (b *batch) Commit() error {

 // Set : see db.Transaction.Set
 func (b *batch) Set(key, val []byte) error {
+	b.rwlock.Lock()
+	defer b.rwlock.Unlock()
+
 	start := time.Now()
 	if len(key) == 0 {
 		return errors.New("empty key")
 	}
@@ -69,6 +73,9 @@
 // Delete : see db.Transaction.Delete
 func (b *batch) Delete(key []byte) error {
+	b.rwlock.Lock()
+	defer b.rwlock.Unlock()
+
 	if b.batch == nil {
 		return ErrDiscardedTransaction
 	}
@@ -81,6 +88,9 @@
 // Get : see db.Transaction.Get
 func (b *batch) Get(key []byte, cb func([]byte) error) error {
+	b.rwlock.RLock()
+	defer b.rwlock.RUnlock()
+
 	if b.batch == nil {
 		return ErrDiscardedTransaction
 	}
diff --git a/db/pebble/db.go b/db/pebble/db.go
index 5974edf720..079bbd064d 100644
--- a/db/pebble/db.go
+++ b/db/pebble/db.go
@@ -158,3 +158,14 @@ func CalculatePrefixSize(ctx context.Context, pDB *DB, prefix []byte) (*Item, er

 	return item, utils.RunAndWrapOnError(it.Close, err)
 }
+
+// PersistedView : see db.DB.PersistedView
+func (d *DB) PersistedView() (db.Transaction, func() error, error) {
+	txn, err := d.NewTransaction(false)
+	if err != nil {
+		return nil, nil, err
+	}
+	return txn, func() error {
+		return txn.Discard()
+	}, nil
+}
diff --git a/db/remote/db.go b/db/remote/db.go
index 80f084edbe..45b1768a2c 100644
--- a/db/remote/db.go
+++ b/db/remote/db.go
@@ -2,6 +2,7 @@ package remote

 import (
 	"context"
+	"errors"
 	"math"
 	"time"

@@ -80,3 +81,7 @@ func (d *DB) Close() error {
 func (d *DB) Impl() any {
 	return d.kvClient
 }
+
+func (d *DB) PersistedView() (db.Transaction, func() error, error) {
+	return nil, nil, errors.New("persisted view not supported")
+}
diff --git a/migration/migration_pkg_test.go b/migration/migration_pkg_test.go
index fbcc168a90..960541fc44 100644
--- a/migration/migration_pkg_test.go
+++ b/migration/migration_pkg_test.go
@@ -81,6 +81,7 @@ func TestRelocateContractStorageRootKeys(t *testing.T) {
 func TestRecalculateBloomFilters(t *testing.T) {
 	testdb := pebble.NewMemTest(t)
 	chain := blockchain.New(testdb, &utils.Mainnet)
+	defer chain.Close()
 	client := feeder.NewTestClient(t, &utils.Mainnet)
 	gw := adaptfeeder.New(client)
@@ -165,6 +166,7 @@ func TestChangeTrieNodeEncoding(t *testing.T) {
 func TestCalculateBlockCommitments(t *testing.T) {
 	testdb := pebble.NewMemTest(t)
 	chain := blockchain.New(testdb, &utils.Mainnet)
+	defer chain.Close()
 	client := feeder.NewTestClient(t, &utils.Mainnet)
 	gw := adaptfeeder.New(client)
diff --git a/node/node.go b/node/node.go
index 38df2d9d16..fd30973832 100644
--- a/node/node.go
+++ b/node/node.go
@@ -6,6 +6,7 @@ import (
 	"fmt"
 	"net/http"
 	"net/url"
+	"os"
 	"reflect"
 	"runtime"
 	"time"
@@ -72,12 +73,13 @@ type Config struct {
 	MetricsHost string
 `mapstructure:"metrics-host"`
 	MetricsPort uint16 `mapstructure:"metrics-port"`

-	P2P           bool   `mapstructure:"p2p"`
-	P2PAddr       string `mapstructure:"p2p-addr"`
-	P2PPublicAddr string `mapstructure:"p2p-public-addr"`
-	P2PPeers      string `mapstructure:"p2p-peers"`
-	P2PFeederNode bool   `mapstructure:"p2p-feeder-node"`
-	P2PPrivateKey string `mapstructure:"p2p-private-key"`
+	P2P           bool         `mapstructure:"p2p"`
+	P2PAddr       string       `mapstructure:"p2p-addr"`
+	P2PPublicAddr string       `mapstructure:"p2p-public-addr"`
+	P2PPeers      string       `mapstructure:"p2p-peers"`
+	P2PFeederNode bool         `mapstructure:"p2p-feeder-node"`
+	P2PPrivateKey string       `mapstructure:"p2p-private-key"`
+	P2PSyncMode   p2p.SyncMode `mapstructure:"p2p-sync-mode"`

 	MaxVMs     uint `mapstructure:"max-vms"`
 	MaxVMQueue uint `mapstructure:"max-vm-queue"`
@@ -130,6 +132,9 @@ func New(cfg *Config, version string) (*Node, error) { //nolint:gocyclo,funlen

 	chain := blockchain.New(database, &cfg.Network)

+	// TODO: close a blockchain? better way?
+	services = append(services, blockchain.NewBlockchainCloser(chain, log))
+
 	// Verify that cfg.Network is compatible with the database.
 	head, err := chain.Head()
 	if err != nil && !errors.Is(err, db.ErrKeyNotFound) {
@@ -179,8 +184,12 @@ func New(cfg *Config, version string) (*Node, error) { //nolint:gocyclo,funlen
 			// Do not start the feeder synchronisation
 			synchronizer = nil
 		}
+		if os.Getenv("JUNO_P2P_NO_SYNC") != "" { // TODO(weiihann): remove this in the future
+			log.Warnw("JUNO_P2P_NO_SYNC is set, not syncing from the p2p network")
+			synchronizer = nil
+		}
 		p2pService, err = p2p.New(cfg.P2PAddr, cfg.P2PPublicAddr, version, cfg.P2PPeers, cfg.P2PPrivateKey, cfg.P2PFeederNode,
-			chain, &cfg.Network, log, database)
+			cfg.P2PSyncMode, chain, &cfg.Network, log, database)
 		if err != nil {
 			return nil, fmt.Errorf("set up p2p service: %w", err)
 		}
@@ -376,6 +385,7 @@ func (n *Node) Run(ctx context.Context) {
 	}

 	<-ctx.Done()
+	// TODO: chain.Close() - which service should do this?
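+	// For reference, the blockchain.NewBlockchainCloser service registered in New above is one way to
+	// resolve this TODO: a tiny service that waits for shutdown and then closes the chain. A minimal
+	// sketch, assuming service.Service is just Run(ctx context.Context) error (the real helper may differ):
+	//
+	//	type blockchainCloser struct {
+	//		chain *blockchain.Blockchain
+	//		log   utils.SimpleLogger
+	//	}
+	//
+	//	func (b *blockchainCloser) Run(ctx context.Context) error {
+	//		<-ctx.Done() // wait until the node is shutting down
+	//		b.log.Infow("Closing the blockchain database")
+	//		return b.chain.Close()
+	//	}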
 	n.log.Infow("Shutting down Juno...")
 }
diff --git a/node/node_test.go b/node/node_test.go
index a9287e0e4e..e9f718cba6 100644
--- a/node/node_test.go
+++ b/node/node_test.go
@@ -74,6 +74,7 @@ func TestNetworkVerificationOnNonEmptyDB(t *testing.T) {
 	ctx, cancel := context.WithTimeout(context.Background(), 250*time.Millisecond)
 	require.NoError(t, syncer.Run(ctx))
 	cancel()
+	chain.Close()
 	require.NoError(t, database.Close())

 	_, err = node.New(&node.Config{
diff --git a/p2p/downloader.go b/p2p/downloader.go
new file mode 100644
index 0000000000..6bb2b466aa
--- /dev/null
+++ b/p2p/downloader.go
@@ -0,0 +1,80 @@
+package p2p
+
+import (
+	"context"
+	"os"
+	"sync/atomic"
+
+	"github.com/NethermindEth/juno/blockchain"
+	"github.com/NethermindEth/juno/sync"
+	"github.com/NethermindEth/juno/utils"
+	"github.com/libp2p/go-libp2p/core/host"
+)
+
+type Downloader struct {
+	isFeeder   bool
+	mode       atomic.Uint32
+	baseSyncer *SyncService
+	snapSyncer *SnapSyncer
+	log        utils.SimpleLogger
+}
+
+func NewDownloader(
+	isFeeder bool,
+	syncMode SyncMode,
+	p2pHost host.Host,
+	network *utils.Network,
+	bc *blockchain.Blockchain,
+	log utils.SimpleLogger,
+) *Downloader {
+	dl := &Downloader{
+		isFeeder: isFeeder,
+		log:      log,
+	}
+
+	dl.baseSyncer = newSyncService(bc, p2pHost, network, log)
+
+	var snapSyncer *SnapSyncer
+	if syncMode == SnapSync {
+		snapSyncer = NewSnapSyncer(dl.baseSyncer.Client(), bc, log)
+	}
+	dl.snapSyncer = snapSyncer
+
+	// TODO: when syncing becomes more mature, we need a way to dynamically determine which sync mode to use
+	// For now, we will use the sync mode that is passed in the constructor
+	dl.mode.Store(uint32(syncMode))
+
+	return dl
+}
+
+func (d *Downloader) Start(ctx context.Context) {
+	// Feeder node doesn't sync using P2P
+	if d.isFeeder {
+		return
+	}
+
+	d.log.Infow("Downloader start", "mode", d.getMode())
+	if d.getMode() == SnapSync {
+		// TODO: a hack, remove this
+		if os.Getenv("JUNO_P2P_NO_SYNC") == "" {
+			err := d.snapSyncer.Run(ctx)
+			if err != nil {
+				d.log.Errorw("Snap syncer failed to run", "err", err)
+				return
+			}
+		} else {
+			d.log.Infow("Syncing is disabled")
+			return
+		}
+	}
+
+	d.baseSyncer.Start(ctx)
+}
+
+func (d *Downloader) getMode() SyncMode {
+	return SyncMode(d.mode.Load())
+}
+
+func (d *Downloader) WithListener(l sync.EventListener) {
+	d.baseSyncer.WithListener(l)
+}
diff --git a/p2p/modes.go b/p2p/modes.go
new file mode 100644
index 0000000000..32da59e42f
--- /dev/null
+++ b/p2p/modes.go
@@ -0,0 +1,74 @@
+package p2p
+
+import (
+	"encoding"
+	"fmt"
+
+	"github.com/spf13/pflag"
+)
+
+// The following are necessary for Cobra and Viper, respectively, to unmarshal
+// CLI/config parameters properly.
+var (
+	_ pflag.Value              = (*SyncMode)(nil)
+	_ encoding.TextUnmarshaler = (*SyncMode)(nil)
+)
+
+// SyncMode represents the synchronisation mode of the downloader.
+// It is a uint32 as it is used with atomic operations.
+type SyncMode uint32
+
+const (
+	FullSync SyncMode = iota // Synchronise by downloading blocks and applying them to the chain sequentially
+	SnapSync                 // Download the chain and the state via the snap protocol
+)
+
+func (s SyncMode) IsValid() bool {
+	return s == FullSync || s == SnapSync
+}
+
+func (s SyncMode) String() string {
+	switch s {
+	case FullSync:
+		return "full"
+	case SnapSync:
+		return "snap"
+	default:
+		return "unknown"
+	}
+}
+
+func (s SyncMode) Type() string {
+	return "SyncMode"
+}
+
+func (s SyncMode) MarshalYAML() (interface{}, error) {
+	return s.String(), nil
+}
+
+func (s *SyncMode) Set(mode string) error {
+	switch mode {
+	case "full":
+		*s = FullSync
+	case "snap":
+		*s = SnapSync
+	default:
+		return fmt.Errorf("unknown sync mode %q, want \"full\" or \"snap\"", mode)
+	}
+	return nil
+}
+
+func (s SyncMode) MarshalText() ([]byte, error) {
+	switch s {
+	case FullSync:
+		return []byte("full"), nil
+	case SnapSync:
+		return []byte("snap"), nil
+	default:
+		return nil, fmt.Errorf("unknown sync mode %d", s)
+	}
+}
+
+func (s *SyncMode) UnmarshalText(text []byte) error {
+	return s.Set(string(text))
+}
diff --git a/p2p/p2p.go b/p2p/p2p.go
index 49633f49ee..e344ca41d6 100644
--- a/p2p/p2p.go
+++ b/p2p/p2p.go
@@ -48,15 +48,20 @@ type Service struct {
 	topics     map[string]*pubsub.Topic
 	topicsLock sync.RWMutex

-	synchroniser *syncService
+	downloader   *Downloader
 	gossipTracer *gossipTracer

-	feederNode bool
-	database   db.DB
+	database db.DB
 }

-func New(addr, publicAddr, version, peers, privKeyStr string, feederNode bool, bc *blockchain.Blockchain, snNetwork *utils.Network,
-	log utils.SimpleLogger, database db.DB,
+func New(
+	addr, publicAddr, version, peers, privKeyStr string,
+	feederNode bool,
+	syncMode SyncMode,
+	bc *blockchain.Blockchain,
+	snNetwork *utils.Network,
+	log utils.SimpleLogger,
+	database db.DB,
 ) (*Service, error) {
 	if addr == "" {
 		// 0.0.0.0/tcp/0 will listen on any interface device and assign a free port.
@@ -113,10 +118,10 @@
 	// Todo: try to understand what will happen if user passes a multiaddr with p2p public and a private key which doesn't match.
 	// For example, a user passes the following multiaddr: --p2p-addr=/ip4/0.0.0.0/tcp/7778/p2p/(SomePublicKey) and also passes a
 	// --p2p-private-key="SomePrivateKey". However, if the private/public key pair doesn't match, what will happen in this case?
-	return NewWithHost(p2pHost, peers, feederNode, bc, snNetwork, log, database)
+	return NewWithHost(p2pHost, peers, feederNode, syncMode, bc, snNetwork, log, database)
 }

-func NewWithHost(p2phost host.Host, peers string, feederNode bool, bc *blockchain.Blockchain, snNetwork *utils.Network,
+func NewWithHost(p2phost host.Host, peers string, feederNode bool, syncMode SyncMode, bc *blockchain.Blockchain, snNetwork *utils.Network,
 	log utils.SimpleLogger, database db.DB,
 ) (*Service, error) {
 	var (
@@ -147,19 +152,19 @@
 		return nil, err
 	}

-	// todo: reconsider initialising synchroniser here because if node is a feedernode we shouldn't not create an instance of it.
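+	// For context: the sync mode arrives here straight from the CLI/config. pflag calls SyncMode.Set and
+	// Viper uses UnmarshalText, so `--p2p-sync-mode=snap` (flag name assumed from the mapstructure tag in
+	// node.Config) reduces to something like this sketch:
+	//
+	//	var mode SyncMode
+	//	_ = mode.Set("snap") // or "full"
+	//	dl := NewDownloader(false, mode, p2phost, snNetwork, bc, log)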
+ downloader := NewDownloader(feederNode, syncMode, p2phost, snNetwork, bc, log) + handler := starknet.NewHandler(bc, log) + handler.WithSnapsyncSupport(NewSnapServer(bc, log)) // TODO: initialise the snap server in the starknet handler - synchroniser := newSyncService(bc, p2phost, snNetwork, log) s := &Service{ - synchroniser: synchroniser, - log: log, - host: p2phost, - network: snNetwork, - dht: p2pdht, - feederNode: feederNode, - topics: make(map[string]*pubsub.Topic), - handler: starknet.NewHandler(bc, log), - database: database, + downloader: downloader, + log: log, + host: p2phost, + network: snNetwork, + dht: p2pdht, + topics: make(map[string]*pubsub.Topic), + handler: handler, + database: database, } return s, nil } @@ -267,9 +272,8 @@ func (s *Service) Run(ctx context.Context) error { s.setProtocolHandlers() - if !s.feederNode { - s.synchroniser.start(ctx) - } + // Start the syncing process + s.downloader.Start(ctx) <-ctx.Done() if err := s.persistPeers(); err != nil { @@ -287,6 +291,10 @@ func (s *Service) setProtocolHandlers() { s.SetProtocolHandler(starknet.TransactionsPID(), s.handler.TransactionsHandler) s.SetProtocolHandler(starknet.ClassesPID(), s.handler.ClassesHandler) s.SetProtocolHandler(starknet.StateDiffPID(), s.handler.StateDiffHandler) + s.SetProtocolHandler(starknet.SnapshotClassRangePID(), s.handler.ClassRangeHandler) + s.SetProtocolHandler(starknet.SnapshotContractRangePID(), s.handler.ContractRangeHandler) + s.SetProtocolHandler(starknet.SnapshotContractStorageRangePID(), s.handler.ContractStorageHandler) + s.SetProtocolHandler(starknet.SnapshotClassesPID(), s.handler.ClassHashesHandler) } func (s *Service) callAndLogErr(f func() error, msg string) { @@ -405,7 +413,7 @@ func (s *Service) SetProtocolHandler(pid protocol.ID, handler func(network.Strea } func (s *Service) WithListener(l junoSync.EventListener) { - s.synchroniser.WithListener(l) + s.downloader.WithListener(l) } func (s *Service) WithGossipTracer() { diff --git a/p2p/p2p_test.go b/p2p/p2p_test.go index 070a9eedb8..0dd493027b 100644 --- a/p2p/p2p_test.go +++ b/p2p/p2p_test.go @@ -34,6 +34,7 @@ func TestService(t *testing.T) { peerHosts[0], "", false, + p2p.FullSync, nil, &utils.Integration, utils.NewNopZapLogger(), @@ -56,6 +57,7 @@ func TestService(t *testing.T) { peerHosts[1], strings.Join(peerAddrsString, ","), true, + p2p.FullSync, nil, &utils.Integration, utils.NewNopZapLogger(), @@ -144,6 +146,7 @@ func TestInvalidKey(t *testing.T) { "", "something", false, + p2p.FullSync, nil, &utils.Integration, utils.NewNopZapLogger(), @@ -162,6 +165,7 @@ func TestValidKey(t *testing.T) { "", "08011240333b4a433f16d7ca225c0e99d0d8c437b835cb74a98d9279c561977690c80f681b25ccf3fa45e2f2de260149c112fa516b69057dd3b0151a879416c0cb12d9b3", false, + p2p.FullSync, nil, &utils.Integration, utils.NewNopZapLogger(), @@ -199,6 +203,7 @@ func TestLoadAndPersistPeers(t *testing.T) { "", "5f6cdc3aebcc74af494df054876100368ef6126e3a33fa65b90c765b381ffc37a0a63bbeeefab0740f24a6a38dabb513b9233254ad0020c721c23e69bc820089", false, + p2p.FullSync, nil, &utils.Integration, utils.NewNopZapLogger(), diff --git a/p2p/snap_server.go b/p2p/snap_server.go new file mode 100644 index 0000000000..d757dede5c --- /dev/null +++ b/p2p/snap_server.go @@ -0,0 +1,455 @@ +package p2p + +import ( + "iter" + "math/big" + + "github.com/NethermindEth/juno/adapters/core2p2p" + "github.com/NethermindEth/juno/adapters/p2p2core" + "github.com/NethermindEth/juno/blockchain" + "github.com/NethermindEth/juno/core" + "github.com/NethermindEth/juno/core/felt" + 
"github.com/NethermindEth/juno/core/trie" + "github.com/NethermindEth/juno/p2p/starknet/spec" + "github.com/NethermindEth/juno/utils" + "github.com/ethereum/go-ethereum/log" + "google.golang.org/protobuf/proto" +) + +type ContractRangeStreamingResult struct { + ContractsRoot *felt.Felt + ClassesRoot *felt.Felt + Range []*spec.ContractState + RangeProof *spec.PatriciaRangeProof +} + +type StorageRangeRequest struct { + StateRoot *felt.Felt + ChunkPerProof uint64 // Missing in spec + Queries []*spec.StorageRangeQuery +} + +type StorageRangeStreamingResult struct { + ContractsRoot *felt.Felt + ClassesRoot *felt.Felt + StorageAddr *felt.Felt + Range []*spec.ContractStoredValue + RangeProof *spec.PatriciaRangeProof +} + +type ClassRangeStreamingResult struct { + ContractsRoot *felt.Felt + ClassesRoot *felt.Felt + Range *spec.Classes + RangeProof *spec.PatriciaRangeProof +} + +type SnapServerBlockchain interface { + GetStateForStateRoot(stateRoot *felt.Felt) (*core.State, error) + GetClasses(felts []*felt.Felt) ([]core.Class, error) +} + +type yieldFunc = func(proto.Message) bool + +var _ SnapServerBlockchain = (*blockchain.Blockchain)(nil) + +func NewSnapServer(bc SnapServerBlockchain, logger utils.SimpleLogger) *snapServer { + return &snapServer{ + log: logger, + blockchain: bc, + } +} + +type snapServer struct { + log utils.SimpleLogger + blockchain SnapServerBlockchain +} + +func determineMaxNodes(specifiedMaxNodes uint32) uint32 { + const ( + defaultMaxNodes = 1024 * 16 + maxNodePerRequest = 1024 * 1024 // I just want it to process faster + ) + + if specifiedMaxNodes == 0 { + return defaultMaxNodes + } + + if specifiedMaxNodes < maxNodePerRequest { + return specifiedMaxNodes + } + + return maxNodePerRequest +} + +func (b *snapServer) GetClassRange(request *spec.ClassRangeRequest) (iter.Seq[proto.Message], error) { + var finMsg proto.Message = &spec.ClassRangeResponse{ + Responses: &spec.ClassRangeResponse_Fin{}, + } + + stateRoot := p2p2core.AdaptHash(request.Root) + + return func(yield yieldFunc) { + s, err := b.blockchain.GetStateForStateRoot(stateRoot) + if err != nil { + log.Error("error getting state for state root", "err", err) + return + } + + contractRoot, classRoot, err := s.StateAndClassRoot() + if err != nil { + log.Error("error getting state and class root", "err", err) + return + } + + ctrie, classCloser, err := s.ClassTrie() + if err != nil { + log.Error("error getting class trie", "err", err) + return + } + defer func() { _ = classCloser() }() + + startAddr := p2p2core.AdaptHash(request.Start) + limitAddr := p2p2core.AdaptHash(request.End) + if limitAddr != nil && limitAddr.IsZero() { + limitAddr = nil + } + + for { + response := &spec.Classes{ + Classes: make([]*spec.Class, 0), + } + + classkeys := []*felt.Felt{} + proofs, finished, err := ctrie.IterateWithLimit(startAddr, limitAddr, determineMaxNodes(request.ChunksPerProof), b.log, + func(key, value *felt.Felt) error { + classkeys = append(classkeys, key) + return nil + }) + if err != nil { + log.Error("error iterating class trie", "err", err) + return + } + + coreClasses, err := b.blockchain.GetClasses(classkeys) + if err != nil { + log.Error("error getting classes", "err", err) + return + } + + for _, coreclass := range coreClasses { + if coreclass == nil { + log.Error("nil class in the returned array of core classes") + return + } + response.Classes = append(response.Classes, core2p2p.AdaptClass(coreclass)) + } + + clsMsg := &spec.ClassRangeResponse{ + ContractsRoot: core2p2p.AdaptHash(contractRoot), + ClassesRoot: 
core2p2p.AdaptHash(classRoot), + Responses: &spec.ClassRangeResponse_Classes{ + Classes: response, + }, + RangeProof: Core2P2pProof(proofs), + } + + first := classkeys[0] + last := classkeys[len(classkeys)-1] + b.log.Infow("sending class range response", "len(classes)", len(classkeys), "first", first, "last", last) + if !yield(clsMsg) { + // we should not send `FinMsg` when the client explicitly asks to stop + return + } + if finished { + break + } + startAddr = classkeys[len(classkeys)-1] + } + + yield(finMsg) + b.log.Infow("class range iteration completed") + }, nil +} + +func (b *snapServer) GetContractRange(request *spec.ContractRangeRequest) (iter.Seq[proto.Message], error) { + var finMsg proto.Message = &spec.ContractRangeResponse{ + Responses: &spec.ContractRangeResponse_Fin{}, + } + stateRoot := p2p2core.AdaptHash(request.StateRoot) + + return func(yield yieldFunc) { + s, err := b.blockchain.GetStateForStateRoot(stateRoot) + if err != nil { + log.Error("error getting state for state root", "err", err) + return + } + + contractRoot, classRoot, err := s.StateAndClassRoot() + if err != nil { + log.Error("error getting state and class root", "err", err) + return + } + + strie, scloser, err := s.StorageTrie() + if err != nil { + log.Error("error getting storage trie", "err", err) + return + } + defer func() { _ = scloser() }() + + startAddr := p2p2core.AdaptAddress(request.Start) + limitAddr := p2p2core.AdaptAddress(request.End) + states := []*spec.ContractState{} + + for { + proofs, finished, err := strie.IterateWithLimit(startAddr, limitAddr, determineMaxNodes(request.ChunksPerProof), b.log, + func(key, value *felt.Felt) error { + classHash, err := s.ContractClassHash(key) + if err != nil { + return err + } + + nonce, err := s.ContractNonce(key) + if err != nil { + return err + } + + ctr, err := s.StorageTrieForAddr(key) + if err != nil { + return err + } + + croot, err := ctr.Root() + if err != nil { + return err + } + + startAddr = key + states = append(states, &spec.ContractState{ + Address: core2p2p.AdaptAddress(key), + Class: core2p2p.AdaptHash(classHash), + Storage: core2p2p.AdaptHash(croot), + Nonce: nonce.Uint64(), + }) + return nil + }) + if err != nil { + log.Error("error iterating storage trie", "err", err) + return + } + + cntrMsg := &spec.ContractRangeResponse{ + Root: request.StateRoot, + ContractsRoot: core2p2p.AdaptHash(contractRoot), + ClassesRoot: core2p2p.AdaptHash(classRoot), + RangeProof: Core2P2pProof(proofs), + Responses: &spec.ContractRangeResponse_Range{ + Range: &spec.ContractRange{ + State: states, + }, + }, + } + + if !yield(cntrMsg) { + // we should not send `FinMsg` when the client explicitly asks to stop + return + } + if finished { + break + } + + states = states[:0] + } + + yield(finMsg) + b.log.Infow("contract range iteration completed") + }, nil +} + +func (b *snapServer) GetStorageRange(request *spec.ContractStorageRequest) (iter.Seq[proto.Message], error) { + var finMsg proto.Message = &spec.ContractStorageResponse{ + Responses: &spec.ContractStorageResponse_Fin{}, + } + + return func(yield yieldFunc) { + stateRoot := p2p2core.AdaptHash(request.StateRoot) + + s, err := b.blockchain.GetStateForStateRoot(stateRoot) + if err != nil { + log.Error("error getting state for state root", "err", err) + return + } + + var curNodeLimit uint32 = 1000000 + + // shouldContinue is a return value from the yield function which specify whether the iteration should continue + shouldContinue := true + for _, query := range request.Query { + contractLimit := 
curNodeLimit + + strie, err := s.StorageTrieForAddr(p2p2core.AdaptAddress(query.Address)) + if err != nil { + addr := p2p2core.AdaptAddress(query.Address) + log.Error("error getting storage trie for address", "addr", addr, "err", err) + return + } + + handled, err := b.handleStorageRangeRequest(strie, query, request.ChunksPerProof, contractLimit, b.log, + func(values []*spec.ContractStoredValue, proofs []trie.ProofNode) bool { + stoMsg := &spec.ContractStorageResponse{ + StateRoot: request.StateRoot, + ContractAddress: query.Address, + RangeProof: Core2P2pProof(proofs), + Responses: &spec.ContractStorageResponse_Storage{ + Storage: &spec.ContractStorage{ + KeyValue: values, + }, + }, + } + if shouldContinue = yield(stoMsg); !shouldContinue { + return false + } + return true + }) + if err != nil { + log.Error("error handling storage range request", "err", err) + return + } + + curNodeLimit -= handled + + if curNodeLimit <= 0 { + break + } + } + if shouldContinue { + // we should `Fin` only when client expects iteration to continue + yield(finMsg) + } + }, nil +} + +func (b *snapServer) GetClasses(request *spec.ClassHashesRequest) (iter.Seq[proto.Message], error) { + var finMsg proto.Message = &spec.ClassesResponse{ + ClassMessage: &spec.ClassesResponse_Fin{}, + } + + return func(yield yieldFunc) { + felts := make([]*felt.Felt, len(request.ClassHashes)) + for i, hash := range request.ClassHashes { + felts[i] = p2p2core.AdaptHash(hash) + } + + coreClasses, err := b.blockchain.GetClasses(felts) + if err != nil { + log.Error("error getting classes", "err", err) + return + } + + for _, cls := range coreClasses { + clsMsg := &spec.ClassesResponse{ + ClassMessage: &spec.ClassesResponse_Class{ + Class: core2p2p.AdaptClass(cls), + }, + } + if !yield(clsMsg) { + // we should not send `FinMsg` when the client explicitly asks to stop + return + } + } + + yield(finMsg) + }, nil +} + +func (b *snapServer) handleStorageRangeRequest( + stTrie *trie.Trie, + request *spec.StorageRangeQuery, + maxChunkPerProof, nodeLimit uint32, + logger utils.SimpleLogger, + yield func([]*spec.ContractStoredValue, []trie.ProofNode) bool, +) (uint32, error) { + totalSent := 0 + finished := false + startAddr := p2p2core.AdaptFelt(request.Start.Key) + var endAddr *felt.Felt = nil + if request.End != nil { + endAddr = p2p2core.AdaptFelt(request.End.Key) + } + + for !finished { + response := []*spec.ContractStoredValue{} + + limit := maxChunkPerProof + if nodeLimit < limit { + limit = nodeLimit + } + + proofs, finish, err := stTrie.IterateWithLimit(startAddr, endAddr, limit, logger, + func(key, value *felt.Felt) error { + response = append(response, &spec.ContractStoredValue{ + Key: core2p2p.AdaptFelt(key), + Value: core2p2p.AdaptFelt(value), + }) + + startAddr = key + return nil + }) + finished = finish + + if err != nil { + return 0, err + } + + if len(response) == 0 { + finished = true + } + + if !yield(response, proofs) { + finished = true + } + + totalSent += len(response) + nodeLimit -= limit + + asBint := startAddr.BigInt(big.NewInt(0)) + asBint = asBint.Add(asBint, big.NewInt(1)) + startAddr = startAddr.SetBigInt(asBint) + } + + return uint32(totalSent), nil +} + +func Core2P2pProof(proofs []trie.ProofNode) *spec.PatriciaRangeProof { + nodes := make([]*spec.PatriciaNode, len(proofs)) + + for i := range proofs { + switch node := proofs[i].(type) { + case *trie.Edge: + pathFelt := node.Path.Felt() + nodes[i] = &spec.PatriciaNode{ + Node: &spec.PatriciaNode_Edge_{ + Edge: &spec.PatriciaNode_Edge{ + Length: 
uint32(node.Path.Len()), + Path: core2p2p.AdaptFelt(&pathFelt), + Value: core2p2p.AdaptFelt(node.Child), + }, + }, + } + case *trie.Binary: + nodes[i] = &spec.PatriciaNode{ + Node: &spec.PatriciaNode_Binary_{ + Binary: &spec.PatriciaNode_Binary{ + Left: core2p2p.AdaptFelt(node.LeftHash), + Right: core2p2p.AdaptFelt(node.RightHash), + }, + }, + } + } + } + + return &spec.PatriciaRangeProof{ + Nodes: nodes, + } +} diff --git a/p2p/snap_server_test.go b/p2p/snap_server_test.go new file mode 100644 index 0000000000..22adbf56bb --- /dev/null +++ b/p2p/snap_server_test.go @@ -0,0 +1,813 @@ +package p2p + +import ( + "context" + "fmt" + "maps" + "testing" + + "github.com/NethermindEth/juno/adapters/core2p2p" + "github.com/NethermindEth/juno/adapters/p2p2core" + "github.com/NethermindEth/juno/blockchain" + "github.com/NethermindEth/juno/core" + "github.com/NethermindEth/juno/core/crypto" + "github.com/NethermindEth/juno/core/felt" + "github.com/NethermindEth/juno/db" + "github.com/NethermindEth/juno/db/pebble" + "github.com/NethermindEth/juno/p2p/starknet/spec" + "github.com/NethermindEth/juno/utils" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestClassRange(t *testing.T) { + // Note: set to true to make test super long to complete + shouldFetchAllClasses := false + var d db.DB + t.Skip("DB snapshot is needed for this test") + d, _ = pebble.NewWithOptions("/Users/pnowosie/juno/snapshots/juno-sepolia", 128000000, 128, false) + defer func() { _ = d.Close() }() + bc := blockchain.New(d, &utils.Sepolia) // Needed because class loader need encoder to be registered + + b, err := bc.Head() + assert.NoError(t, err) + + fmt.Printf("headblock %d\n", b.Number) + + stateRoot := b.GlobalStateRoot + logger, _ := utils.NewZapLogger(utils.DEBUG, false) + server := &snapServer{ + log: logger, + blockchain: bc, + } + + startRange := (&felt.Felt{}).SetUint64(0) + finMsgReceived := false + chunksReceived := 0 + + chunksPerProof := 150 + if shouldFetchAllClasses { + // decrease iteration count and hence speed up a bit + chunksPerProof *= 4 + } + iter, err := server.GetClassRange( + &spec.ClassRangeRequest{ + Root: core2p2p.AdaptHash(stateRoot), + Start: core2p2p.AdaptHash(startRange), + ChunksPerProof: uint32(chunksPerProof), + }) + assert.NoError(t, err) + + for res := range iter { + assert.NotNil(t, res) + + resT, ok := res.(*spec.ClassRangeResponse) + assert.True(t, ok) + assert.NotNil(t, resT) + + switch v := resT.GetResponses().(type) { + case *spec.ClassRangeResponse_Classes: + assert.True(t, chunksPerProof >= len(v.Classes.Classes)) + classesRoot := p2p2core.AdaptHash(resT.ClassesRoot) + contractsRoot := p2p2core.AdaptHash(resT.ContractsRoot) + verifyErr := VerifyGlobalStateRoot(stateRoot, classesRoot, contractsRoot) + assert.NoError(t, verifyErr) + chunksReceived++ + case *spec.ClassRangeResponse_Fin: + finMsgReceived = true + } + + if !shouldFetchAllClasses { + break + } + } + + if !shouldFetchAllClasses { + assert.Equal(t, 1, chunksReceived) + assert.False(t, finMsgReceived) + } else { + fmt.Printf("ClassesReceived: \t%d\n", chunksReceived) + assert.True(t, finMsgReceived) + assert.True(t, chunksReceived > 1) + } +} + +func TestContractRange(t *testing.T) { + var d db.DB + t.Skip("DB snapshot is needed for this test") + d, _ = pebble.NewWithOptions("/Users/pnowosie/juno/snapshots/juno-sepolia", 128000000, 128, false) + defer func() { _ = d.Close() }() + bc := blockchain.New(d, &utils.Sepolia) // Needed because class loader need encoder to be registered + + b, err 
:= bc.Head() + assert.NoError(t, err) + + fmt.Printf("headblock %d\n", b.Number) + + stateRoot := b.GlobalStateRoot + + logger, _ := utils.NewZapLogger(utils.DEBUG, false) + server := &snapServer{ + log: logger, + blockchain: bc, + } + + startRange := (&felt.Felt{}).SetUint64(0) + chunksReceived := 0 + + chunksPerProof := 150 + ctrIter, err := server.GetContractRange( + &spec.ContractRangeRequest{ + StateRoot: core2p2p.AdaptHash(stateRoot), + Start: core2p2p.AdaptAddress(startRange), + ChunksPerProof: uint32(chunksPerProof), + }) + assert.NoError(t, err) + + for res := range ctrIter { + assert.NotNil(t, res) + + resT, ok := res.(*spec.ContractRangeResponse) + assert.True(t, ok) + assert.NotNil(t, resT) + + switch v := resT.GetResponses().(type) { + case *spec.ContractRangeResponse_Range: + assert.True(t, chunksPerProof == len(v.Range.State)) + classesRoot := p2p2core.AdaptHash(resT.ClassesRoot) + contractsRoot := p2p2core.AdaptHash(resT.ContractsRoot) + verifyErr := VerifyGlobalStateRoot(stateRoot, classesRoot, contractsRoot) + assert.NoError(t, verifyErr) + chunksReceived++ + default: + // we expect no any other message only just one range because we break the iteration + t.Fatal("received unexpected message", "type", v) + } + + // we don't need to fetch all contracts + break + } + + assert.Equal(t, 1, chunksReceived) +} + +func TestContractRangeByOneContract(t *testing.T) { + var d db.DB + t.Skip("DB snapshot is needed for this test") + d, _ = pebble.NewWithOptions("/Users/pnowosie/juno/snapshots/juno-sepolia", 128000000, 128, false) + defer func() { _ = d.Close() }() + bc := blockchain.New(d, &utils.Sepolia) // Needed because class loader need encoder to be registered + + b, err := bc.Head() + assert.NoError(t, err) + + fmt.Printf("headblock %d\n", b.Number) + + stateRoot := b.GlobalStateRoot + + logger, _ := utils.NewZapLogger(utils.DEBUG, false) + server := &snapServer{ + log: logger, + blockchain: bc, + } + + tests := []struct { + address *felt.Felt + expectedStorageRoot *felt.Felt + expectedClassHash *felt.Felt + expectedNonce uint64 + }{ + { + address: feltFromString("0x27b0a1ba755185b8d05126a1e00ca687e6680e51d634b5218760b716b8d06"), + expectedStorageRoot: feltFromString("0xa8d7943793ddd09e49b8650a71755ed04d0de087b28ad5967b519864f9844"), + expectedClassHash: feltFromString("0x772164c9d6179a89e7f1167f099219f47d752304b16ed01f081b6e0b45c93c3"), + expectedNonce: 0, + }, + { + address: feltFromString("0x292854fdd7653f65d8adc66739866567c212f2ef15ad8616e713eafc97e0a"), + expectedStorageRoot: feltFromString("0x718f57f8cd2950a0f240941876eafdffe86c84bd2601de4ea244956d96d85b6"), + expectedClassHash: feltFromString("0x29927c8af6bccf3f6fda035981e765a7bdbf18a2dc0d630494f8758aa908e2b"), + expectedNonce: 2, + }, + { + address: feltFromString("0xf3c569521d6ca43a0e2b86fd251031e2158aae502bf199f7eec986fe348f"), + expectedStorageRoot: feltFromString("0x41c2705457dfa3872cbc862ac86c85d118259154f08408c3cd350a15646d596"), + expectedClassHash: feltFromString("0x29927c8af6bccf3f6fda035981e765a7bdbf18a2dc0d630494f8758aa908e2b"), + expectedNonce: 1, + }, + { + address: feltFromString("0x5edd7ece0dc39633f9825e16e39e17a0bded7bf540a685876ceb75cdfd9eb"), + expectedStorageRoot: feltFromString("0x26f6e269f9462b6bf1649394aa4080d40ca4f4bc792bd0c1a72a8b524a93a9d"), + expectedClassHash: feltFromString("0x66559c86e66214ba1bc5d6512f6411aa066493e6086ff5d54f41a970d47fc5a"), + expectedNonce: 0, + }, + { + address: feltFromString("0x4c04ec7c3c5a82df2d194095f090af83a9f26e22544d968c3d67c1b320d43"), + expectedStorageRoot: 
feltFromString("0x0"), + expectedNonce: 0, + }, + { + address: feltFromString("0x4ccd60176f9e757031f04691beb09832a0ea583eeb5158b05277547957514"), + expectedStorageRoot: feltFromString("0x0"), + expectedNonce: 1, + }, + { + address: feltFromString("0xd94fd19a7730f84df43999562cbbf5cf8d48a6cb92f5bc5d6795f34c15f72"), + expectedStorageRoot: feltFromString("0x0"), + expectedNonce: 0, + }, + { + address: feltFromString("0xdd92645559c6dca08c6e947b4a40a55142a0a8b65552be8b31c885f37ef87"), + expectedStorageRoot: feltFromString("0x0"), + expectedNonce: 1, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("%.7s...", test.address), func(t *testing.T) { + chunksPerProof := 5 + ctrIter, err := server.GetContractRange( + &spec.ContractRangeRequest{ + StateRoot: core2p2p.AdaptHash(stateRoot), + Start: core2p2p.AdaptAddress(test.address), + End: core2p2p.AdaptAddress(test.address), + ChunksPerProof: uint32(chunksPerProof), + }) + assert.NoError(t, err) + + finReceived := false + chunksReceived := 0 + + for res := range ctrIter { + assert.NotNil(t, res) + + resT, ok := res.(*spec.ContractRangeResponse) + assert.True(t, ok) + assert.NotNil(t, resT) + + switch v := resT.GetResponses().(type) { + case *spec.ContractRangeResponse_Range: + crctStates := v.Range.State + assert.Len(t, crctStates, 1) + + crct := crctStates[0] + address := p2p2core.AdaptAddress(crct.Address) + assert.Equal(t, test.address, address) + + storageRoot := p2p2core.AdaptHash(crct.Storage) + assert.Equal(t, test.expectedStorageRoot, storageRoot) + + //nolint:nolintlint // classHash := p2p2core.AdaptHash(crct.Class) + //nolint:nolintlint // assert.Equal(t, test.expectedClassHash, classHash, "classHash", classHash) + + assert.Equal(t, test.expectedNonce, crct.Nonce) + + chunksReceived++ + case *spec.ContractRangeResponse_Fin: + assert.Equal(t, 1, chunksReceived) + finReceived = true + default: + // we expect no any other message only just one range because we break the iteration + t.Fatal("received unexpected message", "type", v) + } + } + + assert.True(t, finReceived) + }) + } +} + +func TestContractRange_FinMsg_Received(t *testing.T) { + // TODO: Fix the test so it demonstrated FinMsg is returned at the iteration end + t.Skip("Fix me") + d := pebble.NewMemTest(t) + bc := blockchain.New(d, &utils.Sepolia) + defer bc.Close() + server := &snapServer{blockchain: bc} + + zero := new(felt.Felt).SetUint64(0) + iter, err := server.GetContractRange( + &spec.ContractRangeRequest{ + StateRoot: core2p2p.AdaptHash(zero), + Start: core2p2p.AdaptAddress(zero), + ChunksPerProof: uint32(10), + }) + assert.NoError(t, err) + fmt.Printf("All Good!\n") + + finMsgReceived := false + for res := range iter { + assert.NotNil(t, res) + resT, ok := res.(*spec.ContractRangeResponse) + assert.True(t, ok) + assert.NotNil(t, resT) + assert.IsType(t, spec.ContractRangeResponse_Fin{}, resT) + finMsgReceived = true + } + assert.True(t, finMsgReceived) +} + +func TestContractStorageRange(t *testing.T) { + var d db.DB + t.Skip("DB snapshot is needed for this test") + d, _ = pebble.NewWithOptions("/Users/pnowosie/juno/snapshots/juno-sepolia", 128000000, 128, false) + defer func() { _ = d.Close() }() + bc := blockchain.New(d, &utils.Sepolia) // Needed because class loader need encoder to be registered + + b, err := bc.Head() + assert.NoError(t, err) + + fmt.Printf("headblock %d\n", b.Number) + + stateRoot := b.GlobalStateRoot + + logger, _ := utils.NewZapLogger(utils.DEBUG, false) + server := &snapServer{ + log: logger, + blockchain: bc, + } + + startRange := 
(&felt.Felt{}).SetUint64(0) + + tests := []struct { + address *felt.Felt + storageRoot *felt.Felt + expectedLeaves int + }{ + { + address: feltFromString("0x5eb8d1bc5aaf2f323f2a807d429686ac012ca16f90740071d2f3a160dc231"), + storageRoot: feltFromString("0x0"), + expectedLeaves: 0, + }, + { + address: feltFromString("0x614a5e0519963324acb5640321240827c0cd6a9f7cf5f17a80c1596e607d0"), + storageRoot: feltFromString("0x55ee7fd57d0aa3da8b89ea2feda16f9435186988a8b00b6f22f5ba39f3cf172"), + expectedLeaves: 1, + }, + { + address: feltFromString("0x3deecdb26a60e4c062d5bd98ab37f72ea2acc37f28dae6923359627ebde9"), + storageRoot: feltFromString("0x276edbc91a945d11645ba0b8298c7d657e554d06ab2bb765cbc44d61fa01fd5"), + expectedLeaves: 1, + }, + { + address: feltFromString("0x5de00d3720421ab00fdbc47d33d253605c1ac226ab1a0d267f7d57e23305"), + storageRoot: feltFromString("0x5eebb2c6722d321469cb662260c5171c9f6f67b9a625c9c9ab56b0a4631b0fe"), + expectedLeaves: 2, + }, + { + address: feltFromString("0x1ee60ed3c5abd9a08c61de5e8cbcf32b49646e681ee6e84da9d52f5c3099"), + storageRoot: feltFromString("0x60dccd54f4956147c6a499b71579820d181e22d5e9c430fd5953f861ca7727e"), + expectedLeaves: 4, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("%.7s...", test.address), func(t *testing.T) { + request := &spec.ContractStorageRequest{ + StateRoot: core2p2p.AdaptHash(stateRoot), + ChunksPerProof: 100, + Query: []*spec.StorageRangeQuery{ + { + Address: core2p2p.AdaptAddress(test.address), + Start: &spec.StorageLeafQuery{ + ContractStorageRoot: core2p2p.AdaptHash(test.storageRoot), + Key: core2p2p.AdaptFelt(startRange), + }, + End: nil, + }, + }, + } + + keys := make([]*felt.Felt, 0, test.expectedLeaves) + vals := make([]*felt.Felt, 0, test.expectedLeaves) + stoIter, err := server.GetStorageRange(request) + assert.NoError(t, err) + + finMsgReceived := false + for res := range stoIter { + assert.NotNil(t, res) + resT, ok := res.(*spec.ContractStorageResponse) + assert.True(t, ok) + assert.NotNil(t, resT) + + switch v := resT.GetResponses().(type) { + case *spec.ContractStorageResponse_Storage: + assert.False(t, finMsgReceived) + for _, r := range v.Storage.KeyValue { + keys = append(keys, p2p2core.AdaptFelt(r.Key)) + vals = append(vals, p2p2core.AdaptFelt(r.Value)) + } + case *spec.ContractStorageResponse_Fin: + // we expect just one fin message at the iteration end + finMsgReceived = true + } + } + assert.True(t, finMsgReceived) + + fmt.Println("Address:", test.address, "storage length:", len(keys)) + assert.Equal(t, test.expectedLeaves, len(keys)) + + hasMore, err := VerifyTrie(test.storageRoot, keys, vals, nil, core.ContractStorageTrieHeight, crypto.Pedersen) + assert.NoError(t, err) + assert.False(t, hasMore) + }) + } +} + +func TestGetClassesByHash(t *testing.T) { + var d db.DB + t.Skip("DB snapshot is needed for this test") + d, _ = pebble.NewWithOptions("/Users/pnowosie/juno/snapshots/juno-sepolia", 128000000, 128, false) + defer func() { _ = d.Close() }() + bc := blockchain.New(d, &utils.Sepolia) // Needed because class loader need encoder to be registered + + b, err := bc.Head() + assert.NoError(t, err) + + fmt.Printf("headblock %d\n", b.Number) + + logger, _ := utils.NewZapLogger(utils.DEBUG, false) + server := &snapServer{ + log: logger, + blockchain: bc, + } + + hashes := []*spec.Hash{ + // Block , type v0 + core2p2p.AdaptHash(feltFromString("0x7db5c2c2676c2a5bfc892ee4f596b49514e3056a0eee8ad125870b4fb1dd909")), + // Block , type v0 + 
core2p2p.AdaptHash(feltFromString("0x28d1671fb74ecb54d848d463cefccffaef6df3ae40db52130e19fe8299a7b43")), + // Block , type v0 + core2p2p.AdaptHash(feltFromString("0x772164c9d6179a89e7f1167f099219f47d752304b16ed01f081b6e0b45c93c3")), + // Block , type v0 + core2p2p.AdaptHash(feltFromString("0x78401746828463e2c3f92ebb261fc82f7d4d4c8d9a80a356c44580dab124cb0")), + } + + finMsgReceived := false + iter, err := server.GetClasses( + &spec.ClassHashesRequest{ + ClassHashes: hashes, + }) + assert.NoError(t, err) + + i := 0 + for res := range iter { + assert.NotNil(t, res) + + resT, ok := res.(*spec.ClassesResponse) + assert.True(t, ok) + assert.NotNil(t, resT) + + switch v := resT.GetClassMessage().(type) { + case *spec.ClassesResponse_Class: + assert.True(t, i < len(hashes)) + assert.Equal(t, v.Class.GetClassHash(), hashes[i]) + case *spec.ClassesResponse_Fin: + assert.Equal(t, len(hashes), i) + finMsgReceived = true + } + + i++ + } + assert.True(t, finMsgReceived) +} + +//nolint:gocyclo +func Test__Finding_Storage_Heavy_Contract(t *testing.T) { + var d db.DB + t.Skip("DB snapshot is needed for this test") + d, _ = pebble.NewWithOptions("/Users/pnowosie/juno/snapshots/juno-sepolia", 128000000, 128, false) + defer func() { _ = d.Close() }() + bc := blockchain.New(d, &utils.Sepolia) // Needed because class loader need encoder to be registered + + b, err := bc.Head() + assert.NoError(t, err) + + fmt.Printf("headblock %d\n", b.Number) + + stateRoot := b.GlobalStateRoot + + logger, _ := utils.NewZapLogger(utils.DEBUG, false) + server := &snapServer{ + log: logger, + blockchain: bc, + } + + ctso := make(map[felt.Felt]*felt.Felt) + request := &spec.ContractRangeRequest{ + ChunksPerProof: 100, + Start: core2p2p.AdaptAddress(felt.Zero.Clone()), + End: nil, // core2p2p.AdaptAddress(test.address), + StateRoot: core2p2p.AdaptHash(stateRoot), + } + + iter, err := server.GetContractRange(request) + assert.NoError(t, err) + + contracts := 0 + for res := range iter { + assert.NotNil(t, res) + resT, ok := res.(*spec.ContractRangeResponse) + assert.True(t, ok) + assert.NotNil(t, resT) + + switch v := resT.GetResponses().(type) { + case *spec.ContractRangeResponse_Range: + for _, contract := range v.Range.State { + addr := p2p2core.AdaptAddress(contract.Address) + strt := p2p2core.AdaptHash(contract.Storage) + //nolint:nolintlint // assert.Equal(t, test.address, addr) + //nolint:nolintlint // assert.Equal(t, test.storageRoot, strt) + if !(strt.IsZero() || addr.IsOne()) { + ctso[*addr] = strt + contracts++ + } + } + case *spec.ContractRangeResponse_Fin: + fmt.Println("Contract iteration ends", "contracts", contracts) + default: + // we expect no any other message only just one range because we break the iteration + t.Fatal("received unexpected message", "type", v) + } + + if contracts > 100 { + break + } + } + + keys := make([]*felt.Felt, 0, len(ctso)) + stoCnt := make(map[felt.Felt]int) + for k := range maps.Keys(ctso) { + keys = append(keys, k.Clone()) + } + + for len(keys) > 10 { + var queries []*spec.StorageRangeQuery + for i := range 10 { + addr := keys[i] + queries = append(queries, &spec.StorageRangeQuery{ + Address: core2p2p.AdaptAddress(addr), + Start: &spec.StorageLeafQuery{ + ContractStorageRoot: core2p2p.AdaptHash(ctso[*addr]), + Key: core2p2p.AdaptFelt(felt.Zero.Clone()), + }, + End: nil, + }) + } + keys = keys[10:] + + sreq := &spec.ContractStorageRequest{ + StateRoot: core2p2p.AdaptHash(stateRoot), + ChunksPerProof: uint32(500), + Query: queries, + } + + iter, err := server.GetStorageRange(sreq) + 
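+		// Note: GetStorageRange returns an iter.Seq[proto.Message] (Go 1.23 range-over-func), so the
+		// `for res := range iter` loop below drives the server-side generator; breaking out of the loop
+		// makes the server's yield return false, which is why a consumer that stops early never sees Fin.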
require.NoError(t, err) + + for res := range iter { + assert.NotNil(t, res) + resT, ok := res.(*spec.ContractStorageResponse) + assert.True(t, ok) + assert.NotNil(t, resT) + + addr := p2p2core.AdaptAddress(resT.ContractAddress) + + switch v := resT.GetResponses().(type) { + case *spec.ContractStorageResponse_Storage: + vl := stoCnt[*addr] + //nolint:nolintlint // if !ok { stoCnt[*addr] = 0 } + stoCnt[*addr] = vl + len(v.Storage.KeyValue) + case *spec.ContractStorageResponse_Fin: + // we expect just one fin message at the iteration end + fmt.Println("End of iter", "no addr") + } + } + } + + for addr, cnt := range stoCnt { + if cnt <= 3 { + fmt.Printf("[%5d]: address %s, storageRoot %s\n", cnt, &addr, ctso[addr]) + } + } +} + +func TestGetContractStorageRoot(t *testing.T) { + var d db.DB + t.Skip("DB snapshot is needed for this test") + d, _ = pebble.NewWithOptions("/Users/pnowosie/juno/snapshots/juno-sepolia", 128000000, 128, false) + defer func() { _ = d.Close() }() + bc := blockchain.New(d, &utils.Sepolia) // Needed because class loader need encoder to be registered + + b, err := bc.Head() + assert.NoError(t, err) + + fmt.Printf("headblock %d\n", b.Number) + + stateRoot := b.GlobalStateRoot + + logger, _ := utils.NewZapLogger(utils.DEBUG, false) + server := &snapServer{ + log: logger, + blockchain: bc, + } + + tests := []struct { + address *felt.Felt + storageRoot *felt.Felt + expectedLeaves int + }{ + { + address: feltFromString("0x2375219f8c73b77eef29d7cfd3749d64d2cca6ad7a776ed85bd33ff09201ea"), + storageRoot: feltFromString("0x24e11cc263e8d4c37519a95dc5cc4bc2627a991da6048b9a41f011e586cd3cc"), + expectedLeaves: 140, + }, + { + address: feltFromString("0xec1131fe035c235c03e0ad43646d8cbfd59d048b1825b0a36a167c468d5bf"), + storageRoot: feltFromString("0x21c409ca7f7d064d5e580e756cc945d3b266ab852e6d982697177e57d4c96a0"), + expectedLeaves: 193, + }, + { + address: feltFromString("0xc41025be6d90828b1af119d384cecf1a57da8190ce79a2ffd925f02b59df"), + storageRoot: feltFromString("0x3f6b341ce4fb8441a0b350932a89cb19e195479e43ade9eb9e2fcde31a64680"), + expectedLeaves: 187, + }, + { + address: feltFromString("0xb5a14ddd6d1a6b33a10411e45bbff54f92265ede856cd0e12fee4a638c389"), + storageRoot: feltFromString("0x331a990cf32f6eedf01ca9b577cb75b53d937333729f48091945e255fff4a3d"), + expectedLeaves: 137, + }, + { + address: feltFromString("0x130b5a3035eef0470cff2f9a450a7a6856a3c5a4ea3f5b7886c2d03a50d2bf"), + storageRoot: feltFromString("0x4cf3afb0828518a24c0f2a5cc6e87d188df58b5faf40c6b81d6d476cf7897f6"), + expectedLeaves: 338, + }, + { + address: feltFromString("0x267311365224e8d4eb4dd580f1b737f990dfc81112ca71ecce147e774bcecb"), + storageRoot: feltFromString("0x71b0e71d2b69bfbfa25fd54e0cd3673f27f07110c3be72cb81c20aa0c6df4b0"), + expectedLeaves: 701, + }, + { + address: feltFromString("0x28f61c91275111e8c5af6febfa5c9d2191442b4fe48d30a84e51a09e8f18b5"), + storageRoot: feltFromString("0x1de1bf792cd9221c8d6ba2953671d6a4f0890375eca3718989082225dccb7eb"), + expectedLeaves: 540, + }, + { + address: feltFromString("0x4c04ec7c3c5a82df2d194095f090af83a9f26e22544d968c3d67c1b320d43"), + storageRoot: feltFromString("0x0"), + expectedLeaves: 0, + }, + { + address: feltFromString("0x4ccd60176f9e757031f04691beb09832a0ea583eeb5158b05277547957514"), + storageRoot: feltFromString("0x0"), + expectedLeaves: 1, + }, + { + address: feltFromString("0xd94fd19a7730f84df43999562cbbf5cf8d48a6cb92f5bc5d6795f34c15f72"), + storageRoot: feltFromString("0x0"), + expectedLeaves: 0, + }, + { + address: 
feltFromString("0xdd92645559c6dca08c6e947b4a40a55142a0a8b65552be8b31c885f37ef87"), + storageRoot: feltFromString("0x0"), + expectedLeaves: 1, + }, + { + address: feltFromString("0x807dd1766d3c833ac82290e38a000b7d48acce3cf125cffde15ea9e583a95"), + storageRoot: feltFromString("0x0"), + expectedLeaves: 0, + }, + { + address: feltFromString("0x8502b172cb17395511c4bfabb7ded748cd930fec8201ad6b444f9d6a9df7a"), + storageRoot: feltFromString("0x0"), + expectedLeaves: 1, + }, + { + address: feltFromString("0x5788cee963fe68a76b1ef9b04f1ba404043d853ad593c366c723322381b14"), + storageRoot: feltFromString("0x0"), + expectedLeaves: 0, + }, + { + address: feltFromString("0x5e8dab06aaf28538be2077fddd679cac934c5e082303f892235fb989e800c"), + storageRoot: feltFromString("0x0"), + expectedLeaves: 1, + }, + } + + ctso := make(map[felt.Felt]*felt.Felt) + stoCnt := make(map[felt.Felt]int) + + t.Run("Storage validation ", func(t *testing.T) { + queries := []*spec.StorageRangeQuery{} + for _, dt := range tests { + queries = append(queries, &spec.StorageRangeQuery{ + Address: core2p2p.AdaptAddress(dt.address), + Start: &spec.StorageLeafQuery{ + ContractStorageRoot: core2p2p.AdaptHash(dt.storageRoot), + Key: core2p2p.AdaptFelt(felt.Zero.Clone()), + }, + End: nil, + }) + ctso[*dt.address] = dt.storageRoot + } + + // Contract Storage request contains queries for each tests' contract + // also note we specify `chainsPerProof` to be 200, which will be not enough + // to get all contract's keys in one iteration + sreq := &spec.ContractStorageRequest{ + StateRoot: core2p2p.AdaptHash(stateRoot), + ChunksPerProof: uint32(200), + Query: queries, + } + + iter, err := server.GetStorageRange(sreq) + require.NoError(t, err) + + // There will be one iteration for contracts where `expectedLeaves` < `chunksPerProof` + // and several iterations otherwise + // only at the end (all queries are responded) we will get `Fin` message + for res := range iter { + assert.NotNil(t, res) + resT, ok := res.(*spec.ContractStorageResponse) + assert.True(t, ok) + assert.NotNil(t, resT) + + switch v := resT.GetResponses().(type) { + case *spec.ContractStorageResponse_Storage: + addr := p2p2core.AdaptAddress(resT.ContractAddress) + vl := stoCnt[*addr] + stoCnt[*addr] = vl + len(v.Storage.KeyValue) + fmt.Printf("Response for %s: %d\n", addr, stoCnt[*addr]) + + case *spec.ContractStorageResponse_Fin: + // we expect just one fin message at the iteration end + fmt.Println("End of iter", "no addr") + } + } + }) + + for _, test := range tests { + t.Run(fmt.Sprintf("%.7s...", test.address), func(t *testing.T) { + // validate storage leaves count + assert.Equal(t, test.expectedLeaves, stoCnt[*test.address]) + }) + } +} + +func TestReadAndVerifySnapshot(t *testing.T) { + var d db.DB + t.Skip("DB snapshot is needed for this test") + d, _ = pebble.NewWithOptions("/Users/pnowosie/juno/snapshots/node1", 128000000, 128, false) + defer func() { _ = d.Close() }() + bc := blockchain.New(d, &utils.Sepolia) + + logger, _ := utils.NewZapLogger(utils.DEBUG, false) + syncer := SnapSyncer{ + log: logger, + blockchain: bc, + currentGlobalStateRoot: feltFromString("0x472e84b65d387c9364b5117f4afaba3fb88897db1f28867b398506e2af89f25"), + } + + err := syncer.PhraseVerify(context.Background()) + assert.NoError(t, err) +} + +func TestPercentageCalculation(t *testing.T) { + tests := []struct { + actual *felt.Felt + percent uint64 + }{ + { + actual: feltFromString("0x0"), + percent: 0, + }, + { + // actual felt.MaxValue:2^251 + 17 * 2^192 + actual: 
feltFromString("0x800000000000011000000000000000000000000000000000000000000000000"), + percent: 100, + }, + { + actual: feltFromString("0x400000000000008800000000000000000000000000000000000000000000000"), + percent: 50, + }, + { + actual: feltFromString("0x0ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), + percent: 12, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("%d%%", test.percent), func(t *testing.T) { + percent := CalculatePercentage(test.actual) + assert.Equal(t, test.percent, percent) + }) + } +} + +func feltFromString(str string) *felt.Felt { + f, err := (&felt.Felt{}).SetString(str) + if err != nil { + panic(err) + } + return f +} diff --git a/p2p/snap_syncer.go b/p2p/snap_syncer.go new file mode 100644 index 0000000000..f425776532 --- /dev/null +++ b/p2p/snap_syncer.go @@ -0,0 +1,1299 @@ +package p2p + +import ( + "context" + "errors" + "fmt" + big "math/big" + "sync" + "sync/atomic" + "time" + + "github.com/NethermindEth/juno/adapters/core2p2p" + "github.com/NethermindEth/juno/adapters/p2p2core" + "github.com/NethermindEth/juno/blockchain" + "github.com/NethermindEth/juno/core" + "github.com/NethermindEth/juno/core/crypto" + "github.com/NethermindEth/juno/core/felt" + "github.com/NethermindEth/juno/core/trie" + "github.com/NethermindEth/juno/p2p/starknet" + "github.com/NethermindEth/juno/p2p/starknet/spec" + "github.com/NethermindEth/juno/service" + "github.com/NethermindEth/juno/starknetdata" + "github.com/NethermindEth/juno/utils" + "github.com/consensys/gnark-crypto/ecc/stark-curve/fp" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "golang.org/x/sync/errgroup" +) + +const JobDuration = time.Second * 10 + +type Blockchain interface { + GetClasses(felts []*felt.Felt) ([]core.Class, error) + PutClasses(blockNumber uint64, v1CompiledHashes map[felt.Felt]*felt.Felt, newClasses map[felt.Felt]core.Class) error + PutContracts(address, nonces, classHash []*felt.Felt) error + PutStorage(storage map[felt.Felt]map[felt.Felt]*felt.Felt) error +} + +var _ Blockchain = (*blockchain.Blockchain)(nil) + +type SnapSyncer struct { + starknetData starknetdata.StarknetData + client *starknet.Client + blockchain Blockchain + log utils.SimpleLogger + + startingBlock *core.Header + lastBlock *core.Header + currentGlobalStateRoot *felt.Felt + + contractRangeDone chan interface{} + storageRangeDone chan interface{} + + storageRangeJobCount int32 + storageRangeJobQueue chan *storageRangeJob + storageRefreshJob chan *storageRangeJob + + classFetchJobCount int32 + classesJob chan *felt.Felt + + // Three lock priority lock + mtxM *sync.Mutex + mtxN *sync.Mutex + mtxL *sync.Mutex +} + +var _ service.Service = (*SnapSyncer)(nil) + +type storageRangeJob struct { + path *felt.Felt + storageRoot *felt.Felt + startKey *felt.Felt + classHash *felt.Felt + nonce uint64 +} + +func NewSnapSyncer( + client *starknet.Client, + bc *blockchain.Blockchain, + log utils.SimpleLogger, +) *SnapSyncer { + return &SnapSyncer{ + client: client, + blockchain: bc, + log: log, + } +} + +var ( + // magic values linter does not like + start = 1.0 + factor = 1.5 + count = 30 + + rangeProgress = promauto.NewGauge(prometheus.GaugeOpts{ + Name: "juno_range_progress", + Help: "Time in address get", + }) + + pivotUpdates = promauto.NewCounter(prometheus.CounterOpts{ + Name: "juno_pivot_update", + Help: "Time in address get", + }) + + storageAddressCount = promauto.NewHistogram(prometheus.HistogramOpts{ + Name: "juno_storage_address_count", 
+		Help:    "Number of storage addresses processed per job",
+		Buckets: prometheus.ExponentialBuckets(start, factor, count),
+	})
+)
+
+var (
+	storageJobWorkerCount = 4
+	storageBatchSize      = 10
+	storageJobQueueSize   = storageJobWorkerCount * storageBatchSize // Too high, and the progress from the address range would be inaccurate.
+
+	// For some reason, the trie throughput is higher if the batch size is small.
+	classRangeChunksPerProof   = 50
+	contractRangeChunkPerProof = 150
+	storageRangeChunkPerProof  = 300
+	maxStorageBatchSize        = 1000
+	maxMaxPerStorageSize       = 1000
+
+	fetchClassWorkerCount = 3 // Fairly parallelizable. But this is brute force...
+	classesJobQueueSize   = 64
+	classBatchSize        = 30
+
+	maxPivotDistance     = 32        // Set to 1 to test updated storage.
+	newPivotHeadDistance = uint64(0) // This should be the reorg depth
+)
+
+func (s *SnapSyncer) Run(ctx context.Context) error {
+	s.log.Infow("starting snap sync")
+	// 1. Get the current head
+	// 2. Start the snap sync with the pivot set to that head
+	// 3. If at any moment:
+	//    a. the current head is too new (more than 64 blocks, say), or
+	//    b. too many nodes are missing,
+	//    then reset the pivot.
+	// 4. Once finished, replay state updates from the starting pivot to the latest pivot.
+	// 5. Then do some cleanup: mark things as complete, and such.
+	// 6. Probably download old state updates/bodies too
+	// 7. Send back control to base sync.
+
+	s.starknetData = &MockStarkData{}
+
+	err := s.runPhase1(ctx)
+	if err != nil {
+		return err
+	}
+	s.log.Infow("phase 1 completed")
+
+	if err = s.PhraseVerify(ctx); err != nil {
+		return err
+	}
+	s.log.Infow("trie roots verification completed")
+
+	s.log.Infow("delegating to standard synchronizer")
+
+	return nil
+	// TODO: start p2p syncer
+	// s.baseSync.start(ctx)
+}
+
+//nolint:gocyclo,nolintlint
+func (s *SnapSyncer) runPhase1(ctx context.Context) error {
+	starttime := time.Now()
+
+	err := s.initState(ctx)
+	if err != nil {
+		return errors.Join(err, errors.New("error initialising snap syncer state"))
+	}
+
+	eg, ectx := errgroup.WithContext(ctx)
+
+	eg.Go(func() error {
+		defer func() {
+			s.log.Infow("pool latest block done")
+			if err := recover(); err != nil {
+				s.log.Errorw("latest block pool panicked", "err", err)
+			}
+		}()
+
+		return s.poolLatestBlock(ectx)
+	})
+
+	eg.Go(func() error {
+		defer func() {
+			if err := recover(); err != nil {
+				s.log.Errorw("class range panicked", "err", err)
+			}
+		}()
+
+		err := s.runClassRangeWorker(ectx)
+		if err != nil {
+			s.log.Errorw("error in class range worker", "err", err)
+		}
+		return err
+	})
+
+	eg.Go(func() error {
+		defer func() {
+			if err := recover(); err != nil {
+				s.log.Errorw("address range panicked", "err", err)
+			}
+		}()
+
+		err := s.runContractRangeWorker(ectx)
+		if err != nil {
+			s.log.Errorw("error in address range worker", "err", err)
+		}
+
+		s.log.Infow("contract range done")
+		close(s.contractRangeDone)
+		close(s.classesJob)
+
+		return err
+	})
+
+	storageEg, sctx := errgroup.WithContext(ectx)
+	for i := 0; i < storageJobWorkerCount; i++ {
+		storageEg.Go(func() error {
+			defer func() {
+				if err := recover(); err != nil {
+					s.log.Errorw("storage worker panicked", "err", err)
+				}
+			}()
+
+			err := s.runStorageRangeWorker(sctx, i)
+			if err != nil {
+				s.log.Errorw("error in storage range worker", "err", err)
+			}
+			s.log.Infow("Storage worker completed", "workerId", i)
+
+			return err
+		})
+	}
+
+	// For notifying that the storage range is done
+	eg.Go(func() error {
+		err := storageEg.Wait()
+		if err != nil {
+			return err
+		}
+
+		s.log.Infow("Storage range completed")
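+		// storageRangeDone is closed exactly once, and only after every storage worker exited cleanly;
+		// a failing worker instead propagates its error to the outer errgroup, which cancels ectx.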
close(s.storageRangeDone)
+ return nil
+ })
+
+ eg.Go(func() error {
+ defer func() {
+ if err := recover(); err != nil {
+ s.log.Errorw("storage refresh panicked", "err", err)
+ }
+ }()
+
+ err := s.runStorageRefreshWorker(ectx)
+ if err != nil {
+ s.log.Errorw("error in storage refresh worker", "err", err)
+ }
+
+ return err
+ })
+
+ for i := 0; i < fetchClassWorkerCount; i++ {
+ eg.Go(func() error {
+ err := s.runFetchClassWorker(ectx, i)
+ if err != nil {
+ s.log.Errorw("fetch class failed", "err", err)
+ }
+ s.log.Infow("fetch class completed", "workerId", i)
+ return err
+ })
+ }
+
+ err = eg.Wait()
+ if err != nil {
+ return err
+ }
+
+ s.log.Infow("first phase completed", "duration", time.Since(starttime))
+
+ return nil
+}
+
+func (s *SnapSyncer) PhraseVerify(ctx context.Context) error {
+ // 1. Get the actual class & contract trie roots
+ st, closer, err := s.blockchain.(*blockchain.Blockchain).HeadStateFreakingState()
+ if err != nil {
+ s.log.Errorw("error getting state for state root", "err", err)
+ return err
+ }
+ defer func() { _ = closer() }()
+ contractRoot, classRoot, err := st.StateAndClassRoot()
+ if err != nil {
+ s.log.Errorw("error getting contract and class root", "err", err)
+ return err
+ }
+
+ // 2. Verify the global state root
+ err = VerifyGlobalStateRoot(s.currentGlobalStateRoot, classRoot, contractRoot)
+ if err == nil {
+ s.log.Infow("PhraseVerify",
+ "global state root", s.currentGlobalStateRoot, "contract root", contractRoot, "class root", classRoot)
+ // all good, no need for additional verification
+ return nil
+ } else {
+ s.log.Errorw("global state root verification failure", "err", err)
+ }
+
+ // 3. Get the correct trie roots from the client
+ iter, err := s.client.RequestContractRange(ctx, &spec.ContractRangeRequest{
+ StateRoot: core2p2p.AdaptHash(s.currentGlobalStateRoot),
+ Start: core2p2p.AdaptAddress(&felt.Zero),
+ ChunksPerProof: 1,
+ })
+ if err != nil {
+ s.log.Errorw("error getting contract range from client", "err", err)
+ return err
+ }
+
+ var classR, contractR *felt.Felt
+ iter(func(response *spec.ContractRangeResponse) bool {
+ if _, ok := response.GetResponses().(*spec.ContractRangeResponse_Range); ok {
+ classR = p2p2core.AdaptHash(response.ClassesRoot)
+ contractR = p2p2core.AdaptHash(response.ContractsRoot)
+ } else {
+ s.log.Errorw("unexpected response", "response", response)
+ }
+
+ return false
+ })
+ if classR == nil || contractR == nil {
+ s.log.Errorw("cannot obtain the trie roots from client response")
+ return errors.New("cannot obtain the trie roots")
+ }
+
+ // 4. 
Log which one is incorrect + s.log.Infow("Contract trie root", "expected", contractR, "actual", contractRoot) + s.log.Infow("Class trie root", "expected", classR, "actual", classRoot) + + return errors.New("trie roots verification failed") +} + +func (s *SnapSyncer) getNextStartingBlock(ctx context.Context) (*core.Block, error) { + for { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + + head, err := s.starknetData.BlockLatest(ctx) + if err != nil { + s.log.Warnw("error getting current head", "error", err) + continue + } + startingBlock, err := s.starknetData.BlockByNumber(ctx, head.Number-newPivotHeadDistance) + if err != nil { + s.log.Warnw("error getting starting block", "error", err) + continue + } + + return startingBlock, nil + } +} + +func (s *SnapSyncer) initState(ctx context.Context) error { + startingBlock, err := s.getNextStartingBlock(ctx) + if err != nil { + return errors.Join(err, errors.New("error getting current head")) + } + + s.startingBlock = startingBlock.Header + s.lastBlock = startingBlock.Header + + fmt.Printf("Start state root is %s\n", s.startingBlock.GlobalStateRoot) + s.currentGlobalStateRoot = s.startingBlock.GlobalStateRoot.Clone() + s.storageRangeJobCount = 0 + s.storageRangeJobQueue = make(chan *storageRangeJob, storageJobQueueSize) + s.classesJob = make(chan *felt.Felt, classesJobQueueSize) + + s.contractRangeDone = make(chan interface{}) + s.storageRangeDone = make(chan interface{}) + + s.mtxM = &sync.Mutex{} + s.mtxN = &sync.Mutex{} + s.mtxL = &sync.Mutex{} + + s.log.Infow("init state completed", "startingBlock", s.startingBlock.Number) + return nil +} + +func CalculatePercentage(f *felt.Felt) uint64 { + const maxPercent = 100 + maxint := big.NewInt(1) + maxint.Lsh(maxint, core.GlobalTrieHeight) + + percent := f.BigInt(big.NewInt(0)) + percent.Mul(percent, big.NewInt(maxPercent)) + percent.Div(percent, maxint) + + return percent.Uint64() +} + +//nolint:gocyclo,funlen +func (s *SnapSyncer) runClassRangeWorker(ctx context.Context) error { + totalAdded := 0 + completed := false + startAddr := &felt.Zero + + s.log.Infow("class range worker entering infinite loop") + for !completed { + stateRoot := s.currentGlobalStateRoot + + // TODO: Maybe timeout + classIter, err := s.client.RequestClassRange(ctx, &spec.ClassRangeRequest{ + Root: core2p2p.AdaptHash(stateRoot), + Start: core2p2p.AdaptHash(startAddr), + ChunksPerProof: uint32(classRangeChunksPerProof), + }) + if err != nil { + s.log.Errorw("error getting class range from client", "err", err) + // retry policy? 
with increased intervals
+ // reason for stopping - it's irrecoverable - we don't have a peer
+ return err
+ }
+
+ ResponseIter:
+ for response := range classIter {
+ if response == nil {
+ s.log.Errorw("class range responded with nil response")
+ continue
+ }
+
+ var classes []*spec.Class
+ switch v := response.GetResponses().(type) {
+ case *spec.ClassRangeResponse_Classes:
+ classes = v.Classes.Classes
+ case *spec.ClassRangeResponse_Fin:
+ break ResponseIter
+ default:
+ s.log.Warnw("Unexpected class range message", "GetResponses", v)
+ continue
+ }
+
+ if len(classes) == 0 {
+ s.log.Errorw("class range responded with empty classes")
+ continue
+ }
+ if response.RangeProof == nil {
+ s.log.Errorw("class range responded with nil proof")
+ continue
+ }
+
+ classRoot := p2p2core.AdaptHash(response.ClassesRoot)
+ contractRoot := p2p2core.AdaptHash(response.ContractsRoot)
+ err = VerifyGlobalStateRoot(stateRoot, classRoot, contractRoot)
+ if err != nil {
+ // Root verification failed
+ // TODO: Ban peer
+ s.log.Errorw("global state root verification failure", "err", err)
+ return err
+ }
+
+ s.log.Infow("class range progress", "progress", CalculatePercentage(startAddr))
+ s.log.Infow("class range info", "classes", len(classes), "totalAdded", totalAdded, "startAddr", startAddr)
+
+ paths := make([]*felt.Felt, len(classes))
+ values := make([]*felt.Felt, len(classes))
+ coreClasses := make([]core.Class, len(classes))
+
+ egrp := errgroup.Group{}
+
+ for i, cls := range classes {
+ coreClass := p2p2core.AdaptClass(cls)
+ egrp.Go(func() error {
+ coreClasses[i] = coreClass
+ paths[i] = CalculateClassHash(coreClass)
+ values[i] = CalculateCompiledClassHash(coreClass)
+
+ // For verification, should be
+ // leafValue := crypto.Poseidon(leafVersion, compiledClassHash)
+ return nil
+ })
+ }
+
+ err = egrp.Wait()
+ if err != nil {
+ s.log.Errorw("class range adaptation failure", "err", err)
+ return err
+ }
+
+ proofs := P2pProofToTrieProofs(response.RangeProof)
+ hasNext, err := VerifyTrie(classRoot, paths, values, proofs, core.GlobalTrieHeight, crypto.Poseidon)
+ if err != nil {
+ // TODO: Ban peer
+ s.log.Errorw("trie verification failed", "err", err)
+ return err
+ }
+
+ // Ingest
+ coreClassesMap := map[felt.Felt]core.Class{}
+ coreClassesHashMap := map[felt.Felt]*felt.Felt{}
+ for i, coreClass := range coreClasses {
+ coreClassesMap[*paths[i]] = coreClass
+ coreClassesHashMap[*paths[i]] = values[i]
+ }
+
+ err = s.blockchain.PutClasses(s.lastBlock.Number, coreClassesHashMap, coreClassesMap)
+ if err != nil {
+ fmt.Printf("Unable to update the chain %s\n", err)
+ panic(err)
+ }
+ totalAdded += len(classes)
+
+ if !hasNext {
+ s.log.Infow("class range completed", "totalClass", totalAdded)
+ return nil
+ }
+
+ // Increment addr, start loop again
+ startAddr = paths[len(paths)-1]
+ }
+ }
+
+ s.log.Infow("class range worker exits infinite loop")
+ return nil
+}
+
+//nolint:gocyclo,funlen
+func (s *SnapSyncer) runFetchClassWorker(ctx context.Context, workerIdx int) error {
+ keyBatches := make([]*felt.Felt, 0)
+ s.log.Infow("class fetch worker entering infinite loop", "worker", workerIdx)
+ for {
+ requestloop:
+ for len(keyBatches) < classBatchSize {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-time.After(JobDuration):
+ // Just request whatever we have
+ if len(keyBatches) > 0 {
+ break requestloop
+ }
+ s.log.Infow("waiting for more class job", "worker", workerIdx, "pending", s.classFetchJobCount)
+ case key := <-s.classesJob:
+ if key == nil {
+ // channel finished.
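+ // A nil key means classesJob was closed by the contract range worker;
+ // flush any batched keys first, otherwise this worker can exit.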
+ if len(keyBatches) > 0 {
+ break requestloop
+ } else {
+ // Worker finished
+ return nil
+ }
+ } else {
+ if key.Equal(&felt.Zero) {
+ continue
+ }
+
+ // TODO: Can be done in batches. Note: returns nil if a class is not found, no error
+ cls, err := s.blockchain.GetClasses([]*felt.Felt{key})
+ if err != nil {
+ s.log.Errorw("error getting class", "err", err)
+ return err
+ }
+
+ if cls[0] == nil {
+ keyBatches = append(keyBatches, key)
+ }
+ }
+ atomic.AddInt32(&s.classFetchJobCount, -1)
+ }
+ }
+
+ var hashes []*spec.Hash
+ for _, key := range keyBatches {
+ hashes = append(hashes, core2p2p.AdaptHash(key))
+ }
+
+ classIter, err := s.client.RequestClassesByKeys(ctx, &spec.ClassHashesRequest{
+ ClassHashes: hashes,
+ })
+ if err != nil {
+ s.log.Errorw("error fetching classes by hash from client", "err", err)
+ // retry?
+ return err
+ }
+
+ classes := make([]*spec.Class, 0, len(keyBatches))
+ ResponseIter:
+ for response := range classIter {
+ if response == nil {
+ s.log.Errorw("class by keys responded with nil response")
+ continue
+ }
+
+ switch v := response.ClassMessage.(type) {
+ case *spec.ClassesResponse_Class:
+ classes = append(classes, v.Class)
+ case *spec.ClassesResponse_Fin:
+ break ResponseIter
+ default:
+ s.log.Warnw("Unexpected ClassMessage from getClasses", "v", v)
+ }
+ }
+
+ processedClasses := map[felt.Felt]bool{}
+ newClasses := map[felt.Felt]core.Class{}
+ classHashes := map[felt.Felt]*felt.Felt{}
+ for i, class := range classes {
+ if class == nil {
+ s.log.Infow("class empty", "hash", keyBatches[i])
+ continue
+ }
+
+ coreClass := p2p2core.AdaptClass(class)
+ newClasses[*keyBatches[i]] = coreClass
+ h, err := coreClass.Hash()
+ if err != nil {
+ s.log.Errorw("error hashing class", "err", err)
+ return err
+ }
+
+ if !h.Equal(keyBatches[i]) {
+ s.log.Warnw("invalid class hash", "got", h, "expected", keyBatches[i])
+ return errors.New("invalid class hash")
+ }
+
+ if coreClass.Version() == 1 {
+ classHashes[*keyBatches[i]] = coreClass.(*core.Cairo1Class).Compiled.Hash()
+ }
+
+ processedClasses[*keyBatches[i]] = true
+ }
+
+ if len(newClasses) != 0 {
+ err = s.blockchain.PutClasses(s.lastBlock.Number, classHashes, newClasses)
+ if err != nil {
+ s.log.Errorw("error storing class", "err", err)
+ return err
+ }
+ } else {
+ s.log.Errorw("Unable to fetch any class from peer")
+ // TODO: Penalise peer?
+ }
+
+ newBatch := make([]*felt.Felt, 0)
+ for _, classHash := range keyBatches {
+ if _, ok := processedClasses[*classHash]; !ok {
+ newBatch = append(newBatch, classHash)
+ }
+ }
+
+ keyBatches = newBatch
+ }
+}
+
+//nolint:gocyclo,funlen
+func (s *SnapSyncer) runContractRangeWorker(ctx context.Context) error {
+ totalAdded := 0
+ startAddr := &felt.Zero
+ completed := false
+
+ s.log.Infow("contract range worker entering infinite loop")
+ for !completed {
+ stateRoot := s.currentGlobalStateRoot
+ iter, err := s.client.RequestContractRange(ctx, &spec.ContractRangeRequest{
+ Domain: 0, // What does this do?
+ StateRoot: core2p2p.AdaptHash(stateRoot),
+ Start: core2p2p.AdaptAddress(startAddr),
+ End: nil, // No need for now.
+ ChunksPerProof: uint32(contractRangeChunkPerProof),
+ })
+ if err != nil {
+ s.log.Errorw("error getting contract range from client", "err", err)
+ // retry?
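+ // No retry policy yet: any request failure aborts this worker, which in
+ // turn cancels the whole errgroup.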
+ return err
+ }
+
+ ResponseIter:
+ for response := range iter {
+ if response == nil {
+ s.log.Errorw("contract range responded with nil response")
+ continue
+ }
+
+ var crange *spec.ContractRange
+ switch v := response.GetResponses().(type) {
+ case *spec.ContractRangeResponse_Range:
+ crange = v.Range
+ case *spec.ContractRangeResponse_Fin:
+ break ResponseIter
+ default:
+ s.log.Warnw("Unexpected contract range message", "GetResponses", v)
+ continue
+ }
+
+ if crange == nil || crange.State == nil {
+ s.log.Errorw("contract range responded with nil state")
+ continue
+ }
+ if response.RangeProof == nil {
+ s.log.Errorw("contract range responded with nil proof")
+ continue
+ }
+
+ s.log.Infow("contract range progress", "progress", CalculatePercentage(startAddr))
+ s.log.Infow("contract range info", "states", len(crange.State), "totalAdded", totalAdded, "startAddr", startAddr)
+ rangeProgress.Set(float64(CalculatePercentage(startAddr)))
+
+ classRoot := p2p2core.AdaptHash(response.ClassesRoot)
+ contractRoot := p2p2core.AdaptHash(response.ContractsRoot)
+ err := VerifyGlobalStateRoot(stateRoot, classRoot, contractRoot)
+ if err != nil {
+ // TODO: Ban peer
+ s.log.Errorw("global state root verification failure", "err", err)
+ return err
+ }
+
+ paths := make([]*felt.Felt, len(crange.State))
+ values := make([]*felt.Felt, len(crange.State))
+
+ for i, state := range crange.State {
+ paths[i] = p2p2core.AdaptAddress(state.Address)
+ values[i] = CalculateContractStateHash(state)
+ }
+
+ proofs := P2pProofToTrieProofs(response.RangeProof)
+ hasNext, err := VerifyTrie(contractRoot, paths, values, proofs, core.GlobalTrieHeight, crypto.Pedersen)
+ if err != nil {
+ // The peer should get penalised in this case
+ s.log.Errorw("trie verification failed", "err", err)
+ return err
+ }
+
+ classes := []*felt.Felt{}
+ nonces := []*felt.Felt{}
+ for _, r := range crange.State {
+ classHash := p2p2core.AdaptHash(r.Class)
+ classes = append(classes, classHash)
+ nonces = append(nonces, (&felt.Felt{}).SetUint64(r.Nonce))
+ }
+
+ err = s.blockchain.PutContracts(paths, nonces, classes)
+ if err != nil {
+ fmt.Printf("Unable to update the chain %s\n", err)
+ panic(err)
+ }
+ totalAdded += len(paths)
+
+ // We don't actually store the storage directly here... only queue it as part of a job.
+ // Can't remember why; possibly because it would be some wasted work.
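+ // Each verified contract leaf fans out into two follow-up jobs below: one to
+ // fetch its class definition and one to walk its storage trie.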
+ for _, r := range crange.State {
+ path := p2p2core.AdaptAddress(r.Address)
+ storageRoot := p2p2core.AdaptHash(r.Storage)
+ classHash := p2p2core.AdaptHash(r.Class)
+ nonce := r.Nonce
+
+ err = s.queueClassJob(ctx, classHash)
+ if err != nil {
+ s.log.Errorw("error queue class fetch job", "err", err)
+ return err
+ }
+
+ err = s.queueStorageRangeJob(ctx, path, storageRoot, classHash, nonce)
+ if err != nil {
+ s.log.Errorw("error queue storage refresh job", "err", err)
+ return err
+ }
+ }
+
+ if !hasNext {
+ s.log.Infow("[hasNext] contract range completed")
+ return nil
+ }
+
+ if len(paths) == 0 {
+ return nil
+ }
+
+ startAddr = paths[len(paths)-1]
+ }
+ }
+ s.log.Infow("contract range worker exits infinite loop")
+
+ return nil
+}
+
+//nolint:funlen,gocyclo
+func (s *SnapSyncer) runStorageRangeWorker(ctx context.Context, workerIdx int) error {
+ nextjobs := make([]*storageRangeJob, 0)
+ s.log.Infow("storage range worker entering infinite loop", "worker", workerIdx)
+ for {
+ jobs := nextjobs
+
+ requestloop:
+ for len(jobs) < storageBatchSize {
+ contractDoneChecker := s.contractRangeDone
+ if s.storageRangeJobCount > 0 {
+ contractDoneChecker = nil // So that it never completes while there are jobs to be done
+ }
+
+ select {
+ case job := <-s.storageRangeJobQueue:
+ jobs = append(jobs, job)
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-time.After(JobDuration):
+ if len(jobs) > 0 {
+ break requestloop
+ }
+ s.log.Infow("waiting for more storage job", "len(jobs)", len(jobs), "worker", workerIdx, "pending", s.storageRangeJobCount)
+ case <-contractDoneChecker:
+ // It's done...
+ return nil
+ }
+ }
+
+ requests := make([]*spec.StorageRangeQuery, 0)
+ for _, job := range jobs {
+ requests = append(requests, &spec.StorageRangeQuery{
+ Address: core2p2p.AdaptAddress(job.path),
+ Start: &spec.StorageLeafQuery{
+ ContractStorageRoot: core2p2p.AdaptHash(job.storageRoot),
+
+ // TODO: Should be address
+ Key: core2p2p.AdaptFelt(job.startKey),
+ },
+ })
+ }
+
+ stateRoot := s.currentGlobalStateRoot
+ processedJobs := struct {
+ jobIdx int
+ jobAddr *felt.Felt
+ address *felt.Felt
+ }{}
+ storage := map[felt.Felt]map[felt.Felt]*felt.Felt{}
+ totalPath := 0
+ maxPerStorageSize := 0
+
+ //nolint:nolintlint // s.log.Infow("storage range",
+ // "rootDistance", s.lastBlock.Number-s.startingBlock.Number,
+ // "root", stateRoot.String(),
+ // "requestcount", len(requests),
+ // "worker", workerIdx,
+ //)
+ iter, err := s.client.RequestStorageRange(ctx, &spec.ContractStorageRequest{
+ StateRoot: core2p2p.AdaptHash(stateRoot),
+ ChunksPerProof: uint32(storageRangeChunkPerProof),
+ Query: requests,
+ })
+ if err != nil {
+ s.log.Errorw("Error with storage range request", "err", err)
+ // Well... need to figure out how to determine if it's a temporary error or not.
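+ // (The request pins a specific state root, so errors here can simply mean
+ // the pivot has moved on.)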
+ // For sure, the state root can be outdated, so this needs to restart
+ return err
+ }
+
+ ResponseIter:
+ for response := range iter {
+ if response == nil {
+ s.log.Errorw("storage range responded with nil response",
+ "worker", workerIdx, "job", processedJobs.jobIdx)
+ continue
+ }
+
+ var csto *spec.ContractStorage
+ switch v := response.GetResponses().(type) {
+ case *spec.ContractStorageResponse_Storage:
+ csto = v.Storage
+ case *spec.ContractStorageResponse_Fin:
+ break ResponseIter
+ default:
+ s.log.Warnw("Unexpected storage range message", "GetResponses", v)
+ continue
+ }
+
+ if csto == nil || csto.KeyValue == nil {
+ s.log.Errorw("storage range responded with nil storage",
+ "worker", workerIdx, "job", processedJobs.jobIdx,
+ "address", p2p2core.AdaptAddress(response.ContractAddress),
+ "job addr", processedJobs.address)
+ continue
+ }
+ if response.RangeProof == nil {
+ s.log.Errorw("storage range responded with nil proof",
+ "worker", workerIdx, "job", processedJobs.jobIdx,
+ "address", p2p2core.AdaptAddress(response.ContractAddress),
+ "job addr", processedJobs.address)
+ continue
+ }
+
+ storageAddr := p2p2core.AdaptAddress(response.ContractAddress)
+ storageRange := csto.KeyValue
+ processedJobs.address = storageAddr
+
+ job := jobs[processedJobs.jobIdx]
+ processedJobs.jobAddr = job.path
+ if !job.path.Equal(storageAddr) {
+ s.log.Infow("[Missed response?] storage chunks completed",
+ "address", storageAddr, "jobAddr", processedJobs.jobAddr, "worker", workerIdx, "job", processedJobs.jobIdx)
+ // move to the next job
+ processedJobs.jobIdx++
+ job = jobs[processedJobs.jobIdx]
+
+ // sanity check
+ if !job.path.Equal(storageAddr) {
+ s.log.Errorw("Sanity check: next job does not match response",
+ "job addr", job.path, "got", storageAddr, "worker", workerIdx)
+ // what to do?
+ }
+ processedJobs.address = storageAddr
+ processedJobs.jobAddr = job.path
+ }
+
+ // Validate response
+ paths := make([]*felt.Felt, len(storageRange))
+ values := make([]*felt.Felt, len(storageRange))
+
+ for i, v := range storageRange {
+ paths[i] = p2p2core.AdaptFelt(v.Key)
+ values[i] = p2p2core.AdaptFelt(v.Value)
+ }
+
+ proofs := P2pProofToTrieProofs(response.RangeProof)
+ hasNext, err := VerifyTrie(job.storageRoot, paths, values, proofs, core.ContractStorageTrieHeight, crypto.Pedersen)
+ if err != nil {
+ s.log.Errorw("trie verification failed",
+ "contract", storageAddr, "expected root", job.storageRoot, "err", err)
+ // It is unclear how to distinguish whether the peer is malicious/broken/non-Byzantine or the contract's root is outdated.
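+ // Treat it as a stale root: requeue so the refresh worker can re-resolve
+ // this contract's storage root against the current pivot.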
+ err = s.queueStorageRefreshJob(ctx, job)
+ if err != nil {
+ s.log.Errorw("error queue storage refresh job", "err", err)
+ return err
+ }
+
+ // Ok, what now - we cannot just move on to the next job, because the server will
+ // still respond for this contract until it exhausts all leaves
+ continue
+ }
+
+ if storage[*job.path] == nil {
+ storage[*job.path] = map[felt.Felt]*felt.Felt{}
+ }
+ for i, path := range paths {
+ storage[*job.path][*path] = values[i]
+ }
+
+ totalPath += len(paths)
+ if maxPerStorageSize < len(storage[*job.path]) {
+ maxPerStorageSize = len(storage[*job.path])
+ }
+
+ if totalPath > maxStorageBatchSize || maxPerStorageSize > maxMaxPerStorageSize {
+ // Only after a certain number of paths do we store,
+ // so that the storing part is more efficient
+ storageAddressCount.Observe(float64(len(storage)))
+ err = s.blockchain.PutStorage(storage)
+ if err != nil {
+ s.log.Errorw("error storing storage", "err", err)
+ return err
+ }
+
+ totalPath = 0
+ maxPerStorageSize = 0
+ storage = map[felt.Felt]map[felt.Felt]*felt.Felt{}
+ }
+
+ if hasNext {
+ // note this is just tracking leaf keys on our side
+ // but it cannot change the request. Maybe it can be used for a retry procedure
+ job.startKey = paths[len(paths)-1]
+ } else {
+ processedJobs.jobIdx++
+ atomic.AddInt32(&s.storageRangeJobCount, -1) // it's... done?
+ }
+ }
+
+ storageAddressCount.Observe(float64(len(storage)))
+ err = s.blockchain.PutStorage(storage)
+ if err != nil {
+ s.log.Errorw("error storing storage", "err", err)
+ return err
+ }
+
+ // TODO: assign to nil to clear memory
+ nextjobs = make([]*storageRangeJob, 0)
+ for i := processedJobs.jobIdx; i < len(jobs); i++ {
+ unprocessedRequest := jobs[i]
+ nextjobs = append(nextjobs, unprocessedRequest)
+ }
+ processedJobs.jobIdx = 0
+ processedJobs.address = nil
+ processedJobs.jobAddr = nil
+ }
+}
+
+//nolint:gocyclo
+func (s *SnapSyncer) runStorageRefreshWorker(ctx context.Context) error {
+ // In Ethereum, this is normally done with trie node requests (snap's GetTrieNodes), but since we
+ // don't have that here, we'll have to be creative. This does mean that this is impressively inefficient.
+ var job *storageRangeJob
+
+ s.log.Infow("storage refresh worker entering infinite loop")
+ for {
+ if job == nil {
+ requestloop:
+ for {
+ contractDoneChecker := s.contractRangeDone
+ if s.storageRangeJobCount > 0 {
+ contractDoneChecker = nil // So that it never completes while there are jobs to be done
+ }
+
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-time.After(JobDuration):
+ s.log.Infow("no storage refresh job")
+ case <-contractDoneChecker:
+ // It's done...
+ return nil
+ case job = <-s.storageRefreshJob:
+ break requestloop
+ }
+ }
+ }
+
+ stateRoot := s.currentGlobalStateRoot
+ ctrIter, err := s.client.RequestContractRange(ctx, &spec.ContractRangeRequest{
+ Domain: 0, // What does this do?
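+ // Start == End below pins the range to this single contract's leaf.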
+ StateRoot: core2p2p.AdaptHash(stateRoot),
+ Start: core2p2p.AdaptAddress(job.path),
+ End: core2p2p.AdaptAddress(job.path),
+ ChunksPerProof: 10,
+ })
+ if err != nil {
+ s.log.Errorw("Error with contract range refresh request", "err", err)
+ return err
+ }
+
+ ResponseIter:
+ for response := range ctrIter {
+ if response == nil {
+ s.log.Errorw("contract range [storage refresh] responded with nil response")
+ continue
+ }
+
+ var crange *spec.ContractRange
+ switch v := response.GetResponses().(type) {
+ case *spec.ContractRangeResponse_Range:
+ crange = v.Range
+ case *spec.ContractRangeResponse_Fin:
+ break ResponseIter
+ default:
+ s.log.Warnw("Unexpected contract range message [storage refresh]", "GetResponses", v)
+ continue
+ }
+
+ if crange == nil || crange.State == nil {
+ s.log.Errorw("contract range [storage refresh] responded with nil state")
+ continue
+ }
+ if response.RangeProof == nil {
+ s.log.Errorw("contract range [storage refresh] responded with nil proof")
+ continue
+ }
+
+ classRoot := p2p2core.AdaptHash(response.ClassesRoot)
+ contractRoot := p2p2core.AdaptHash(response.ContractsRoot)
+ err := VerifyGlobalStateRoot(stateRoot, classRoot, contractRoot)
+ if err != nil {
+ // Root verification failed
+ // TODO: Ban peer
+ return err
+ }
+
+ paths := make([]*felt.Felt, len(crange.State))
+ values := make([]*felt.Felt, len(crange.State))
+
+ for i, rangeValue := range crange.State {
+ paths[i] = p2p2core.AdaptAddress(rangeValue.Address)
+ values[i] = CalculateContractStateHash(rangeValue)
+ }
+
+ proofs := P2pProofToTrieProofs(response.RangeProof)
+ _, err = VerifyTrie(contractRoot, paths, values, proofs, core.GlobalTrieHeight, crypto.Pedersen)
+ if err != nil {
+ // The peer should get penalised in this case
+ return err
+ }
+
+ job.storageRoot = p2p2core.AdaptHash(crange.State[0].Storage)
+ newClass := p2p2core.AdaptHash(crange.State[0].Class)
+ if !newClass.Equal(job.classHash) {
+ err = s.queueClassJob(ctx, newClass)
+ if err != nil {
+ s.log.Errorw("error queue class fetch job", "err", err)
+ return err
+ }
+ }
+
+ err = s.queueStorageRangeJobJob(ctx, job)
+ if err != nil {
+ s.log.Errorw("error queue storage refresh job", "err", err)
+ return err
+ }
+
+ job = nil
+ }
+ }
+}
+
+func (s *SnapSyncer) queueClassJob(ctx context.Context, classHash *felt.Felt) error {
+ queued := false
+ for !queued {
+ select {
+ case s.classesJob <- classHash:
+ atomic.AddInt32(&s.classFetchJobCount, 1)
+ queued = true
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-time.After(30 * time.Second): //nolint:mnd // TODO: remove this, only use temporarily for snap sync
+ s.log.Infow("class queue stalled")
+ }
+ }
+ return nil
+}
+
+func (s *SnapSyncer) queueStorageRangeJob(ctx context.Context, path, storageRoot, classHash *felt.Felt, nonce uint64) error {
+ return s.queueStorageRangeJobJob(ctx, &storageRangeJob{
+ path: path,
+ storageRoot: storageRoot,
+ startKey: &felt.Zero,
+ classHash: classHash,
+ nonce: nonce,
+ })
+}
+
+func (s *SnapSyncer) queueStorageRangeJobJob(ctx context.Context, job *storageRangeJob) error {
+ if job.storageRoot == nil || job.storageRoot.IsZero() {
+ // contracts with a storage root of 0x0 have no storage
+ return nil
+ }
+
+ queued := false
+ for !queued {
+ select {
+ case s.storageRangeJobQueue <- job:
+ queued = true
+ atomic.AddInt32(&s.storageRangeJobCount, 1)
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-time.After(JobDuration):
+ s.log.Infow("queue storage range stalled")
+ }
+ }
+ return nil
+}
+
+func (s *SnapSyncer) queueStorageRefreshJob(ctx 
context.Context, job *storageRangeJob) error {
+ queued := false
+ for !queued {
+ select {
+ case s.storageRefreshJob <- job:
+ queued = true
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-time.After(10 * time.Second): //nolint:mnd // TODO: remove this, only use temporarily for snap sync
+ s.log.Infow("storage refresh queue stalled")
+ }
+ }
+ return nil
+}
+
+func (s *SnapSyncer) poolLatestBlock(ctx context.Context) error {
+ for {
+ select {
+ case <-ctx.Done():
+ return nil
+ case <-time.After(JobDuration):
+ break
+ case <-s.storageRangeDone:
+ return nil
+ }
+
+ newTarget, err := s.getNextStartingBlock(ctx)
+ if err != nil {
+ return errors.Join(err, errors.New("error getting current head"))
+ }
+
+ // TODO: Race issue
+ if newTarget.Number-s.lastBlock.Number < uint64(maxPivotDistance) {
+ s.log.Infow("Not updating pivot yet", "lastblock", s.lastBlock.Number,
+ "newTarget", newTarget.Number, "diff", newTarget.Number-s.lastBlock.Number)
+ continue
+ }
+
+ pivotUpdates.Inc()
+
+ s.log.Infow("Switching snap pivot", "hash", newTarget.Hash, "number", newTarget.Number)
+ s.lastBlock = newTarget.Header
+
+ fmt.Printf("Current state root is %s\n", s.lastBlock.GlobalStateRoot)
+ s.currentGlobalStateRoot = s.lastBlock.GlobalStateRoot
+ }
+}
+
+func (s *SnapSyncer) ApplyStateUpdate(blockNumber uint64) error {
+ return errors.New("unimplemented")
+}
+
+func P2pProofToTrieProofs(proof *spec.PatriciaRangeProof) []trie.ProofNode {
+ // TODO: Move to adapter
+
+ proofs := make([]trie.ProofNode, len(proof.Nodes))
+ for i, node := range proof.Nodes {
+ if node.GetBinary() != nil {
+ binary := node.GetBinary()
+ proofs[i] = &trie.Binary{
+ LeftHash: p2p2core.AdaptFelt(binary.Left),
+ RightHash: p2p2core.AdaptFelt(binary.Right),
+ }
+ } else {
+ edge := node.GetEdge()
+ // TODO: What if edge is nil too?
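+ // Note: if GetEdge() also returns nil here, the dereferences below will
+ // panic; see the TODO above.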
+ key := trie.NewKey(uint8(edge.Length), edge.Path.Elements) + proofs[i] = &trie.Edge{ + Child: p2p2core.AdaptFelt(edge.Value), + Path: &key, + } + } + } + + return proofs +} + +func VerifyGlobalStateRoot(globalStateRoot, classRoot, storageRoot *felt.Felt) error { + stateVersion := new(felt.Felt).SetBytes([]byte(`STARKNET_STATE_V0`)) + + if classRoot.IsZero() { + if globalStateRoot.Equal(storageRoot) { + return nil + } else { + return errors.New("invalid global state root") + } + } + + if !crypto.PoseidonArray(stateVersion, storageRoot, classRoot).Equal(globalStateRoot) { + return errors.New("invalid global state root") + } + return nil +} + +func VerifyTrie( + expectedRoot *felt.Felt, + paths, hashes []*felt.Felt, + proofs []trie.ProofNode, + height uint8, + hash func(*felt.Felt, *felt.Felt) *felt.Felt, +) (bool, error) { + hasMore, valid, err := trie.VerifyRange(expectedRoot, nil, paths, hashes, proofs, hash, height) + if err != nil { + return false, err + } + if !valid { + return false, errors.New("invalid proof") + } + + return hasMore, nil +} + +func CalculateClassHash(cls core.Class) *felt.Felt { + hash, err := cls.Hash() + if err != nil { + panic(err) + } + + return hash +} + +func CalculateCompiledClassHash(cls core.Class) *felt.Felt { + return cls.(*core.Cairo1Class).Compiled.Hash() +} + +func CalculateContractStateHash(value *spec.ContractState) *felt.Felt { + nonce := fp.NewElement(value.Nonce) + return calculateContractCommitment( + p2p2core.AdaptHash(value.Storage), + p2p2core.AdaptHash(value.Class), + felt.NewFelt(&nonce), + ) +} + +func calculateContractCommitment(storageRoot, classHash, nonce *felt.Felt) *felt.Felt { + return crypto.Pedersen(crypto.Pedersen(crypto.Pedersen(classHash, storageRoot), nonce), &felt.Zero) +} diff --git a/p2p/starknet/client.go b/p2p/starknet/client.go index bfeed7ab7a..a103c3f2d4 100644 --- a/p2p/starknet/client.go +++ b/p2p/starknet/client.go @@ -108,18 +108,49 @@ func (c *Client) RequestBlockHeaders( } func (c *Client) RequestEvents(ctx context.Context, req *spec.EventsRequest) (iter.Seq[*spec.EventsResponse], error) { - return requestAndReceiveStream[*spec.EventsRequest, *spec.EventsResponse](ctx, c.newStream, EventsPID(), req, c.log) + return requestAndReceiveStream[*spec.EventsRequest, *spec.EventsResponse]( + ctx, c.newStream, EventsPID(), req, c.log, + ) } func (c *Client) RequestClasses(ctx context.Context, req *spec.ClassesRequest) (iter.Seq[*spec.ClassesResponse], error) { - return requestAndReceiveStream[*spec.ClassesRequest, *spec.ClassesResponse](ctx, c.newStream, ClassesPID(), req, c.log) + return requestAndReceiveStream[*spec.ClassesRequest, *spec.ClassesResponse]( + ctx, c.newStream, ClassesPID(), req, c.log, + ) } func (c *Client) RequestStateDiffs(ctx context.Context, req *spec.StateDiffsRequest) (iter.Seq[*spec.StateDiffsResponse], error) { - return requestAndReceiveStream[*spec.StateDiffsRequest, *spec.StateDiffsResponse](ctx, c.newStream, StateDiffPID(), req, c.log) + return requestAndReceiveStream[*spec.StateDiffsRequest, *spec.StateDiffsResponse]( + ctx, c.newStream, StateDiffPID(), req, c.log, + ) } func (c *Client) RequestTransactions(ctx context.Context, req *spec.TransactionsRequest) (iter.Seq[*spec.TransactionsResponse], error) { return requestAndReceiveStream[*spec.TransactionsRequest, *spec.TransactionsResponse]( - ctx, c.newStream, TransactionsPID(), req, c.log) + ctx, c.newStream, TransactionsPID(), req, c.log, + ) +} + +func (c *Client) RequestClassRange(ctx context.Context, req *spec.ClassRangeRequest) 
(iter.Seq[*spec.ClassRangeResponse], error) { + return requestAndReceiveStream[*spec.ClassRangeRequest, *spec.ClassRangeResponse]( + ctx, c.newStream, SnapshotClassRangePID(), req, c.log, + ) +} + +func (c *Client) RequestContractRange(ctx context.Context, req *spec.ContractRangeRequest) (iter.Seq[*spec.ContractRangeResponse], error) { + return requestAndReceiveStream[*spec.ContractRangeRequest, *spec.ContractRangeResponse]( + ctx, c.newStream, SnapshotContractRangePID(), req, c.log, + ) +} + +func (c *Client) RequestStorageRange( + ctx context.Context, req *spec.ContractStorageRequest, +) (iter.Seq[*spec.ContractStorageResponse], error) { + return requestAndReceiveStream[*spec.ContractStorageRequest, *spec.ContractStorageResponse]( + ctx, c.newStream, SnapshotContractStorageRangePID(), req, c.log, + ) +} + +func (c *Client) RequestClassesByKeys(ctx context.Context, req *spec.ClassHashesRequest) (iter.Seq[*spec.ClassesResponse], error) { + return requestAndReceiveStream[*spec.ClassHashesRequest, *spec.ClassesResponse](ctx, c.newStream, SnapshotClassesPID(), req, c.log) } diff --git a/p2p/starknet/handlers.go b/p2p/starknet/handlers.go index 28c4eee5f3..138103b292 100644 --- a/p2p/starknet/handlers.go +++ b/p2p/starknet/handlers.go @@ -1,4 +1,5 @@ -//go:generate protoc --go_out=./ --proto_path=./ --go_opt=Mp2p/proto/transaction.proto=./spec --go_opt=Mp2p/proto/state.proto=./spec --go_opt=Mp2p/proto/snapshot.proto=./spec --go_opt=Mp2p/proto/receipt.proto=./spec --go_opt=Mp2p/proto/mempool.proto=./spec --go_opt=Mp2p/proto/event.proto=./spec --go_opt=Mp2p/proto/block.proto=./spec --go_opt=Mp2p/proto/common.proto=./spec p2p/proto/transaction.proto p2p/proto/state.proto p2p/proto/snapshot.proto p2p/proto/common.proto p2p/proto/block.proto p2p/proto/event.proto p2p/proto/receipt.proto +//go:generate protoc --go_out=./ --proto_path=./ --go_opt=Mp2p/proto/common.proto=./spec --go_opt=Mp2p/proto/header.proto=./spec --go_opt=Mp2p/proto/event.proto=./spec --go_opt=Mp2p/proto/receipt.proto=./spec --go_opt=Mp2p/proto/state.proto=./spec --go_opt=Mp2p/proto/transaction.proto=./spec --go_opt=Mp2p/proto/class.proto=./spec --go_opt=Mp2p/proto/snapshot.proto=./spec p2p/proto/common.proto p2p/proto/event.proto p2p/proto/header.proto p2p/proto/receipt.proto p2p/proto/state.proto p2p/proto/transaction.proto p2p/proto/class.proto p2p/proto/snapshot.proto + package starknet import ( @@ -21,8 +22,9 @@ import ( ) type Handler struct { - bcReader blockchain.Reader - log utils.SimpleLogger + bcReader blockchain.Reader + snapProvider SnapProvider + log utils.SimpleLogger ctx context.Context cancel context.CancelFunc @@ -40,6 +42,11 @@ func NewHandler(bcReader blockchain.Reader, log utils.SimpleLogger) *Handler { } } +func (h *Handler) WithSnapsyncSupport(provider SnapProvider) { + // TODO: should it be here? + h.snapProvider = provider +} + // bufferPool caches unused buffer objects for later reuse. 
var bufferPool = sync.Pool{ New: func() any { @@ -123,6 +130,38 @@ func (h *Handler) StateDiffHandler(stream network.Stream) { streamHandler[*spec.StateDiffsRequest](h.ctx, &h.wg, stream, h.onStateDiffRequest, h.log) } +func (h *Handler) ClassRangeHandler(stream network.Stream) { + if h.snapProvider == nil { + h.log.Debugw("SnapProvider not initialised") + return + } + streamHandler[*spec.ClassRangeRequest](h.ctx, &h.wg, stream, h.snapProvider.GetClassRange, h.log) +} + +func (h *Handler) ContractRangeHandler(stream network.Stream) { + if h.snapProvider == nil { + h.log.Debugw("SnapProvider not initialised") + return + } + streamHandler[*spec.ContractRangeRequest](h.ctx, &h.wg, stream, h.snapProvider.GetContractRange, h.log) +} + +func (h *Handler) ContractStorageHandler(stream network.Stream) { + if h.snapProvider == nil { + h.log.Debugw("SnapProvider not initialised") + return + } + streamHandler[*spec.ContractStorageRequest](h.ctx, &h.wg, stream, h.snapProvider.GetStorageRange, h.log) +} + +func (h *Handler) ClassHashesHandler(stream network.Stream) { + if h.snapProvider == nil { + h.log.Debugw("SnapProvider not initialised") + return + } + streamHandler[*spec.ClassHashesRequest](h.ctx, &h.wg, stream, h.snapProvider.GetClasses, h.log) +} + func (h *Handler) onHeadersRequest(req *spec.BlockHeadersRequest) (iter.Seq[proto.Message], error) { finMsg := &spec.BlockHeadersResponse{ HeaderMessage: &spec.BlockHeadersResponse_Fin{}, diff --git a/p2p/starknet/ids.go b/p2p/starknet/ids.go index d1b97b0ad2..741ac436e8 100644 --- a/p2p/starknet/ids.go +++ b/p2p/starknet/ids.go @@ -25,3 +25,19 @@ func ClassesPID() protocol.ID { func StateDiffPID() protocol.ID { return Prefix + "/state_diffs/0.1.0-rc.0" } + +func SnapshotClassRangePID() protocol.ID { + return Prefix + "/snapshots/class_range/0.1.0-rc.0" +} + +func SnapshotContractRangePID() protocol.ID { + return Prefix + "/snapshots/contract_range/0.1.0-rc.0" +} + +func SnapshotContractStorageRangePID() protocol.ID { + return Prefix + "/snapshots/storage_range/0.1.0-rc.0" +} + +func SnapshotClassesPID() protocol.ID { + return Prefix + "/snapshots/classes/0.1.0-rc.0" +} diff --git a/p2p/starknet/p2p/proto/class.proto b/p2p/starknet/p2p/proto/class.proto index 2cd3ed5265..227adca4e7 100644 --- a/p2p/starknet/p2p/proto/class.proto +++ b/p2p/starknet/p2p/proto/class.proto @@ -55,4 +55,13 @@ message ClassesResponse { Class class = 1; Fin fin = 2; // Fin is sent after the peer sent all the data or when it encountered a block that it doesn't have its classes. 
} -} \ No newline at end of file +} + +message Classes { + uint32 domain = 1; + repeated Class classes = 2; +} + +message ClassHashesRequest { + repeated Hash class_hashes = 1; +} diff --git a/p2p/starknet/p2p/proto/snapshot.proto b/p2p/starknet/p2p/proto/snapshot.proto new file mode 100644 index 0000000000..997c58c47a --- /dev/null +++ b/p2p/starknet/p2p/proto/snapshot.proto @@ -0,0 +1,116 @@ +syntax = "proto3"; + +import "p2p/proto/common.proto"; +import "p2p/proto/state.proto"; +import "p2p/proto/class.proto"; + +message PatriciaNode { + message Edge { + uint32 length = 1; + Felt252 path = 2; // as bits of left/right + Felt252 value = 3; + } + message Binary { + Felt252 left = 1; + Felt252 right = 2; + } + + oneof node { + Edge edge = 1; + Binary binary = 2; + } +} + +// non leaf nodes required to build the trie given the range (leaves) +message PatriciaRangeProof { + repeated PatriciaNode nodes = 1; +} + +// leafs of the contract state tree +message ContractState { + Address address = 1; // the key + Hash class = 2; + Hash storage = 3; // patricia + uint64 nonce = 4; +} + +// request a range from the contract state tree that matches the given root (block) +// starts at 'start' and ends no more than 'end'. +// the result is (ContractRange+, PatriciaRangeProof)* +message ContractRangeRequest { + uint32 domain = 1; // volition + Hash state_root = 2; + Address start = 3; + Address end = 4; + uint32 chunks_per_proof = 5; // how many ContractRange items to send before sending a proof +} + +// stream of leaves in the contracts tree +message ContractRange { + repeated ContractState state = 1; +} + +message ContractRangeResponse { + optional Hash root = 1; // may not appear if Fin is sent to end the whole response + optional Hash contracts_root = 2;// may not appear if Fin is sent to end the whole response + optional Hash classes_root = 3;// may not appear if Fin is sent to end the whole response + oneof responses { + ContractRange range = 4; + Fin fin = 5; + } + PatriciaRangeProof range_proof = 6; +} + +// duplicate of GetContractRange. Can introduce a 'type' instead. 
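+// Unlike ContractRangeRequest, start/end here are class hashes (keys of the classes trie), not contract addresses.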
+// result is (Classes+, PatriciaRangeProof)* +message ClassRangeRequest { + Hash root = 1; + Hash start = 2; + Hash end = 3; + uint32 chunks_per_proof = 4; +} + +message ClassRangeResponse { + optional Hash root = 1; // may not appear if Fin is sent to end the whole response + optional Hash contracts_root = 2;// may not appear if Fin is sent to end the whole response + optional Hash classes_root = 3;// may not appear if Fin is sent to end the whole response + oneof responses { + Classes classes = 4; + Fin fin = 5; + } + PatriciaRangeProof range_proof = 6; +} + +// A position in some contract's state tree is identified by the state tree's root and the key in it +message StorageLeafQuery { + Hash contract_storage_root = 1; + Felt252 key = 2; +} + +message StorageRangeQuery { + Address address = 3; + StorageLeafQuery start = 1; + StorageLeafQuery end = 2; +} + +// result is (ContractStorageRange+, PatriciaRangeProof)* +message ContractStorageRequest { + uint32 domain = 1; // volition + Hash state_root = 2; + uint32 chunks_per_proof = 5; // how many ContractRange items to send before sending a proof + repeated StorageRangeQuery query = 3; +} + +message ContractStorage { + repeated ContractStoredValue keyValue = 2; +} + +message ContractStorageResponse { + optional Hash state_root = 1; // may not appear if Fin is sent to end the whole response + optional Address contract_address = 2; + oneof responses { + ContractStorage storage = 3; + Fin fin = 4; + } + PatriciaRangeProof range_proof = 5; +} diff --git a/p2p/starknet/p2p/proto/state.proto b/p2p/starknet/p2p/proto/state.proto index 79f33fcafe..228897c242 100644 --- a/p2p/starknet/p2p/proto/state.proto +++ b/p2p/starknet/p2p/proto/state.proto @@ -34,4 +34,4 @@ message StateDiffsResponse { DeclaredClass declared_class = 2; Fin fin = 3; // Fin is sent after the peer sent all the data or when it encountered a block that it doesn't have its state diff. } -} \ No newline at end of file +} diff --git a/p2p/starknet/snap_provider.go b/p2p/starknet/snap_provider.go new file mode 100644 index 0000000000..59ede0d192 --- /dev/null +++ b/p2p/starknet/snap_provider.go @@ -0,0 +1,15 @@ +package starknet + +import ( + "iter" + + "github.com/NethermindEth/juno/p2p/starknet/spec" + "google.golang.org/protobuf/proto" +) + +type SnapProvider interface { + GetClassRange(request *spec.ClassRangeRequest) (iter.Seq[proto.Message], error) + GetContractRange(request *spec.ContractRangeRequest) (iter.Seq[proto.Message], error) + GetStorageRange(request *spec.ContractStorageRequest) (iter.Seq[proto.Message], error) + GetClasses(request *spec.ClassHashesRequest) (iter.Seq[proto.Message], error) +} diff --git a/p2p/starknet/spec/class.pb.go b/p2p/starknet/spec/class.pb.go index c0d591d4ca..a531442a65 100644 --- a/p2p/starknet/spec/class.pb.go +++ b/p2p/starknet/spec/class.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.34.2 -// protoc v5.27.1 +// protoc v3.17.3 // source: p2p/proto/class.proto package spec @@ -570,6 +570,108 @@ func (*ClassesResponse_Class) isClassesResponse_ClassMessage() {} func (*ClassesResponse_Fin) isClassesResponse_ClassMessage() {} +type Classes struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Domain uint32 `protobuf:"varint,1,opt,name=domain,proto3" json:"domain,omitempty"` + Classes []*Class `protobuf:"bytes,2,rep,name=classes,proto3" json:"classes,omitempty"` +} + +func (x *Classes) Reset() { + *x = Classes{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_class_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Classes) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Classes) ProtoMessage() {} + +func (x *Classes) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_class_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Classes.ProtoReflect.Descriptor instead. +func (*Classes) Descriptor() ([]byte, []int) { + return file_p2p_proto_class_proto_rawDescGZIP(), []int{8} +} + +func (x *Classes) GetDomain() uint32 { + if x != nil { + return x.Domain + } + return 0 +} + +func (x *Classes) GetClasses() []*Class { + if x != nil { + return x.Classes + } + return nil +} + +type ClassHashesRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ClassHashes []*Hash `protobuf:"bytes,1,rep,name=class_hashes,json=classHashes,proto3" json:"class_hashes,omitempty"` +} + +func (x *ClassHashesRequest) Reset() { + *x = ClassHashesRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_class_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClassHashesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClassHashesRequest) ProtoMessage() {} + +func (x *ClassHashesRequest) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_class_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClassHashesRequest.ProtoReflect.Descriptor instead. 
+func (*ClassHashesRequest) Descriptor() ([]byte, []int) { + return file_p2p_proto_class_proto_rawDescGZIP(), []int{9} +} + +func (x *ClassHashesRequest) GetClassHashes() []*Hash { + if x != nil { + return x.ClassHashes + } + return nil +} + var File_p2p_proto_class_proto protoreflect.FileDescriptor var file_p2p_proto_class_proto_rawDesc = []byte{ @@ -640,10 +742,19 @@ var file_p2p_proto_class_proto_rawDesc = []byte{ 0x05, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x18, 0x0a, 0x03, 0x66, 0x69, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x04, 0x2e, 0x46, 0x69, 0x6e, 0x48, 0x00, 0x52, 0x03, 0x66, 0x69, 0x6e, 0x42, 0x0f, 0x0a, 0x0d, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x42, 0x31, 0x5a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x4e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x64, 0x45, 0x74, 0x68, 0x2f, 0x6a, 0x75, - 0x6e, 0x6f, 0x2f, 0x70, 0x32, 0x70, 0x2f, 0x73, 0x74, 0x61, 0x72, 0x6b, 0x6e, 0x65, 0x74, 0x2f, - 0x73, 0x70, 0x65, 0x63, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x65, 0x22, 0x43, 0x0a, 0x07, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x65, 0x73, 0x12, 0x16, 0x0a, 0x06, + 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x64, 0x6f, + 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x20, 0x0a, 0x07, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x65, 0x73, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x06, 0x2e, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x52, 0x07, 0x63, + 0x6c, 0x61, 0x73, 0x73, 0x65, 0x73, 0x22, 0x3e, 0x0a, 0x12, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x48, + 0x61, 0x73, 0x68, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x28, 0x0a, 0x0c, + 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x05, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x52, 0x0b, 0x63, 0x6c, 0x61, 0x73, 0x73, + 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x42, 0x31, 0x5a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x64, 0x45, + 0x74, 0x68, 0x2f, 0x6a, 0x75, 0x6e, 0x6f, 0x2f, 0x70, 0x32, 0x70, 0x2f, 0x73, 0x74, 0x61, 0x72, + 0x6b, 0x6e, 0x65, 0x74, 0x2f, 0x73, 0x70, 0x65, 0x63, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, } var ( @@ -658,43 +769,47 @@ func file_p2p_proto_class_proto_rawDescGZIP() []byte { return file_p2p_proto_class_proto_rawDescData } -var file_p2p_proto_class_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_p2p_proto_class_proto_msgTypes = make([]protoimpl.MessageInfo, 10) var file_p2p_proto_class_proto_goTypes = []any{ - (*EntryPoint)(nil), // 0: EntryPoint - (*Cairo0Class)(nil), // 1: Cairo0Class - (*SierraEntryPoint)(nil), // 2: SierraEntryPoint - (*Cairo1EntryPoints)(nil), // 3: Cairo1EntryPoints - (*Cairo1Class)(nil), // 4: Cairo1Class - (*Class)(nil), // 5: Class - (*ClassesRequest)(nil), // 6: ClassesRequest - (*ClassesResponse)(nil), // 7: ClassesResponse - (*Felt252)(nil), // 8: Felt252 - (*Hash)(nil), // 9: Hash - (*Iteration)(nil), // 10: Iteration - (*Fin)(nil), // 11: Fin + (*EntryPoint)(nil), // 0: EntryPoint + (*Cairo0Class)(nil), // 1: Cairo0Class + (*SierraEntryPoint)(nil), // 2: SierraEntryPoint + (*Cairo1EntryPoints)(nil), // 3: Cairo1EntryPoints + (*Cairo1Class)(nil), // 4: Cairo1Class + (*Class)(nil), // 5: Class + (*ClassesRequest)(nil), // 6: ClassesRequest + (*ClassesResponse)(nil), // 7: ClassesResponse + (*Classes)(nil), // 8: Classes + (*ClassHashesRequest)(nil), // 9: ClassHashesRequest + (*Felt252)(nil), // 10: Felt252 + 
(*Hash)(nil), // 11: Hash + (*Iteration)(nil), // 12: Iteration + (*Fin)(nil), // 13: Fin } var file_p2p_proto_class_proto_depIdxs = []int32{ - 8, // 0: EntryPoint.selector:type_name -> Felt252 + 10, // 0: EntryPoint.selector:type_name -> Felt252 0, // 1: Cairo0Class.externals:type_name -> EntryPoint 0, // 2: Cairo0Class.l1_handlers:type_name -> EntryPoint 0, // 3: Cairo0Class.constructors:type_name -> EntryPoint - 8, // 4: SierraEntryPoint.selector:type_name -> Felt252 + 10, // 4: SierraEntryPoint.selector:type_name -> Felt252 2, // 5: Cairo1EntryPoints.externals:type_name -> SierraEntryPoint 2, // 6: Cairo1EntryPoints.l1_handlers:type_name -> SierraEntryPoint 2, // 7: Cairo1EntryPoints.constructors:type_name -> SierraEntryPoint 3, // 8: Cairo1Class.entry_points:type_name -> Cairo1EntryPoints - 8, // 9: Cairo1Class.program:type_name -> Felt252 + 10, // 9: Cairo1Class.program:type_name -> Felt252 1, // 10: Class.cairo0:type_name -> Cairo0Class 4, // 11: Class.cairo1:type_name -> Cairo1Class - 9, // 12: Class.class_hash:type_name -> Hash - 10, // 13: ClassesRequest.iteration:type_name -> Iteration + 11, // 12: Class.class_hash:type_name -> Hash + 12, // 13: ClassesRequest.iteration:type_name -> Iteration 5, // 14: ClassesResponse.class:type_name -> Class - 11, // 15: ClassesResponse.fin:type_name -> Fin - 16, // [16:16] is the sub-list for method output_type - 16, // [16:16] is the sub-list for method input_type - 16, // [16:16] is the sub-list for extension type_name - 16, // [16:16] is the sub-list for extension extendee - 0, // [0:16] is the sub-list for field type_name + 13, // 15: ClassesResponse.fin:type_name -> Fin + 5, // 16: Classes.classes:type_name -> Class + 11, // 17: ClassHashesRequest.class_hashes:type_name -> Hash + 18, // [18:18] is the sub-list for method output_type + 18, // [18:18] is the sub-list for method input_type + 18, // [18:18] is the sub-list for extension type_name + 18, // [18:18] is the sub-list for extension extendee + 0, // [0:18] is the sub-list for field type_name } func init() { file_p2p_proto_class_proto_init() } @@ -800,6 +915,30 @@ func file_p2p_proto_class_proto_init() { return nil } } + file_p2p_proto_class_proto_msgTypes[8].Exporter = func(v any, i int) any { + switch v := v.(*Classes); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_proto_class_proto_msgTypes[9].Exporter = func(v any, i int) any { + switch v := v.(*ClassHashesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } file_p2p_proto_class_proto_msgTypes[5].OneofWrappers = []any{ (*Class_Cairo0)(nil), @@ -815,7 +954,7 @@ func file_p2p_proto_class_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_p2p_proto_class_proto_rawDesc, NumEnums: 0, - NumMessages: 8, + NumMessages: 10, NumExtensions: 0, NumServices: 0, }, diff --git a/p2p/starknet/spec/common.pb.go b/p2p/starknet/spec/common.pb.go index 25aedb5463..1b18c7c9c7 100644 --- a/p2p/starknet/spec/common.pb.go +++ b/p2p/starknet/spec/common.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.34.2 -// protoc v5.27.1 +// protoc v3.17.3 // source: p2p/proto/common.proto package spec diff --git a/p2p/starknet/spec/event.pb.go b/p2p/starknet/spec/event.pb.go index 8543465c35..5b559e259c 100644 --- a/p2p/starknet/spec/event.pb.go +++ b/p2p/starknet/spec/event.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.34.2 -// protoc v5.27.1 +// protoc v3.17.3 // source: p2p/proto/event.proto package spec diff --git a/p2p/starknet/spec/header.pb.go b/p2p/starknet/spec/header.pb.go index 5d94c6211b..49a8280b36 100644 --- a/p2p/starknet/spec/header.pb.go +++ b/p2p/starknet/spec/header.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.34.2 -// protoc v5.27.1 +// protoc v3.17.3 // source: p2p/proto/header.proto package spec diff --git a/p2p/starknet/spec/receipt.pb.go b/p2p/starknet/spec/receipt.pb.go index d3c83c97ff..5e29680442 100644 --- a/p2p/starknet/spec/receipt.pb.go +++ b/p2p/starknet/spec/receipt.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.34.2 -// protoc v5.27.1 +// protoc v3.17.3 // source: p2p/proto/receipt.proto package spec diff --git a/p2p/starknet/spec/snapshot.pb.go b/p2p/starknet/spec/snapshot.pb.go new file mode 100644 index 0000000000..fd6ee7f3a6 --- /dev/null +++ b/p2p/starknet/spec/snapshot.pb.go @@ -0,0 +1,1581 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc v3.17.3 +// source: p2p/proto/snapshot.proto + +package spec + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type PatriciaNode struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Node: + // + // *PatriciaNode_Edge_ + // *PatriciaNode_Binary_ + Node isPatriciaNode_Node `protobuf_oneof:"node"` +} + +func (x *PatriciaNode) Reset() { + *x = PatriciaNode{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_snapshot_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PatriciaNode) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PatriciaNode) ProtoMessage() {} + +func (x *PatriciaNode) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_snapshot_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PatriciaNode.ProtoReflect.Descriptor instead. 
+func (*PatriciaNode) Descriptor() ([]byte, []int) { + return file_p2p_proto_snapshot_proto_rawDescGZIP(), []int{0} +} + +func (m *PatriciaNode) GetNode() isPatriciaNode_Node { + if m != nil { + return m.Node + } + return nil +} + +func (x *PatriciaNode) GetEdge() *PatriciaNode_Edge { + if x, ok := x.GetNode().(*PatriciaNode_Edge_); ok { + return x.Edge + } + return nil +} + +func (x *PatriciaNode) GetBinary() *PatriciaNode_Binary { + if x, ok := x.GetNode().(*PatriciaNode_Binary_); ok { + return x.Binary + } + return nil +} + +type isPatriciaNode_Node interface { + isPatriciaNode_Node() +} + +type PatriciaNode_Edge_ struct { + Edge *PatriciaNode_Edge `protobuf:"bytes,1,opt,name=edge,proto3,oneof"` +} + +type PatriciaNode_Binary_ struct { + Binary *PatriciaNode_Binary `protobuf:"bytes,2,opt,name=binary,proto3,oneof"` +} + +func (*PatriciaNode_Edge_) isPatriciaNode_Node() {} + +func (*PatriciaNode_Binary_) isPatriciaNode_Node() {} + +// non leaf nodes required to build the trie given the range (leaves) +type PatriciaRangeProof struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Nodes []*PatriciaNode `protobuf:"bytes,1,rep,name=nodes,proto3" json:"nodes,omitempty"` +} + +func (x *PatriciaRangeProof) Reset() { + *x = PatriciaRangeProof{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_snapshot_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PatriciaRangeProof) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PatriciaRangeProof) ProtoMessage() {} + +func (x *PatriciaRangeProof) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_snapshot_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PatriciaRangeProof.ProtoReflect.Descriptor instead. 
+func (*PatriciaRangeProof) Descriptor() ([]byte, []int) { + return file_p2p_proto_snapshot_proto_rawDescGZIP(), []int{1} +} + +func (x *PatriciaRangeProof) GetNodes() []*PatriciaNode { + if x != nil { + return x.Nodes + } + return nil +} + +// leafs of the contract state tree +type ContractState struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Address *Address `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` // the key + Class *Hash `protobuf:"bytes,2,opt,name=class,proto3" json:"class,omitempty"` + Storage *Hash `protobuf:"bytes,3,opt,name=storage,proto3" json:"storage,omitempty"` // patricia + Nonce uint64 `protobuf:"varint,4,opt,name=nonce,proto3" json:"nonce,omitempty"` +} + +func (x *ContractState) Reset() { + *x = ContractState{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_snapshot_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ContractState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ContractState) ProtoMessage() {} + +func (x *ContractState) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_snapshot_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ContractState.ProtoReflect.Descriptor instead. +func (*ContractState) Descriptor() ([]byte, []int) { + return file_p2p_proto_snapshot_proto_rawDescGZIP(), []int{2} +} + +func (x *ContractState) GetAddress() *Address { + if x != nil { + return x.Address + } + return nil +} + +func (x *ContractState) GetClass() *Hash { + if x != nil { + return x.Class + } + return nil +} + +func (x *ContractState) GetStorage() *Hash { + if x != nil { + return x.Storage + } + return nil +} + +func (x *ContractState) GetNonce() uint64 { + if x != nil { + return x.Nonce + } + return 0 +} + +// request a range from the contract state tree that matches the given root (block) +// starts at 'start' and ends no more than 'end'. 
+// the result is (ContractRange+, PatriciaRangeProof)* +type ContractRangeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Domain uint32 `protobuf:"varint,1,opt,name=domain,proto3" json:"domain,omitempty"` // volition + StateRoot *Hash `protobuf:"bytes,2,opt,name=state_root,json=stateRoot,proto3" json:"state_root,omitempty"` + Start *Address `protobuf:"bytes,3,opt,name=start,proto3" json:"start,omitempty"` + End *Address `protobuf:"bytes,4,opt,name=end,proto3" json:"end,omitempty"` + ChunksPerProof uint32 `protobuf:"varint,5,opt,name=chunks_per_proof,json=chunksPerProof,proto3" json:"chunks_per_proof,omitempty"` // how many ContractRange items to send before sending a proof +} + +func (x *ContractRangeRequest) Reset() { + *x = ContractRangeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_snapshot_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ContractRangeRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ContractRangeRequest) ProtoMessage() {} + +func (x *ContractRangeRequest) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_snapshot_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ContractRangeRequest.ProtoReflect.Descriptor instead. +func (*ContractRangeRequest) Descriptor() ([]byte, []int) { + return file_p2p_proto_snapshot_proto_rawDescGZIP(), []int{3} +} + +func (x *ContractRangeRequest) GetDomain() uint32 { + if x != nil { + return x.Domain + } + return 0 +} + +func (x *ContractRangeRequest) GetStateRoot() *Hash { + if x != nil { + return x.StateRoot + } + return nil +} + +func (x *ContractRangeRequest) GetStart() *Address { + if x != nil { + return x.Start + } + return nil +} + +func (x *ContractRangeRequest) GetEnd() *Address { + if x != nil { + return x.End + } + return nil +} + +func (x *ContractRangeRequest) GetChunksPerProof() uint32 { + if x != nil { + return x.ChunksPerProof + } + return 0 +} + +// stream of leaves in the contracts tree +type ContractRange struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + State []*ContractState `protobuf:"bytes,1,rep,name=state,proto3" json:"state,omitempty"` +} + +func (x *ContractRange) Reset() { + *x = ContractRange{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_snapshot_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ContractRange) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ContractRange) ProtoMessage() {} + +func (x *ContractRange) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_snapshot_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ContractRange.ProtoReflect.Descriptor instead. 
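// Illustrative sketch of the request described above, assuming the common.proto
// Hash and Address messages wrap a raw Elements byte slice and that the caller
// supplies 32-byte big-endian felt encodings; neither is defined in this file.
func newContractRangeRequest(stateRoot, start, end []byte) *ContractRangeRequest {
	return &ContractRangeRequest{
		Domain:         0, // volition domain; 0 is assumed to be the default state domain
		StateRoot:      &Hash{Elements: stateRoot},
		Start:          &Address{Elements: start},
		End:            &Address{Elements: end},
		ChunksPerProof: 100, // ContractRange chunks between consecutive range proofs (tunable)
	}
}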
+func (*ContractRange) Descriptor() ([]byte, []int) { + return file_p2p_proto_snapshot_proto_rawDescGZIP(), []int{4} +} + +func (x *ContractRange) GetState() []*ContractState { + if x != nil { + return x.State + } + return nil +} + +type ContractRangeResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Root *Hash `protobuf:"bytes,1,opt,name=root,proto3,oneof" json:"root,omitempty"` // may not appear if Fin is sent to end the whole response + ContractsRoot *Hash `protobuf:"bytes,2,opt,name=contracts_root,json=contractsRoot,proto3,oneof" json:"contracts_root,omitempty"` // may not appear if Fin is sent to end the whole response + ClassesRoot *Hash `protobuf:"bytes,3,opt,name=classes_root,json=classesRoot,proto3,oneof" json:"classes_root,omitempty"` // may not appear if Fin is sent to end the whole response + // Types that are assignable to Responses: + // + // *ContractRangeResponse_Range + // *ContractRangeResponse_Fin + Responses isContractRangeResponse_Responses `protobuf_oneof:"responses"` + RangeProof *PatriciaRangeProof `protobuf:"bytes,6,opt,name=range_proof,json=rangeProof,proto3" json:"range_proof,omitempty"` +} + +func (x *ContractRangeResponse) Reset() { + *x = ContractRangeResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_snapshot_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ContractRangeResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ContractRangeResponse) ProtoMessage() {} + +func (x *ContractRangeResponse) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_snapshot_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ContractRangeResponse.ProtoReflect.Descriptor instead. 
+func (*ContractRangeResponse) Descriptor() ([]byte, []int) { + return file_p2p_proto_snapshot_proto_rawDescGZIP(), []int{5} +} + +func (x *ContractRangeResponse) GetRoot() *Hash { + if x != nil { + return x.Root + } + return nil +} + +func (x *ContractRangeResponse) GetContractsRoot() *Hash { + if x != nil { + return x.ContractsRoot + } + return nil +} + +func (x *ContractRangeResponse) GetClassesRoot() *Hash { + if x != nil { + return x.ClassesRoot + } + return nil +} + +func (m *ContractRangeResponse) GetResponses() isContractRangeResponse_Responses { + if m != nil { + return m.Responses + } + return nil +} + +func (x *ContractRangeResponse) GetRange() *ContractRange { + if x, ok := x.GetResponses().(*ContractRangeResponse_Range); ok { + return x.Range + } + return nil +} + +func (x *ContractRangeResponse) GetFin() *Fin { + if x, ok := x.GetResponses().(*ContractRangeResponse_Fin); ok { + return x.Fin + } + return nil +} + +func (x *ContractRangeResponse) GetRangeProof() *PatriciaRangeProof { + if x != nil { + return x.RangeProof + } + return nil +} + +type isContractRangeResponse_Responses interface { + isContractRangeResponse_Responses() +} + +type ContractRangeResponse_Range struct { + Range *ContractRange `protobuf:"bytes,4,opt,name=range,proto3,oneof"` +} + +type ContractRangeResponse_Fin struct { + Fin *Fin `protobuf:"bytes,5,opt,name=fin,proto3,oneof"` +} + +func (*ContractRangeResponse_Range) isContractRangeResponse_Responses() {} + +func (*ContractRangeResponse_Fin) isContractRangeResponse_Responses() {} + +// duplicate of GetContractRange. Can introduce a 'type' instead. +// result is (Classes+, PatriciaRangeProof)* +type ClassRangeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Root *Hash `protobuf:"bytes,1,opt,name=root,proto3" json:"root,omitempty"` + Start *Hash `protobuf:"bytes,2,opt,name=start,proto3" json:"start,omitempty"` + End *Hash `protobuf:"bytes,3,opt,name=end,proto3" json:"end,omitempty"` + ChunksPerProof uint32 `protobuf:"varint,4,opt,name=chunks_per_proof,json=chunksPerProof,proto3" json:"chunks_per_proof,omitempty"` +} + +func (x *ClassRangeRequest) Reset() { + *x = ClassRangeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_snapshot_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClassRangeRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClassRangeRequest) ProtoMessage() {} + +func (x *ClassRangeRequest) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_snapshot_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClassRangeRequest.ProtoReflect.Descriptor instead. 
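// Sketch of the consuming side: the responses oneof interleaves ContractRange
// chunks with a terminating Fin, and each chunk's range_proof is meant to be
// verified against the advertised contracts_root (verification elided here).
// `next` is a hypothetical stand-in for the p2p client's stream iterator, and
// the errors import is assumed.
func collectContractLeaves(next func() (*ContractRangeResponse, bool)) ([]*ContractState, error) {
	var leaves []*ContractState
	for {
		resp, ok := next()
		if !ok {
			return nil, errors.New("stream closed before Fin")
		}
		switch v := resp.GetResponses().(type) {
		case *ContractRangeResponse_Range:
			leaves = append(leaves, v.Range.GetState()...)
		case *ContractRangeResponse_Fin:
			return leaves, nil
		}
	}
}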
+func (*ClassRangeRequest) Descriptor() ([]byte, []int) { + return file_p2p_proto_snapshot_proto_rawDescGZIP(), []int{6} +} + +func (x *ClassRangeRequest) GetRoot() *Hash { + if x != nil { + return x.Root + } + return nil +} + +func (x *ClassRangeRequest) GetStart() *Hash { + if x != nil { + return x.Start + } + return nil +} + +func (x *ClassRangeRequest) GetEnd() *Hash { + if x != nil { + return x.End + } + return nil +} + +func (x *ClassRangeRequest) GetChunksPerProof() uint32 { + if x != nil { + return x.ChunksPerProof + } + return 0 +} + +type ClassRangeResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Root *Hash `protobuf:"bytes,1,opt,name=root,proto3,oneof" json:"root,omitempty"` // may not appear if Fin is sent to end the whole response + ContractsRoot *Hash `protobuf:"bytes,2,opt,name=contracts_root,json=contractsRoot,proto3,oneof" json:"contracts_root,omitempty"` // may not appear if Fin is sent to end the whole response + ClassesRoot *Hash `protobuf:"bytes,3,opt,name=classes_root,json=classesRoot,proto3,oneof" json:"classes_root,omitempty"` // may not appear if Fin is sent to end the whole response + // Types that are assignable to Responses: + // + // *ClassRangeResponse_Classes + // *ClassRangeResponse_Fin + Responses isClassRangeResponse_Responses `protobuf_oneof:"responses"` + RangeProof *PatriciaRangeProof `protobuf:"bytes,6,opt,name=range_proof,json=rangeProof,proto3" json:"range_proof,omitempty"` +} + +func (x *ClassRangeResponse) Reset() { + *x = ClassRangeResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_snapshot_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClassRangeResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClassRangeResponse) ProtoMessage() {} + +func (x *ClassRangeResponse) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_snapshot_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClassRangeResponse.ProtoReflect.Descriptor instead. 
+func (*ClassRangeResponse) Descriptor() ([]byte, []int) { + return file_p2p_proto_snapshot_proto_rawDescGZIP(), []int{7} +} + +func (x *ClassRangeResponse) GetRoot() *Hash { + if x != nil { + return x.Root + } + return nil +} + +func (x *ClassRangeResponse) GetContractsRoot() *Hash { + if x != nil { + return x.ContractsRoot + } + return nil +} + +func (x *ClassRangeResponse) GetClassesRoot() *Hash { + if x != nil { + return x.ClassesRoot + } + return nil +} + +func (m *ClassRangeResponse) GetResponses() isClassRangeResponse_Responses { + if m != nil { + return m.Responses + } + return nil +} + +func (x *ClassRangeResponse) GetClasses() *Classes { + if x, ok := x.GetResponses().(*ClassRangeResponse_Classes); ok { + return x.Classes + } + return nil +} + +func (x *ClassRangeResponse) GetFin() *Fin { + if x, ok := x.GetResponses().(*ClassRangeResponse_Fin); ok { + return x.Fin + } + return nil +} + +func (x *ClassRangeResponse) GetRangeProof() *PatriciaRangeProof { + if x != nil { + return x.RangeProof + } + return nil +} + +type isClassRangeResponse_Responses interface { + isClassRangeResponse_Responses() +} + +type ClassRangeResponse_Classes struct { + Classes *Classes `protobuf:"bytes,4,opt,name=classes,proto3,oneof"` +} + +type ClassRangeResponse_Fin struct { + Fin *Fin `protobuf:"bytes,5,opt,name=fin,proto3,oneof"` +} + +func (*ClassRangeResponse_Classes) isClassRangeResponse_Responses() {} + +func (*ClassRangeResponse_Fin) isClassRangeResponse_Responses() {} + +// A position in some contract's state tree is identified by the state tree's root and the key in it +type StorageLeafQuery struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ContractStorageRoot *Hash `protobuf:"bytes,1,opt,name=contract_storage_root,json=contractStorageRoot,proto3" json:"contract_storage_root,omitempty"` + Key *Felt252 `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` +} + +func (x *StorageLeafQuery) Reset() { + *x = StorageLeafQuery{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_snapshot_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StorageLeafQuery) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StorageLeafQuery) ProtoMessage() {} + +func (x *StorageLeafQuery) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_snapshot_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StorageLeafQuery.ProtoReflect.Descriptor instead. 
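// The class-range flow mirrors the contract one (the comment above already
// calls it a duplicate); an illustrative request differs only in taking
// class-hash bounds. Same Elements assumption as the earlier sketch.
func newClassRangeRequest(root, start, end []byte) *ClassRangeRequest {
	return &ClassRangeRequest{
		Root:           &Hash{Elements: root},
		Start:          &Hash{Elements: start},
		End:            &Hash{Elements: end},
		ChunksPerProof: 100,
	}
}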
+func (*StorageLeafQuery) Descriptor() ([]byte, []int) { + return file_p2p_proto_snapshot_proto_rawDescGZIP(), []int{8} +} + +func (x *StorageLeafQuery) GetContractStorageRoot() *Hash { + if x != nil { + return x.ContractStorageRoot + } + return nil +} + +func (x *StorageLeafQuery) GetKey() *Felt252 { + if x != nil { + return x.Key + } + return nil +} + +type StorageRangeQuery struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Address *Address `protobuf:"bytes,3,opt,name=address,proto3" json:"address,omitempty"` + Start *StorageLeafQuery `protobuf:"bytes,1,opt,name=start,proto3" json:"start,omitempty"` + End *StorageLeafQuery `protobuf:"bytes,2,opt,name=end,proto3" json:"end,omitempty"` +} + +func (x *StorageRangeQuery) Reset() { + *x = StorageRangeQuery{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_snapshot_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StorageRangeQuery) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StorageRangeQuery) ProtoMessage() {} + +func (x *StorageRangeQuery) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_snapshot_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StorageRangeQuery.ProtoReflect.Descriptor instead. +func (*StorageRangeQuery) Descriptor() ([]byte, []int) { + return file_p2p_proto_snapshot_proto_rawDescGZIP(), []int{9} +} + +func (x *StorageRangeQuery) GetAddress() *Address { + if x != nil { + return x.Address + } + return nil +} + +func (x *StorageRangeQuery) GetStart() *StorageLeafQuery { + if x != nil { + return x.Start + } + return nil +} + +func (x *StorageRangeQuery) GetEnd() *StorageLeafQuery { + if x != nil { + return x.End + } + return nil +} + +// result is (ContractStorageRange+, PatriciaRangeProof)* +type ContractStorageRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Domain uint32 `protobuf:"varint,1,opt,name=domain,proto3" json:"domain,omitempty"` // volition + StateRoot *Hash `protobuf:"bytes,2,opt,name=state_root,json=stateRoot,proto3" json:"state_root,omitempty"` + ChunksPerProof uint32 `protobuf:"varint,5,opt,name=chunks_per_proof,json=chunksPerProof,proto3" json:"chunks_per_proof,omitempty"` // how many ContractRange items to send before sending a proof + Query []*StorageRangeQuery `protobuf:"bytes,3,rep,name=query,proto3" json:"query,omitempty"` +} + +func (x *ContractStorageRequest) Reset() { + *x = ContractStorageRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_snapshot_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ContractStorageRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ContractStorageRequest) ProtoMessage() {} + +func (x *ContractStorageRequest) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_snapshot_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ContractStorageRequest.ProtoReflect.Descriptor instead. 
+func (*ContractStorageRequest) Descriptor() ([]byte, []int) { + return file_p2p_proto_snapshot_proto_rawDescGZIP(), []int{10} +} + +func (x *ContractStorageRequest) GetDomain() uint32 { + if x != nil { + return x.Domain + } + return 0 +} + +func (x *ContractStorageRequest) GetStateRoot() *Hash { + if x != nil { + return x.StateRoot + } + return nil +} + +func (x *ContractStorageRequest) GetChunksPerProof() uint32 { + if x != nil { + return x.ChunksPerProof + } + return 0 +} + +func (x *ContractStorageRequest) GetQuery() []*StorageRangeQuery { + if x != nil { + return x.Query + } + return nil +} + +type ContractStorage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + KeyValue []*ContractStoredValue `protobuf:"bytes,2,rep,name=keyValue,proto3" json:"keyValue,omitempty"` +} + +func (x *ContractStorage) Reset() { + *x = ContractStorage{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_snapshot_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ContractStorage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ContractStorage) ProtoMessage() {} + +func (x *ContractStorage) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_snapshot_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ContractStorage.ProtoReflect.Descriptor instead. +func (*ContractStorage) Descriptor() ([]byte, []int) { + return file_p2p_proto_snapshot_proto_rawDescGZIP(), []int{11} +} + +func (x *ContractStorage) GetKeyValue() []*ContractStoredValue { + if x != nil { + return x.KeyValue + } + return nil +} + +type ContractStorageResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + StateRoot *Hash `protobuf:"bytes,1,opt,name=state_root,json=stateRoot,proto3,oneof" json:"state_root,omitempty"` // may not appear if Fin is sent to end the whole response + ContractAddress *Address `protobuf:"bytes,2,opt,name=contract_address,json=contractAddress,proto3,oneof" json:"contract_address,omitempty"` + // Types that are assignable to Responses: + // + // *ContractStorageResponse_Storage + // *ContractStorageResponse_Fin + Responses isContractStorageResponse_Responses `protobuf_oneof:"responses"` + RangeProof *PatriciaRangeProof `protobuf:"bytes,5,opt,name=range_proof,json=rangeProof,proto3" json:"range_proof,omitempty"` +} + +func (x *ContractStorageResponse) Reset() { + *x = ContractStorageResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_snapshot_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ContractStorageResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ContractStorageResponse) ProtoMessage() {} + +func (x *ContractStorageResponse) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_snapshot_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ContractStorageResponse.ProtoReflect.Descriptor instead. 
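// Illustrative sketch: batching storage-range queries for several contracts
// into one ContractStorageRequest. Leaving Start and End unset is assumed to
// request the contract's full storage trie; the proto does not pin down that
// convention.
func newStorageRequest(stateRoot []byte, contracts [][]byte) *ContractStorageRequest {
	queries := make([]*StorageRangeQuery, 0, len(contracts))
	for _, addr := range contracts {
		queries = append(queries, &StorageRangeQuery{
			Address: &Address{Elements: addr},
		})
	}
	return &ContractStorageRequest{
		Domain:         0,
		StateRoot:      &Hash{Elements: stateRoot},
		ChunksPerProof: 100,
		Query:          queries,
	}
}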
+func (*ContractStorageResponse) Descriptor() ([]byte, []int) { + return file_p2p_proto_snapshot_proto_rawDescGZIP(), []int{12} +} + +func (x *ContractStorageResponse) GetStateRoot() *Hash { + if x != nil { + return x.StateRoot + } + return nil +} + +func (x *ContractStorageResponse) GetContractAddress() *Address { + if x != nil { + return x.ContractAddress + } + return nil +} + +func (m *ContractStorageResponse) GetResponses() isContractStorageResponse_Responses { + if m != nil { + return m.Responses + } + return nil +} + +func (x *ContractStorageResponse) GetStorage() *ContractStorage { + if x, ok := x.GetResponses().(*ContractStorageResponse_Storage); ok { + return x.Storage + } + return nil +} + +func (x *ContractStorageResponse) GetFin() *Fin { + if x, ok := x.GetResponses().(*ContractStorageResponse_Fin); ok { + return x.Fin + } + return nil +} + +func (x *ContractStorageResponse) GetRangeProof() *PatriciaRangeProof { + if x != nil { + return x.RangeProof + } + return nil +} + +type isContractStorageResponse_Responses interface { + isContractStorageResponse_Responses() +} + +type ContractStorageResponse_Storage struct { + Storage *ContractStorage `protobuf:"bytes,3,opt,name=storage,proto3,oneof"` +} + +type ContractStorageResponse_Fin struct { + Fin *Fin `protobuf:"bytes,4,opt,name=fin,proto3,oneof"` +} + +func (*ContractStorageResponse_Storage) isContractStorageResponse_Responses() {} + +func (*ContractStorageResponse_Fin) isContractStorageResponse_Responses() {} + +type PatriciaNode_Edge struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Length uint32 `protobuf:"varint,1,opt,name=length,proto3" json:"length,omitempty"` + Path *Felt252 `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"` // as bits of left/right + Value *Felt252 `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *PatriciaNode_Edge) Reset() { + *x = PatriciaNode_Edge{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_snapshot_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PatriciaNode_Edge) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PatriciaNode_Edge) ProtoMessage() {} + +func (x *PatriciaNode_Edge) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_snapshot_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PatriciaNode_Edge.ProtoReflect.Descriptor instead. 
+func (*PatriciaNode_Edge) Descriptor() ([]byte, []int) { + return file_p2p_proto_snapshot_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *PatriciaNode_Edge) GetLength() uint32 { + if x != nil { + return x.Length + } + return 0 +} + +func (x *PatriciaNode_Edge) GetPath() *Felt252 { + if x != nil { + return x.Path + } + return nil +} + +func (x *PatriciaNode_Edge) GetValue() *Felt252 { + if x != nil { + return x.Value + } + return nil +} + +type PatriciaNode_Binary struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Left *Felt252 `protobuf:"bytes,1,opt,name=left,proto3" json:"left,omitempty"` + Right *Felt252 `protobuf:"bytes,2,opt,name=right,proto3" json:"right,omitempty"` +} + +func (x *PatriciaNode_Binary) Reset() { + *x = PatriciaNode_Binary{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_snapshot_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PatriciaNode_Binary) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PatriciaNode_Binary) ProtoMessage() {} + +func (x *PatriciaNode_Binary) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_snapshot_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PatriciaNode_Binary.ProtoReflect.Descriptor instead. +func (*PatriciaNode_Binary) Descriptor() ([]byte, []int) { + return file_p2p_proto_snapshot_proto_rawDescGZIP(), []int{0, 1} +} + +func (x *PatriciaNode_Binary) GetLeft() *Felt252 { + if x != nil { + return x.Left + } + return nil +} + +func (x *PatriciaNode_Binary) GetRight() *Felt252 { + if x != nil { + return x.Right + } + return nil +} + +var File_p2p_proto_snapshot_proto protoreflect.FileDescriptor + +var file_p2p_proto_snapshot_proto_rawDesc = []byte{ + 0x0a, 0x18, 0x70, 0x32, 0x70, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x73, 0x6e, 0x61, 0x70, + 0x73, 0x68, 0x6f, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x16, 0x70, 0x32, 0x70, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x15, 0x70, 0x32, 0x70, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x73, 0x74, + 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x15, 0x70, 0x32, 0x70, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0x96, 0x02, 0x0a, 0x0c, 0x50, 0x61, 0x74, 0x72, 0x69, 0x63, 0x69, 0x61, 0x4e, 0x6f, 0x64, + 0x65, 0x12, 0x28, 0x0a, 0x04, 0x65, 0x64, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x12, 0x2e, 0x50, 0x61, 0x74, 0x72, 0x69, 0x63, 0x69, 0x61, 0x4e, 0x6f, 0x64, 0x65, 0x2e, 0x45, + 0x64, 0x67, 0x65, 0x48, 0x00, 0x52, 0x04, 0x65, 0x64, 0x67, 0x65, 0x12, 0x2e, 0x0a, 0x06, 0x62, + 0x69, 0x6e, 0x61, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x50, 0x61, + 0x74, 0x72, 0x69, 0x63, 0x69, 0x61, 0x4e, 0x6f, 0x64, 0x65, 0x2e, 0x42, 0x69, 0x6e, 0x61, 0x72, + 0x79, 0x48, 0x00, 0x52, 0x06, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x1a, 0x5c, 0x0a, 0x04, 0x45, + 0x64, 0x67, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x1c, 0x0a, 0x04, 0x70, + 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x08, 0x2e, 
0x46, 0x65, 0x6c, 0x74, + 0x32, 0x35, 0x32, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1e, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x08, 0x2e, 0x46, 0x65, 0x6c, 0x74, 0x32, + 0x35, 0x32, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x46, 0x0a, 0x06, 0x42, 0x69, 0x6e, + 0x61, 0x72, 0x79, 0x12, 0x1c, 0x0a, 0x04, 0x6c, 0x65, 0x66, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x08, 0x2e, 0x46, 0x65, 0x6c, 0x74, 0x32, 0x35, 0x32, 0x52, 0x04, 0x6c, 0x65, 0x66, + 0x74, 0x12, 0x1e, 0x0a, 0x05, 0x72, 0x69, 0x67, 0x68, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x08, 0x2e, 0x46, 0x65, 0x6c, 0x74, 0x32, 0x35, 0x32, 0x52, 0x05, 0x72, 0x69, 0x67, 0x68, + 0x74, 0x42, 0x06, 0x0a, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x22, 0x39, 0x0a, 0x12, 0x50, 0x61, 0x74, + 0x72, 0x69, 0x63, 0x69, 0x61, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, + 0x23, 0x0a, 0x05, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0d, + 0x2e, 0x50, 0x61, 0x74, 0x72, 0x69, 0x63, 0x69, 0x61, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x05, 0x6e, + 0x6f, 0x64, 0x65, 0x73, 0x22, 0x87, 0x01, 0x0a, 0x0d, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, + 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x22, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x08, 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x1b, 0x0a, 0x05, 0x63, 0x6c, + 0x61, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x05, 0x2e, 0x48, 0x61, 0x73, 0x68, + 0x52, 0x05, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x1f, 0x0a, 0x07, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x05, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x52, + 0x07, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x6f, 0x6e, 0x63, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x22, 0xba, + 0x01, 0x0a, 0x14, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, + 0x24, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x05, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x52, 0x09, 0x73, 0x74, 0x61, 0x74, + 0x65, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x1e, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x08, 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x05, + 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x1a, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x08, 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x03, 0x65, 0x6e, + 0x64, 0x12, 0x28, 0x0a, 0x10, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, + 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0e, 0x63, 0x68, 0x75, + 0x6e, 0x6b, 0x73, 0x50, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x22, 0x35, 0x0a, 0x0d, 0x43, + 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x24, 0x0a, 0x05, + 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x43, 0x6f, + 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, + 0x74, 0x65, 0x22, 0xcb, 0x02, 0x0a, 0x15, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x52, + 
0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1e, 0x0a, 0x04, + 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x05, 0x2e, 0x48, 0x61, 0x73, + 0x68, 0x48, 0x01, 0x52, 0x04, 0x72, 0x6f, 0x6f, 0x74, 0x88, 0x01, 0x01, 0x12, 0x31, 0x0a, 0x0e, + 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x73, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x05, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x48, 0x02, 0x52, 0x0d, 0x63, + 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x73, 0x52, 0x6f, 0x6f, 0x74, 0x88, 0x01, 0x01, 0x12, + 0x2d, 0x0a, 0x0c, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x65, 0x73, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x05, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x48, 0x03, 0x52, 0x0b, + 0x63, 0x6c, 0x61, 0x73, 0x73, 0x65, 0x73, 0x52, 0x6f, 0x6f, 0x74, 0x88, 0x01, 0x01, 0x12, 0x26, + 0x0a, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, + 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, 0x00, 0x52, + 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x18, 0x0a, 0x03, 0x66, 0x69, 0x6e, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x04, 0x2e, 0x46, 0x69, 0x6e, 0x48, 0x00, 0x52, 0x03, 0x66, 0x69, 0x6e, + 0x12, 0x34, 0x0a, 0x0b, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x50, 0x61, 0x74, 0x72, 0x69, 0x63, 0x69, 0x61, + 0x52, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x0a, 0x72, 0x61, 0x6e, 0x67, + 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x42, 0x0b, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x73, 0x42, 0x07, 0x0a, 0x05, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x42, 0x11, 0x0a, 0x0f, + 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x73, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x42, + 0x0f, 0x0a, 0x0d, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x65, 0x73, 0x5f, 0x72, 0x6f, 0x6f, 0x74, + 0x22, 0x8e, 0x01, 0x0a, 0x11, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x04, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x05, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x52, 0x04, 0x72, 0x6f, 0x6f, + 0x74, 0x12, 0x1b, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x05, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x17, + 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x05, 0x2e, 0x48, 0x61, + 0x73, 0x68, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x28, 0x0a, 0x10, 0x63, 0x68, 0x75, 0x6e, 0x6b, + 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x0e, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x73, 0x50, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x6f, + 0x66, 0x22, 0xc6, 0x02, 0x0a, 0x12, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x52, 0x61, 0x6e, 0x67, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1e, 0x0a, 0x04, 0x72, 0x6f, 0x6f, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x05, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x48, 0x01, 0x52, + 0x04, 0x72, 0x6f, 0x6f, 0x74, 0x88, 0x01, 0x01, 0x12, 0x31, 0x0a, 0x0e, 0x63, 0x6f, 0x6e, 0x74, + 0x72, 0x61, 0x63, 0x74, 0x73, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x05, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x48, 0x02, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x72, + 0x61, 0x63, 0x74, 0x73, 0x52, 0x6f, 0x6f, 0x74, 0x88, 0x01, 0x01, 0x12, 0x2d, 0x0a, 0x0c, 0x63, + 0x6c, 0x61, 0x73, 0x73, 
0x65, 0x73, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x05, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x48, 0x03, 0x52, 0x0b, 0x63, 0x6c, 0x61, 0x73, + 0x73, 0x65, 0x73, 0x52, 0x6f, 0x6f, 0x74, 0x88, 0x01, 0x01, 0x12, 0x24, 0x0a, 0x07, 0x63, 0x6c, + 0x61, 0x73, 0x73, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x08, 0x2e, 0x43, 0x6c, + 0x61, 0x73, 0x73, 0x65, 0x73, 0x48, 0x00, 0x52, 0x07, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x65, 0x73, + 0x12, 0x18, 0x0a, 0x03, 0x66, 0x69, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x04, 0x2e, + 0x46, 0x69, 0x6e, 0x48, 0x00, 0x52, 0x03, 0x66, 0x69, 0x6e, 0x12, 0x34, 0x0a, 0x0b, 0x72, 0x61, + 0x6e, 0x67, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x13, 0x2e, 0x50, 0x61, 0x74, 0x72, 0x69, 0x63, 0x69, 0x61, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x50, + 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x0a, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, + 0x42, 0x0b, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x42, 0x07, 0x0a, + 0x05, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x42, 0x11, 0x0a, 0x0f, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, + 0x61, 0x63, 0x74, 0x73, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x42, 0x0f, 0x0a, 0x0d, 0x5f, 0x63, 0x6c, + 0x61, 0x73, 0x73, 0x65, 0x73, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x22, 0x69, 0x0a, 0x10, 0x53, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4c, 0x65, 0x61, 0x66, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x39, + 0x0a, 0x15, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x05, 0x2e, + 0x48, 0x61, 0x73, 0x68, 0x52, 0x13, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x53, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x1a, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x08, 0x2e, 0x46, 0x65, 0x6c, 0x74, 0x32, 0x35, 0x32, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x85, 0x01, 0x0a, 0x11, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x22, 0x0a, 0x07, 0x61, + 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x08, 0x2e, 0x41, + 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, + 0x27, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, + 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4c, 0x65, 0x61, 0x66, 0x51, 0x75, 0x65, 0x72, + 0x79, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x23, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4c, + 0x65, 0x61, 0x66, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0xaa, 0x01, + 0x0a, 0x16, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, + 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, + 0x12, 0x24, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x05, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x52, 0x09, 0x73, 0x74, 0x61, + 0x74, 0x65, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x73, + 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x0e, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x73, 
0x50, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x6f, 0x66, + 0x12, 0x28, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x12, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x51, 0x75, + 0x65, 0x72, 0x79, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x22, 0x43, 0x0a, 0x0f, 0x43, 0x6f, + 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, + 0x08, 0x6b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x14, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x64, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x22, + 0xad, 0x02, 0x0a, 0x17, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x53, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x29, 0x0a, 0x0a, 0x73, + 0x74, 0x61, 0x74, 0x65, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x05, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x48, 0x01, 0x52, 0x09, 0x73, 0x74, 0x61, 0x74, 0x65, 0x52, + 0x6f, 0x6f, 0x74, 0x88, 0x01, 0x01, 0x12, 0x38, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, + 0x63, 0x74, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x08, 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x48, 0x02, 0x52, 0x0f, 0x63, 0x6f, + 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x88, 0x01, 0x01, + 0x12, 0x2c, 0x0a, 0x07, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x10, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x53, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x07, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x18, + 0x0a, 0x03, 0x66, 0x69, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x04, 0x2e, 0x46, 0x69, + 0x6e, 0x48, 0x00, 0x52, 0x03, 0x66, 0x69, 0x6e, 0x12, 0x34, 0x0a, 0x0b, 0x72, 0x61, 0x6e, 0x67, + 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, + 0x50, 0x61, 0x74, 0x72, 0x69, 0x63, 0x69, 0x61, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, + 0x6f, 0x66, 0x52, 0x0a, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x42, 0x0b, + 0x0a, 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x42, 0x0d, 0x0a, 0x0b, 0x5f, + 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x63, + 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_p2p_proto_snapshot_proto_rawDescOnce sync.Once + file_p2p_proto_snapshot_proto_rawDescData = file_p2p_proto_snapshot_proto_rawDesc +) + +func file_p2p_proto_snapshot_proto_rawDescGZIP() []byte { + file_p2p_proto_snapshot_proto_rawDescOnce.Do(func() { + file_p2p_proto_snapshot_proto_rawDescData = protoimpl.X.CompressGZIP(file_p2p_proto_snapshot_proto_rawDescData) + }) + return file_p2p_proto_snapshot_proto_rawDescData +} + +var file_p2p_proto_snapshot_proto_msgTypes = make([]protoimpl.MessageInfo, 15) +var file_p2p_proto_snapshot_proto_goTypes = []any{ + (*PatriciaNode)(nil), // 0: PatriciaNode + (*PatriciaRangeProof)(nil), // 1: PatriciaRangeProof + (*ContractState)(nil), // 2: ContractState + (*ContractRangeRequest)(nil), // 3: ContractRangeRequest + (*ContractRange)(nil), // 4: ContractRange + (*ContractRangeResponse)(nil), // 5: ContractRangeResponse + (*ClassRangeRequest)(nil), 
// 6: ClassRangeRequest + (*ClassRangeResponse)(nil), // 7: ClassRangeResponse + (*StorageLeafQuery)(nil), // 8: StorageLeafQuery + (*StorageRangeQuery)(nil), // 9: StorageRangeQuery + (*ContractStorageRequest)(nil), // 10: ContractStorageRequest + (*ContractStorage)(nil), // 11: ContractStorage + (*ContractStorageResponse)(nil), // 12: ContractStorageResponse + (*PatriciaNode_Edge)(nil), // 13: PatriciaNode.Edge + (*PatriciaNode_Binary)(nil), // 14: PatriciaNode.Binary + (*Address)(nil), // 15: Address + (*Hash)(nil), // 16: Hash + (*Fin)(nil), // 17: Fin + (*Classes)(nil), // 18: Classes + (*Felt252)(nil), // 19: Felt252 + (*ContractStoredValue)(nil), // 20: ContractStoredValue +} +var file_p2p_proto_snapshot_proto_depIdxs = []int32{ + 13, // 0: PatriciaNode.edge:type_name -> PatriciaNode.Edge + 14, // 1: PatriciaNode.binary:type_name -> PatriciaNode.Binary + 0, // 2: PatriciaRangeProof.nodes:type_name -> PatriciaNode + 15, // 3: ContractState.address:type_name -> Address + 16, // 4: ContractState.class:type_name -> Hash + 16, // 5: ContractState.storage:type_name -> Hash + 16, // 6: ContractRangeRequest.state_root:type_name -> Hash + 15, // 7: ContractRangeRequest.start:type_name -> Address + 15, // 8: ContractRangeRequest.end:type_name -> Address + 2, // 9: ContractRange.state:type_name -> ContractState + 16, // 10: ContractRangeResponse.root:type_name -> Hash + 16, // 11: ContractRangeResponse.contracts_root:type_name -> Hash + 16, // 12: ContractRangeResponse.classes_root:type_name -> Hash + 4, // 13: ContractRangeResponse.range:type_name -> ContractRange + 17, // 14: ContractRangeResponse.fin:type_name -> Fin + 1, // 15: ContractRangeResponse.range_proof:type_name -> PatriciaRangeProof + 16, // 16: ClassRangeRequest.root:type_name -> Hash + 16, // 17: ClassRangeRequest.start:type_name -> Hash + 16, // 18: ClassRangeRequest.end:type_name -> Hash + 16, // 19: ClassRangeResponse.root:type_name -> Hash + 16, // 20: ClassRangeResponse.contracts_root:type_name -> Hash + 16, // 21: ClassRangeResponse.classes_root:type_name -> Hash + 18, // 22: ClassRangeResponse.classes:type_name -> Classes + 17, // 23: ClassRangeResponse.fin:type_name -> Fin + 1, // 24: ClassRangeResponse.range_proof:type_name -> PatriciaRangeProof + 16, // 25: StorageLeafQuery.contract_storage_root:type_name -> Hash + 19, // 26: StorageLeafQuery.key:type_name -> Felt252 + 15, // 27: StorageRangeQuery.address:type_name -> Address + 8, // 28: StorageRangeQuery.start:type_name -> StorageLeafQuery + 8, // 29: StorageRangeQuery.end:type_name -> StorageLeafQuery + 16, // 30: ContractStorageRequest.state_root:type_name -> Hash + 9, // 31: ContractStorageRequest.query:type_name -> StorageRangeQuery + 20, // 32: ContractStorage.keyValue:type_name -> ContractStoredValue + 16, // 33: ContractStorageResponse.state_root:type_name -> Hash + 15, // 34: ContractStorageResponse.contract_address:type_name -> Address + 11, // 35: ContractStorageResponse.storage:type_name -> ContractStorage + 17, // 36: ContractStorageResponse.fin:type_name -> Fin + 1, // 37: ContractStorageResponse.range_proof:type_name -> PatriciaRangeProof + 19, // 38: PatriciaNode.Edge.path:type_name -> Felt252 + 19, // 39: PatriciaNode.Edge.value:type_name -> Felt252 + 19, // 40: PatriciaNode.Binary.left:type_name -> Felt252 + 19, // 41: PatriciaNode.Binary.right:type_name -> Felt252 + 42, // [42:42] is the sub-list for method output_type + 42, // [42:42] is the sub-list for method input_type + 42, // [42:42] is the sub-list for extension type_name + 42, // [42:42] is 
the sub-list for extension extendee + 0, // [0:42] is the sub-list for field type_name +} + +func init() { file_p2p_proto_snapshot_proto_init() } +func file_p2p_proto_snapshot_proto_init() { + if File_p2p_proto_snapshot_proto != nil { + return + } + file_p2p_proto_common_proto_init() + file_p2p_proto_state_proto_init() + file_p2p_proto_class_proto_init() + if !protoimpl.UnsafeEnabled { + file_p2p_proto_snapshot_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*PatriciaNode); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_proto_snapshot_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*PatriciaRangeProof); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_proto_snapshot_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*ContractState); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_proto_snapshot_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*ContractRangeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_proto_snapshot_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*ContractRange); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_proto_snapshot_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*ContractRangeResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_proto_snapshot_proto_msgTypes[6].Exporter = func(v any, i int) any { + switch v := v.(*ClassRangeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_proto_snapshot_proto_msgTypes[7].Exporter = func(v any, i int) any { + switch v := v.(*ClassRangeResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_proto_snapshot_proto_msgTypes[8].Exporter = func(v any, i int) any { + switch v := v.(*StorageLeafQuery); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_proto_snapshot_proto_msgTypes[9].Exporter = func(v any, i int) any { + switch v := v.(*StorageRangeQuery); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_proto_snapshot_proto_msgTypes[10].Exporter = func(v any, i int) any { + switch v := v.(*ContractStorageRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_proto_snapshot_proto_msgTypes[11].Exporter = func(v any, i int) any { + switch v := v.(*ContractStorage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_proto_snapshot_proto_msgTypes[12].Exporter = func(v any, i int) any { + switch v := v.(*ContractStorageResponse); i { + case 0: + return &v.state + case 
1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_proto_snapshot_proto_msgTypes[13].Exporter = func(v any, i int) any { + switch v := v.(*PatriciaNode_Edge); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_proto_snapshot_proto_msgTypes[14].Exporter = func(v any, i int) any { + switch v := v.(*PatriciaNode_Binary); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_p2p_proto_snapshot_proto_msgTypes[0].OneofWrappers = []any{ + (*PatriciaNode_Edge_)(nil), + (*PatriciaNode_Binary_)(nil), + } + file_p2p_proto_snapshot_proto_msgTypes[5].OneofWrappers = []any{ + (*ContractRangeResponse_Range)(nil), + (*ContractRangeResponse_Fin)(nil), + } + file_p2p_proto_snapshot_proto_msgTypes[7].OneofWrappers = []any{ + (*ClassRangeResponse_Classes)(nil), + (*ClassRangeResponse_Fin)(nil), + } + file_p2p_proto_snapshot_proto_msgTypes[12].OneofWrappers = []any{ + (*ContractStorageResponse_Storage)(nil), + (*ContractStorageResponse_Fin)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_p2p_proto_snapshot_proto_rawDesc, + NumEnums: 0, + NumMessages: 15, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_p2p_proto_snapshot_proto_goTypes, + DependencyIndexes: file_p2p_proto_snapshot_proto_depIdxs, + MessageInfos: file_p2p_proto_snapshot_proto_msgTypes, + }.Build() + File_p2p_proto_snapshot_proto = out.File + file_p2p_proto_snapshot_proto_rawDesc = nil + file_p2p_proto_snapshot_proto_goTypes = nil + file_p2p_proto_snapshot_proto_depIdxs = nil +} diff --git a/p2p/starknet/spec/state.pb.go b/p2p/starknet/spec/state.pb.go index c230a042b3..a165d3d680 100644 --- a/p2p/starknet/spec/state.pb.go +++ b/p2p/starknet/spec/state.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.34.2 -// protoc v5.27.1 +// protoc v3.17.3 // source: p2p/proto/state.proto package spec diff --git a/p2p/starknet/spec/transaction.pb.go b/p2p/starknet/spec/transaction.pb.go index ecd18338b3..3aa5519f2a 100644 --- a/p2p/starknet/spec/transaction.pb.go +++ b/p2p/starknet/spec/transaction.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.34.2 -// protoc v5.27.1 +// protoc v3.17.3 // source: p2p/proto/transaction.proto package spec diff --git a/p2p/starknetdata.go b/p2p/starknetdata.go new file mode 100644 index 0000000000..4af4717827 --- /dev/null +++ b/p2p/starknetdata.go @@ -0,0 +1,66 @@ +package p2p + +import ( + "context" + + "github.com/NethermindEth/juno/core" + "github.com/NethermindEth/juno/core/felt" + "github.com/NethermindEth/juno/starknetdata" + "github.com/ethereum/go-ethereum/log" +) + +type MockStarkData struct{} + +var _ starknetdata.StarknetData = (*MockStarkData)(nil) + +func (m MockStarkData) BlockByNumber(ctx context.Context, blockNumber uint64) (*core.Block, error) { + log.Info("BlockByNumber", "blockNumber", blockNumber) + return m.BlockLatest(ctx) +} + +func (m MockStarkData) BlockLatest(ctx context.Context) (*core.Block, error) { + // This is the snapshot we have available + root, _ := (&felt.Felt{}).SetString("0x472e84b65d387c9364b5117f4afaba3fb88897db1f28867b398506e2af89f25") + + return &core.Block{ + Header: &core.Header{ + Number: uint64(66477), //nolint:mnd // TODO: remove this; only used temporarily for snap sync + GlobalStateRoot: root, + }, + }, nil +} + +func (m MockStarkData) BlockPending(ctx context.Context) (*core.Block, error) { + // TODO implement me + panic("implement me") +} + +func (m MockStarkData) Transaction(ctx context.Context, transactionHash *felt.Felt) (core.Transaction, error) { + // TODO implement me + panic("implement me") +} + +func (m MockStarkData) Class(ctx context.Context, classHash *felt.Felt) (core.Class, error) { + // TODO implement me + panic("implement me") +} + +func (m MockStarkData) StateUpdate(ctx context.Context, blockNumber uint64) (*core.StateUpdate, error) { + // TODO implement me + panic("implement me") +} + +func (m MockStarkData) StateUpdatePending(ctx context.Context) (*core.StateUpdate, error) { + // TODO implement me + panic("implement me") +} + +func (m MockStarkData) StateUpdateWithBlock(ctx context.Context, blockNumber uint64) (*core.StateUpdate, *core.Block, error) { + // TODO implement me + panic("implement me") +} + +func (m MockStarkData) StateUpdatePendingWithBlock(ctx context.Context) (*core.StateUpdate, *core.Block, error) { + // TODO implement me + panic("implement me") +} diff --git a/p2p/sync.go b/p2p/sync.go index 2b2c79b0e6..c0a1543251 100644 --- a/p2p/sync.go +++ b/p2p/sync.go @@ -26,7 +26,7 @@ import ( "go.uber.org/zap" ) -type syncService struct { +type SyncService struct { host host.Host network *utils.Network client *starknet.Client // todo: merge all the functionality of Client with p2p SyncService @@ -36,22 +36,28 @@ log utils.SimpleLogger } -func newSyncService(bc *blockchain.Blockchain, h host.Host, n *utils.Network, log utils.SimpleLogger) *syncService { - return &syncService{ +func newSyncService(bc *blockchain.Blockchain, h host.Host, n *utils.Network, log utils.SimpleLogger) *SyncService { + s := &SyncService{ host: h, network: n, blockchain: bc, log: log, listener: &junoSync.SelectiveListener{}, } + + s.client = starknet.NewClient(s.randomPeerStream, s.network, s.log) + + return s } -func (s *syncService) start(ctx context.Context) { +func (s *SyncService) Client() *starknet.Client { + return s.client +} + +func (s *SyncService) Start(ctx context.Context) { ctx, cancel := context.WithCancel(ctx) defer cancel() - s.client = starknet.NewClient(s.randomPeerStream, s.network, s.log) - for i := 0; ; i++ { if err := ctx.Err(); err != nil { break @@ -80,7 +86,7 @@ func (s *syncService)
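// Sketch of how the mock above is meant to be wired in: MockStarkData satisfies
// starknetdata.StarknetData (see the var assertion in starknetdata.go), so it
// can stand in for the feeder gateway when constructing a synchronizer. The
// call shape mirrors the sync.New calls in the tests later in this diff; the
// *Synchronizer return type and the junoSync alias are assumptions based on
// how this package already uses the sync package.
func newMockSynchronizer(chain *blockchain.Blockchain) *junoSync.Synchronizer {
	// 0 disables pending-block polling; false keeps the blockchain writable
	return junoSync.New(chain, MockStarkData{}, utils.NewNopZapLogger(), 0, false)
}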
start(ctx context.Context) { } } -func (s *syncService) getNextHeight() (int, error) { +func (s *SyncService) getNextHeight() (int, error) { curHeight, err := s.blockchain.Height() if err == nil { return int(curHeight) + 1, nil @@ -90,7 +96,7 @@ func (s *syncService) getNextHeight() (int, error) { return 0, err } -func (s *syncService) processBlock(ctx context.Context, blockNumber uint64) error { +func (s *SyncService) processBlock(ctx context.Context, blockNumber uint64) error { headersAndSigsCh, err := s.genHeadersAndSigs(ctx, blockNumber) if err != nil { return fmt.Errorf("failed to get block headers parts: %w", err) @@ -145,7 +151,7 @@ func specBlockPartsFunc[T specBlockHeaderAndSigs | specTxWithReceipts | specEven return specBlockParts(i) } -func (s *syncService) logError(msg string, err error) { +func (s *SyncService) logError(msg string, err error) { if !errors.Is(err, context.Canceled) { var log utils.SimpleLogger if v, ok := s.log.(*utils.ZapLogger); ok { @@ -171,7 +177,7 @@ type blockBody struct { } //nolint:gocyclo -func (s *syncService) processSpecBlockParts( +func (s *SyncService) processSpecBlockParts( ctx context.Context, startingBlockNum uint64, specBlockPartsCh <-chan specBlockParts, ) <-chan <-chan blockBody { orderedBlockBodiesCh := make(chan (<-chan blockBody)) @@ -264,7 +270,7 @@ func (s *syncService) processSpecBlockParts( } //nolint:gocyclo -func (s *syncService) adaptAndSanityCheckBlock(ctx context.Context, header *spec.SignedBlockHeader, contractDiffs []*spec.ContractDiff, +func (s *SyncService) adaptAndSanityCheckBlock(ctx context.Context, header *spec.SignedBlockHeader, contractDiffs []*spec.ContractDiff, classes []*spec.Class, txs []*spec.Transaction, receipts []*spec.Receipt, events []*spec.Event, prevBlockRoot *felt.Felt, ) <-chan blockBody { bodyCh := make(chan blockBody) @@ -393,7 +399,7 @@ func (s specBlockHeaderAndSigs) blockNumber() uint64 { return s.header.Number } -func (s *syncService) genHeadersAndSigs(ctx context.Context, blockNumber uint64) (<-chan specBlockHeaderAndSigs, error) { +func (s *SyncService) genHeadersAndSigs(ctx context.Context, blockNumber uint64) (<-chan specBlockHeaderAndSigs, error) { it := s.createIteratorForBlock(blockNumber) headersIt, err := s.client.RequestBlockHeaders(ctx, &spec.BlockHeadersRequest{Iteration: it}) if err != nil { @@ -437,7 +443,7 @@ func (s specClasses) blockNumber() uint64 { return s.number } -func (s *syncService) genClasses(ctx context.Context, blockNumber uint64) (<-chan specClasses, error) { +func (s *SyncService) genClasses(ctx context.Context, blockNumber uint64) (<-chan specClasses, error) { it := s.createIteratorForBlock(blockNumber) classesIt, err := s.client.RequestClasses(ctx, &spec.ClassesRequest{Iteration: it}) if err != nil { @@ -483,7 +489,7 @@ func (s specContractDiffs) blockNumber() uint64 { return s.number } -func (s *syncService) genStateDiffs(ctx context.Context, blockNumber uint64) (<-chan specContractDiffs, error) { +func (s *SyncService) genStateDiffs(ctx context.Context, blockNumber uint64) (<-chan specContractDiffs, error) { it := s.createIteratorForBlock(blockNumber) stateDiffsIt, err := s.client.RequestStateDiffs(ctx, &spec.StateDiffsRequest{Iteration: it}) if err != nil { @@ -531,7 +537,7 @@ func (s specEvents) blockNumber() uint64 { return s.number } -func (s *syncService) genEvents(ctx context.Context, blockNumber uint64) (<-chan specEvents, error) { +func (s *SyncService) genEvents(ctx context.Context, blockNumber uint64) (<-chan specEvents, error) { it := 
diff --git a/rpc/events_test.go b/rpc/events_test.go
index 7f8b483987..8eb54a3f4e 100644
--- a/rpc/events_test.go
+++ b/rpc/events_test.go
@@ -28,6 +28,7 @@ func TestEvents(t *testing.T) {
 	testDB := pebble.NewMemTest(t)
 	n := utils.Ptr(utils.Sepolia)
 	chain := blockchain.New(testDB, n)
+	defer chain.Close()
 
 	client := feeder.NewTestClient(t, n)
 	gw := adaptfeeder.New(client)
@@ -238,6 +239,7 @@ func TestSubscribeNewHeadsAndUnsubscribe(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	t.Cleanup(cancel)
 	chain := blockchain.New(pebble.NewMemTest(t), n)
+	defer chain.Close()
 	syncer := sync.New(chain, gw, log, 0, false)
 	handler := rpc.New(chain, syncer, nil, "", log)
 
@@ -319,6 +321,7 @@ func TestMultipleSubscribeNewHeadsAndUnsubscribe(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	t.Cleanup(cancel)
 	chain := blockchain.New(pebble.NewMemTest(t), n)
+	defer chain.Close()
 	syncer := sync.New(chain, gw, log, 0, false)
 	handler := rpc.New(chain, syncer, nil, "", log)
 	go func() {
diff --git a/sync/sync_test.go b/sync/sync_test.go
index 4f1d8a096a..f579840843 100644
--- a/sync/sync_test.go
+++ b/sync/sync_test.go
@@ -49,6 +49,7 @@ func TestSyncBlocks(t *testing.T) {
 				assert.Equal(t, b, block)
 				height--
 			}
+			bc.Close()
 			return nil
 		}())
 	}
@@ -147,6 +148,7 @@ func TestReorg(t *testing.T) {
 
 	// sync to integration for 2 blocks
 	bc := blockchain.New(testDB, &utils.Integration)
+	defer bc.Close()
 	synchronizer := sync.New(bc, integGw, utils.NewNopZapLogger(), 0, false)
 
 	ctx, cancel := context.WithTimeout(context.Background(), timeout)
@@ -155,6 +157,7 @@ func TestReorg(t *testing.T) {
 
 	t.Run("resync to mainnet with the same db", func(t *testing.T) {
 		bc := blockchain.New(testDB, &utils.Mainnet)
+		defer bc.Close()
 
 		// Ensure current head is Integration head
 		head, err := bc.HeadsHeader()
@@ -182,6 +185,7 @@ func TestPending(t *testing.T) {
 	testDB := pebble.NewMemTest(t)
 	log := utils.NewNopZapLogger()
 	bc := blockchain.New(testDB, &utils.Mainnet)
+	defer bc.Close()
 	synchronizer := sync.New(bc, gw, log, time.Millisecond*100, false)
 
 	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
@@ -201,6 +205,7 @@ func TestSubscribeNewHeads(t *testing.T) {
 	log := utils.NewNopZapLogger()
 	integration := utils.Integration
 	chain := blockchain.New(testDB, &integration)
+	defer chain.Close()
 	integrationClient := feeder.NewTestClient(t, &integration)
 	gw := adaptfeeder.New(integrationClient)
 	syncer := sync.New(chain, gw, log, 0, false)
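Note: the test edits all follow one pattern: every blockchain.New is now paired with a Close, deferred for the lifetime of the test or called explicitly before the same DB handle is reused, so whatever the chain now holds (e.g. the snapshot records added in blockchain.go) is released deterministically. A hypothetical test showing both variants of the pattern:

```go
package blockchain_test

import (
	"testing"

	"github.com/NethermindEth/juno/blockchain"
	"github.com/NethermindEth/juno/db/pebble"
	"github.com/NethermindEth/juno/utils"
)

// TestChainCloseLifecycle is illustrative only: close explicitly before
// reopening on the same database, defer-close the final instance.
func TestChainCloseLifecycle(t *testing.T) {
	testDB := pebble.NewMemTest(t)

	chain := blockchain.New(testDB, &utils.Mainnet)
	// ... exercise chain ...
	chain.Close() // explicit: the next New reuses testDB

	chain = blockchain.New(testDB, &utils.Mainnet)
	defer chain.Close()
}
```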
diff --git a/utils/log.go b/utils/log.go
index 3d88ef3cbd..6f0d02bf72 100644
--- a/utils/log.go
+++ b/utils/log.go
@@ -121,7 +121,10 @@ func (l *ZapLogger) Tracew(msg string, keysAndValues ...interface{}) {
 	}
 }
 
-var _ Logger = (*ZapLogger)(nil)
+var (
+	_ Logger       = (*ZapLogger)(nil)
+	_ SimpleLogger = (*ZapLogger)(nil)
+)
 
 func NewNopZapLogger() *ZapLogger {
 	return &ZapLogger{zap.NewNop().Sugar()}
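Note: the widened var block is the standard compile-time assertion idiom: assigning a typed nil pointer to a blank interface-typed variable forces the compiler to check the method set, so ZapLogger can no longer silently drift away from SimpleLogger (which sync.go relies on when it type-asserts s.log). A self-contained illustration of the idiom, with hypothetical names:

```go
package main

import "fmt"

// Speaker is a stand-in interface for this illustration.
type Speaker interface {
	Speak() string
}

type Dog struct{}

func (Dog) Speak() string { return "woof" }

// Compile-time checks: both lines cost nothing at runtime, but deleting
// Speak above turns each into a build error rather than a latent bug.
var (
	_ Speaker = Dog{}
	_ Speaker = (*Dog)(nil)
)

func main() {
	fmt.Println(Dog{}.Speak())
}
```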