From 195dcf41a9dad66b9e67f90048974a22bb3b011d Mon Sep 17 00:00:00 2001 From: Minh Vu Date: Fri, 18 Aug 2023 09:51:58 -0400 Subject: [PATCH] [DVT-920] add nodelist cmd (#115) * add nodelist cmd * update desc * update docs * move around commands * update docs * export entity kinds * fix mark flag required * fix logging * move files around again * update docs * remove extra docs --- cmd/p2p/nodelist/nodelist.go | 62 +++++++++++++++++++++ cmd/p2p/p2p.go | 4 +- cmd/p2p/sensor/sensor.go | 54 +++++++++--------- doc/polycli_p2p.md | 2 + doc/polycli_p2p_nodelist.md | 44 +++++++++++++++ doc/polycli_p2p_sensor.md | 60 ++++++++++---------- p2p/database/database.go | 3 + p2p/database/datastore.go | 104 ++++++++++++++++++++++------------- 8 files changed, 238 insertions(+), 95 deletions(-) create mode 100644 cmd/p2p/nodelist/nodelist.go create mode 100644 doc/polycli_p2p_nodelist.md diff --git a/cmd/p2p/nodelist/nodelist.go b/cmd/p2p/nodelist/nodelist.go new file mode 100644 index 00000000..4b7f8d58 --- /dev/null +++ b/cmd/p2p/nodelist/nodelist.go @@ -0,0 +1,62 @@ +package nodelist + +import ( + "encoding/json" + "os" + + "github.com/maticnetwork/polygon-cli/p2p/database" + "github.com/spf13/cobra" +) + +const jsonIndent = " " + +type ( + nodeListParams struct { + ProjectID string + OutputFile string + Limit int + } +) + +var ( + inputNodeListParams nodeListParams +) + +var NodeListCmd = &cobra.Command{ + Use: "nodelist [nodes.json]", + Short: "Generate a node list to seed a node", + Args: cobra.MinimumNArgs(1), + PreRunE: func(cmd *cobra.Command, args []string) (err error) { + inputNodeListParams.OutputFile = args[0] + inputNodeListParams.ProjectID, err = cmd.Flags().GetString("project-id") + return err + }, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + + db := database.NewDatastore(cmd.Context(), database.DatastoreOptions{ + ProjectID: inputNodeListParams.ProjectID, + }) + + nodes, err := db.NodeList(ctx, inputNodeListParams.Limit) + if err 
!= nil { + return err + } + + bytes, err := json.MarshalIndent(nodes, "", jsonIndent) + if err != nil { + return err + } + + if err = os.WriteFile(inputNodeListParams.OutputFile, bytes, 0644); err != nil { + return err + } + + return nil + }, +} + +func init() { + NodeListCmd.PersistentFlags().IntVarP(&inputNodeListParams.Limit, "limit", "l", 100, "Number of unique nodes to return") + NodeListCmd.PersistentFlags().StringVarP(&inputNodeListParams.ProjectID, "project-id", "p", "", "GCP project ID") +} diff --git a/cmd/p2p/p2p.go b/cmd/p2p/p2p.go index ab05cb6d..320b39f8 100644 --- a/cmd/p2p/p2p.go +++ b/cmd/p2p/p2p.go @@ -6,6 +6,7 @@ import ( _ "embed" "github.com/maticnetwork/polygon-cli/cmd/p2p/crawl" + "github.com/maticnetwork/polygon-cli/cmd/p2p/nodelist" "github.com/maticnetwork/polygon-cli/cmd/p2p/ping" "github.com/maticnetwork/polygon-cli/cmd/p2p/sensor" ) @@ -20,7 +21,8 @@ var P2pCmd = &cobra.Command{ } func init() { - P2pCmd.AddCommand(sensor.SensorCmd) P2pCmd.AddCommand(crawl.CrawlCmd) + P2pCmd.AddCommand(nodelist.NodeListCmd) P2pCmd.AddCommand(ping.PingCmd) + P2pCmd.AddCommand(sensor.SensorCmd) } diff --git a/cmd/p2p/sensor/sensor.go b/cmd/p2p/sensor/sensor.go index db73f076..49ce7600 100644 --- a/cmd/p2p/sensor/sensor.go +++ b/cmd/p2p/sensor/sensor.go @@ -38,7 +38,7 @@ type ( ProjectID string SensorID string MaxPeers int - MaxConcurrentDatabaseWrites int + MaxDatabaseConcurrency int ShouldWriteBlocks bool ShouldWriteBlockEvents bool ShouldWriteTransactions bool @@ -147,7 +147,7 @@ var SensorCmd = &cobra.Command{ db := database.NewDatastore(cmd.Context(), database.DatastoreOptions{ ProjectID: inputSensorParams.ProjectID, SensorID: inputSensorParams.SensorID, - MaxConcurrentWrites: inputSensorParams.MaxConcurrentDatabaseWrites, + MaxConcurrency: inputSensorParams.MaxDatabaseConcurrency, ShouldWriteBlocks: inputSensorParams.ShouldWriteBlocks, ShouldWriteBlockEvents: inputSensorParams.ShouldWriteBlockEvents, ShouldWriteTransactions: 
inputSensorParams.ShouldWriteTransactions, @@ -279,44 +279,44 @@ func getLatestBlock(url string) (*rpctypes.RawBlockResponse, error) { } func init() { - SensorCmd.PersistentFlags().StringVarP(&inputSensorParams.Bootnodes, "bootnodes", "b", "", "Comma separated nodes used for bootstrapping") - SensorCmd.PersistentFlags().Uint64VarP(&inputSensorParams.NetworkID, "network-id", "n", 0, "Filter discovered nodes by this network ID") - if err := SensorCmd.MarkPersistentFlagRequired("network-id"); err != nil { + SensorCmd.Flags().StringVarP(&inputSensorParams.Bootnodes, "bootnodes", "b", "", "Comma separated nodes used for bootstrapping") + SensorCmd.Flags().Uint64VarP(&inputSensorParams.NetworkID, "network-id", "n", 0, "Filter discovered nodes by this network ID") + if err := SensorCmd.MarkFlagRequired("network-id"); err != nil { log.Error().Err(err).Msg("Failed to mark network-id as required persistent flag") } - SensorCmd.PersistentFlags().StringVarP(&inputSensorParams.ProjectID, "project-id", "P", "", "GCP project ID") - SensorCmd.PersistentFlags().StringVarP(&inputSensorParams.SensorID, "sensor-id", "s", "", "Sensor ID when writing block/tx events") - if err := SensorCmd.MarkPersistentFlagRequired("sensor-id"); err != nil { + SensorCmd.PersistentFlags().StringVarP(&inputSensorParams.ProjectID, "project-id", "p", "", "GCP project ID") + SensorCmd.Flags().StringVarP(&inputSensorParams.SensorID, "sensor-id", "s", "", "Sensor ID when writing block/tx events") + if err := SensorCmd.MarkFlagRequired("sensor-id"); err != nil { log.Error().Err(err).Msg("Failed to mark sensor-id as required persistent flag") } - SensorCmd.PersistentFlags().IntVarP(&inputSensorParams.MaxPeers, "max-peers", "m", 200, "Maximum number of peers to connect to") - SensorCmd.PersistentFlags().IntVarP(&inputSensorParams.MaxConcurrentDatabaseWrites, "max-db-writes", "D", 10000, - `Maximum number of concurrent database writes to perform. 
Increasing this + SensorCmd.Flags().IntVarP(&inputSensorParams.MaxPeers, "max-peers", "m", 200, "Maximum number of peers to connect to") + SensorCmd.Flags().IntVarP(&inputSensorParams.MaxDatabaseConcurrency, "max-db-concurrency", "D", 10000, + `Maximum number of concurrent database operations to perform. Increasing this will result in less chance of missing data (i.e. broken pipes) but can significantly increase memory usage.`) - SensorCmd.PersistentFlags().BoolVarP(&inputSensorParams.ShouldWriteBlocks, "write-blocks", "B", true, "Whether to write blocks to the database") - SensorCmd.PersistentFlags().BoolVar(&inputSensorParams.ShouldWriteBlockEvents, "write-block-events", true, "Whether to write block events to the database") - SensorCmd.PersistentFlags().BoolVarP(&inputSensorParams.ShouldWriteTransactions, "write-txs", "t", true, + SensorCmd.Flags().BoolVarP(&inputSensorParams.ShouldWriteBlocks, "write-blocks", "B", true, "Whether to write blocks to the database") + SensorCmd.Flags().BoolVar(&inputSensorParams.ShouldWriteBlockEvents, "write-block-events", true, "Whether to write block events to the database") + SensorCmd.Flags().BoolVarP(&inputSensorParams.ShouldWriteTransactions, "write-txs", "t", true, `Whether to write transactions to the database. This option could significantly increase CPU and memory usage.`) - SensorCmd.PersistentFlags().BoolVar(&inputSensorParams.ShouldWriteTransactionEvents, "write-tx-events", true, + SensorCmd.Flags().BoolVar(&inputSensorParams.ShouldWriteTransactionEvents, "write-tx-events", true, `Whether to write transaction events to the database. 
This option could significantly increase CPU and memory usage.`) - SensorCmd.PersistentFlags().BoolVar(&inputSensorParams.ShouldRunPprof, "pprof", false, "Whether to run pprof") - SensorCmd.PersistentFlags().UintVar(&inputSensorParams.PprofPort, "pprof-port", 6060, "Port pprof runs on") - SensorCmd.PersistentFlags().StringVarP(&inputSensorParams.KeyFile, "key-file", "k", "", "Private key file") - SensorCmd.PersistentFlags().IntVar(&inputSensorParams.Port, "port", 30303, "TCP network listening port") - SensorCmd.PersistentFlags().IntVar(&inputSensorParams.DiscoveryPort, "discovery-port", 30303, "UDP P2P discovery port") - SensorCmd.PersistentFlags().StringVar(&inputSensorParams.RPC, "rpc", "https://polygon-rpc.com", "RPC endpoint used to fetch the latest block") - SensorCmd.PersistentFlags().StringVar(&inputSensorParams.GenesisFile, "genesis", "genesis.json", "Genesis file") - SensorCmd.PersistentFlags().StringVar(&inputSensorParams.GenesisHash, "genesis-hash", "0xa9c28ce2141b56c474f1dc504bee9b01eb1bd7d1a507580d5519d4437a97de1b", "The genesis block hash") - SensorCmd.PersistentFlags().IntVar(&inputSensorParams.DialRatio, "dial-ratio", 0, + SensorCmd.Flags().BoolVar(&inputSensorParams.ShouldRunPprof, "pprof", false, "Whether to run pprof") + SensorCmd.Flags().UintVar(&inputSensorParams.PprofPort, "pprof-port", 6060, "Port pprof runs on") + SensorCmd.Flags().StringVarP(&inputSensorParams.KeyFile, "key-file", "k", "", "Private key file") + SensorCmd.Flags().IntVar(&inputSensorParams.Port, "port", 30303, "TCP network listening port") + SensorCmd.Flags().IntVar(&inputSensorParams.DiscoveryPort, "discovery-port", 30303, "UDP P2P discovery port") + SensorCmd.Flags().StringVar(&inputSensorParams.RPC, "rpc", "https://polygon-rpc.com", "RPC endpoint used to fetch the latest block") + SensorCmd.Flags().StringVar(&inputSensorParams.GenesisFile, "genesis", "genesis.json", "Genesis file") + SensorCmd.Flags().StringVar(&inputSensorParams.GenesisHash, "genesis-hash", 
"0xa9c28ce2141b56c474f1dc504bee9b01eb1bd7d1a507580d5519d4437a97de1b", "The genesis block hash") + SensorCmd.Flags().IntVar(&inputSensorParams.DialRatio, "dial-ratio", 0, `Ratio of inbound to dialed connections. A dial ratio of 2 allows 1/2 of connections to be dialed. Setting this to 0 defaults it to 3.`) - SensorCmd.PersistentFlags().StringVar(&inputSensorParams.NAT, "nat", "any", "NAT port mapping mechanism (any|none|upnp|pmp|pmp:|extip:)") - SensorCmd.PersistentFlags().BoolVar(&inputSensorParams.QuickStart, "quick-start", false, + SensorCmd.Flags().StringVar(&inputSensorParams.NAT, "nat", "any", "NAT port mapping mechanism (any|none|upnp|pmp|pmp:|extip:)") + SensorCmd.Flags().BoolVar(&inputSensorParams.QuickStart, "quick-start", false, `Whether to load the nodes.json as static nodes to quickly start the network. This produces faster development cycles but can prevent the sensor from being to connect to new peers if the nodes.json file is large.`) - SensorCmd.PersistentFlags().StringVar(&inputSensorParams.TrustedNodesFile, "trusted-nodes", "", "Trusted nodes file") + SensorCmd.Flags().StringVar(&inputSensorParams.TrustedNodesFile, "trusted-nodes", "", "Trusted nodes file") } diff --git a/doc/polycli_p2p.md b/doc/polycli_p2p.md index 9e2944f2..a695bd0e 100644 --- a/doc/polycli_p2p.md +++ b/doc/polycli_p2p.md @@ -58,6 +58,8 @@ The command also inherits flags from parent commands. - [polycli](polycli.md) - A Swiss Army knife of blockchain tools. - [polycli p2p crawl](polycli_p2p_crawl.md) - Crawl a network on the devp2p layer and generate a nodes JSON file. +- [polycli p2p nodelist](polycli_p2p_nodelist.md) - Generate a node list to seed a node + - [polycli p2p ping](polycli_p2p_ping.md) - Ping node(s) and return the output. - [polycli p2p sensor](polycli_p2p_sensor.md) - Start a devp2p sensor that discovers other peers and will receive blocks and transactions. 
diff --git a/doc/polycli_p2p_nodelist.md b/doc/polycli_p2p_nodelist.md new file mode 100644 index 00000000..51a5d533 --- /dev/null +++ b/doc/polycli_p2p_nodelist.md @@ -0,0 +1,44 @@ +# `polycli p2p nodelist` + +> Auto-generated documentation. + +## Table of Contents + +- [Description](#description) +- [Usage](#usage) +- [Flags](#flags) +- [See Also](#see-also) + +## Description + +Generate a node list to seed a node + +```bash +polycli p2p nodelist [nodes.json] [flags] +``` + +## Flags + +```bash + -h, --help help for nodelist + -l, --limit int Number of unique nodes to return (default 100) + -p, --project-id string GCP project ID +``` + +The command also inherits flags from parent commands. + +```bash + --config string config file (default is $HOME/.polygon-cli.yaml) + --pretty-logs Should logs be in pretty format or JSON (default true) + -v, --verbosity int 0 - Silent + 100 Fatal + 200 Error + 300 Warning + 400 Info + 500 Debug + 600 Trace (default 400) +``` + +## See also + +- [polycli p2p](polycli_p2p.md) - Set of commands related to devp2p. diff --git a/doc/polycli_p2p_sensor.md b/doc/polycli_p2p_sensor.md index 91f54c76..dff4c356 100644 --- a/doc/polycli_p2p_sensor.md +++ b/doc/polycli_p2p_sensor.md @@ -23,36 +23,36 @@ If no nodes.json file exists, it will be created. ## Flags ```bash - -b, --bootnodes string Comma separated nodes used for bootstrapping - --dial-ratio int Ratio of inbound to dialed connections. A dial ratio of 2 allows 1/2 of - connections to be dialed. Setting this to 0 defaults it to 3. - --discovery-port int UDP P2P discovery port (default 30303) - --genesis string Genesis file (default "genesis.json") - --genesis-hash string The genesis block hash (default "0xa9c28ce2141b56c474f1dc504bee9b01eb1bd7d1a507580d5519d4437a97de1b") - -h, --help help for sensor - -k, --key-file string Private key file - -D, --max-db-writes int Maximum number of concurrent database writes to perform. 
Increasing this - will result in less chance of missing data (i.e. broken pipes) but can - significantly increase memory usage. (default 10000) - -m, --max-peers int Maximum number of peers to connect to (default 200) - --nat string NAT port mapping mechanism (any|none|upnp|pmp|pmp:|extip:) (default "any") - -n, --network-id uint Filter discovered nodes by this network ID - --port int TCP network listening port (default 30303) - --pprof Whether to run pprof - --pprof-port uint Port pprof runs on (default 6060) - -P, --project-id string GCP project ID - --quick-start Whether to load the nodes.json as static nodes to quickly start the network. - This produces faster development cycles but can prevent the sensor from being to - connect to new peers if the nodes.json file is large. - --rpc string RPC endpoint used to fetch the latest block (default "https://polygon-rpc.com") - -s, --sensor-id string Sensor ID when writing block/tx events - --trusted-nodes string Trusted nodes file - --write-block-events Whether to write block events to the database (default true) - -B, --write-blocks Whether to write blocks to the database (default true) - --write-tx-events Whether to write transaction events to the database. This option could - significantly increase CPU and memory usage. (default true) - -t, --write-txs Whether to write transactions to the database. This option could significantly - increase CPU and memory usage. (default true) + -b, --bootnodes string Comma separated nodes used for bootstrapping + --dial-ratio int Ratio of inbound to dialed connections. A dial ratio of 2 allows 1/2 of + connections to be dialed. Setting this to 0 defaults it to 3. 
+ --discovery-port int UDP P2P discovery port (default 30303) + --genesis string Genesis file (default "genesis.json") + --genesis-hash string The genesis block hash (default "0xa9c28ce2141b56c474f1dc504bee9b01eb1bd7d1a507580d5519d4437a97de1b") + -h, --help help for sensor + -k, --key-file string Private key file + -D, --max-db-concurrency int Maximum number of concurrent database operations to perform. Increasing this + will result in less chance of missing data (i.e. broken pipes) but can + significantly increase memory usage. (default 10000) + -m, --max-peers int Maximum number of peers to connect to (default 200) + --nat string NAT port mapping mechanism (any|none|upnp|pmp|pmp:|extip:) (default "any") + -n, --network-id uint Filter discovered nodes by this network ID + --port int TCP network listening port (default 30303) + --pprof Whether to run pprof + --pprof-port uint Port pprof runs on (default 6060) + -p, --project-id string GCP project ID + --quick-start Whether to load the nodes.json as static nodes to quickly start the network. + This produces faster development cycles but can prevent the sensor from being able to + connect to new peers if the nodes.json file is large. + --rpc string RPC endpoint used to fetch the latest block (default "https://polygon-rpc.com") + -s, --sensor-id string Sensor ID when writing block/tx events + --trusted-nodes string Trusted nodes file + --write-block-events Whether to write block events to the database (default true) + -B, --write-blocks Whether to write blocks to the database (default true) + --write-tx-events Whether to write transaction events to the database. This option could + significantly increase CPU and memory usage. (default true) + -t, --write-txs Whether to write transactions to the database. This option could significantly + increase CPU and memory usage. (default true) ``` The command also inherits flags from parent commands.
diff --git a/p2p/database/database.go b/p2p/database/database.go index de461eaa..d78afbd7 100644 --- a/p2p/database/database.go +++ b/p2p/database/database.go @@ -44,4 +44,7 @@ type Database interface { ShouldWriteBlockEvents() bool ShouldWriteTransactions() bool ShouldWriteTransactionEvents() bool + + // NodeList will return a list of enode URLs. + NodeList(ctx context.Context, limit int) ([]string, error) } diff --git a/p2p/database/datastore.go b/p2p/database/datastore.go index 2a3720ff..98434d0f 100644 --- a/p2p/database/datastore.go +++ b/p2p/database/datastore.go @@ -12,14 +12,15 @@ import ( "github.com/ethereum/go-ethereum/eth/protocols/eth" "github.com/ethereum/go-ethereum/p2p/enode" "github.com/rs/zerolog/log" + "google.golang.org/api/iterator" ) const ( // Kinds are the datastore equivalent of tables. - blocksKind = "blocks" - blockEventsKind = "block_events" - transactionsKind = "transactions" - transactionEventsKind = "transaction_events" + BlocksKind = "blocks" + BlockEventsKind = "block_events" + TransactionsKind = "transactions" + TransactionEventsKind = "transaction_events" ) // Datastore wraps the datastore client, stores the sensorID, and other @@ -27,12 +28,12 @@ const ( type Datastore struct { client *datastore.Client sensorID string - maxConcurrentWrites int + maxConcurrency int shouldWriteBlocks bool shouldWriteBlockEvents bool shouldWriteTransactions bool shouldWriteTransactionEvents bool - writes chan struct{} + jobs chan struct{} } // DatastoreEvent can represent a peer sending the sensor a transaction hash or @@ -97,7 +98,7 @@ type DatastoreTransaction struct { type DatastoreOptions struct { ProjectID string SensorID string - MaxConcurrentWrites int + MaxConcurrency int ShouldWriteBlocks bool ShouldWriteBlockEvents bool ShouldWriteTransactions bool @@ -115,12 +116,12 @@ func NewDatastore(ctx context.Context, opts DatastoreOptions) Database { return &Datastore{ client: client, sensorID: opts.SensorID, - maxConcurrentWrites: 
opts.MaxConcurrentWrites, + maxConcurrency: opts.MaxConcurrency, shouldWriteBlocks: opts.ShouldWriteBlocks, shouldWriteBlockEvents: opts.ShouldWriteBlockEvents, shouldWriteTransactions: opts.ShouldWriteTransactions, shouldWriteTransactionEvents: opts.ShouldWriteTransactionEvents, - writes: make(chan struct{}, opts.MaxConcurrentWrites), + jobs: make(chan struct{}, opts.MaxConcurrency), } } @@ -131,18 +132,18 @@ func (d *Datastore) WriteBlock(ctx context.Context, peer *enode.Node, block *typ } if d.ShouldWriteBlockEvents() { - d.writes <- struct{}{} + d.jobs <- struct{}{} go func() { - d.writeEvent(peer, blockEventsKind, block.Hash(), blocksKind) - <-d.writes + d.writeEvent(peer, BlockEventsKind, block.Hash(), BlocksKind) + <-d.jobs }() } if d.ShouldWriteBlocks() { - d.writes <- struct{}{} + d.jobs <- struct{}{} go func() { d.writeBlock(ctx, block, td) - <-d.writes + <-d.jobs }() } } @@ -157,10 +158,10 @@ func (d *Datastore) WriteBlockHeaders(ctx context.Context, headers []*types.Head } for _, h := range headers { - d.writes <- struct{}{} + d.jobs <- struct{}{} go func(header *types.Header) { d.writeBlockHeader(ctx, header) - <-d.writes + <-d.jobs }(h) } } @@ -175,10 +176,10 @@ func (d *Datastore) WriteBlockBody(ctx context.Context, body *eth.BlockBody, has return } - d.writes <- struct{}{} + d.jobs <- struct{}{} go func() { d.writeBlockBody(ctx, body, hash) - <-d.writes + <-d.jobs }() } @@ -188,10 +189,10 @@ func (d *Datastore) WriteBlockHashes(ctx context.Context, peer *enode.Node, hash return } - d.writes <- struct{}{} + d.jobs <- struct{}{} go func() { - d.writeEvents(ctx, peer, blockEventsKind, hashes, blocksKind) - <-d.writes + d.writeEvents(ctx, peer, BlockEventsKind, hashes, BlocksKind) + <-d.jobs }() } @@ -202,10 +203,10 @@ func (d *Datastore) WriteTransactions(ctx context.Context, peer *enode.Node, txs } if d.ShouldWriteTransactions() { - d.writes <- struct{}{} + d.jobs <- struct{}{} go func() { d.writeTransactions(ctx, txs) - <-d.writes + <-d.jobs }() } @@ 
-215,16 +216,16 @@ func (d *Datastore) WriteTransactions(ctx context.Context, peer *enode.Node, txs hashes = append(hashes, tx.Hash()) } - d.writes <- struct{}{} + d.jobs <- struct{}{} go func() { - d.writeEvents(ctx, peer, transactionEventsKind, hashes, transactionsKind) - <-d.writes + d.writeEvents(ctx, peer, TransactionEventsKind, hashes, TransactionsKind) + <-d.jobs }() } } func (d *Datastore) MaxConcurrentWrites() int { - return d.maxConcurrentWrites + return d.maxConcurrency } func (d *Datastore) ShouldWriteBlocks() bool { @@ -248,7 +249,7 @@ func (d *Datastore) HasBlock(ctx context.Context, hash common.Hash) bool { return true } - key := datastore.NameKey(blocksKind, hash.Hex(), nil) + key := datastore.NameKey(BlocksKind, hash.Hex(), nil) var block DatastoreBlock err := d.client.Get(ctx, key, &block) @@ -259,7 +260,7 @@ func (d *Datastore) HasBlock(ctx context.Context, hash common.Hash) bool { // values are converted into strings to prevent a loss of precision. func newDatastoreHeader(header *types.Header) *DatastoreHeader { return &DatastoreHeader{ - ParentHash: datastore.NameKey(blocksKind, header.ParentHash.Hex(), nil), + ParentHash: datastore.NameKey(BlocksKind, header.ParentHash.Hex(), nil), UncleHash: header.UncleHash.Hex(), Coinbase: header.Coinbase.Hex(), Root: header.Root.Hex(), @@ -312,7 +313,7 @@ func newDatastoreTransaction(tx *types.Transaction) *DatastoreTransaction { } func (d *Datastore) writeBlock(ctx context.Context, block *types.Block, td *big.Int) { - key := datastore.NameKey(blocksKind, block.Hash().Hex(), nil) + key := datastore.NameKey(BlocksKind, block.Hash().Hex(), nil) _, err := d.client.RunInTransaction(ctx, func(tx *datastore.Transaction) error { var dsBlock DatastoreBlock @@ -340,7 +341,7 @@ func (d *Datastore) writeBlock(ctx context.Context, block *types.Block, td *big. 
dsBlock.Transactions = make([]*datastore.Key, 0, len(block.Transactions())) for _, tx := range block.Transactions() { - dsBlock.Transactions = append(dsBlock.Transactions, datastore.NameKey(transactionsKind, tx.Hash().Hex(), nil)) + dsBlock.Transactions = append(dsBlock.Transactions, datastore.NameKey(TransactionsKind, tx.Hash().Hex(), nil)) } } @@ -349,7 +350,7 @@ func (d *Datastore) writeBlock(ctx context.Context, block *types.Block, td *big. dsBlock.Uncles = make([]*datastore.Key, 0, len(block.Uncles())) for _, uncle := range block.Uncles() { d.writeBlockHeader(ctx, uncle) - dsBlock.Uncles = append(dsBlock.Uncles, datastore.NameKey(blocksKind, uncle.Hash().Hex(), nil)) + dsBlock.Uncles = append(dsBlock.Uncles, datastore.NameKey(BlocksKind, uncle.Hash().Hex(), nil)) } } @@ -409,7 +410,7 @@ func (d *Datastore) writeEvents(ctx context.Context, peer *enode.Node, eventKind // writeBlockHeader will write the block header to datastore if it doesn't // exist. func (d *Datastore) writeBlockHeader(ctx context.Context, header *types.Header) { - key := datastore.NameKey(blocksKind, header.Hash().Hex(), nil) + key := datastore.NameKey(BlocksKind, header.Hash().Hex(), nil) _, err := d.client.RunInTransaction(ctx, func(tx *datastore.Transaction) error { var block DatastoreBlock @@ -428,7 +429,7 @@ func (d *Datastore) writeBlockHeader(ctx context.Context, header *types.Header) } func (d *Datastore) writeBlockBody(ctx context.Context, body *eth.BlockBody, hash common.Hash) { - key := datastore.NameKey(blocksKind, hash.Hex(), nil) + key := datastore.NameKey(BlocksKind, hash.Hex(), nil) _, err := d.client.RunInTransaction(ctx, func(tx *datastore.Transaction) error { var block DatastoreBlock @@ -446,7 +447,7 @@ func (d *Datastore) writeBlockBody(ctx context.Context, body *eth.BlockBody, has block.Transactions = make([]*datastore.Key, 0, len(body.Transactions)) for _, tx := range body.Transactions { - block.Transactions = append(block.Transactions, 
datastore.NameKey(transactionsKind, tx.Hash().Hex(), nil)) + block.Transactions = append(block.Transactions, datastore.NameKey(TransactionsKind, tx.Hash().Hex(), nil)) } } @@ -455,7 +456,7 @@ func (d *Datastore) writeBlockBody(ctx context.Context, body *eth.BlockBody, has block.Uncles = make([]*datastore.Key, 0, len(body.Uncles)) for _, uncle := range body.Uncles { d.writeBlockHeader(ctx, uncle) - block.Uncles = append(block.Uncles, datastore.NameKey(blocksKind, uncle.Hash().Hex(), nil)) + block.Uncles = append(block.Uncles, datastore.NameKey(BlocksKind, uncle.Hash().Hex(), nil)) } } @@ -479,7 +480,7 @@ func (d *Datastore) writeTransactions(ctx context.Context, txs []*types.Transact transactions := make([]*DatastoreTransaction, 0, len(txs)) for _, tx := range txs { - keys = append(keys, datastore.NameKey(transactionsKind, tx.Hash().Hex(), nil)) + keys = append(keys, datastore.NameKey(TransactionsKind, tx.Hash().Hex(), nil)) transactions = append(transactions, newDatastoreTransaction(tx)) } @@ -487,3 +488,32 @@ func (d *Datastore) writeTransactions(ctx context.Context, txs []*types.Transact log.Error().Err(err).Msg("Failed to write transactions") } } + +func (d *Datastore) NodeList(ctx context.Context, limit int) ([]string, error) { + query := datastore.NewQuery(BlockEventsKind).Order("-Time") + iter := d.client.Run(ctx, query) + + enodes := make(map[string]struct{}) + for len(enodes) < limit { + var event DatastoreEvent + _, err := iter.Next(&event) + if err == iterator.Done { + break + } + if err != nil { + log.Error().Err(err).Msg("Failed to get next block event") + continue + } + + enodes[event.PeerId] = struct{}{} + } + + log.Info().Int("enodes", len(enodes)).Send() + + nodelist := []string{} + for enode := range enodes { + nodelist = append(nodelist, enode) + } + + return nodelist, nil +}