From f3d3172cd6191df713631cbc02007a336f510ff3 Mon Sep 17 00:00:00 2001 From: Robert Pirtle Date: Fri, 13 Oct 2023 11:18:37 -0700 Subject: [PATCH] add another node to e2e test setup (#43) * rename node to kava-validator * add peer api node alongside validator * add docs for e2e setup * rename container kavapruning -> kava-pruning --- .env | 14 +- .gitignore | 4 + DEVELOPMENT.md | 2 + Makefile | 4 + docker-compose.yml | 28 +- docker/README.md | 79 ++++ docker/kava-validator/config.toml | 434 ++++++++++++++++++ .../kava-validator-entrypoint.sh} | 10 +- docker/{kava => shared}/config.toml | 2 +- docker/shared/kava-entrypoint.sh | 45 ++ 10 files changed, 611 insertions(+), 11 deletions(-) create mode 100644 docker/README.md create mode 100644 docker/kava-validator/config.toml rename docker/{kava/kava-entrypoint.sh => kava-validator/kava-validator-entrypoint.sh} (75%) rename docker/{kava => shared}/config.toml (99%) create mode 100755 docker/shared/kava-entrypoint.sh diff --git a/.env b/.env index 5197d89..78da0a0 100644 --- a/.env +++ b/.env @@ -17,10 +17,14 @@ POSTGRES_HOST_PORT=5432 REDIS_CONTAINER_PORT=6379 REDIS_HOST_PORT=6379 +KAVA_CONTAINER_TAG=v0.24.0 KAVA_CONTAINER_EVM_RPC_PORT=8545 -KAVA_HOST_EVM_RPC_PORT=8545 KAVA_CONTAINER_COSMOS_RPC_PORT=26657 + +KAVA_HOST_EVM_RPC_PORT=8545 KAVA_HOST_COSMOS_RPC_PORT=26657 +KAVA_PRUNING_HOST_EVM_RPC_PORT=8555 +KAVA_PRUNING_HOST_COSMOS_RPC_PORT=26667 PROXY_CONTAINER_PORT=7777 PROXY_CONTAINER_EVM_RPC_DATA_PORT=7778 @@ -34,13 +38,13 @@ TEST_PROXY_SERVICE_EVM_RPC_HOSTNAME=localhost:7777 TEST_PROXY_SERVICE_EVM_RPC_DATA_URL=http://localhost:7778 TEST_PROXY_BACKEND_EVM_RPC_HOST_URL=http://localhost:8545 TEST_DATABASE_ENDPOINT_URL=localhost:5432 -TEST_PROXY_BACKEND_HOST_URL_MAP=localhost:7777>http://kava:8545,localhost:7778>http://kava:8545 +TEST_PROXY_BACKEND_HOST_URL_MAP=localhost:7777>http://kava-validator:8545,localhost:7778>http://kava-pruning:8545 # What level of logging to use for service objects constructed during # unit tests TEST_SERVICE_LOG_LEVEL=ERROR # endpoint the proxy service should use for querying # evm blockchain information related to proxied requests -TEST_EVM_QUERY_SERVICE_URL=http://kava:8545 +TEST_EVM_QUERY_SERVICE_URL=http://kava-validator:8545 ##### Kava Node Config @@ -52,7 +56,7 @@ LOG_LEVEL=TRACE HTTP_READ_TIMEOUT_SECONDS=30 HTTP_WRITE_TIMEOUT_SECONDS=60 # Address of the origin server to proxy all requests to -PROXY_BACKEND_HOST_URL_MAP=localhost:7777>http://kava:8545,localhost:7778>http://kava:8545 +PROXY_BACKEND_HOST_URL_MAP=localhost:7777>http://kava-validator:8545,localhost:7778>http://kava-pruning:8545 # Configuration for the servcie to connect to it's database DATABASE_NAME=postgres DATABASE_ENDPOINT_URL=postgres:5432 @@ -72,7 +76,7 @@ DATABASE_QUERY_LOGGING_ENABLED=true METRIC_COMPACTION_ROUTINE_INTERVAL_SECONDS=5 # endpoint the proxy service should use for querying # evm blockchain information related to proxied requests -EVM_QUERY_SERVICE_URL=http://kava:8545 +EVM_QUERY_SERVICE_URL=http://kava-validator:8545 # Whether the proxy service should attempt to track and store metrics # related to proxied requests METRIC_COLLECTION_ENABLED=true diff --git a/.gitignore b/.gitignore index bb30b67..89c231d 100644 --- a/.gitignore +++ b/.gitignore @@ -21,3 +21,7 @@ cover.html # ignore editor files .vscode/ + +# ignore e2e test validator files +docker/shared/genesis.json +docker/shared/VALIDATOR_NODE_ID diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md index e5a3ac2..ea08e5d 100644 --- a/DEVELOPMENT.md +++ b/DEVELOPMENT.md @@ -96,6 +96,8 
@@ The e2e tests won't pass if the proxy service and it's dependencies aren't fully make ready e2e-test ``` +For details on the local E2E setup, see [the `docker` directory](./docker/README.md). + ## Test Coverage Report The test commands `make test`, `make unit-test`, and `make e2e-test` generate a `cover.out` raw test coverage report. The coverage can be converted into a user-friendly webpage: diff --git a/Makefile b/Makefile index d63769d..34887f7 100644 --- a/Makefile +++ b/Makefile @@ -68,6 +68,8 @@ up: .PHONY: down # stop the service and it's dependencies down: + rm docker/shared/genesis.json + rm docker/shared/VALIDATOR_NODE_ID docker compose down .PHONY: restart @@ -78,6 +80,8 @@ restart: .PHONY: reset # wipe state and restart the service and all it's dependencies reset: lint + rm docker/shared/genesis.json || exit 0 + rm docker/shared/VALIDATOR_NODE_ID || exit 0 docker compose up -d --build --remove-orphans --renew-anon-volumes --force-recreate .PHONY: refresh diff --git a/docker-compose.yml b/docker-compose.yml index 941c71d..2625654 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -8,6 +8,7 @@ services: - "${POSTGRES_HOST_PORT}:${POSTGRES_CONTAINER_PORT}" expose: - "${POSTGRES_CONTAINER_PORT}" + # run redis for proxy service to cache responses redis: image: 'bitnami/redis:latest' @@ -16,13 +17,15 @@ services: - "${REDIS_HOST_PORT}:${REDIS_CONTAINER_PORT}" expose: - "${REDIS_CONTAINER_PORT}" + # run single validator kava node to provide a local kava network for development and testing of the proxy service - kava: - image: kava/kava:latest - entrypoint: /docker/kava/kava-entrypoint.sh + kava-validator: + image: kava/kava:${KAVA_CONTAINER_TAG} + entrypoint: /docker/kava/kava-validator-entrypoint.sh env_file: .env volumes: - - ./docker/kava:/docker/kava + - ./docker/kava-validator:/docker/kava + - ./docker/shared:/docker/shared ports: - "${KAVA_HOST_COSMOS_RPC_PORT}:${KAVA_CONTAINER_COSMOS_RPC_PORT}" - "${KAVA_HOST_EVM_RPC_PORT}:${KAVA_CONTAINER_EVM_RPC_PORT}" @@ -31,6 +34,23 @@ services: expose: - "${KAVA_CONTAINER_COSMOS_RPC_PORT}" - "${KAVA_CONTAINER_EVM_RPC_PORT}" + + # peer node with api running validator's network + kava-pruning: + image: kava/kava:${KAVA_CONTAINER_TAG} + entrypoint: /docker/shared/kava-entrypoint.sh + env_file: .env + volumes: + - ./docker/shared:/docker/shared + ports: + - "${KAVA_PRUNING_HOST_COSMOS_RPC_PORT}:${KAVA_CONTAINER_COSMOS_RPC_PORT}" + - "${KAVA_PRUNING_HOST_EVM_RPC_PORT}:${KAVA_CONTAINER_EVM_RPC_PORT}" + # expose ports for other services to be able to connect to within + # the default docker-compose network + expose: + - "${KAVA_CONTAINER_COSMOS_RPC_PORT}" + - "${KAVA_CONTAINER_EVM_RPC_PORT}" + # run proxy service to observe, route, and scale requests to kava api endpoints proxy: build: diff --git a/docker/README.md b/docker/README.md new file mode 100644 index 0000000..51a3ac4 --- /dev/null +++ b/docker/README.md @@ -0,0 +1,79 @@ +# e2e test setup + +For the most accurate reflection of real-world use cases, this repo includes an end-to-end test +setup that includes a kava node running with multiple validators. + +This directory contains the configuration and startup files necessary for that setup. + +From the repo root, the following make commands work with these files: +* `make up` - starts the service. if it has never been started before, setup will be performed +* `make down` - stops & destroys the nodes & network +* `make reset` - destroys & recreates the nodes & network. 
setup will be performed
+* `make ready` - blocks the process until the network is producing blocks
+
+`make e2e-test` and `make test` (which runs the unit & e2e tests) both require the network to be running and ready.
+
+The following does not affect the network:
+* `make refresh` - destroys and recreates only the proxy service. Useful for picking up new proxy env variables.
+
+
+## how it works
+
+The setup runs a network of nodes via docker-compose where each node has its own container:
+* `kava-validator` - the validator node (see below)
+* `kava-pruning` - an API-enabled peer node
+
+The network runs with a single validator. The config and entrypoint for this node are in [kava-validator](./kava-validator/).
+
+The `shared/` directory is mounted into every node's container and is used to pass necessary details (the genesis file and the validator's node id) from the validator to the other nodes in the network.
+
+The validator has an [entrypoint](./kava-validator/kava-validator-entrypoint.sh) that does the following on first startup:
+* `init`s a new kava home directory
+* creates a gentx and initializes the network genesis file
+* writes its node id to `shared/VALIDATOR_NODE_ID` so peers can connect to it
+* copies the genesis file to `shared/genesis.json` so other peers can use it to connect
+* starts the network
+
+Meanwhile, any peer node in the network (configured in [docker-compose.yml](../docker-compose.yml)) has an [entrypoint](./shared/kava-entrypoint.sh)
+that does the following:
+* `init`s a new kava home directory
+* waits for the validator to share the genesis file
+* copies over the validator's genesis file
+* reads the validator's node id from `shared/VALIDATOR_NODE_ID`
+* starts the node with the validator as a persistent peer
+
+## add more nodes
+
+We'll want more shards in the future, and this setup supports adding them. To add another API-enabled node to the network:
+1. Add a configuration for the new node in [docker-compose.yml](../docker-compose.yml):
+```yml
+  # peer node with api running validator's network
+  nodename:
+    image: kava/kava:${KAVA_CONTAINER_TAG}
+    entrypoint: /docker/shared/kava-entrypoint.sh
+    env_file: .env
+    volumes:
+      - ./docker/shared:/docker/shared
+    # expose ports for other services to be able to connect to within
+    # the default docker-compose network
+    expose:
+      - "${KAVA_CONTAINER_COSMOS_RPC_PORT}"
+      - "${KAVA_CONTAINER_EVM_RPC_PORT}"
+    # optional: bind host ports to access outside docker network
+    ports:
+      - "${EXPOSED_RPC_PORT}:${KAVA_CONTAINER_COSMOS_RPC_PORT}"
+      - "${EXPOSED_EVM_JSON_RPC_PORT}:${KAVA_CONTAINER_EVM_RPC_PORT}"
+```
+Note that `nodename` should be replaced with whatever you call your new node.
+
+The `ports` bindings are only necessary if you want to query the node directly from outside the docker network.
+If so, define new env variables in `.env` and use them in place of
+* `EXPOSED_RPC_PORT` - the host (outside docker network) port for the RPC API
+* `EXPOSED_EVM_JSON_RPC_PORT` - the host (outside docker network) port for the EVM JSON-RPC API
+
+2. Add the new node to the proxy backend host map config (`PROXY_BACKEND_HOST_URL_MAP` in `.env`): `localhost:${PROXY_PORT_FOR_NEW_HOST}>http://nodename:8545`
+
+3. Make sure the new host port routes to the proxy service. Configure this in the docker-compose `ports`
+   of `proxy`: `- "${PROXY_PORT_FOR_NEW_HOST}:${PROXY_CONTAINER_PORT}"`
+
+4. Run `make reset` and the new node should be running & connected to the network (see the example check below).
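+
+As a quick manual check (a sketch only, not part of the automated tests), you can send a request through the proxy to the new backend. This assumes you picked `7779` as the new host port (`PROXY_PORT_FOR_NEW_HOST`) and wired it up as described in steps 1-3:
+```bash
+# ask the proxy (listening on the new host port) for the latest block height;
+# the proxy routes localhost:7779 to http://nodename:8545 via PROXY_BACKEND_HOST_URL_MAP
+curl -s -X POST http://localhost:7779 \
+  -H "Content-Type: application/json" \
+  --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
+
+# compare against the existing kava-pruning backend routed via localhost:7778
+curl -s -X POST http://localhost:7778 \
+  -H "Content-Type: application/json" \
+  --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
+```
+Both requests should return a JSON-RPC response whose `result` is a recent hex-encoded block height.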
diff --git a/docker/kava-validator/config.toml b/docker/kava-validator/config.toml new file mode 100644 index 0000000..d6e343b --- /dev/null +++ b/docker/kava-validator/config.toml @@ -0,0 +1,434 @@ +# This is a TOML config file. +# For more information, see https://github.com/toml-lang/toml + +# NOTE: Any path below can be absolute (e.g. "/var/myawesomeapp/data") or +# relative to the home directory (e.g. "data"). The home directory is +# "$HOME/.tendermint" by default, but could be changed via $TMHOME env variable +# or --home cmd flag. + +####################################################################### +### Main Base Config Options ### +####################################################################### + +# TCP or UNIX socket address of the ABCI application, +# or the name of an ABCI application compiled in with the Tendermint binary +proxy_app = "tcp://127.0.0.1:26658" + +# A custom human readable name for this node +moniker = "validator" + +# If this node is many blocks behind the tip of the chain, FastSync +# allows them to catchup quickly by downloading blocks in parallel +# and verifying their commits +fast_sync = true + +# Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb +# * goleveldb (github.com/syndtr/goleveldb - most popular implementation) +# - pure go +# - stable +# * cleveldb (uses levigo wrapper) +# - fast +# - requires gcc +# - use cleveldb build tag (go build -tags cleveldb) +# * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt) +# - EXPERIMENTAL +# - may be faster is some use-cases (random reads - indexer) +# - use boltdb build tag (go build -tags boltdb) +# * rocksdb (uses github.com/tecbot/gorocksdb) +# - EXPERIMENTAL +# - requires gcc +# - use rocksdb build tag (go build -tags rocksdb) +# * badgerdb (uses github.com/dgraph-io/badger) +# - EXPERIMENTAL +# - use badgerdb build tag (go build -tags badgerdb) +db_backend = "goleveldb" + +# Database directory +db_dir = "data" + +# Output level for logging, including package level options +log_level = "info" + +# Output format: 'plain' (colored text) or 'json' +log_format = "plain" + +##### additional base config options ##### + +# Path to the JSON file containing the initial validator set and other meta data +genesis_file = "config/genesis.json" + +# Path to the JSON file containing the private key to use as a validator in the consensus protocol +priv_validator_key_file = "config/priv_validator_key.json" + +# Path to the JSON file containing the last sign state of a validator +priv_validator_state_file = "data/priv_validator_state.json" + +# TCP or UNIX socket address for Tendermint to listen on for +# connections from an external PrivValidator process +priv_validator_laddr = "" + +# Path to the JSON file containing the private key to use for node authentication in the p2p protocol +node_key_file = "config/node_key.json" + +# Mechanism to connect to the ABCI application: socket | grpc +abci = "socket" + +# If true, query the ABCI app on connecting to a new peer +# so the app can decide if we should keep the connection or not +filter_peers = false + + +####################################################################### +### Advanced Configuration Options ### +####################################################################### + +####################################################### +### RPC Server Configuration Options ### +####################################################### +[rpc] + +# TCP or UNIX socket address for the RPC server to listen on +laddr = 
"tcp://0.0.0.0:26657" + +# A list of origins a cross-domain request can be executed from +# Default value '[]' disables cors support +# Use '["*"]' to allow any origin +cors_allowed_origins = [] + +# A list of methods the client is allowed to use with cross-domain requests +cors_allowed_methods = ["HEAD", "GET", "POST"] + +# A list of non simple headers the client is allowed to use with cross-domain requests +cors_allowed_headers = [ + "Origin", + "Accept", + "Content-Type", + "X-Requested-With", + "X-Server-Time", +] + +# TCP or UNIX socket address for the gRPC server to listen on +# NOTE: This server only supports /broadcast_tx_commit +grpc_laddr = "" + +# Maximum number of simultaneous connections. +# Does not include RPC (HTTP&WebSocket) connections. See max_open_connections +# If you want to accept a larger number than the default, make sure +# you increase your OS limits. +# 0 - unlimited. +# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} +# 1024 - 40 - 10 - 50 = 924 = ~900 +grpc_max_open_connections = 900 + +# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool +unsafe = false + +# Maximum number of simultaneous connections (including WebSocket). +# Does not include gRPC connections. See grpc_max_open_connections +# If you want to accept a larger number than the default, make sure +# you increase your OS limits. +# 0 - unlimited. +# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} +# 1024 - 40 - 10 - 50 = 924 = ~900 +max_open_connections = 900 + +# Maximum number of unique clientIDs that can /subscribe +# If you're using /broadcast_tx_commit, set to the estimated maximum number +# of broadcast_tx_commit calls per block. +max_subscription_clients = 100 + +# Maximum number of unique queries a given client can /subscribe to +# If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to +# the estimated # maximum number of broadcast_tx_commit calls per block. +max_subscriptions_per_client = 5 + +# Experimental parameter to specify the maximum number of events a node will +# buffer, per subscription, before returning an error and closing the +# subscription. Must be set to at least 100, but higher values will accommodate +# higher event throughput rates (and will use more memory). +experimental_subscription_buffer_size = 200 + +# Experimental parameter to specify the maximum number of RPC responses that +# can be buffered per WebSocket client. If clients cannot read from the +# WebSocket endpoint fast enough, they will be disconnected, so increasing this +# parameter may reduce the chances of them being disconnected (but will cause +# the node to use more memory). +# +# Must be at least the same as "experimental_subscription_buffer_size", +# otherwise connections could be dropped unnecessarily. This value should +# ideally be somewhat higher than "experimental_subscription_buffer_size" to +# accommodate non-subscription-related RPC responses. +experimental_websocket_write_buffer_size = 200 + +# If a WebSocket client cannot read fast enough, at present we may +# silently drop events instead of generating an error or disconnecting the +# client. +# +# Enabling this experimental parameter will cause the WebSocket connection to +# be closed instead if it cannot read fast enough, allowing for greater +# predictability in subscription behaviour. 
+experimental_close_on_slow_client = false + +# How long to wait for a tx to be committed during /broadcast_tx_commit. +# WARNING: Using a value larger than 10s will result in increasing the +# global HTTP write timeout, which applies to all connections and endpoints. +# See https://github.com/tendermint/tendermint/issues/3435 +timeout_broadcast_tx_commit = "10s" + +# Maximum size of request body, in bytes +max_body_bytes = 1000000 + +# Maximum size of request header, in bytes +max_header_bytes = 1048576 + +# The path to a file containing certificate that is used to create the HTTPS server. +# Might be either absolute path or path related to Tendermint's config directory. +# If the certificate is signed by a certificate authority, +# the certFile should be the concatenation of the server's certificate, any intermediates, +# and the CA's certificate. +# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. +# Otherwise, HTTP server is run. +tls_cert_file = "" + +# The path to a file containing matching private key that is used to create the HTTPS server. +# Might be either absolute path or path related to Tendermint's config directory. +# NOTE: both tls-cert-file and tls-key-file must be present for Tendermint to create HTTPS server. +# Otherwise, HTTP server is run. +tls_key_file = "" + +# pprof listen address (https://golang.org/pkg/net/http/pprof) +pprof_laddr = "localhost:6060" + +####################################################### +### P2P Configuration Options ### +####################################################### +[p2p] + +# Address to listen for incoming connections +laddr = "tcp://0.0.0.0:26656" + +# Address to advertise to peers for them to dial +# If empty, will use the same port as the laddr, +# and will introspect on the listener or use UPnP +# to figure out the address. ip and port are required +# example: 159.89.10.97:26656 +external_address = "" + +# Comma separated list of seed nodes to connect to +seeds = "" + +# Comma separated list of nodes to keep persistent connections to +persistent_peers = "81eb8f7c2bab69c66df121ab29649c55ef54f253@172.24.0.5:26656" + +# UPNP port forwarding +upnp = false + +# Path to address book +addr_book_file = "config/addrbook.json" + +# Set true for strict address routability rules +# Set false for private or local networks +addr_book_strict = true + +# Maximum number of inbound peers +max_num_inbound_peers = 40 + +# Maximum number of outbound peers to connect to, excluding persistent peers +max_num_outbound_peers = 10 + +# List of node IDs, to which a connection will be (re)established ignoring any existing limits +unconditional_peer_ids = "" + +# Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) +persistent_peers_max_dial_period = "0s" + +# Time to wait before flushing messages out on the connection +flush_throttle_timeout = "100ms" + +# Maximum size of a message packet payload, in bytes +max_packet_msg_payload_size = 1024 + +# Rate at which packets can be sent, in bytes/second +send_rate = 5120000 + +# Rate at which packets can be received, in bytes/second +recv_rate = 5120000 + +# Set true to enable the peer-exchange reactor +pex = true + +# Seed mode, in which node constantly crawls the network and looks for +# peers. If another node asks it for addresses, it responds and disconnects. +# +# Does not work if the peer-exchange reactor is disabled. 
+seed_mode = false + +# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) +private_peer_ids = "" + +# Toggle to disable guard against peers connecting from the same ip. +allow_duplicate_ip = false + +# Peer connection configuration. +handshake_timeout = "20s" +dial_timeout = "3s" + +####################################################### +### Mempool Configuration Option ### +####################################################### +[mempool] + +recheck = true +broadcast = true +wal_dir = "" + +# Maximum number of transactions in the mempool +size = 5000 + +# Limit the total size of all txs in the mempool. +# This only accounts for raw transactions (e.g. given 1MB transactions and +# max_txs_bytes=5MB, mempool will only accept 5 transactions). +max_txs_bytes = 1073741824 + +# Size of the cache (used to filter transactions we saw earlier) in transactions +cache_size = 10000 + +# Do not remove invalid transactions from the cache (default: false) +# Set to true if it's not possible for any invalid transaction to become valid +# again in the future. +keep-invalid-txs-in-cache = false + +# Maximum size of a single transaction. +# NOTE: the max size of a tx transmitted over the network is {max_tx_bytes}. +max_tx_bytes = 1048576 + +# Maximum size of a batch of transactions to send to a peer +# Including space needed by encoding (one varint per transaction). +# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796 +max_batch_bytes = 0 + +####################################################### +### State Sync Configuration Options ### +####################################################### +[statesync] +# State sync rapidly bootstraps a new node by discovering, fetching, and restoring a state machine +# snapshot from peers instead of fetching and replaying historical blocks. Requires some peers in +# the network to take and serve state machine snapshots. State sync is not attempted if the node +# has any local state (LastBlockHeight > 0). The node will have a truncated block history, +# starting from the height of the snapshot. +enable = false + +# RPC servers (comma-separated) for light client verification of the synced state machine and +# retrieval of state data for node bootstrapping. Also needs a trusted height and corresponding +# header hash obtained from a trusted source, and a period during which validators can be trusted. +# +# For Cosmos SDK-based chains, trust_period should usually be about 2/3 of the unbonding time (~2 +# weeks) during which they can be financially punished (slashed) for misbehavior. +rpc_servers = "" +trust_height = 0 +trust_hash = "" +trust_period = "168h0m0s" + +# Time to spend discovering snapshots before initiating a restore. +discovery_time = "15s" + +# Temporary directory for state sync snapshot chunks, defaults to the OS tempdir (typically /tmp). +# Will create a new, randomly named directory within, and remove it when done. +temp_dir = "" + +# The timeout duration before re-requesting a chunk, possibly from a different +# peer (default: 1 minute). +chunk_request_timeout = "10s" + +# The number of concurrent chunk fetchers to run (default: 1). 
+chunk_fetchers = "4" + +####################################################### +### Fast Sync Configuration Connections ### +####################################################### +[fastsync] + +# Fast Sync version to use: +# 1) "v0" (default) - the legacy fast sync implementation +# 2) "v1" - refactor of v0 version for better testability +# 2) "v2" - complete redesign of v0, optimized for testability & readability +version = "v0" + +####################################################### +### Consensus Configuration Options ### +####################################################### +[consensus] + +wal_file = "data/cs.wal/wal" + +# How long we wait for a proposal block before prevoting nil +timeout_propose = "3s" +# How much timeout_propose increases with each round +timeout_propose_delta = "500ms" +# How long we wait after receiving +2/3 prevotes for “anything” (ie. not a single block or nil) +timeout_prevote = "1s" +# How much the timeout_prevote increases with each round +timeout_prevote_delta = "500ms" +# How long we wait after receiving +2/3 precommits for “anything” (ie. not a single block or nil) +timeout_precommit = "1s" +# How much the timeout_precommit increases with each round +timeout_precommit_delta = "500ms" +# How long we wait after committing a block, before starting on the new +# height (this gives us a chance to receive some more precommits, even +# though we already have +2/3). +timeout_commit = "5s" + +# How many blocks to look back to check existence of the node's consensus votes before joining consensus +# When non-zero, the node will panic upon restart +# if the same consensus key was used to sign {double_sign_check_height} last blocks. +# So, validators should stop the state machine, wait for some blocks, and then restart the state machine to avoid panic. +double_sign_check_height = 0 + +# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) +skip_timeout_commit = false + +# EmptyBlocks mode and possible interval between empty blocks +create_empty_blocks = true +create_empty_blocks_interval = "0s" + +# Reactor sleep duration parameters +peer_gossip_sleep_duration = "100ms" +peer_query_maj23_sleep_duration = "2s" + +####################################################### +### Transaction Indexer Configuration Options ### +####################################################### +[tx_index] + +# What indexer to use for transactions +# +# The application will set which txs to index. In some cases a node operator will be able +# to decide which txs to index based on configuration set in the application. +# +# Options: +# 1) "null" +# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). +# - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed. +indexer = "kv" + +####################################################### +### Instrumentation Configuration Options ### +####################################################### +[instrumentation] + +# When true, Prometheus metrics are served under /metrics on +# PrometheusListenAddr. +# Check out the documentation for the list of available metrics. +prometheus = false + +# Address to listen for Prometheus collector(s) connections +prometheus_listen_addr = ":26660" + +# Maximum number of simultaneous connections. +# If you want to accept a larger number than the default, make sure +# you increase your OS limits. +# 0 - unlimited. 
+max_open_connections = 3
+
+# Instrumentation namespace
+namespace = "tendermint"
diff --git a/docker/kava/kava-entrypoint.sh b/docker/kava-validator/kava-validator-entrypoint.sh
similarity index 75%
rename from docker/kava/kava-entrypoint.sh
rename to docker/kava-validator/kava-validator-entrypoint.sh
index bbee61c..df1d310 100755
--- a/docker/kava/kava-entrypoint.sh
+++ b/docker/kava-validator/kava-validator-entrypoint.sh
@@ -5,12 +5,15 @@ set -ex
 
 # exit early if geneis.json already exists
 # which will happen if the kava docker container is stopped and later restarted
-if test -f "/root/.kava/config/genesis.json" ; then
+if test -f "/root/.kava/config/genesis.json"; then
     echo "genesis.json alredy exists, skipping chain init and validator initilization"
 else
     # create default genesis and node config
     kava init test --chain-id=localnet_7777-1
 
+    # ensure evm api listens on all addresses
+    sed -i 's/address = "127.0.0.1:8545"/address = "0.0.0.0:8545"/g' /root/.kava/config/app.toml
+
     # use the test backend to avoid prompts when storing and accessing keys
     kava config keyring-backend test
 
@@ -27,6 +30,11 @@ else
 
     # merge above transaction with previously generated default genesis
     kava collect-gentxs
+
+    # share node id with peer nodes
+    kava tendermint show-node-id >/docker/shared/VALIDATOR_NODE_ID
+    # share genesis file with peer nodes
+    cp /root/.kava/config/genesis.json /docker/shared/genesis.json
 fi
 
 # set config for kava processes to use
diff --git a/docker/kava/config.toml b/docker/shared/config.toml
similarity index 99%
rename from docker/kava/config.toml
rename to docker/shared/config.toml
index 0904e14..241d0be 100644
--- a/docker/kava/config.toml
+++ b/docker/shared/config.toml
@@ -15,7 +15,7 @@ proxy_app = "tcp://127.0.0.1:26658"
 
 # A custom human readable name for this node
-moniker = "test"
+moniker = "peer"
 
 # If this node is many blocks behind the tip of the chain, FastSync
 # allows them to catchup quickly by downloading blocks in parallel
diff --git a/docker/shared/kava-entrypoint.sh b/docker/shared/kava-entrypoint.sh
new file mode 100755
index 0000000..b39c23d
--- /dev/null
+++ b/docker/shared/kava-entrypoint.sh
@@ -0,0 +1,45 @@
+#!/bin/bash
+
+# log all commands to stdout and stop the script on the first error
+set -ex
+
+SHARED_DIR=/docker/shared
+
+# exit early if genesis.json already exists
+# which will happen if the kava docker container is stopped and later restarted
+if test -f "/root/.kava/config/genesis.json"; then
+    echo "genesis.json already exists, skipping chain init and node initialization"
+else
+    # create default genesis and node config
+    kava init test --chain-id=localnet_7777-1
+
+    # ensure evm api listens on all addresses
+    sed -i 's/address = "127.0.0.1:8545"/address = "0.0.0.0:8545"/g' /root/.kava/config/app.toml
+
+    # wait for genesis.json from validator
+    while true; do
+        current_file_count=$(find "$SHARED_DIR/genesis.json" -maxdepth 1 -type f | wc -l)
+        if [ "$current_file_count" == 1 ]; then
+            echo "Found shared genesis.json from validator."
+            break
+        else
+            echo "Waiting for validator to share genesis.json."
+            sleep 0.25
+        fi
+    done
+
+    # copy over genesis file
+    cp "$SHARED_DIR/genesis.json" /root/.kava/config/genesis.json
+fi
+
+# set config for kava processes to use
+cp /docker/shared/config.toml ~/.kava/config/config.toml
+
+# get node id of validator
+VALIDATOR_NODE_ID="$(cat /docker/shared/VALIDATOR_NODE_ID)"
+
+# start the kava process
+kava start --p2p.persistent_peers "$VALIDATOR_NODE_ID@kava-validator:26656"
+
+# keep the container running in case the kava process exits (kava start runs in the foreground)
+tail -f /dev/null
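
A quick way to sanity-check the resulting two-node network after `make up && make ready` (a sketch; it assumes the host port mappings defined in `.env` above):

```bash
# Tendermint RPC: validator on host port 26657, pruning peer on 26667.
# Both should eventually report "catching_up": false under sync_info.
curl -s http://localhost:26657/status
curl -s http://localhost:26667/status

# EVM JSON-RPC: validator on host port 8545, pruning peer on 8555.
# Both should return a similar, recent hex-encoded block height.
curl -s -X POST http://localhost:8545 \
  -H "Content-Type: application/json" \
  --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
curl -s -X POST http://localhost:8555 \
  -H "Content-Type: application/json" \
  --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
```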