diff --git a/.env b/.env index fc526a6..1ec7038 100644 --- a/.env +++ b/.env @@ -38,7 +38,7 @@ TEST_PROXY_SERVICE_EVM_RPC_HOSTNAME=localhost:7777 TEST_PROXY_SERVICE_EVM_RPC_PRUNING_URL=http://localhost:7778 TEST_PROXY_BACKEND_EVM_RPC_HOST_URL=http://localhost:8545 TEST_DATABASE_ENDPOINT_URL=localhost:5432 -TEST_PROXY_BACKEND_HOST_URL_MAP=localhost:7777>http://kava-validator:8545,localhost:7778>http://kava-pruning:8545 +TEST_PROXY_BACKEND_HOST_URL_MAP=localhost:7774>http://kava-pruning:1317,localhost:7775>http://kava-pruning:26657,localhost:7776>http://kava-pruning:9090,localhost:7777>http://kava-validator:8545,localhost:7778>http://kava-pruning:8545 TEST_PROXY_HEIGHT_BASED_ROUTING_ENABLED=true TEST_PROXY_PRUNING_BACKEND_HOST_URL_MAP=localhost:7777>http://kava-pruning:8545,localhost:7778>http://kava-pruning:8545 # What level of logging to use for service objects constructed during @@ -59,7 +59,7 @@ LOG_LEVEL=TRACE HTTP_READ_TIMEOUT_SECONDS=30 HTTP_WRITE_TIMEOUT_SECONDS=60 # Address of the origin server to proxy all requests to -PROXY_BACKEND_HOST_URL_MAP=localhost:7777>http://kava-validator:8545,localhost:7778>http://kava-pruning:8545 +PROXY_BACKEND_HOST_URL_MAP=localhost:7774>http://kava-validator:1317,localhost:7775>http://kava-validator:26657,localhost:7776>http://kava-validator:9090,localhost:7777>http://kava-validator:8545,localhost:7778>http://kava-pruning:8545 # height-based routing will look at the height of an incoming EVM request # iff. the height is "latest", it routes to the corresponding PROXY_PRUNING_BACKEND_HOST_URL_MAP value # otherwise, it falls back to the value in PROXY_BACKEND_HOST_URL_MAP @@ -82,7 +82,7 @@ DATABASE_CONNECTION_MAX_IDLE_SECONDS=5 DATABASE_MAX_OPEN_CONNECTIONS=20 # controls whether the service will attempt to run migrations when it starts RUN_DATABASE_MIGRATIONS=true -DATABASE_QUERY_LOGGING_ENABLED=true +DATABASE_QUERY_LOGGING_ENABLED=false # How often the metric compaction routine will run # defaults to 3600 / 1 hour if not set METRIC_COMPACTION_ROUTINE_INTERVAL_SECONDS=5 @@ -106,7 +106,7 @@ METRIC_PARTITIONINING_PREFILL_PERIOD_DAYS=7 # Used by `ready` script to ensure metric partitions have been created. 
MINIMUM_REQUIRED_PARTITIONS=30 # Whether metric pruning routines should run on the configured interval, defaults to true -METRIC_PRUNING_ENABLED=true +METRIC_PRUNING_ENABLED=false # How frequently metric pruning routines should run # defaults to 1 day METRIC_PRUNING_ROUTINE_INTERVAL_SECONDS=10 diff --git a/decode/evm_rpc.go b/decode/evm_rpc.go index 51a832b..f68abf8 100644 --- a/decode/evm_rpc.go +++ b/decode/evm_rpc.go @@ -254,6 +254,13 @@ func DecodeEVMRPCRequest(body []byte) (*EVMRPCRequestEnvelope, error) { return &request, err } +// DecodeEVMRPCRequestList attempts to decode raw bytes into a list of EVMRPCRequestEnvelopes +func DecodeEVMRPCRequestList(body []byte) ([]EVMRPCRequestEnvelope, error) { + var request []EVMRPCRequestEnvelope + err := json.Unmarshal(body, &request) + return request, err +} + // ExtractBlockNumberFromEVMRPCRequest attempts to extract the block number // associated with a request if // - the request is a valid evm rpc request diff --git a/docker-compose.yml b/docker-compose.yml index f6e6fd4..b9681c8 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -11,7 +11,7 @@ services: # run redis for proxy service to cache responses redis: - image: 'bitnami/redis:latest' + image: bitnami/redis:latest env_file: .env ports: - "${REDIS_HOST_PORT}:${REDIS_CONTAINER_PORT}" diff --git a/docker/kava-validator/app.toml b/docker/kava-validator/app.toml new file mode 100644 index 0000000..51e9b95 --- /dev/null +++ b/docker/kava-validator/app.toml @@ -0,0 +1,338 @@ +# This is a TOML config file. +# For more information, see https://github.com/toml-lang/toml + +############################################################################### +### Base Configuration ### +############################################################################### + +# The minimum gas prices a validator is willing to accept for processing a +# transaction. A transaction's fees must meet the minimum of any denomination +# specified in this config (e.g. 0.25token1;0.0001token2). +minimum-gas-prices = "0.001ukava;1000000000akava" + +# default: the last 362880 states are kept, pruning at 10 block intervals +# nothing: all historic states will be saved, nothing will be deleted (i.e. archiving node) +# everything: 2 latest states will be kept; pruning at 10 block intervals. +# custom: allow pruning options to be manually specified through 'pruning-keep-recent', and 'pruning-interval' +pruning = "nothing" + +# These are applied if and only if the pruning strategy is custom. +pruning-keep-recent = "0" +pruning-interval = "0" + +# HaltHeight contains a non-zero block height at which a node will gracefully +# halt and shutdown that can be used to assist upgrades and testing. +# +# Note: Commitment of state will be attempted on the corresponding block. +halt-height = 0 + +# HaltTime contains a non-zero minimum block time (in Unix seconds) at which +# a node will gracefully halt and shutdown that can be used to assist upgrades +# and testing. +# +# Note: Commitment of state will be attempted on the corresponding block. +halt-time = 0 + +# MinRetainBlocks defines the minimum block height offset from the current +# block being committed, such that all blocks past this offset are pruned +# from Tendermint. It is used as part of the process of determining the +# ResponseCommit.RetainHeight value during ABCI Commit. A value of 0 indicates +# that no blocks should be pruned. +# +# This configuration value is only responsible for pruning Tendermint blocks.
+# It has no bearing on application state pruning which is determined by the +# "pruning-*" configurations. +# +# Note: Tendermint block pruning is dependent on this parameter in conjunction +# with the unbonding (safety threshold) period, state pruning and state sync +# snapshot parameters to determine the correct minimum value of +# ResponseCommit.RetainHeight. +min-retain-blocks = 0 + +# InterBlockCache enables inter-block caching. +inter-block-cache = true + +# IndexEvents defines the set of events in the form {eventType}.{attributeKey}, +# which informs Tendermint what to index. If empty, all events will be indexed. +# +# Example: +# ["message.sender", "message.recipient"] +index-events = [] + +# IavlCacheSize sets the size of the iavl tree cache. +# Default cache size is 50mb. +iavl-cache-size = 781250 +trace = true +# IavlDisableFastNode enables or disables the fast node feature of IAVL. +# Default is false. +iavl-disable-fastnode = false + +# EXPERIMENTAL: IAVLLazyLoading enable/disable the lazy loading of iavl store. +# Default is false. +iavl-lazy-loading = true + +# AppDBBackend defines the database backend type to use for the application and snapshots DBs. +# An empty string indicates that a fallback will be used. +# First fallback is the deprecated compile-time types.DBBackend value. +# Second fallback (if the types.DBBackend also isn't set), is the db-backend value set in Tendermint's config.toml. +app-db-backend = "" + +############################################################################### +### Telemetry Configuration ### +############################################################################### + +[telemetry] + +# Prefixed with keys to separate services. +service-name = "" + +# Enabled enables the application telemetry functionality. When enabled, +# an in-memory sink is also enabled by default. Operators may also enable +# other sinks such as Prometheus. +enabled = false + +# Enable prefixing gauge values with hostname. +enable-hostname = false + +# Enable adding hostname to labels. +enable-hostname-label = false + +# Enable adding service to labels. +enable-service-label = false + +# PrometheusRetentionTime, when positive, enables a Prometheus metrics sink. +prometheus-retention-time = 0 + +# GlobalLabels defines a global set of name/value label tuples applied to all +# metrics emitted using the wrapper functions defined in telemetry package. +# +# Example: +# [["chain_id", "cosmoshub-1"]] +global-labels = [] + +############################################################################### +### API Configuration ### +############################################################################### + +[api] + +# Enable defines if the API server should be enabled. +enable = true + +# Swagger defines if swagger documentation should automatically be registered. +swagger = false + +# Address defines the API server to listen on. +address = "tcp://0.0.0.0:1317" + +# MaxOpenConnections defines the number of maximum open connections. +max-open-connections = 1000 + +# RPCReadTimeout defines the Tendermint RPC read timeout (in seconds). +rpc-read-timeout = 10 + +# RPCWriteTimeout defines the Tendermint RPC write timeout (in seconds). +rpc-write-timeout = 0 + +# RPCMaxBodyBytes defines the Tendermint maximum response body (in bytes). +rpc-max-body-bytes = 1000000 + +# EnableUnsafeCORS defines if CORS should be enabled (unsafe - use it at your own risk).
+enabled-unsafe-cors = true + +############################################################################### +### Rosetta Configuration ### +############################################################################### + +[rosetta] + +# Enable defines if the Rosetta API server should be enabled. +enable = true + +# Address defines the Rosetta API server to listen on. +address = ":8080" + +# Blockchain defines the name of the blockchain that will be returned by Rosetta. +blockchain = "app" + +# Network defines the name of the network that will be returned by Rosetta. +network = "network" + +# Retries defines the number of retries when connecting to the node before failing. +retries = 3 + +# Offline defines if Rosetta server should run in offline mode. +offline = false + +# EnableDefaultSuggestedFee defines if the server should suggest fee by default. +# If 'construction/metadata' is called without gas limit and gas price, +# suggested fee based on gas-to-suggest and denom-to-suggest will be given. +enable-fee-suggestion = false + +# GasToSuggest defines gas limit when calculating the fee +gas-to-suggest = 200000 + +# DenomToSuggest defines the default denom for fee suggestion. +# Price must be in minimum-gas-prices. +denom-to-suggest = "uatom" + +############################################################################### +### gRPC Configuration ### +############################################################################### + +[grpc] + +# Enable defines if the gRPC server should be enabled. +enable = true + +# Address defines the gRPC server address to bind to. +address = "0.0.0.0:9090" + +# MaxRecvMsgSize defines the max message size in bytes the server can receive. +# The default value is 10MB. +max-recv-msg-size = "10485760" + +# MaxSendMsgSize defines the max message size in bytes the server can send. +# The default value is math.MaxInt32. +max-send-msg-size = "2147483647" + +############################################################################### +### gRPC Web Configuration ### +############################################################################### + +[grpc-web] + +# GRPCWebEnable defines if the gRPC-web should be enabled. +# NOTE: gRPC must also be enabled, otherwise, this configuration is a no-op. +enable = true + +# Address defines the gRPC-web server address to bind to. +address = "0.0.0.0:9091" + +# EnableUnsafeCORS defines if CORS should be enabled (unsafe - use it at your own risk). +enable-unsafe-cors = true + +############################################################################### +### State Sync Configuration ### +############################################################################### + +# State sync snapshots allow other nodes to rapidly join the network without replaying historical +# blocks, instead downloading and applying a snapshot of the application state at a given height. +[state-sync] + +# snapshot-interval specifies the block interval at which local state sync snapshots are +# taken (0 to disable). +snapshot-interval = 0 + +# snapshot-keep-recent specifies the number of recent snapshots to keep and serve (0 to keep all).
+snapshot-keep-recent = 2 + +############################################################################### +### Store / State Streaming ### +############################################################################### + +[store] +streamers = [] + +[streamers] +[streamers.file] +keys = ["*"] +write_dir = "" +prefix = "" + +# output-metadata specifies whether to output the metadata file, which includes the abci request/responses +# during processing of the block. +output-metadata = "true" + +# stop-node-on-error specifies whether to propagate the file streamer errors to the consensus state machine. +stop-node-on-error = "true" + +# fsync specifies whether to call fsync after writing the files. +fsync = "false" + +############################################################################### +### EVM Configuration ### +############################################################################### + +[evm] + +# Tracer defines the 'vm.Tracer' type that the EVM will use when the node is run in +# debug mode. To enable tracing use the '--evm.tracer' flag when starting your node. +# Valid types are: json|struct|access_list|markdown +tracer = "json" + +# MaxTxGasWanted defines the gas wanted for each eth tx returned in ante handler in check tx mode. +max-tx-gas-wanted = 0 + +############################################################################### +### JSON RPC Configuration ### +############################################################################### + +[json-rpc] + +# Enable defines if the JSON-RPC server should be enabled. +enable = true + +# Address defines the EVM RPC HTTP server address to bind to. +address = "0.0.0.0:8545" + +# Address defines the EVM WebSocket server address to bind to. +ws-address = "0.0.0.0:8546" + +# API defines a list of JSON-RPC namespaces that should be enabled +# Example: "eth,txpool,personal,net,debug,web3" +api = "eth,net,web3,debug" + +# GasCap sets a cap on gas that can be used in eth_call/estimateGas (0=infinite). Default: 25,000,000. +gas-cap = 25000000 + +# EVMTimeout is the global timeout for eth_call. Default: 5s. +evm-timeout = "5s" + +# TxFeeCap is the global tx-fee cap for send transaction. Default: 1eth. +txfee-cap = 1 + +# FilterCap sets the global cap for total number of filters that can be created +filter-cap = 200 + +# FeeHistoryCap sets the global cap for total number of blocks that can be fetched +feehistory-cap = 100 + +# LogsCap defines the max number of results that can be returned from a single 'eth_getLogs' query. +logs-cap = 10000 + +# BlockRangeCap defines the max block range allowed for 'eth_getLogs' query. +block-range-cap = 10000 + +# HTTPTimeout is the read/write timeout of http json-rpc server. +http-timeout = "30s" + +# HTTPIdleTimeout is the idle timeout of http json-rpc server. +http-idle-timeout = "2m0s" + +# AllowUnprotectedTxs restricts unprotected (non EIP155 signed) transactions to be submitted via +# the node's RPC when the global parameter is disabled. +allow-unprotected-txs = false + +# MaxOpenConnections sets the maximum number of simultaneous connections +# for the server listener. +max-open-connections = 0 + +# EnableIndexer enables the custom transaction indexer for the EVM (ethereum transactions). +enable-indexer = false + +# MetricsAddress defines the EVM Metrics server address to bind to.
Pass --metrics in CLI to enable +# Prometheus metrics path: /debug/metrics/prometheus +metrics-address = "127.0.0.1:6065" + +############################################################################### +### TLS Configuration ### +############################################################################### + +[tls] + +# Certificate path defines the cert.pem file path for the TLS configuration. +certificate-path = "" + +# Key path defines the key.pem file path for the TLS configuration. +key-path = "" diff --git a/docker/kava-validator/kava-validator-entrypoint.sh b/docker/kava-validator/kava-validator-entrypoint.sh index 6503ab6..7d05d5c 100755 --- a/docker/kava-validator/kava-validator-entrypoint.sh +++ b/docker/kava-validator/kava-validator-entrypoint.sh @@ -55,6 +55,7 @@ fi # set config for kava processes to use cp /docker/kava/config.toml ~/.kava/config/config.toml +cp /docker/kava/app.toml ~/.kava/config/app.toml # start the kava process kava start diff --git a/service/middleware.go b/service/middleware.go index 5b19e72..b002780 100644 --- a/service/middleware.go +++ b/service/middleware.go @@ -22,6 +22,7 @@ const ( DefaultAnonymousUserAgent = "anon" // Service defined context keys DecodedRequestContextKey = "X-KAVA-PROXY-DECODED-REQUEST-BODY" + DecodedBatchRequestContextKey = "X-KAVA-PROXY-DECODED-BATCH-REQUEST-BODY" OriginRoundtripLatencyMillisecondsKey = "X-KAVA-PROXY-ORIGIN-ROUNDTRIP-LATENCY-MILLISECONDS" RequestStartTimeContextKey = "X-KAVA-PROXY-REQUEST-START-TIME" RequestHostnameContextKey = "X-KAVA-PROXY-REQUEST-HOSTNAME" @@ -88,6 +89,63 @@ func (w bodySaverResponseWriter) Write(b []byte) (int, error) { return size, err } +// createDecodeRequestMiddleware is responsible for creating a middleware that +// - decodes the incoming EVM request +// - if successful, puts the decoded request into the context +// - determines if the request is for a single or batch request +// - routes batch requests to BatchProcessingMiddleware +// - routes single requests to next() +func createDecodeRequestMiddleware(next http.HandlerFunc, batchProcessingMiddleware http.HandlerFunc, serviceLogger *logging.ServiceLogger) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + // skip processing if there is no body content + if r.Body == nil { + serviceLogger.Trace().Msg("no data in request") + next.ServeHTTP(w, r) + return + } + + // read body content + var rawBodyBuffer bytes.Buffer + body := io.TeeReader(r.Body, &rawBodyBuffer) + + rawBody, err := io.ReadAll(body) + if err != nil { + serviceLogger.Trace().Msg(fmt.Sprintf("error %s reading request body %s", err, body)) + next.ServeHTTP(w, r) + return + } + + // Repopulate the request body for the ultimate consumer of this request + r.Body = io.NopCloser(&rawBodyBuffer) + + // attempt to decode as single EVM request + decodedRequest, err := decode.DecodeEVMRPCRequest(rawBody) + if err == nil { + // successfully decoded request as a single valid EVM request + // forward along with decoded request in context + serviceLogger.Trace(). + Any("decoded request", decodedRequest). 
+ Msg("successfully decoded single EVM request") + singleDecodedReqContext := context.WithValue(r.Context(), DecodedRequestContextKey, decodedRequest) + next.ServeHTTP(w, r.WithContext(singleDecodedReqContext)) + return + } + + // attempt to decode as list of requests + batchRequests, err := decode.DecodeEVMRPCRequestList(rawBody) + if err != nil { + serviceLogger.Debug().Msg(fmt.Sprintf("error %s parsing of request body %s", err, rawBody)) + next.ServeHTTP(w, r) + return + } + + // TODO: Trace + serviceLogger.Debug().Any("batch", batchRequests).Msg("successfully decoded batch of requests") + batchDecodedReqContext := context.WithValue(r.Context(), DecodedBatchRequestContextKey, batchRequests) + batchProcessingMiddleware.ServeHTTP(w, r.WithContext(batchDecodedReqContext)) + } +} + // createRequestLoggingMiddleware returns a handler that logs any request to stdout // and if able to decode the request to a known type adds it as a context key // To use the decoded request body, get the value from the context and then @@ -97,46 +155,36 @@ func (w bodySaverResponseWriter) Write(b []byte) (int, error) { func createRequestLoggingMiddleware(h http.HandlerFunc, serviceLogger *logging.ServiceLogger) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { requestStartTimeContext := context.WithValue(r.Context(), RequestStartTimeContextKey, time.Now()) + h.ServeHTTP(w, r.WithContext(requestStartTimeContext)) + return - var rawBody []byte - - if r.Body != nil { - var rawBodyBuffer bytes.Buffer - - // Read the request body - body := io.TeeReader(r.Body, &rawBodyBuffer) - - var err error - - rawBody, err = io.ReadAll(body) - - if err != nil { - serviceLogger.Debug().Msg(fmt.Sprintf("error %s reading request body %s", err, body)) - - h.ServeHTTP(w, r) - - return - } - - // Repopulate the request body for the ultimate consumer of this request - r.Body = io.NopCloser(&rawBodyBuffer) - } - - decodedRequest, err := decode.DecodeEVMRPCRequest(rawBody) + // TODO: cleanup. is this middleware still useful? should it actually...log the request? 
+ } +} - if err != nil { - serviceLogger.Debug().Msg(fmt.Sprintf("error %s parsing of request body %s", err, rawBody)) +// TODO: replace the temporary h handler with the real batch handler +func createBatchProcessingMiddleware(h http.HandlerFunc, serviceLogger *logging.ServiceLogger) http.HandlerFunc { + // TODO: build or pass in middleware for + // 1) fetching the cached or proxied response for a single request + // 2) caching & metric creation for single requests - h.ServeHTTP(w, r) + return func(w http.ResponseWriter, r *http.Request) { + batch := r.Context().Value(DecodedBatchRequestContextKey) + decodedReq, ok := (batch).([]decode.EVMRPCRequestEnvelope) + if !ok { + // TODO: update this log for batch + cachemdw.LogCannotCastRequestError(serviceLogger, r) - return + // if we can't get the decoded batch then assign an empty structure to avoid panics + decodedReq = []decode.EVMRPCRequestEnvelope{} + } - serviceLogger.Trace().Msg(fmt.Sprintf("decoded request body %+v", decodedRequest)) + serviceLogger.Info().Any("batch", decodedReq).Msg("decoded batch from request context") - decodedRequestBodyContext := context.WithValue(requestStartTimeContext, DecodedRequestContextKey, decodedRequest) + // TODO: loop over the requests and fire them off concurrently + // TODO: consider recombining uncached responses before requesting from backend(s) - h.ServeHTTP(w, r.WithContext(decodedRequestBodyContext)) + h.ServeHTTP(w, r) } } diff --git a/service/service.go b/service/service.go index 35d1542..dbf4e78 100644 --- a/service/service.go +++ b/service/service.go @@ -74,9 +74,14 @@ func New(ctx context.Context, config config.Config, serviceLogger *logging.Servi // - if not present marks as uncached in context and forwards to next middleware cacheMiddleware := serviceCache.IsCachedMiddleware(proxyMiddleware) + // batchProcessingMiddleware handles requests whose bodies decode to a batch of EVM requests; for now it logs the batch and forwards to the cache middleware + batchProcessingMiddleware := createBatchProcessingMiddleware(cacheMiddleware, serviceLogger) + // decodeRequestMiddleware decodes request bodies, routing single EVM requests to the cache middleware and batches to the batch processing middleware + decodeRequestMiddleware := createDecodeRequestMiddleware(cacheMiddleware, batchProcessingMiddleware, serviceLogger) + // create an http handler that will log the request to stdout // this handler will run before the proxyMiddleware handler - requestLoggingMiddleware := createRequestLoggingMiddleware(cacheMiddleware, serviceLogger) + requestLoggingMiddleware := createRequestLoggingMiddleware(decodeRequestMiddleware, serviceLogger) // register healthcheck handler that can be used during deployment and operations // to determine if the service is ready to receive requests
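
Note on the batch handling introduced above: a JSON-RPC batch body is a JSON array of request envelopes, e.g. [{"jsonrpc":"2.0","id":1,"method":"eth_blockNumber","params":[]},{"jsonrpc":"2.0","id":2,"method":"eth_chainId","params":[]}] (assuming the envelope uses the standard jsonrpc/id/method/params JSON tags), which DecodeEVMRPCRequestList unmarshals into a []EVMRPCRequestEnvelope. createBatchProcessingMiddleware is still a stub; its TODOs call for fanning the sub-requests out concurrently and recombining the responses. The sketch below shows one possible shape for that fan-out and is not part of this change: it assumes each envelope round-trips cleanly through json.Marshal, that replaying a sub-request through the existing single-request chain (handler) is acceptable, and the helper name batchFanOut plus the decode import path are illustrative.

package service

import (
	"bytes"
	"encoding/json"
	"net/http"
	"net/http/httptest"
	"sync"

	// import path assumed; use the module's actual path for the decode package
	"github.com/kava-labs/kava-proxy-service/decode"
)

// batchFanOut replays each decoded sub-request through the single-request
// handler chain concurrently, records the individual responses, and combines
// them into one JSON array in the original request order.
func batchFanOut(handler http.Handler, r *http.Request, batch []decode.EVMRPCRequestEnvelope) ([]byte, error) {
	responses := make([]json.RawMessage, len(batch))
	var wg sync.WaitGroup
	for i, envelope := range batch {
		wg.Add(1)
		go func(i int, envelope decode.EVMRPCRequestEnvelope) {
			defer wg.Done()
			// re-encode the single request so downstream middleware sees a normal body
			body, err := json.Marshal(envelope)
			if err != nil {
				responses[i] = json.RawMessage(`{"jsonrpc":"2.0","error":{"code":-32700,"message":"invalid sub-request"}}`)
				return
			}
			subRequest, err := http.NewRequestWithContext(r.Context(), r.Method, r.URL.String(), bytes.NewReader(body))
			if err != nil {
				responses[i] = json.RawMessage(`{"jsonrpc":"2.0","error":{"code":-32603,"message":"internal error"}}`)
				return
			}
			subRequest.Header = r.Header.Clone()
			// capture whatever the cache / proxy chain writes for this sub-request
			recorder := httptest.NewRecorder()
			handler.ServeHTTP(recorder, subRequest)
			respBody := recorder.Body.Bytes()
			if len(respBody) == 0 {
				// guard against empty upstream responses so the final marshal stays valid JSON
				respBody = []byte("null")
			}
			responses[i] = respBody
		}(i, envelope)
	}
	wg.Wait()
	// a JSON-RPC batch response is simply the array of individual responses
	return json.Marshal(responses)
}

Inside createBatchProcessingMiddleware, something like this could eventually replace the trailing h.ServeHTTP call: pass the cache middleware as handler and the batch pulled from DecodedBatchRequestContextKey, then write the combined bytes (with an application/json content type) to w.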