diff --git a/.env b/.env index 50e21bb..5bda30c 100644 --- a/.env +++ b/.env @@ -43,6 +43,7 @@ TEST_DATABASE_ENDPOINT_URL=localhost:5432 TEST_PROXY_BACKEND_HOST_URL_MAP=localhost:7777>http://kava-validator:8545,localhost:7778>http://kava-pruning:8545 TEST_PROXY_HEIGHT_BASED_ROUTING_ENABLED=true TEST_PROXY_PRUNING_BACKEND_HOST_URL_MAP=localhost:7777>http://kava-pruning:8545,localhost:7778>http://kava-pruning:8545 +TEST_PROXY_SHARD_BACKEND_HOST_URL_MAP=localhost:7777>10|http://kava-shard-10:8545|20|http://kava-shard-20:8545 # What level of logging to use for service objects constructed during # unit tests TEST_SERVICE_LOG_LEVEL=ERROR @@ -71,9 +72,12 @@ PROXY_BACKEND_HOST_URL_MAP=localhost:7777>http://kava-validator:8545,localhost:7 # otherwise, it falls back to the value in PROXY_BACKEND_HOST_URL_MAP PROXY_HEIGHT_BASED_ROUTING_ENABLED=true PROXY_PRUNING_BACKEND_HOST_URL_MAP=localhost:7777>http://kava-pruning:8545,localhost:7778>http://kava-pruning:8545 +# enable shard routing for hosts defined in PROXY_SHARD_BACKEND_HOST_URL_MAP +PROXY_SHARDED_ROUTING_ENABLED=true +PROXY_SHARD_BACKEND_HOST_URL_MAP=localhost:7777>10|http://kava-shard-10:8545|20|http://kava-shard-20:8545 # PROXY_MAXIMUM_REQ_BATCH_SIZE is a proxy-enforced limit on the number of subrequest in a batch PROXY_MAXIMUM_REQ_BATCH_SIZE=100 -# Configuration for the servcie to connect to it's database +# Configuration for the service to connect to its database DATABASE_NAME=postgres DATABASE_ENDPOINT_URL=postgres:5432 DATABASE_USERNAME=postgres diff --git a/architecture/PROXY_ROUTING.md b/architecture/PROXY_ROUTING.md index 0a730d1..15dc843 100644 --- a/architecture/PROXY_ROUTING.md +++ b/architecture/PROXY_ROUTING.md @@ -77,7 +77,7 @@ Now suppose you want multiple backends for the same host. The proxy service supports height-based routing to direct requests that only require the most recent block to a different cluster. 
-This support is handled via the [`HeightShardingProxies` implementation](../service/shard.go#L16). +This support is handled via the [`PruningOrDefaultProxies` implementation](../service/shard.go#L17). This is configured via the `PROXY_HEIGHT_BASED_ROUTING_ENABLED` and `PROXY_PRUNING_BACKEND_HOST_URL_MAP` environment variables. @@ -136,6 +136,114 @@ in `PROXY_BACKEND_HOST_URL_MAP`. Any request made to a host not in the `PROXY_BACKEND_HOST_URL_MAP` map responds 502 Bad Gateway. +## Sharding + +Taking the example one step further, suppose the backend consists of data shards each containing a set of blocks. Although sharded routing can be configured without pruning vs default cluster routing, this example assumes it is. + +The above example supports fielding requests to a particular endpoint with pruning & archive clusters: +* request for tip-of-chain -> pruning cluster +* everything else -> archive cluster ("default") + +The proxy service supports breaking down "everything else" further by defining "shards": clusters that contain a fixed set of block heights. + +This is configured via the `PROXY_SHARDED_ROUTING_ENABLED` and `PROXY_SHARD_BACKEND_HOST_URL_MAP` environment variables: +* `PROXY_SHARDED_ROUTING_ENABLED` - flag to toggle this functionality +* `PROXY_SHARD_BACKEND_HOST_URL_MAP` - encodes the shard cluster urls and block ranges for a given endpoint. +This support is handled via the [`ShardProxies` implementation](../service/shard.go#L103). 
+ + +The map is encoded as follows: +``` +PROXY_SHARDED_ROUTING_ENABLED=true +PROXY_SHARD_BACKEND_HOST_URL_MAP=HOST_A>ENDBLOCK_A1|ROUTE_A1|ENDBLOCK_A2|ROUTE_A2,HOST_B>ENDBLOCK_B1|ROUTE_B1 +``` + +This defines two shards for `HOST_A` and one shard for `HOST_B`: +* `HOST_A`'s shards: + * blocks 1 to `ENDBLOCK_A1` hosted at `ROUTE_A1` + * blocks `ENDBLOCK_A1`+1 to `ENDBLOCK_A2` hosted at `ROUTE_A2` +* `HOST_B`'s shard: + * blocks 1 to `ENDBLOCK_B1` hosted at `ROUTE_B1` + +Shards are inclusive of their end blocks and they must collectively contain all data from block 1 to the end block of the last shard. + +Shards field requests that would route to the "Default" cluster in any of the above configurations: +* requests for `"earliest"` block are routed to the first defined shard +* any request for a specific height that is contained in a shard is routed to that shard. + +All other requests continue to route to the default cluster. In this context, the default cluster is referred to as the "active" cluster (see below). + +Requests for tx hashes or block hashes are routed to the "active" cluster. + +### Shard Routing + +When `PROXY_SHARDED_ROUTING_ENABLED` is `true`, "everything else" can be broken down further into clusters that contain fixed ranges of blocks. 
+ +As an example, consider a setup that has the following clusters: +* Pruning cluster (`http://kava-pruning:8545`) +* "Active" cluster - blocks 4,000,001 to chain tip (`http://kava-archive:8545`) +* Shard 2 - blocks 2,000,001 to 4,000,000 (`http://kava-shard-4M:8545`) +* Shard 1 - blocks 1 to 2,000,000 (`http://kava-shard-2M:8545`) + +The proxy service can be configured as follows: +``` +PROXY_HEIGHT_BASED_ROUTING_ENABLED=true +PROXY_SHARDED_ROUTING_ENABLED=true +PROXY_BACKEND_HOST_URL_MAP=evm.data.kava.io>http://kava-archive:8545 +PROXY_PRUNING_BACKEND_HOST_URL_MAP=evm.data.kava.io>http://kava-pruning:8545 +PROXY_SHARD_BACKEND_HOST_URL_MAP=evm.data.kava.io>2000000|http://kava-shard-2M:8545|4000000|http://kava-shard-4M:8545 +``` + +This value is parsed into a map that looks like the following: +``` +{ + "default": { + "evm.data.kava.io" => "http://kava-archive:8545", + }, + "pruning": { + "evm.data.kava.io" => "http://kava-pruning:8545", + }, + "shards": { + 2000000 => "http://kava-shard-2M:8545", + 4000000 => "http://kava-shard-4M:8545" + } +} +``` + +All requests that would route to the "default" cluster in the "Default vs Pruning Backend Routing" example route as follows: +* requests for specific height between 1 and 2M -> `http://kava-shard-2M:8545` + * this includes requests for `"earliest"` +* requests for specific height between 2M+1 and 4M -> `http://kava-shard-4M:8545` +* requests for a block hash or tx hash -> the active cluster: `http://kava-archive:8545`. + +Otherwise, requests are routed as they are in the "Default vs Pruning Backend Routing" example. + +![Proxy service configured with shard-based routing](images/proxy_service_sharding.jpg) + +### "Active" Cluster + +In practice, a full-archive node can be used as the active cluster. However, the data can be slimmed down by accounting for the fact that it doesn't need the application data for blocks contained in the shards. 
+ +The optimally-sized active cluster runs on a unique data set that includes: +* At least one recent block - this will be the starting point for the node to begin syncing once spun up. Ideally, this is the last shard's end block + 1. +* A complete blockstore, cometbft state, and tx_index + +The blockstore, cometbft state, and tx_index are required for fielding requests for data on unknown heights. These are requests for block hashes and transaction hashes. Because the proxy service can't know which height a particular hash is for (and therefore, to which shard the request should be routed), these complete databases are required to handle requests for the hashes. + +The optimally-sized node data can be created from a full-archive node by pruning only the application state for the node. On Kava, this can be accomplished with the `--only-app-state` flag of the shard command: +``` +kava shard --start --end -1 --only-app-state +``` + +The bulk of data on cosmos-sdk chains like Kava is in the application.db, so pruning the blocks allows for a much smaller cluster footprint than a full archive node. + +### Shard Clusters + +On Kava, data for shards can be created with the `shard` command of the Kava CLI from any node that contains the desired shard block range: +``` +kava shard --home ~/.kava --start --end +``` + ## Metrics When metrics are enabled, the `proxied_request_metrics` table tracks the backend to which requests @@ -147,6 +255,7 @@ always `DEFAULT`. When enabled, the column will have one of the following values: * `DEFAULT` - the request was routed to the backend defined in `PROXY_BACKEND_HOST_URL_MAP` * `PRUNING` - the request was routed to the backend defined in `PROXY_PRUNING_BACKEND_HOST_URL_MAP` +* `SHARD` - the request was routed to a shard defined in the `PROXY_SHARD_BACKEND_HOST_URL_MAP` Additionally, the actual URL to which the request is routed to is tracked in the `response_backend_route` column. 
diff --git a/architecture/images/proxy_service_sharding.jpg b/architecture/images/proxy_service_sharding.jpg new file mode 100644 index 0000000..cfcdd3a Binary files /dev/null and b/architecture/images/proxy_service_sharding.jpg differ diff --git a/ci.docker-compose.yml b/ci.docker-compose.yml index 209346e..2d19f18 100644 --- a/ci.docker-compose.yml +++ b/ci.docker-compose.yml @@ -23,11 +23,14 @@ services: env_file: .env environment: PROXY_HEIGHT_BASED_ROUTING_ENABLED: "true" + PROXY_SHARDED_ROUTING_ENABLED: "true" # use public testnet as backend origin server to avoid having # to self-host a beefy Github Action runner # to build and run a kava node each execution PROXY_BACKEND_HOST_URL_MAP: localhost:7777>https://evmrpcdata.internal.testnet.proxy.kava.io,localhost:7778>https://evmrpc.internal.testnet.proxy.kava.io PROXY_PRUNING_BACKEND_HOST_URL_MAP: localhost:7777>https://evmrpc.internal.testnet.proxy.kava.io + # fake the shards by defining shards with existing backends + PROXY_SHARD_BACKEND_HOST_URL_MAP: localhost:7777>10|https://evmrpc.internal.testnet.proxy.kava.io|20|https://evmrpc.internal.testnet.proxy.kava.io EVM_QUERY_SERVICE_URL: https://evmrpc.internal.testnet.proxy.kava.io ports: - "${PROXY_HOST_PORT}:${PROXY_CONTAINER_PORT}" diff --git a/config/config.go b/config/config.go index b244ce4..bd275d7 100644 --- a/config/config.go +++ b/config/config.go @@ -20,6 +20,9 @@ type Config struct { EnableHeightBasedRouting bool ProxyPruningBackendHostURLMapRaw string ProxyPruningBackendHostURLMap map[string]url.URL + EnableShardedRouting bool + ProxyShardBackendHostURLMapRaw string + ProxyShardBackendHostURLMap map[string]IntervalURLMap ProxyMaximumBatchSize int EvmQueryServiceURL string DatabaseName string @@ -65,6 +68,8 @@ const ( PROXY_BACKEND_HOST_URL_MAP_ENVIRONMENT_KEY = "PROXY_BACKEND_HOST_URL_MAP" PROXY_HEIGHT_BASED_ROUTING_ENABLED_KEY = "PROXY_HEIGHT_BASED_ROUTING_ENABLED" PROXY_PRUNING_BACKEND_HOST_URL_MAP_ENVIRONMENT_KEY = 
"PROXY_PRUNING_BACKEND_HOST_URL_MAP" + PROXY_SHARDED_ROUTING_ENABLED_ENVIRONMENT_KEY = "PROXY_SHARDED_ROUTING_ENABLED" + PROXY_SHARD_BACKEND_HOST_URL_MAP_ENVIRONMENT_KEY = "PROXY_SHARD_BACKEND_HOST_URL_MAP" PROXY_MAXIMUM_BATCH_SIZE_ENVIRONMENT_KEY = "PROXY_MAXIMUM_REQ_BATCH_SIZE" DEFAULT_PROXY_MAXIMUM_BATCH_SIZE = 500 PROXY_SERVICE_PORT_ENVIRONMENT_KEY = "PROXY_SERVICE_PORT" @@ -220,6 +225,65 @@ func ParseRawProxyBackendHostURLMap(raw string) (map[string]url.URL, error) { return hostURLMap, combinedErr } +// ParseRawShardRoutingBackendHostURLMap attempts to parse backend host URL mapping for shards. +// The shard map is a map of host name => (map of end block => backend route) +// returning the mapping and error (if any) +func ParseRawShardRoutingBackendHostURLMap(raw string) (map[string]IntervalURLMap, error) { + parsed := make(map[string]IntervalURLMap) + hostConfigs := strings.Split(raw, ",") + for _, hc := range hostConfigs { + pieces := strings.Split(hc, ">") + if len(pieces) != 2 { + return parsed, fmt.Errorf("expected shard definition like :|, found '%s'", hc) + } + + host := pieces[0] + endpointBackendValues := strings.Split(pieces[1], "|") + if len(endpointBackendValues)%2 != 0 { + return parsed, fmt.Errorf("unexpected | sequence for %s: %s", + host, pieces[1], + ) + } + + prevMaxHeight := uint64(0) + backendByEndHeight := make(map[uint64]*url.URL, len(endpointBackendValues)/2) + for i := 0; i < len(endpointBackendValues); i += 2 { + endHeight, err := strconv.ParseUint(endpointBackendValues[i], 10, 64) + if err != nil || endHeight == 0 { + return parsed, fmt.Errorf("invalid shard end height (%s) for host %s: %s", + endpointBackendValues[i], host, err, + ) + } + // ensure this is the only shard defined with this endBlock for this host + if _, exists := backendByEndHeight[endHeight]; exists { + return parsed, fmt.Errorf("multiple shards defined for %s with end block %d", host, endHeight) + } + // require height definitions to be ordered + // this is enforced 
because the shards are expected to cover the entire range + // from the previous shard's endBlock to the current shard's endBlock + if endHeight < prevMaxHeight { + return parsed, fmt.Errorf( + "shard map expects end blocks to be ordered. for host %s, shard for height %d found after shard for height %d", + host, endHeight, prevMaxHeight, + ) + } + + backendRoute, err := url.Parse(endpointBackendValues[i+1]) + if err != nil || backendRoute.String() == "" { + return parsed, fmt.Errorf("invalid shard backend route (%s) for height %d of host %s: %s", + endpointBackendValues[i+1], endHeight, host, err, + ) + } + backendByEndHeight[endHeight] = backendRoute + prevMaxHeight = endHeight + } + + parsed[host] = NewIntervalURLMap(backendByEndHeight) + } + + return parsed, nil +} + // ParseRawHostnameToHeaderValueMap attempts to parse mappings of hostname to corresponding header value. // For example hostname to access-control-allow-origin header value. func ParseRawHostnameToHeaderValueMap(raw string) (map[string]string, error) { @@ -257,10 +321,12 @@ func ParseRawHostnameToHeaderValueMap(raw string) (map[string]string, error) { func ReadConfig() Config { rawProxyBackendHostURLMap := os.Getenv(PROXY_BACKEND_HOST_URL_MAP_ENVIRONMENT_KEY) rawProxyPruningBackendHostURLMap := os.Getenv(PROXY_PRUNING_BACKEND_HOST_URL_MAP_ENVIRONMENT_KEY) + rawProxyShardedBackendHostURLMap := os.Getenv(PROXY_SHARD_BACKEND_HOST_URL_MAP_ENVIRONMENT_KEY) // best effort to parse, callers are responsible for validating // before using any values read parsedProxyBackendHostURLMap, _ := ParseRawProxyBackendHostURLMap(rawProxyBackendHostURLMap) parsedProxyPruningBackendHostURLMap, _ := ParseRawProxyBackendHostURLMap(rawProxyPruningBackendHostURLMap) + parsedProxyShardedBackendHostURLMap, _ := ParseRawShardRoutingBackendHostURLMap(rawProxyShardedBackendHostURLMap) whitelistedHeaders := os.Getenv(WHITELISTED_HEADERS_ENVIRONMENT_KEY) parsedWhitelistedHeaders := strings.Split(whitelistedHeaders, ",") @@ -282,6 
+348,9 @@ func ReadConfig() Config { EnableHeightBasedRouting: EnvOrDefaultBool(PROXY_HEIGHT_BASED_ROUTING_ENABLED_KEY, false), ProxyPruningBackendHostURLMapRaw: rawProxyPruningBackendHostURLMap, ProxyPruningBackendHostURLMap: parsedProxyPruningBackendHostURLMap, + EnableShardedRouting: EnvOrDefaultBool(PROXY_SHARDED_ROUTING_ENABLED_ENVIRONMENT_KEY, false), + ProxyShardBackendHostURLMapRaw: rawProxyShardedBackendHostURLMap, + ProxyShardBackendHostURLMap: parsedProxyShardedBackendHostURLMap, ProxyMaximumBatchSize: EnvOrDefaultInt(PROXY_MAXIMUM_BATCH_SIZE_ENVIRONMENT_KEY, DEFAULT_PROXY_MAXIMUM_BATCH_SIZE), DatabaseName: os.Getenv(DATABASE_NAME_ENVIRONMENT_KEY), DatabaseEndpointURL: os.Getenv(DATABASE_ENDPOINT_URL_ENVIRONMENT_KEY), diff --git a/config/config_test.go b/config/config_test.go index c29375a..3c46b57 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -1,11 +1,13 @@ package config_test import ( + "net/url" "os" "testing" "github.com/kava-labs/kava-proxy-service/config" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) var ( @@ -14,6 +16,8 @@ var ( proxyServiceBackendHostURLMap = os.Getenv("TEST_PROXY_BACKEND_HOST_URL_MAP") proxyServiceHeightBasedRouting = os.Getenv("TEST_PROXY_HEIGHT_BASED_ROUTING_ENABLED") proxyServicePruningBackendHostURLMap = os.Getenv("TEST_PROXY_PRUNING_BACKEND_HOST_URL_MAP") + proxyServiceShardedRoutingEnabled = os.Getenv("TEST_PROXY_HEIGHT_BASED_ROUTING_ENABLED") + proxyServiceShardBackendHostURLMap = os.Getenv("TEST_PROXY_SHARD_BACKEND_HOST_URL_MAP") ) func TestUnitTestEnvODefaultReturnsDefaultIfEnvironmentVariableNotSet(t *testing.T) { @@ -53,10 +57,42 @@ func TestUnitTestParseHostMapReturnsErrEmptyHostMapWhenEmpty(t *testing.T) { assert.ErrorIs(t, err, config.ErrEmptyHostMap) } +func TestUnitTestParseRawShardRoutingBackendHostURLMap(t *testing.T) { + parsed, err := config.ParseRawShardRoutingBackendHostURLMap("localhost:7777>10|http://kava-shard-10:8545|20|http://kava-shard-20:8545") + 
require.NoError(t, err) + expected := map[string]config.IntervalURLMap{ + "localhost:7777": config.NewIntervalURLMap(map[uint64]*url.URL{ + 10: mustUrl("http://kava-shard-10:8545"), + 20: mustUrl("http://kava-shard-20:8545"), + }), + } + require.Equal(t, expected, parsed) + + _, err = config.ParseRawShardRoutingBackendHostURLMap("no-shard-def") + require.ErrorContains(t, err, "expected shard definition like :|") + + _, err = config.ParseRawShardRoutingBackendHostURLMap("invalid-shard-def>odd|number|bad") + require.ErrorContains(t, err, "unexpected | sequence for invalid-shard-def") + + _, err = config.ParseRawShardRoutingBackendHostURLMap("invalid-height>NaN|backend-host") + require.ErrorContains(t, err, "invalid shard end height (NaN) for host invalid-height") + + _, err = config.ParseRawShardRoutingBackendHostURLMap("invalid-backend-host>100|") + require.ErrorContains(t, err, "invalid shard backend route () for height 100 of host invalid-backend-host") + + _, err = config.ParseRawShardRoutingBackendHostURLMap("unsorted-shards>100|backend-100|50|backend-50") + require.ErrorContains(t, err, "shard map expects end blocks to be ordered") + + _, err = config.ParseRawShardRoutingBackendHostURLMap("multiple-shards-for-same-height>10|magic|20|dino|20|dinosaur") + require.ErrorContains(t, err, "multiple shards defined for multiple-shards-for-same-height with end block 20") +} + func setDefaultEnv() { os.Setenv(config.PROXY_BACKEND_HOST_URL_MAP_ENVIRONMENT_KEY, proxyServiceBackendHostURLMap) os.Setenv(config.PROXY_HEIGHT_BASED_ROUTING_ENABLED_KEY, proxyServiceHeightBasedRouting) os.Setenv(config.PROXY_PRUNING_BACKEND_HOST_URL_MAP_ENVIRONMENT_KEY, proxyServicePruningBackendHostURLMap) + os.Setenv(config.PROXY_SHARDED_ROUTING_ENABLED_ENVIRONMENT_KEY, proxyServiceShardedRoutingEnabled) + os.Setenv(config.PROXY_SHARD_BACKEND_HOST_URL_MAP_ENVIRONMENT_KEY, proxyServiceShardBackendHostURLMap) os.Setenv(config.PROXY_SERVICE_PORT_ENVIRONMENT_KEY, proxyServicePort) 
os.Setenv(config.LOG_LEVEL_ENVIRONMENT_KEY, config.DEFAULT_LOG_LEVEL) } diff --git a/config/intervalmap.go b/config/intervalmap.go new file mode 100644 index 0000000..07dc531 --- /dev/null +++ b/config/intervalmap.go @@ -0,0 +1,41 @@ +package config + +import ( + "net/url" + "sort" +) + +// IntervalURLMap stores URLs associated with a range of numbers. +// The intervals are defined by their endpoints and must not overlap. +// The intervals are inclusive of the endpoints. +type IntervalURLMap struct { + UrlByEndHeight map[uint64]*url.URL + endpoints []uint64 +} + +// NewIntervalURLMap creates a new IntervalMap from a map of interval endpoint => url. +// The intervals are inclusive of their endpoint. +// ie. if the lowest value endpoint in the map is 10, the interval is for all numbers 1 through 10. +func NewIntervalURLMap(urlByEndHeight map[uint64]*url.URL) IntervalURLMap { + endpoints := make([]uint64, 0, len(urlByEndHeight)) + for e := range urlByEndHeight { + endpoints = append(endpoints, e) + } + sort.Slice(endpoints, func(i, j int) bool { return endpoints[i] < endpoints[j] }) + + return IntervalURLMap{ + UrlByEndHeight: urlByEndHeight, + endpoints: endpoints, + } +} + +// Lookup finds the value associated with the interval containing the number, if it exists. 
+func (im *IntervalURLMap) Lookup(num uint64) (*url.URL, uint64, bool) { + i := sort.Search(len(im.endpoints), func(i int) bool { return im.endpoints[i] >= num }) + + if i < len(im.endpoints) && num <= im.endpoints[i] { + return im.UrlByEndHeight[im.endpoints[i]], im.endpoints[i], true + } + + return nil, 0, false +} diff --git a/config/intervalmap_test.go b/config/intervalmap_test.go new file mode 100644 index 0000000..6a9b24c --- /dev/null +++ b/config/intervalmap_test.go @@ -0,0 +1,57 @@ +package config_test + +import ( + "fmt" + "net/url" + "testing" + + "github.com/kava-labs/kava-proxy-service/config" + "github.com/stretchr/testify/require" +) + +func mustUrl(s string) *url.URL { + u, err := url.Parse(s) + if err != nil { + panic(fmt.Sprintf("failed to parse url %s: %s", s, err)) + } + return u +} + +func TestUnitTestIntervalMap(t *testing.T) { + valueByEndpoint := map[uint64]*url.URL{ + 10: mustUrl("A"), + 20: mustUrl("B"), + 100: mustUrl("C"), + } + intervalmap := config.NewIntervalURLMap(valueByEndpoint) + + testCases := []struct { + value uint64 + expectFound bool + expectEndHeight uint64 + expectResult string + }{ + {1, true, 10, "A"}, + {9, true, 10, "A"}, + {10, true, 10, "A"}, + {15, true, 20, "B"}, + {20, true, 20, "B"}, + {75, true, 100, "C"}, + {100, true, 100, "C"}, + {101, false, 0, ""}, + {300, false, 0, ""}, + } + + for _, tc := range testCases { + t.Run(fmt.Sprintf("Lookup(%d)", tc.value), func(t *testing.T) { + result, endHeight, found := intervalmap.Lookup(tc.value) + require.Equal(t, tc.expectFound, found, "unexpected found value") + require.Equal(t, tc.expectEndHeight, endHeight, "unexpected end height found") + if tc.expectResult == "" { + require.Nil(t, result) + } else { + require.Equal(t, tc.expectResult, result.String()) + } + }) + } +} diff --git a/config/validate.go b/config/validate.go index 8e87176..bc98fc2 100644 --- a/config/validate.go +++ b/config/validate.go @@ -44,6 +44,10 @@ func Validate(config Config) error { allErrs = 
errors.Join(allErrs, fmt.Errorf("invalid %s specified %s", PROXY_PRUNING_BACKEND_HOST_URL_MAP_ENVIRONMENT_KEY, config.ProxyPruningBackendHostURLMapRaw), err) } + if err = validateShardRoutingBackendHostURLMap(config.ProxyShardBackendHostURLMapRaw); err != nil { + allErrs = errors.Join(allErrs, fmt.Errorf("invalid %s specified %s", PROXY_SHARD_BACKEND_HOST_URL_MAP_ENVIRONMENT_KEY, config.ProxyShardBackendHostURLMapRaw), err) + } + if err = validateDefaultHostMapContainsHosts( PROXY_PRUNING_BACKEND_HOST_URL_MAP_ENVIRONMENT_KEY, config.ProxyBackendHostURLMapParsed, @@ -122,6 +126,12 @@ func validateHostnameToHeaderValueMap(raw string, allowEmpty bool) error { return err } +// validateShardRoutingBackendHostURLMap validates the host-backend url map for shard-based routing +func validateShardRoutingBackendHostURLMap(raw string) error { + _, err := ParseRawShardRoutingBackendHostURLMap(raw) + return err +} + // validateDefaultHostMapContainsHosts returns an error if there are hosts in hostMap that // are not in defaultHostMap // example: hosts in the pruning map should always have a default fallback backend diff --git a/config/validate_test.go b/config/validate_test.go index a4d8563..ee60bbc 100644 --- a/config/validate_test.go +++ b/config/validate_test.go @@ -1,9 +1,10 @@ package config_test import ( - "github.com/stretchr/testify/require" "testing" + "github.com/stretchr/testify/require" + "github.com/kava-labs/kava-proxy-service/config" "github.com/stretchr/testify/assert" ) @@ -84,6 +85,15 @@ func TestUnitTestValidateConfigReturnsErrorIfInvalidProxyPruningBackendHostURLCo assert.NotNil(t, err) } +func TestUnitTestValidateConfigReturnsErrorIfInvalidShardRoutingBackendURLMap(t *testing.T) { + testConfig := defaultConfig + testConfig.ProxyShardBackendHostURLMapRaw = "my-misconfigured-backend>10|backend-10|20|backend-20|20|uh-oh-20-again" + + err := config.Validate(testConfig) + + assert.NotNil(t, err) +} + func 
TestUnitTestValidateConfigReturnsErrorIfInvalidProxyServicePort(t *testing.T) { testConfig := defaultConfig testConfig.ProxyServicePort = "abc" diff --git a/docker-compose.yml b/docker-compose.yml index 1731230..178fba2 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -51,6 +51,34 @@ services: - "${KAVA_CONTAINER_COSMOS_RPC_PORT}" - "${KAVA_CONTAINER_EVM_RPC_PORT}" + + # shards are in name only. they are standard peer nodes, but will only receive traffic + # for a specific block range. kava-shard-10 receives requests for heights 1-10 + kava-shard-10: + image: kava/kava:${KAVA_CONTAINER_TAG} + entrypoint: /docker/shared/kava-entrypoint.sh + env_file: .env + volumes: + - ./docker/shared:/docker/shared + # expose ports for other services to be able to connect to within + # the default docker-compose network + expose: + - "${KAVA_CONTAINER_COSMOS_RPC_PORT}" + - "${KAVA_CONTAINER_EVM_RPC_PORT}" + + # shards are in name only. they are standard peer nodes, but will only receive traffic + # for a specific block range. kava-shard-20 receives requests for heights 11-20 + kava-shard-20: + image: kava/kava:${KAVA_CONTAINER_TAG} + entrypoint: /docker/shared/kava-entrypoint.sh + env_file: .env + volumes: + - ./docker/shared:/docker/shared + # expose ports for other services to be able to connect to within + # the default docker-compose network + expose: + - "${KAVA_CONTAINER_COSMOS_RPC_PORT}" + - "${KAVA_CONTAINER_EVM_RPC_PORT}" + # run proxy service to observe, route, and scale requests to kava api endpoints proxy: build: diff --git a/main_test.go b/main_test.go index 18f589a..6f554bd 100644 --- a/main_test.go +++ b/main_test.go @@ -89,8 +89,9 @@ var ( // search for any request metrics between startTime and time.Now() for particular request methods // if testedmethods is empty, all metrics in timeframe are returned. 
func findMetricsInWindowForMethods(db database.PostgresClient, startTime time.Time, testedmethods []string) []database.ProxiedRequestMetric { + extension := time.Duration(testExtendMetricWindowMs) * time.Millisecond // add small buffer into future in case metrics are still being created - endTime := time.Now().Add(time.Duration(testExtendMetricWindowMs) * time.Millisecond) + endTime := time.Now().Add(extension) var nextCursor int64 var proxiedRequestMetrics []database.ProxiedRequestMetric @@ -130,6 +131,8 @@ func findMetricsInWindowForMethods(db database.PostgresClient, startTime time.Ti } } + // ensure next window has no overlap with current one + time.Sleep(extension) return requestMetricsDuringRequestWindow } @@ -422,14 +425,26 @@ func TestE2ETest_HeightBasedRouting(t *testing.T) { { name: "request for non-latest height -> default", method: "eth_getBlockByNumber", - params: []interface{}{"0x2", false}, + params: []interface{}{"0x15", false}, // block 21 is beyond shards expectRoute: service.ResponseBackendDefault, }, { - name: "request for earliest height -> default", + name: "request for height in 1st shard -> shard", + method: "eth_getBlockByNumber", + params: []interface{}{"0x2", false}, // block 2 + expectRoute: service.ResponseBackendShard, + }, + { + name: "request for height in 2nd shard -> shard", + method: "eth_getBlockByNumber", + params: []interface{}{"0xF", false}, // block 15 + expectRoute: service.ResponseBackendShard, + }, + { + name: "request for earliest height -> 1st shard", method: "eth_getBlockByNumber", params: []interface{}{"earliest", false}, - expectRoute: service.ResponseBackendDefault, + expectRoute: service.ResponseBackendShard, }, { name: "request for latest height -> pruning", diff --git a/service/proxy.go b/service/proxy.go index ca81b95..0894985 100644 --- a/service/proxy.go +++ b/service/proxy.go @@ -14,6 +14,7 @@ import ( const ( ResponseBackendDefault = "DEFAULT" ResponseBackendPruning = "PRUNING" + ResponseBackendShard = 
"SHARD" ) // Proxies is an interface for getting a reverse proxy for a given request. @@ -29,18 +30,30 @@ type ProxyMetadata struct { BackendName string // url of the backend used BackendRoute url.URL + // height interval endpoint of shard. + // only defined if BackendName is "SHARD" + ShardEndHeight uint64 } // NewProxies creates a Proxies instance based on the service configuration: // - for non-sharding configuration, it returns a HostProxies -// - for sharding configurations, it returns a HeightShardingProxies +// - for height-based-routing configurations, it returns a PruningOrDefaultProxies func NewProxies(config config.Config, serviceLogger *logging.ServiceLogger) Proxies { + var proxies Proxies + // configure proxies for default &/or pruning cluster routing if config.EnableHeightBasedRouting { - serviceLogger.Debug().Msg("configuring reverse proxies based on host AND height") - return newHeightShardingProxies(config, serviceLogger) + serviceLogger.Debug().Msg("configuring reverse proxies based on host AND height (pruning or default)") + proxies = newPruningOrDefaultProxies(config, serviceLogger) + } else { + serviceLogger.Debug().Msg("configuring reverse proxies based solely on request host") + proxies = newHostProxies(ResponseBackendDefault, config.ProxyBackendHostURLMapParsed, serviceLogger) } - serviceLogger.Debug().Msg("configuring reverse proxies based solely on request host") - return newHostProxies(ResponseBackendDefault, config.ProxyBackendHostURLMapParsed, serviceLogger) + + // wrap the baseline proxies with shard info if enabled + if config.EnableShardedRouting { + return newShardProxies(config.ProxyShardBackendHostURLMap, proxies, serviceLogger) + } + return proxies } // HostProxies chooses a proxy based solely on the Host of the incoming request, diff --git a/service/proxy_test.go b/service/proxy_test.go index 15022bf..fb5d2f2 100644 --- a/service/proxy_test.go +++ b/service/proxy_test.go @@ -14,7 +14,7 @@ import ( 
"github.com/stretchr/testify/require" ) -func newConfig(t *testing.T, defaultHostMap string, pruningHostMap string) config.Config { +func newConfig(t *testing.T, defaultHostMap string, pruningHostMap string, shardHostMap string) config.Config { parsed, err := config.ParseRawProxyBackendHostURLMap(defaultHostMap) require.NoError(t, err) result := config.Config{ @@ -27,27 +27,39 @@ func newConfig(t *testing.T, defaultHostMap string, pruningHostMap string) confi result.ProxyPruningBackendHostURLMap, err = config.ParseRawProxyBackendHostURLMap(pruningHostMap) require.NoError(t, err) } + if shardHostMap != "" { + result.EnableShardedRouting = true + result.ProxyShardBackendHostURLMapRaw = shardHostMap + result.ProxyShardBackendHostURLMap, err = config.ParseRawShardRoutingBackendHostURLMap(shardHostMap) + require.NoError(t, err) + } return result } func TestUnitTest_NewProxies(t *testing.T) { t.Run("returns a HostProxies when sharding disabled", func(t *testing.T) { - config := newConfig(t, dummyConfig.ProxyBackendHostURLMapRaw, "") + config := newConfig(t, dummyConfig.ProxyBackendHostURLMapRaw, "", "") proxies := service.NewProxies(config, dummyLogger) require.IsType(t, service.HostProxies{}, proxies) }) - t.Run("returns a HeightShardingProxies when sharding enabled", func(t *testing.T) { - config := newConfig(t, dummyConfig.ProxyBackendHostURLMapRaw, dummyConfig.ProxyPruningBackendHostURLMapRaw) + t.Run("returns a PruningOrDefaultProxies when height-based routing enabled", func(t *testing.T) { + config := newConfig(t, dummyConfig.ProxyBackendHostURLMapRaw, dummyConfig.ProxyPruningBackendHostURLMapRaw, "") + proxies := service.NewProxies(config, dummyLogger) + require.IsType(t, service.PruningOrDefaultProxies{}, proxies) + }) + + t.Run("returns a ShardProxies when sharding enabled", func(t *testing.T) { + config := newConfig(t, dummyConfig.ProxyBackendHostURLMapRaw, "", dummyConfig.ProxyShardBackendHostURLMapRaw) proxies := service.NewProxies(config, dummyLogger) - 
require.IsType(t, service.HeightShardingProxies{}, proxies) + require.IsType(t, service.ShardProxies{}, proxies) }) } func TestUnitTest_HostProxies(t *testing.T) { config := newConfig(t, "magic.kava.io>magicalbackend.kava.io,archive.kava.io>archivenode.kava.io,pruning.kava.io>pruningnode.kava.io", - "", + "", "", ) proxies := service.NewProxies(config, dummyLogger) diff --git a/service/service_test.go b/service/service_test.go index 9733509..bd2d005 100644 --- a/service/service_test.go +++ b/service/service_test.go @@ -15,6 +15,7 @@ var ( testDefaultContext = context.TODO() proxyServiceDefaultURLMapRaw = os.Getenv("TEST_PROXY_BACKEND_HOST_URL_MAP") proxyServicePruningURLMapRaw = os.Getenv("TEST_PROXY_PRUNING_BACKEND_HOST_URL_MAP") + proxyServiceShardURLMapRaw = os.Getenv("TEST_PROXY_SHARD_BACKEND_HOST_URL_MAP") databaseName = os.Getenv("DATABASE_NAME") databaseUsername = os.Getenv("DATABASE_USERNAME") databasePassword = os.Getenv("DATABASE_PASSWORD") @@ -23,7 +24,6 @@ var ( evmQueryServiceURL = os.Getenv("TEST_EVM_QUERY_SERVICE_URL") dummyConfig = func() config.Config { - proxyBackendHostURLMapParsed, err := config.ParseRawProxyBackendHostURLMap(proxyServiceDefaultURLMapRaw) if err != nil { panic(err) @@ -32,12 +32,18 @@ var ( if err != nil { panic(err) } + proxyShardBackendHostURLMapParsed, err := config.ParseRawShardRoutingBackendHostURLMap(proxyServiceShardURLMapRaw) + if err != nil { + panic(err) + } conf := config.Config{ ProxyBackendHostURLMapRaw: proxyServiceDefaultURLMapRaw, ProxyBackendHostURLMapParsed: proxyBackendHostURLMapParsed, ProxyPruningBackendHostURLMapRaw: proxyServicePruningURLMapRaw, ProxyPruningBackendHostURLMap: proxyPruningBackendHostURLMapParsed, + ProxyShardBackendHostURLMapRaw: proxyServiceShardURLMapRaw, + ProxyShardBackendHostURLMap: proxyShardBackendHostURLMapParsed, DatabaseName: databaseName, DatabaseUserName: databaseUsername, diff --git a/service/shard.go b/service/shard.go index 5924065..c7e79d0 100644 --- a/service/shard.go +++ 
b/service/shard.go @@ -4,29 +4,30 @@ import ( "fmt" "net/http" "net/http/httputil" + "net/url" "github.com/kava-labs/kava-proxy-service/config" "github.com/kava-labs/kava-proxy-service/decode" "github.com/kava-labs/kava-proxy-service/logging" ) -// HeightShardingProxies routes traffic based on the host _and_ the height of the query. +// PruningOrDefaultProxies routes traffic based on the host _and_ the height of the query. // If the height is "latest" (or equivalent), return Pruning node proxy host. // Otherwise return default node proxy host. -type HeightShardingProxies struct { +type PruningOrDefaultProxies struct { *logging.ServiceLogger pruningProxies HostProxies defaultProxies HostProxies } -var _ Proxies = HeightShardingProxies{} +var _ Proxies = PruningOrDefaultProxies{} // ProxyForRequest implements Proxies. // Decodes height of request // - routes to Pruning proxy if defined & height is "latest" // - otherwise routes to Default proxy -func (hsp HeightShardingProxies) ProxyForRequest(r *http.Request) (*httputil.ReverseProxy, ProxyMetadata, bool) { +func (hsp PruningOrDefaultProxies) ProxyForRequest(r *http.Request) (*httputil.ReverseProxy, ProxyMetadata, bool) { _, _, found := hsp.pruningProxies.ProxyForRequest(r) // if the host isn't in the pruning proxies, short circuit fallback to default if !found { @@ -38,7 +39,7 @@ func (hsp HeightShardingProxies) ProxyForRequest(r *http.Request) (*httputil.Rev req := r.Context().Value(DecodedRequestContextKey) decodedReq, ok := (req).(*decode.EVMRPCRequestEnvelope) if !ok { - hsp.Trace().Msg("HeightShardingProxies failed to find & cast the decoded request envelope from the request context") + hsp.Trace().Msg("PruningOrDefaultProxies failed to find & cast the decoded request envelope from the request context") return hsp.defaultProxies.ProxyForRequest(r) } @@ -70,9 +71,9 @@ func (hsp HeightShardingProxies) ProxyForRequest(r *http.Request) (*httputil.Rev return hsp.defaultProxies.ProxyForRequest(r) } -// 
newHeightShardingProxies creates a new HeightShardingProxies from the service config. -func newHeightShardingProxies(config config.Config, serviceLogger *logging.ServiceLogger) HeightShardingProxies { - return HeightShardingProxies{ +// newPruningOrDefaultProxies creates a new PruningOrDefaultProxies from the service config. +func newPruningOrDefaultProxies(config config.Config, serviceLogger *logging.ServiceLogger) PruningOrDefaultProxies { + return PruningOrDefaultProxies{ ServiceLogger: serviceLogger, pruningProxies: newHostProxies(ResponseBackendPruning, config.ProxyPruningBackendHostURLMap, serviceLogger), defaultProxies: newHostProxies(ResponseBackendDefault, config.ProxyBackendHostURLMapParsed, serviceLogger), @@ -94,3 +95,90 @@ var blockTagEncodingsRoutedToLatest = map[int64]bool{ func shouldRouteToPruning(encodedHeight int64) bool { return blockTagEncodingsRoutedToLatest[encodedHeight] } + +// ShardProxies handles routing requests for specific heights to backends that contain the height. +// The height is parsed out of requests that would route to the default backend of the underlying `defaultProxies` +// If the height is contained by a backend in the host's IntervalURLMap, it is routed to that url. +// Otherwise, it forwards the request via the wrapped defaultProxies. +type ShardProxies struct { + *logging.ServiceLogger + + defaultProxies Proxies + shardsByHost map[string]config.IntervalURLMap + proxyByURL map[*url.URL]*httputil.ReverseProxy +} + +var _ Proxies = ShardProxies{} + +// ProxyForRequest implements Proxies. 
+func (sp ShardProxies) ProxyForRequest(r *http.Request) (*httputil.ReverseProxy, ProxyMetadata, bool) {
+	// short circuit if host not in shards map
+	shardsForHost, found := sp.shardsByHost[r.Host]
+	if !found {
+		return sp.defaultProxies.ProxyForRequest(r)
+	}
+
+	// handle unsupported hosts or routing to pruning (if enabled)
+	proxy, metadata, found := sp.defaultProxies.ProxyForRequest(r)
+	if metadata.BackendName != ResponseBackendDefault || !found {
+		return proxy, metadata, found
+	}
+
+	// get decoded request
+	req := r.Context().Value(DecodedRequestContextKey)
+	decodedReq, ok := (req).(*decode.EVMRPCRequestEnvelope)
+	if !ok {
+		sp.Trace().Msg("ShardProxies failed to find & cast the decoded request envelope from the request context")
+		return sp.defaultProxies.ProxyForRequest(r)
+	}
+
+	// parse height from the request
+	parsedHeight, err := decode.ParseBlockNumberFromParams(decodedReq.Method, decodedReq.Params)
+	if err != nil {
+		sp.Error().Msg(fmt.Sprintf("expected but failed to parse block number for %+v: %s", decodedReq, err))
+		return sp.defaultProxies.ProxyForRequest(r)
+	}
+
+	// handle encoded block numbers
+	height := parsedHeight
+	if height == decode.BlockTagToNumberCodec[decode.BlockTagEarliest] {
+		// convert "earliest" to "1" so it routes to first shard
+		height = 1
+	} else if parsedHeight < 1 {
+		// route all other encoded tags to default proxy.
+		// in practice, this is unreachable because they will be handled by the pruning Proxies
+		// if shard routing is enabled without PruningOrDefaultProxies, this handles all special block tags
+		return sp.defaultProxies.ProxyForRequest(r)
+	}
+
+	// look for shard including height
+	url, shardHeight, found := shardsForHost.Lookup(uint64(height))
+	if !found {
+		return sp.defaultProxies.ProxyForRequest(r)
+	}
+
+	// shard exists, route to it!
+ metadata = ProxyMetadata{ + BackendName: ResponseBackendShard, + BackendRoute: *url, + ShardEndHeight: shardHeight, + } + return sp.proxyByURL[url], metadata, true +} + +func newShardProxies(shardHostMap map[string]config.IntervalURLMap, beyondShardProxies Proxies, serviceLogger *logging.ServiceLogger) ShardProxies { + // create reverse proxy for each backend url + proxyByURL := make(map[*url.URL]*httputil.ReverseProxy) + for _, shards := range shardHostMap { + for _, route := range shards.UrlByEndHeight { + proxyByURL[route] = httputil.NewSingleHostReverseProxy(route) + } + } + + return ShardProxies{ + ServiceLogger: serviceLogger, + shardsByHost: shardHostMap, + defaultProxies: beyondShardProxies, + proxyByURL: proxyByURL, + } +} diff --git a/service/shard_test.go b/service/shard_test.go index 2f80405..9927940 100644 --- a/service/shard_test.go +++ b/service/shard_test.go @@ -9,14 +9,16 @@ import ( "github.com/stretchr/testify/require" ) -func TestUnitTest_HeightShardingProxies(t *testing.T) { +func TestUnitTest_PruningOrDefaultProxies(t *testing.T) { archiveBackend := "archivenode.kava.io/" pruningBackend := "pruningnode.kava.io/" config := newConfig(t, fmt.Sprintf("archive.kava.io>%s,pruning.kava.io>%s", archiveBackend, pruningBackend), fmt.Sprintf("archive.kava.io>%s", pruningBackend), + "", ) proxies := service.NewProxies(config, dummyLogger) + require.IsType(t, service.PruningOrDefaultProxies{}, proxies) testCases := []struct { name string @@ -154,8 +156,223 @@ func TestUnitTest_HeightShardingProxies(t *testing.T) { } require.True(t, found, "expected proxy to be found") require.NotNil(t, proxy) - require.Equal(t, metadata.BackendName, tc.expectBackend) - require.Equal(t, metadata.BackendRoute.String(), tc.expectRoute) + require.Equal(t, tc.expectBackend, metadata.BackendName) + require.Equal(t, tc.expectRoute, metadata.BackendRoute.String()) + requireProxyRoutesToUrl(t, proxy, req, tc.expectRoute) + }) + } +} + +// shard proxies with a pruning underlying 
proxy expects the same as above +// except that requests for specific heights that fall within a shard route to that shard. +func TestUnitTest_ShardProxies(t *testing.T) { + archiveBackend := "archivenode.kava.io/" + pruningBackend := "pruningnode.kava.io/" + shard1Backend := "shard-1.kava.io/" + shard2Backend := "shard-2.kava.io/" + config := newConfig(t, + fmt.Sprintf("archive.kava.io>%s,pruning.kava.io>%s", archiveBackend, pruningBackend), + fmt.Sprintf("archive.kava.io>%s", pruningBackend), + fmt.Sprintf("archive.kava.io>10|%s|20|%s", shard1Backend, shard2Backend), + ) + proxies := service.NewProxies(config, dummyLogger) + require.IsType(t, service.ShardProxies{}, proxies) + + testCases := []struct { + name string + url string + req *decode.EVMRPCRequestEnvelope + expectFound bool + expectBackend string + expectRoute string + }{ + // DEFAULT ROUTE CASES + { + name: "routes to default when not in pruning or shard map", + url: "//pruning.kava.io", + req: &decode.EVMRPCRequestEnvelope{}, + expectFound: true, + expectBackend: service.ResponseBackendDefault, + expectRoute: pruningBackend, + }, + { + name: "routes to default for specific height beyond latest shard", + url: "//archive.kava.io", + req: &decode.EVMRPCRequestEnvelope{ + Method: "eth_getBlockByNumber", + Params: []interface{}{"0xbaddad", false}, + }, + expectFound: true, + expectBackend: service.ResponseBackendDefault, + expectRoute: archiveBackend, + }, + { + name: "routes to default for methods that don't have block number", + url: "//archive.kava.io", + req: &decode.EVMRPCRequestEnvelope{ + Method: "eth_getBlockByHash", + Params: []interface{}{"0xe9bd10bc1d62b4406dd1fb3dbf3adb54f640bdb9ebbe3dd6dfc6bcc059275e54", false}, + }, + expectFound: true, + expectBackend: service.ResponseBackendDefault, + expectRoute: archiveBackend, + }, + { + name: "routes to default if it fails to decode req", + url: "//archive.kava.io", + req: nil, + expectFound: true, + expectBackend: service.ResponseBackendDefault, + 
expectRoute: archiveBackend, + }, + { + name: "routes to default if it fails to parse block number", + url: "//archive.kava.io", + req: &decode.EVMRPCRequestEnvelope{ + Method: "eth_getBlockByNumber", + Params: []interface{}{"not-a-block-tag", false}, + }, + expectFound: true, + expectBackend: service.ResponseBackendDefault, + expectRoute: archiveBackend, + }, + + // PRUNING ROUTE CASES + { + name: "routes to pruning for 'latest' block", + url: "//archive.kava.io", + req: &decode.EVMRPCRequestEnvelope{ + Method: "eth_getBlockByNumber", + Params: []interface{}{"latest", false}, + }, + expectFound: true, + expectBackend: service.ResponseBackendPruning, + expectRoute: pruningBackend, + }, + { + name: "routes to pruning when block number empty", + url: "//archive.kava.io", + req: &decode.EVMRPCRequestEnvelope{ + Method: "eth_getBlockByNumber", + Params: []interface{}{nil, false}, + }, + expectFound: true, + expectBackend: service.ResponseBackendPruning, + expectRoute: pruningBackend, + }, + { + name: "routes to pruning for no-history methods", + url: "//archive.kava.io", + req: &decode.EVMRPCRequestEnvelope{ + Method: "eth_chainId", + }, + expectFound: true, + expectBackend: service.ResponseBackendPruning, + expectRoute: pruningBackend, + }, + { + // this is just another example of the above, but worth pointing out! 
+ name: "routes to pruning when sending txs", + url: "//archive.kava.io", + req: &decode.EVMRPCRequestEnvelope{ + Method: "eth_sendTransaction", + Params: []interface{}{ + map[string]string{ + "from": "0xdeadbeef00000000000000000000000000000123", + "to": "0xbaddad0000000000000000000000000000000123", + "value": "0x1", + "gas": "0xeeee", + "gasPrice": "0x12345678900", + "nonce": "0x0", + }, + }, + }, + expectFound: true, + expectBackend: service.ResponseBackendPruning, + expectRoute: pruningBackend, + }, + + // SHARD ROUTE CASES + { + name: "routes to 1st shard for 'earliest' block", + url: "//archive.kava.io", + req: &decode.EVMRPCRequestEnvelope{ + Method: "eth_getBlockByNumber", + Params: []interface{}{"earliest", false}, + }, + expectFound: true, + expectBackend: service.ResponseBackendShard, + expectRoute: shard1Backend, + }, + { + name: "routes to shard 1 for specific height in shard 1", + url: "//archive.kava.io", + req: &decode.EVMRPCRequestEnvelope{ + Method: "eth_getBlockByNumber", + Params: []interface{}{"0x5", false}, // block 5 + }, + expectFound: true, + expectBackend: service.ResponseBackendShard, + expectRoute: shard1Backend, + }, + { + name: "end block of shard 1 routes to shard 1", + url: "//archive.kava.io", + req: &decode.EVMRPCRequestEnvelope{ + Method: "eth_getBlockByNumber", + Params: []interface{}{"0xA", false}, // block 10 + }, + expectFound: true, + expectBackend: service.ResponseBackendShard, + expectRoute: shard1Backend, + }, + { + name: "first block of shard 2 routes to shard 2", + url: "//archive.kava.io", + req: &decode.EVMRPCRequestEnvelope{ + Method: "eth_getBlockByNumber", + Params: []interface{}{"0xB", false}, // block 11 + }, + expectFound: true, + expectBackend: service.ResponseBackendShard, + expectRoute: shard2Backend, + }, + { + name: "routes to shard 2 for specific height in shard 2", + url: "//archive.kava.io", + req: &decode.EVMRPCRequestEnvelope{ + Method: "eth_getBlockByNumber", + Params: []interface{}{"0xF", false}, // 
block 15 + }, + expectFound: true, + expectBackend: service.ResponseBackendShard, + expectRoute: shard2Backend, + }, + { + name: "end block of shard 2 routes to shard 2", + url: "//archive.kava.io", + req: &decode.EVMRPCRequestEnvelope{ + Method: "eth_getBlockByNumber", + Params: []interface{}{"0x14", false}, // block 20 + }, + expectFound: true, + expectBackend: service.ResponseBackendShard, + expectRoute: shard2Backend, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + req := mockJsonRpcReqToUrl(tc.url, tc.req) + proxy, metadata, found := proxies.ProxyForRequest(req) + if !tc.expectFound { + require.False(t, found, "expected proxy not to be found") + return + } + require.True(t, found, "expected proxy to be found") + require.NotNil(t, proxy) + require.Equal(t, tc.expectBackend, metadata.BackendName) + require.Equal(t, tc.expectRoute, metadata.BackendRoute.String()) requireProxyRoutesToUrl(t, proxy, req, tc.expectRoute) }) }