diff --git a/.github/workflows/ci-e2e-no-metrics-tests.yml b/.github/workflows/ci-e2e-no-metrics-tests.yml
new file mode 100644
index 0000000..77e483a
--- /dev/null
+++ b/.github/workflows/ci-e2e-no-metrics-tests.yml
@@ -0,0 +1,32 @@
+name: Continuous Integration (E2E Testing Checks without metrics database)
+
+on:
+  workflow_call:
+jobs:
+  e2e-no-metrics-test:
+    runs-on: ubuntu-latest
+    steps:
+      - name: checkout repo from current commit
+        uses: actions/checkout@v3
+      - name: set up Go
+        uses: actions/setup-go@v3
+        with:
+          go-version: "1.21"
+          check-latest: true
+          cache: false
+      - name: pull pre-built images
+        run: sudo docker compose -f ci.docker-compose.yml pull
+      - name: build and start proxy service and its dependencies
+        # We need to provide an additional env file to override the METRIC_DATABASE_ENABLED variable, since it can't be overridden via an environment variable.
+        # Mentioned here: https://github.com/docker/compose/issues/9737
+        run: sudo docker compose -f ci.docker-compose.yml --env-file .env --env-file no_metric.env up -d --build proxy redis
+      - name: wait for proxy service to be running
+        run: bash ${GITHUB_WORKSPACE}/scripts/wait-for-proxy-service-running.sh
+        env:
+          PROXY_CONTAINER_PORT: 7777
+      - name: run e2e tests
+        run: SKIP_METRICS=true make e2e-test
+      - name: print proxy service logs
+        run: sudo docker compose -f ci.docker-compose.yml logs proxy
+        # because we especially want the logs if the test(s) fail 😅
+        if: always()
diff --git a/.github/workflows/ci-main.yml b/.github/workflows/ci-main.yml
index 5e5eb5c..004b310 100644
--- a/.github/workflows/ci-main.yml
+++ b/.github/workflows/ci-main.yml
@@ -11,10 +11,14 @@ jobs:
   # run default ci checks against main branch
   default-checks:
     uses: ./.github/workflows/ci-default.yml
-  # run e2e testing ci checks against main branch
+  # run e2e testing ci checks for internal testnet against main branch
   e2e-tests:
     needs: [lint-checks, default-checks]
     uses: ./.github/workflows/ci-e2e-tests.yml
+  # run e2e testing ci checks for internal testnet without metrics db against main branch
+  e2e-no-metrics-tests:
+    needs: [lint-checks, default-checks]
+    uses: ./.github/workflows/ci-e2e-no-metrics-tests.yml
   # build, tag and publish new service docker images
   release-docker-images:
     needs: [e2e-tests]
diff --git a/.github/workflows/ci-pr.yml b/.github/workflows/ci-pr.yml
index d00f7ff..9a0b81b 100644
--- a/.github/workflows/ci-pr.yml
+++ b/.github/workflows/ci-pr.yml
@@ -11,3 +11,5 @@ jobs:
     uses: ./.github/workflows/ci-default.yml
   e2e-tests:
     uses: ./.github/workflows/ci-e2e-tests.yml
+  e2e-no-metrics-tests:
+    uses: ./.github/workflows/ci-e2e-no-metrics-tests.yml
diff --git a/Makefile b/Makefile
index 6e3608c..f9c3a2b 100644
--- a/Makefile
+++ b/Makefile
@@ -44,6 +44,11 @@ unit-test:
 e2e-test:
 	go test -count=1 -v -cover -coverprofile cover.out --race ./... -run "^TestE2ETest*"
 
+.PHONY: e2e-test-no-metrics
+# run tests that execute against a local or remote instance of the API without a metrics database
+e2e-test-no-metrics:
+	SKIP_METRICS=true go test -count=1 -v -cover -coverprofile cover.out --race ./... -run "^TestE2ETest*"
+
 .PHONY: ci-setup
 # set up your local environment such that running `make e2e-test` runs against testnet (like in CI)
 ci-setup:
diff --git a/ci.docker-compose.yml b/ci.docker-compose.yml
index 51e22b1..bf52bfc 100644
--- a/ci.docker-compose.yml
+++ b/ci.docker-compose.yml
@@ -2,7 +2,7 @@ services:
   # run postgres for proxy service to store observability metrics
   postgres:
-    image: postgres:15
+    image: postgres:13.12
     env_file: .env
     ports:
       - "${POSTGRES_HOST_PORT}:${POSTGRES_CONTAINER_PORT}"
@@ -32,6 +32,9 @@ services:
       # fake the shards by defining shards with existing backends
      PROXY_SHARD_BACKEND_HOST_URL_MAP: localhost:7777>10|https://evmrpcdata.internal.testnet.proxy.kava.io|20|https://evmrpcdata.internal.testnet.proxy.kava.io
      EVM_QUERY_SERVICE_URL: https://evmrpc.internal.testnet.proxy.kava.io
+      # METRIC_DATABASE_ENABLED must come from no_metric.env (or default to true) so that metrics collection can still be tested.
+      # It can't be overridden via an environment variable, so an env file is needed. Mentioned here: https://github.com/docker/compose/issues/9737
+      METRIC_DATABASE_ENABLED: "${METRIC_DATABASE_ENABLED}"
     ports:
       - "${PROXY_HOST_PORT}:${PROXY_CONTAINER_PORT}"
       - "${TEST_UNCONFIGURED_PROXY_PORT}:${PROXY_CONTAINER_PORT}"
diff --git a/clients/database/database.go b/clients/database/database.go
index acd40c1..bdff4e0 100644
--- a/clients/database/database.go
+++ b/clients/database/database.go
@@ -14,7 +14,11 @@ import (
 // that haven't been run on the database being used by the proxy service
 // returning error (if any) and a list of migrations that have been
 // run and any that were not
+// If db is nil, returns an empty slice and nil error, as there is no database to migrate.
 func Migrate(ctx context.Context, db *bun.DB, migrations migrate.Migrations, logger *logging.ServiceLogger) (*migrate.MigrationSlice, error) {
+	if db == nil {
+		return &migrate.MigrationSlice{}, nil
+	}
 	// set up migration config
 	migrator := migrate.NewMigrator(db, &migrations)
diff --git a/clients/database/database_test.go b/clients/database/database_test.go
new file mode 100644
index 0000000..20e779d
--- /dev/null
+++ b/clients/database/database_test.go
@@ -0,0 +1,14 @@
+package database
+
+import (
+	"context"
+	"github.com/stretchr/testify/require"
+	"github.com/uptrace/bun/migrate"
+	"testing"
+)
+
+func TestMigrateNoDatabase(t *testing.T) {
+	migrations, err := Migrate(context.Background(), nil, migrate.Migrations{}, nil)
+	require.NoError(t, err)
+	require.Empty(t, migrations)
+}
diff --git a/clients/database/postgres.go b/clients/database/postgres.go
index 7133831..02b0195 100644
--- a/clients/database/postgres.go
+++ b/clients/database/postgres.go
@@ -16,12 +16,15 @@ import (
 // PostgresDatabaseConfig contains values for creating a
 // new connection to a postgres database
 type PostgresDatabaseConfig struct {
+	// DatabaseDisabled disables the database entirely; all database operations will be skipped.
+	DatabaseDisabled bool
+
 	DatabaseName                     string
 	DatabaseEndpointURL              string
 	DatabaseUsername                 string
 	DatabasePassword                 string
 	ReadTimeoutSeconds               int64
-	WriteTimeousSeconds              int64
+	WriteTimeoutSeconds              int64
 	DatabaseMaxIdleConnections       int64
 	DatabaseConnectionMaxIdleSeconds int64
 	DatabaseMaxOpenConnections       int64
@@ -33,12 +36,19 @@ type PostgresDatabaseConfig struct {
 
 // PostgresClient wraps a connection to a postgres database
 type PostgresClient struct {
+	isDisabled bool
 	*bun.DB
 }
 
 // NewPostgresClient returns a new connection to the specified
 // postgres data and error (if any)
 func NewPostgresClient(config PostgresDatabaseConfig) (PostgresClient, error) {
+	if config.DatabaseDisabled {
+		return PostgresClient{
+			isDisabled: true,
+		}, nil
+	}
+
 	// configure postgres database connection options
 	var pgOptions *pgdriver.Connector
 
@@ -54,7 +64,7 @@ func NewPostgresClient(config PostgresDatabaseConfig) (PostgresClient, error) {
 			pgdriver.WithPassword(config.DatabasePassword),
 			pgdriver.WithDatabase(config.DatabaseName),
 			pgdriver.WithReadTimeout(time.Second*time.Duration(config.ReadTimeoutSeconds)),
-			pgdriver.WithWriteTimeout(time.Second*time.Duration(config.WriteTimeousSeconds)),
+			pgdriver.WithWriteTimeout(time.Second*time.Duration(config.WriteTimeoutSeconds)),
 		)
 	} else {
 		pgOptions = pgdriver.NewConnector(
@@ -64,7 +74,7 @@ func NewPostgresClient(config PostgresDatabaseConfig) (PostgresClient, error) {
 			pgdriver.WithPassword(config.DatabasePassword),
 			pgdriver.WithDatabase(config.DatabaseName),
 			pgdriver.WithReadTimeout(time.Second*time.Duration(config.ReadTimeoutSeconds)),
-			pgdriver.WithWriteTimeout(time.Second*time.Duration(config.WriteTimeousSeconds)),
+			pgdriver.WithWriteTimeout(time.Second*time.Duration(config.WriteTimeoutSeconds)),
 		)
 	}
 
@@ -94,5 +104,9 @@ func NewPostgresClient(config PostgresDatabaseConfig) (PostgresClient, error) {
 // HealthCheck returns an error if the database can not
 // be connected to and queried, nil otherwise
 func (pg *PostgresClient) HealthCheck() error {
+	if pg.isDisabled {
+		return nil
+	}
+
 	return pg.Ping()
 }
diff --git a/clients/database/postgres_test.go b/clients/database/postgres_test.go
new file mode 100644
index 0000000..2cbbb7e
--- /dev/null
+++ b/clients/database/postgres_test.go
@@ -0,0 +1,25 @@
+package database
+
+import (
+	"github.com/stretchr/testify/require"
+	"testing"
+)
+
+func TestDisabledDBCreation(t *testing.T) {
+	config := PostgresDatabaseConfig{
+		DatabaseDisabled: true,
+	}
+	db, err := NewPostgresClient(config)
+	require.NoError(t, err)
+	require.True(t, db.isDisabled)
+}
+
+func TestHealthcheckNoDatabase(t *testing.T) {
+	config := PostgresDatabaseConfig{
+		DatabaseDisabled: true,
+	}
+	db, err := NewPostgresClient(config)
+	require.NoError(t, err)
+	err = db.HealthCheck()
+	require.NoError(t, err)
+}
diff --git a/clients/database/request_metric.go b/clients/database/request_metric.go
index 2ec3e9c..9b98c12 100644
--- a/clients/database/request_metric.go
+++ b/clients/database/request_metric.go
@@ -34,8 +34,13 @@ type ProxiedRequestMetric struct {
 }
 
 // Save saves the current ProxiedRequestMetric to
-// the database, returning error (if any)
+// the database, returning error (if any).
+// If db is nil, returns nil error.
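+// This allows metric recording to become a no-op when the metrics database is disabled and no *bun.DB connection exists.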
 func (prm *ProxiedRequestMetric) Save(ctx context.Context, db *bun.DB) error {
+	if db == nil {
+		return nil
+	}
+
 	_, err := db.NewInsert().Model(prm).Exec(ctx)
 
 	return err
@@ -44,8 +49,13 @@ func (prm *ProxiedRequestMetric) Save(ctx context.Context, db *bun.DB) error {
 // ListProxiedRequestMetricsWithPagination returns a page of max
 // `limit` ProxiedRequestMetrics from the offset specified by`cursor`
 // error (if any) along with a cursor to use to fetch the next page
-// if the cursor is 0 no more pages exists
+// if the cursor is 0 no more pages exist.
+// Used only in tests. If db is nil, returns an empty slice and 0 cursor.
 func ListProxiedRequestMetricsWithPagination(ctx context.Context, db *bun.DB, cursor int64, limit int) ([]ProxiedRequestMetric, int64, error) {
+	if db == nil {
+		return []ProxiedRequestMetric{}, 0, nil
+	}
+
 	var proxiedRequestMetrics []ProxiedRequestMetric
 	var nextCursor int64
 
@@ -62,8 +72,13 @@ func ListProxiedRequestMetricsWithPagination(ctx context.Context, db *bun.DB, cu
 
 // CountAttachedProxiedRequestMetricPartitions returns the current
 // count of attached partitions for the ProxiedRequestMetricsTableName
-// and error (if any)
+// and error (if any).
+// If db is nil, returns 0 and nil error.
 func CountAttachedProxiedRequestMetricPartitions(ctx context.Context, db *bun.DB) (int64, error) {
+	if db == nil {
+		return 0, nil
+	}
+
 	var count int64
 
 	countPartitionsQuery := fmt.Sprintf(`
@@ -88,7 +103,12 @@ func CountAttachedProxiedRequestMetricPartitions(ctx context.Context, db *bun.DB
 
 // GetLastCreatedAttachedProxiedRequestMetricsPartitionName gets the table name
 // for the last created (and attached) proxied request metrics partition
+// Used for status checks. If db is nil, returns an empty string and nil error.
 func GetLastCreatedAttachedProxiedRequestMetricsPartitionName(ctx context.Context, db *bun.DB) (string, error) {
+	if db == nil {
+		return "", nil
+	}
+
 	var lastCreatedAttachedPartitionName string
 
 	lastCreatedAttachedPartitionNameQuery := fmt.Sprintf(`
@@ -114,8 +134,13 @@ WHERE parent.relname='%s' order by child.oid desc limit 1;`, ProxiedRequestMetri
 
 // DeleteProxiedRequestMetricsOlderThanNDays deletes
 // all proxied request metrics older than the specified
-// days, returning error (if any)
+// days, returning error (if any).
+// Used during the pruning process. If db is nil, returns nil error.
 func DeleteProxiedRequestMetricsOlderThanNDays(ctx context.Context, db *bun.DB, n int64) error {
+	if db == nil {
+		return nil
+	}
+
 	_, err := db.NewDelete().Model((*ProxiedRequestMetric)(nil)).Where(fmt.Sprintf("request_time < now() - interval '%d' day", n)).Exec(ctx)
 
 	return err
diff --git a/clients/database/request_metric_test.go b/clients/database/request_metric_test.go
new file mode 100644
index 0000000..37afe30
--- /dev/null
+++ b/clients/database/request_metric_test.go
@@ -0,0 +1,37 @@
+package database
+
+import (
+	"context"
+	"github.com/stretchr/testify/require"
+	"testing"
+)
+
+func TestNoDatabaseSave(t *testing.T) {
+	prm := ProxiedRequestMetric{}
+	err := prm.Save(context.Background(), nil)
+	require.NoError(t, err)
+}
+
+func TestNoDatabaseListProxiedRequestMetricsWithPagination(t *testing.T) {
+	proxiedRequestMetrics, cursor, err := ListProxiedRequestMetricsWithPagination(context.Background(), nil, 0, 0)
+	require.NoError(t, err)
+	require.Empty(t, proxiedRequestMetrics)
+	require.Zero(t, cursor)
+}
+
+func TestNoDatabaseCountAttachedProxiedRequestMetricPartitions(t *testing.T) {
+	count, err := CountAttachedProxiedRequestMetricPartitions(context.Background(), nil)
+	require.NoError(t, err)
+	require.Zero(t, count)
+}
+
+func TestGetLastCreatedAttachedProxiedRequestMetricsPartitionName(t *testing.T) {
+	partitionName, err := GetLastCreatedAttachedProxiedRequestMetricsPartitionName(context.Background(), nil)
+	require.NoError(t, err)
+	require.Empty(t, partitionName)
+}
+
+func TestDeleteProxiedRequestMetricsOlderThanNDays(t *testing.T) {
+	err := DeleteProxiedRequestMetricsOlderThanNDays(context.Background(), nil, 0)
+	require.NoError(t, err)
+}
diff --git a/config/config.go b/config/config.go
index 3adcff8..029c349 100644
--- a/config/config.go
+++ b/config/config.go
@@ -48,6 +48,7 @@ type Config struct {
 	MetricPruningRoutineInterval              time.Duration
 	MetricPruningRoutineDelayFirstRun         time.Duration
 	MetricPruningMaxRequestMetricsHistoryDays int
+	MetricDatabaseEnabled                     bool
 	CacheEnabled                              bool
 	RedisEndpointURL                          string
 	RedisPassword                             string
@@ -101,6 +102,8 @@ const (
 	DEFAULT_METRIC_PRUNING_ENABLED                                 = true
 	METRIC_PRUNING_ROUTINE_INTERVAL_SECONDS_ENVIRONMENT_KEY        = "METRIC_PRUNING_ROUTINE_INTERVAL_SECONDS"
 	// 60 seconds * 60 minutes * 24 hours = 1 day
+	METRIC_DATABASE_ENABLED_ENVIRONMENT_KEY                        = "METRIC_DATABASE_ENABLED"
+	DEFAULT_METRIC_DATABASE_ENABLED                                = true
 	DEFAULT_METRIC_PRUNING_ROUTINE_INTERVAL_SECONDS                = 86400
 	METRIC_PRUNING_ROUTINE_DELAY_FIRST_RUN_SECONDS_ENVIRONMENT_KEY = "METRIC_PRUNING_ROUTINE_DELAY_FIRST_RUN_SECONDS"
 	DEFAULT_METRIC_PRUNING_ROUTINE_DELAY_FIRST_RUN_SECONDS         = 10
@@ -380,6 +383,7 @@ func ReadConfig() Config {
 		MetricPruningRoutineInterval:              time.Duration(time.Duration(EnvOrDefaultInt(METRIC_PRUNING_ROUTINE_INTERVAL_SECONDS_ENVIRONMENT_KEY, DEFAULT_METRIC_PRUNING_ROUTINE_INTERVAL_SECONDS)) * time.Second),
 		MetricPruningRoutineDelayFirstRun:         time.Duration(time.Duration(EnvOrDefaultInt(METRIC_PRUNING_ROUTINE_DELAY_FIRST_RUN_SECONDS_ENVIRONMENT_KEY, DEFAULT_METRIC_PRUNING_ROUTINE_DELAY_FIRST_RUN_SECONDS)) * time.Second),
 		MetricPruningMaxRequestMetricsHistoryDays: EnvOrDefaultInt(METRIC_PRUNING_MAX_REQUEST_METRICS_HISTORY_DAYS_ENVIRONMENT_KEY, DEFAULT_METRIC_PRUNING_MAX_REQUEST_METRICS_HISTORY_DAYS),
+		MetricDatabaseEnabled:                     EnvOrDefaultBool(METRIC_DATABASE_ENABLED_ENVIRONMENT_KEY, DEFAULT_METRIC_DATABASE_ENABLED),
 		CacheEnabled:                              EnvOrDefaultBool(CACHE_ENABLED_ENVIRONMENT_KEY, false),
 		RedisEndpointURL:                          os.Getenv(REDIS_ENDPOINT_URL_ENVIRONMENT_KEY),
 		RedisPassword:                             os.Getenv(REDIS_PASSWORD_ENVIRONMENT_KEY),
diff --git a/docker-compose.yml b/docker-compose.yml
index 178fba2..b09fd5e 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -2,7 +2,7 @@ services:
   # run postgres for proxy service to store observability metrics
   postgres:
-    image: postgres:15
+    image: postgres:13.12
     env_file: .env
     ports:
       - "${POSTGRES_HOST_PORT}:${POSTGRES_CONTAINER_PORT}"
diff --git a/main.go b/main.go
index 62733b2..78ac832 100644
--- a/main.go
+++ b/main.go
@@ -37,6 +37,12 @@ func init() {
 }
 
 func startMetricPartitioningRoutine(serviceConfig config.Config, service service.ProxyService, serviceLogger logging.ServiceLogger) <-chan error {
+	if !serviceConfig.MetricDatabaseEnabled {
+		serviceLogger.Info().Msg("skipping starting metric partitioning routine since it is disabled via config")
+
+		return nil
+	}
+
 	metricPartitioningRoutineConfig := routines.MetricPartitioningRoutineConfig{
 		Interval:          serviceConfig.MetricPartitioningRoutineInterval,
 		DelayFirstRun:     serviceConfig.MetricPartitioningRoutineDelayFirstRun,
@@ -67,6 +73,12 @@ func startMetricPartitioningRoutine(serviceConfig config.Config, service service
 }
 
 func startMetricCompactionRoutine(serviceConfig config.Config, service service.ProxyService, serviceLogger logging.ServiceLogger) <-chan error {
+	if !serviceConfig.MetricDatabaseEnabled {
+		serviceLogger.Info().Msg("skipping starting metric compaction routine since it is disabled via config")
+
+		return nil
+	}
+
 	metricCompactionRoutineConfig := routines.MetricCompactionRoutineConfig{
 		Interval: serviceConfig.MetricCompactionRoutineInterval,
 		Database: service.Database,
@@ -95,8 +107,9 @@ func startMetricCompactionRoutine(serviceConfig config.Config, service service.P
 }
 
 func startMetricPruningRoutine(serviceConfig config.Config, service service.ProxyService, serviceLogger logging.ServiceLogger) <-chan error {
-	if !serviceConfig.MetricPruningEnabled {
+	if !serviceConfig.MetricPruningEnabled || !serviceConfig.MetricDatabaseEnabled {
 		serviceLogger.Info().Msg("skipping starting metric pruning routine since it is disabled via config")
+
 		return make(<-chan error)
 	}
diff --git a/main_batch_test.go b/main_batch_test.go
index 0dec9f8..672273d 100644
--- a/main_batch_test.go
+++ b/main_batch_test.go
@@ -189,6 +189,7 @@ func TestE2ETest_ValidBatchEvmRequests(t *testing.T) {
 
 	for _, tc := range testCases {
 		startTime := time.Now()
+		time.Sleep(1 * time.Second) // ensure startTime is well before any metrics are created
 		t.Run(tc.name, func(t *testing.T) {
 			reqInJSON, err := json.Marshal(tc.req)
 			require.NoError(t, err)
@@ -230,6 +231,10 @@ func TestE2ETest_ValidBatchEvmRequests(t *testing.T) {
 				require.Equal(t, resp.Header[accessControlAllowOriginHeaderName], []string{"*"})
 			}
 
+			if shouldSkipMetrics() {
+				return
+			}
+
 			// wait for all metrics to be created.
 			// besides verification, waiting for the metrics ensures future tests don't fail b/c metrics are being processed
 			waitForMetricsInWindow(t, tc.expectedNumMetrics, db, startTime, []string{})
diff --git a/main_test.go b/main_test.go
index 6f554bd..5ad4b40 100644
--- a/main_test.go
+++ b/main_test.go
@@ -147,9 +147,9 @@ func waitForMetricsInWindow(
 	startTime time.Time,
 	testedmethods []string,
 ) (metrics []database.ProxiedRequestMetric) {
-	timeoutMin := 1 * time.Second
+	timeoutMin := 2 * time.Second
 	// scale the timeout by the number of expected requests, or at least 1 second
-	timeout := time.Duration(expected+1) * 100 * time.Millisecond
+	timeout := time.Duration(expected+1)*100*time.Millisecond + time.Second
 	if timeout < timeoutMin {
 		timeout = timeoutMin
 	}
@@ -208,11 +216,16 @@ func TestE2ETestProxyCreatesRequestMetricForEachRequest(t *testing.T) {
 	// make request to api and track start / end time of the request to
 	startTime := time.Now()
+	time.Sleep(1 * time.Second)
 
 	_, err = client.HeaderByNumber(testContext, nil)
 
 	require.NoError(t, err)
 
+	if shouldSkipMetrics() {
+		return
+	}
+
 	requestMetricsDuringRequestWindow := waitForMetricsInWindow(t, 1, databaseClient, startTime, []string{testEthMethodName})
 
 	requestMetricDuringRequestWindow := requestMetricsDuringRequestWindow[0]
@@ -248,11 +253,16 @@ func TestE2ETestProxyTracksBlockNumberForEth_getBlockByNumberRequest(t *testing.
 	// make request to api and track start / end time of the request to
 	startTime := time.Now()
+	time.Sleep(1 * time.Second)
 
 	_, err = client.HeaderByNumber(testContext, requestBlockNumber)
 
 	require.NoError(t, err)
 
+	if shouldSkipMetrics() {
+		return
+	}
+
 	requestMetricsDuringRequestWindow := waitForMetricsInWindow(
 		t, 1, databaseClient, startTime, []string{testEthMethodName},
 	)
@@ -277,12 +287,17 @@ func TestE2ETestProxyTracksBlockTagForEth_getBlockByNumberRequest(t *testing.T)
 	// make request to api and track start / end time of the request to
 	startTime := time.Now()
+	time.Sleep(1 * time.Second)
 
 	// will default to latest
 	_, err = client.HeaderByNumber(testContext, nil)
 
 	require.NoError(t, err)
 
+	if shouldSkipMetrics() {
+		return
+	}
+
 	requestMetricsDuringRequestWindow := waitForMetricsInWindow(
 		t, 1, databaseClient, startTime, []string{testEthMethodName},
 	)
@@ -321,6 +336,7 @@ func TestE2ETestProxyTracksBlockNumberForMethodsWithBlockNumberParam(t *testing.
 	// for each request whether the kava node api returns an error or not
 	// and if it doesn't the test itself will fail due to missing metrics
 	startTime := time.Now()
+	time.Sleep(1 * time.Second)
 
 	// eth_getBalance
 	_, _ = client.BalanceAt(testContext, testAddress, requestBlockNumber)
@@ -343,6 +359,10 @@ func TestE2ETestProxyTracksBlockNumberForMethodsWithBlockNumberParam(t *testing.
 	// eth_call
 	_, _ = client.CallContract(testContext, ethereum.CallMsg{}, requestBlockNumber)
 
+	if shouldSkipMetrics() {
+		return
+	}
+
 	requestMetricsDuringRequestWindow := waitForMetricsInWindow(
 		t, 7, databaseClient, startTime, testedmethods,
 	)
@@ -385,6 +405,7 @@ func TestE2ETestProxyTracksBlockNumberForMethodsWithBlockHashParam(t *testing.T)
 	// for each request whether the kava node api returns an error or not
 	// and if it doesn't the test itself will fail due to missing metrics
 	startTime := time.Now()
+	time.Sleep(1 * time.Second)
 
 	// eth_getBlockByHash
 	_, _ = client.BlockByHash(testContext, requestBlockHash)
@@ -395,6 +416,10 @@ func TestE2ETestProxyTracksBlockNumberForMethodsWithBlockHashParam(t *testing.T)
 
 	// eth_getTransactionByBlockHashAndIndex
 	_, _ = client.TransactionInBlock(testContext, requestBlockHash, 0)
 
+	if shouldSkipMetrics() {
+		return
+	}
+
 	requestMetricsDuringRequestWindow := waitForMetricsInWindow(
 		t, 3, databaseClient, startTime, testedmethods,
 	)
@@ -487,9 +512,14 @@ func TestE2ETest_HeightBasedRouting(t *testing.T) {
 	for _, tc := range testCases {
 		t.Run(tc.name, func(t *testing.T) {
 			startTime := time.Now()
+			time.Sleep(1 * time.Second)
 			err := rpc.Call(nil, tc.method, tc.params...)
 			require.NoError(t, err)
 
+			if shouldSkipMetrics() {
+				return
+			}
+
 			metrics := waitForMetricsInWindow(t, 1, databaseClient, startTime, []string{tc.method})
 			require.Len(t, metrics, 1)
@@ -658,6 +688,7 @@ func TestE2ETestCachingMdwWithBlockNumberParam_Metrics(t *testing.T) {
 	expectKeysNum(t, redisClient, 0)
 	// startTime is a time before first request
 	startTime := time.Now()
+	time.Sleep(1 * time.Second)
 
 	for _, tc := range []struct {
 		desc string
@@ -721,6 +752,12 @@ func TestE2ETestCachingMdwWithBlockNumberParam_Metrics(t *testing.T) {
 		require.Equal(t, block1, block2, "blocks should be the same")
 	}
 
+	if shouldSkipMetrics() {
+		cleanUpRedis(t, redisClient)
+
+		return
+	}
+
 	// get metrics between startTime & now for eth_getBlockByNumber requests
 	filteredMetrics := waitForMetricsInWindow(t, 4, db, startTime, []string{"eth_getBlockByNumber"})
@@ -1180,6 +1217,10 @@ func cleanUpRedis(t *testing.T, redisClient *redis.Client) {
 }
 
 func cleanMetricsDb(t *testing.T, db database.PostgresClient) {
+	if shouldSkipMetrics() {
+		return
+	}
+
 	_, err := db.Exec("TRUNCATE proxied_request_metrics;")
 	require.NoError(t, err)
 }
@@ -1639,3 +1680,8 @@ func (tx *getTxReceiptByHashResponse) IsIncludedInBlock() bool {
 		tx.Result.BlockNumber != "" &&
 		tx.Result.TransactionIndex != ""
 }
+
+func shouldSkipMetrics() bool {
+	// Check if the environment variable SKIP_METRICS is set to "true"
+	return os.Getenv("SKIP_METRICS") == "true"
+}
diff --git a/no_metric.env b/no_metric.env
new file mode 100644
index 0000000..2e282f5
--- /dev/null
+++ b/no_metric.env
@@ -0,0 +1,6 @@
+##### Local development config, used for CI to disable metrics
+
+# Controls whether the metrics database is used. If set to false,
+# all metric collection and the connection to the DB are disabled
+# and the service runs without any metrics
+METRIC_DATABASE_ENABLED=false
\ No newline at end of file
diff --git a/routines/metric_partitioning_test.go b/routines/metric_partitioning_test.go
index 436b96f..a4400f2 100644
--- a/routines/metric_partitioning_test.go
+++ b/routines/metric_partitioning_test.go
@@ -31,6 +31,10 @@ var (
 )
 
 func TestE2ETestMetricPartitioningRoutinePrefillsExpectedPartitionsAfterStartupDelay(t *testing.T) {
+	if shouldSkipMetrics() {
+		t.Skip("Skipping test because environment variable SKIP_METRICS is set to true")
+	}
+
 	// prepare
 	time.Sleep(time.Duration(MetricPartitioningRoutineDelayFirstRunSeconds) * time.Second)
@@ -93,3 +97,8 @@ func TestUnitTestpartitionsForPeriodReturnsExpectedNumPartitionsWhenPrefillPerio
 	assert.Nil(t, err)
 	assert.Equal(t, daysToPrefill, len(actualPartitionsForPeriod))
 }
+
+func shouldSkipMetrics() bool {
+	// Check if the environment variable SKIP_METRICS is set to "true"
+	return os.Getenv("SKIP_METRICS") == "true"
+}
diff --git a/service/service.go b/service/service.go
index 7b41c6c..fc0a20a 100644
--- a/service/service.go
+++ b/service/service.go
@@ -138,10 +138,11 @@ func New(ctx context.Context, config config.Config, serviceLogger *logging.Servi
 // createDatabaseClient creates a connection to the database
 // using the specified config and runs migrations async
-// (only if migration flag in config is true) returning the
+// (only if migration flag in config is true)
+// returning the
 // database connection and error (if any)
 func createDatabaseClient(ctx context.Context, config config.Config, logger *logging.ServiceLogger) (*database.PostgresClient, error) {
 	databaseConfig := database.PostgresDatabaseConfig{
+		DatabaseDisabled:                 !config.MetricDatabaseEnabled,
 		DatabaseName:                     config.DatabaseName,
 		DatabaseEndpointURL:              config.DatabaseEndpointURL,
 		DatabaseUsername:                 config.DatabaseUserName,
@@ -149,7 +150,7 @@ func createDatabaseClient(ctx context.Context, config config.Config, logger *log
 		SSLEnabled:                       config.DatabaseSSLEnabled,
 		QueryLoggingEnabled:              config.DatabaseQueryLoggingEnabled,
 		ReadTimeoutSeconds:               config.DatabaseReadTimeoutSeconds,
-		WriteTimeousSeconds:              config.DatabaseWriteTimeoutSeconds,
+		WriteTimeoutSeconds:              config.DatabaseWriteTimeoutSeconds,
 		DatabaseMaxIdleConnections:       config.DatabaseMaxIdleConnections,
 		DatabaseConnectionMaxIdleSeconds: config.DatabaseConnectionMaxIdleSeconds,
 		DatabaseMaxOpenConnections:       config.DatabaseMaxOpenConnections,
@@ -173,7 +174,7 @@ func createDatabaseClient(ctx context.Context, config config.Config, logger *log
 	// run migrations async so waiting for the database to
 	// be reachable doesn't block the ability of the proxy service
 	// to degrade gracefully and continue to proxy requests even
-	// without it's database
+	// without its database
 	go func() {
 		// wait for database to be reachable
 		var databaseOnline bool