From 43761013333ddbe4f19dd425ac28c19787597f68 Mon Sep 17 00:00:00 2001
From: Jorge Turrado Ferrero
Date: Sat, 2 Nov 2024 00:19:05 +0100
Subject: [PATCH] e2e test issues solved (#6271)

* Improve e2e tests

Signed-off-by: Jorge Turrado

* .

Signed-off-by: Jorge Turrado

* .

Signed-off-by: Jorge Turrado

* .

Signed-off-by: Jorge Turrado

* increase the elastic messages

Signed-off-by: Jorge Turrado

* fix namespace duplications

Signed-off-by: Jorge Turrado

---------

Signed-off-by: Jorge Turrado
Signed-off-by: Jorge Turrado
---
 .github/workflows/pr-e2e.yml                  |  2 +-
 .github/workflows/template-main-e2e-test.yml  |  2 +-
 pkg/scalers/kubernetes_workload_scaler.go     | 12 +++-----
 tests/helper/helper.go                        | 12 ++++++++
 .../cache_metrics/cache_metrics_test.go       |  8 ++---
 .../azureeventgridtopic_test.go               | 30 +++++++++++--------
 .../idle_replicas/idle_replicas_test.go       |  4 +--
 .../restore_original/restore_original_test.go |  4 +--
 .../scaled_job_validation_test.go             |  2 +-
 .../eager_scaling_strategy_test.go            | 10 +++++--
 .../value_metric_type_test.go                 |  4 +--
 tests/run-all.go                              |  4 +--
 tests/scalers/artemis/artemis_test.go         | 21 +++++++++----
 .../azure_event_hub_dapr_wi_test.go           |  2 +-
 .../azure_service_bus_queue_regex_test.go     |  4 +--
 .../azure_service_bus_topic_regex_test.go     |  4 +--
 .../elasticsearch/elasticsearch_test.go       |  4 +--
 .../external_scaler_sj_test.go                |  8 +++--
 .../github_runner/github_runner_test.go       |  2 --
 .../rabbitmq_queue_amqp_test.go               |  4 +--
 .../rabbitmq_queue_amqp_auth_test.go          |  2 +-
 .../rabbitmq_queue_amqp_vhost_test.go         |  4 +--
 .../rabbitmq_queue_http_test.go               |  4 +--
 .../rabbitmq_queue_http_aad_wi_test.go        |  4 +--
 .../rabbitmq_queue_http_auth_test.go          |  2 +-
 .../rabbitmq_queue_http_vhost_test.go         |  4 +--
 tests/scalers/solace/solace_test.go           | 14 ++++-----
 .../datadog_dca/datadog_dca_test.go           |  4 +++
 tests/utils/helper/helper.go                  |  5 ++--
 29 files changed, 109 insertions(+), 77 deletions(-)
 rename tests/{scalers/datadog => sequential}/datadog_dca/datadog_dca_test.go (97%)

diff --git a/.github/workflows/pr-e2e.yml b/.github/workflows/pr-e2e.yml
index 72d710d4b2d..bc0a5b2640d 100644
--- a/.github/workflows/pr-e2e.yml
+++ b/.github/workflows/pr-e2e.yml
@@ -181,7 +181,7 @@ jobs:
       - name: Scale cluster
         run: make scale-node-pool
         env:
-          NODE_POOL_SIZE: 2
+          NODE_POOL_SIZE: 3
           TEST_CLUSTER_NAME: keda-e2e-cluster-pr
 
       - name: Run end to end tests
diff --git a/.github/workflows/template-main-e2e-test.yml b/.github/workflows/template-main-e2e-test.yml
index d6dbb992e5a..88d4504bcd3 100644
--- a/.github/workflows/template-main-e2e-test.yml
+++ b/.github/workflows/template-main-e2e-test.yml
@@ -26,7 +26,7 @@ jobs:
       - name: Scale cluster
         run: make scale-node-pool
         env:
-          NODE_POOL_SIZE: 2
+          NODE_POOL_SIZE: 3
 
       - name: Run end to end tests
         env:
diff --git a/pkg/scalers/kubernetes_workload_scaler.go b/pkg/scalers/kubernetes_workload_scaler.go
index a3ceeaa0405..a2a658f7cba 100644
--- a/pkg/scalers/kubernetes_workload_scaler.go
+++ b/pkg/scalers/kubernetes_workload_scaler.go
@@ -33,7 +33,7 @@ var phasesCountedAsTerminated = []corev1.PodPhase{
 
 type kubernetesWorkloadMetadata struct {
 	PodSelector     string  `keda:"name=podSelector, order=triggerMetadata"`
-	Value           float64 `keda:"name=value, order=triggerMetadata"`
+	Value           float64 `keda:"name=value, order=triggerMetadata, default=0"`
 	ActivationValue float64 `keda:"name=activationValue, order=triggerMetadata, default=0"`
 
 	namespace      string
@@ -72,17 +72,13 @@ func NewKubernetesWorkloadScaler(kubeClient client.Client, config *scalersconfig
 
 func parseKubernetesWorkloadMetadata(config *scalersconfig.ScalerConfig) (kubernetesWorkloadMetadata, error) {
 	meta := kubernetesWorkloadMetadata{}
-	err := config.TypedConfig(&meta)
-	if err != nil {
-		return meta, fmt.Errorf("error parsing kubernetes workload metadata: %w", err)
-	}
-
 	meta.namespace = config.ScalableObjectNamespace
 	meta.triggerIndex = config.TriggerIndex
 	meta.asMetricSource = config.AsMetricSource
 
-	if meta.asMetricSource {
-		meta.Value = 0
+	err := config.TypedConfig(&meta)
+	if err != nil {
+		return meta, fmt.Errorf("error parsing kubernetes workload metadata: %w", err)
 	}
 
 	selector, err := labels.Parse(meta.PodSelector)
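
The reorder above works together with the new `default=0` on the value tag: when the trigger is used as a metric source, `value` may be omitted, and the defaulting now happens inside TypedConfig instead of being patched up after parsing. A minimal sketch of tag-driven defaulting with a simplified, hypothetical parser (KEDA's real TypedConfig is more general):

    package main

    import (
        "fmt"
        "reflect"
        "strconv"
        "strings"
    )

    // applyTagDefaults writes a field's declared default when its key is
    // absent from the metadata map. Illustration only; not KEDA's parser.
    func applyTagDefaults(target interface{}, metadata map[string]string) error {
        v := reflect.ValueOf(target).Elem()
        t := v.Type()
        for i := 0; i < t.NumField(); i++ {
            tag := t.Field(i).Tag.Get("keda")
            if tag == "" {
                continue
            }
            var name, def string
            hasDefault := false
            for _, part := range strings.Split(tag, ",") {
                kv := strings.SplitN(strings.TrimSpace(part), "=", 2)
                if len(kv) != 2 {
                    continue
                }
                switch kv[0] {
                case "name":
                    name = kv[1]
                case "default":
                    def, hasDefault = kv[1], true
                }
            }
            if _, present := metadata[name]; present || !hasDefault {
                continue
            }
            if v.Field(i).Kind() == reflect.Float64 {
                f, err := strconv.ParseFloat(def, 64)
                if err != nil {
                    return err
                }
                v.Field(i).SetFloat(f)
            }
        }
        return nil
    }

    type workloadMetadata struct {
        Value           float64 `keda:"name=value, order=triggerMetadata, default=0"`
        ActivationValue float64 `keda:"name=activationValue, order=triggerMetadata, default=0"`
    }

    func main() {
        meta := workloadMetadata{Value: -1, ActivationValue: -1}
        // "value" omitted, as when the trigger is only a metric source.
        _ = applyTagDefaults(&meta, map[string]string{})
        fmt.Printf("%+v\n", meta) // {Value:0 ActivationValue:0}
    }
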
diff --git a/tests/helper/helper.go b/tests/helper/helper.go
index a21adb0c48f..c0262fa3674 100644
--- a/tests/helper/helper.go
+++ b/tests/helper/helper.go
@@ -253,6 +253,7 @@ func DeleteNamespace(t *testing.T, nsName string) {
 		err = nil
 	}
 	assert.NoErrorf(t, err, "cannot delete kubernetes namespace - %s", err)
+	DeletePodsInNamespace(t, nsName)
 }
 
 func WaitForJobSuccess(t *testing.T, kc *kubernetes.Clientset, jobName, namespace string, iterations, interval int) bool {
@@ -744,6 +745,17 @@ func DeletePodsInNamespaceBySelector(t *testing.T, kc *kubernetes.Clientset, sel
 	assert.NoErrorf(t, err, "cannot delete pods - %s", err)
 }
 
+// Delete all pods in namespace
+func DeletePodsInNamespace(t *testing.T, namespace string) {
+	err := GetKubernetesClient(t).CoreV1().Pods(namespace).DeleteCollection(context.Background(), metav1.DeleteOptions{
+		GracePeriodSeconds: ptr.To(int64(0)),
+	}, metav1.ListOptions{})
+	if errors.IsNotFound(err) {
+		err = nil
+	}
+	assert.NoErrorf(t, err, "cannot delete pods - %s", err)
+}
+
 // Wait for Pods identified by selector to complete termination
 func WaitForPodsTerminated(t *testing.T, kc *kubernetes.Clientset, selector, namespace string, iterations, intervalSeconds int) bool {
 	for i := 0; i < iterations; i++ {
diff --git a/tests/internals/cache_metrics/cache_metrics_test.go b/tests/internals/cache_metrics/cache_metrics_test.go
index 3f5525c1833..10008711668 100644
--- a/tests/internals/cache_metrics/cache_metrics_test.go
+++ b/tests/internals/cache_metrics/cache_metrics_test.go
@@ -160,8 +160,8 @@ func testCacheMetricsOnPollingInterval(t *testing.T, kc *kubernetes.Clientset, d
 
 	// Metric Value = 8, DesiredAverageMetricValue = 2
 	// should scale in to 8/2 = 4 replicas, irrespective of current replicas
-	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 4, 60, 1),
-		"replica count should be 4 after 1 minute")
+	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 4, 60, 3),
+		"replica count should be 4 after 3 minutes")
 
 	// Changing Metric Value to 4, but because we have a long polling interval, the replicas number should remain the same
 	data.MonitoredDeploymentReplicas = 4
@@ -196,8 +196,8 @@ func testDirectQuery(t *testing.T, kc *kubernetes.Clientset, data templateData)
 
 	// Metric Value = 8, DesiredAverageMetricValue = 2
 	// should scale in to 8/2 = 4 replicas, irrespective of current replicas
-	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 4, 60, 1),
-		"replica count should be 4 after 1 minute")
+	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 4, 60, 3),
+		"replica count should be 4 after 3 minutes")
 
 	// Changing Metric Value to 4, deployment should scale to 2
 	data.MonitoredDeploymentReplicas = 4
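
A note on the (60, 1) -> (60, 3) changes above and in several files below: the wait helpers poll up to `iterations` times at `intervalSeconds` spacing, so the same 60 iterations now give a deployment up to 3 minutes instead of 1 to converge. The arithmetic, as a sketch:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        const iterations, intervalSeconds = 60, 3
        // Upper bound on how long WaitForDeploymentReplicaReadyCount polls
        // before the surrounding assertion fails.
        fmt.Println(time.Duration(iterations*intervalSeconds) * time.Second) // 3m0s
    }
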
diff --git a/tests/internals/eventemitter/azureeventgridtopic/azureeventgridtopic_test.go b/tests/internals/eventemitter/azureeventgridtopic/azureeventgridtopic_test.go
index ae00ea8173d..a65b40c5af1 100644
--- a/tests/internals/eventemitter/azureeventgridtopic/azureeventgridtopic_test.go
+++ b/tests/internals/eventemitter/azureeventgridtopic/azureeventgridtopic_test.go
@@ -265,21 +265,25 @@ func checkMessage(t *testing.T, count int, client *azservicebus.Client) {
 	if err != nil {
 		assert.NoErrorf(t, err, "cannot create receiver - %s", err)
 	}
-	defer receiver.Close(context.TODO())
-
-	messages, err := receiver.ReceiveMessages(context.TODO(), count, nil)
-	assert.NoErrorf(t, err, "cannot receive messages - %s", err)
-	assert.NotEmpty(t, messages)
+	defer receiver.Close(context.Background())
 
+	// Retry reading the messages up to 3 times
+	tries := 3
 	found := false
-	for _, message := range messages {
-		event := messaging.CloudEvent{}
-		err = json.Unmarshal(message.Body, &event)
-		assert.NoErrorf(t, err, "cannot retrieve message - %s", err)
-		if expectedSubject == *event.Subject &&
-			expectedSource == event.Source &&
-			expectedType == event.Type {
-			found = true
+	for i := 0; i < tries && !found; i++ {
+		messages, err := receiver.ReceiveMessages(context.Background(), count, nil)
+		assert.NoErrorf(t, err, "cannot receive messages - %s", err)
+		assert.NotEmpty(t, messages)
+
+		for _, message := range messages {
+			event := messaging.CloudEvent{}
+			err = json.Unmarshal(message.Body, &event)
+			assert.NoErrorf(t, err, "cannot retrieve message - %s", err)
+			if expectedSubject == *event.Subject &&
+				expectedSource == event.Source &&
+				expectedType == event.Type {
+				found = true
+			}
 		}
 	}
 
diff --git a/tests/internals/idle_replicas/idle_replicas_test.go b/tests/internals/idle_replicas/idle_replicas_test.go
index 46a682f9180..db5f25e0dac 100644
--- a/tests/internals/idle_replicas/idle_replicas_test.go
+++ b/tests/internals/idle_replicas/idle_replicas_test.go
@@ -147,8 +147,8 @@ func testScaleOut(t *testing.T, kc *kubernetes.Clientset) {
 	t.Log("--- scale to max replicas ---")
 	KubernetesScaleDeployment(t, kc, monitoredDeploymentName, 4, testNamespace)
 
-	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 4, 60, 1),
-		"replica count should be 4 after 1 minute")
+	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 4, 60, 3),
+		"replica count should be 4 after 3 minutes")
 }
 
 func testScaleIn(t *testing.T, kc *kubernetes.Clientset) {
diff --git a/tests/internals/restore_original/restore_original_test.go b/tests/internals/restore_original/restore_original_test.go
index 83de6a3b3a1..02a8f711210 100644
--- a/tests/internals/restore_original/restore_original_test.go
+++ b/tests/internals/restore_original/restore_original_test.go
@@ -138,8 +138,8 @@ func testScale(t *testing.T, kc *kubernetes.Clientset, data templateData) {
 	t.Log("--- testing scaling ---")
 	KubectlApplyWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate)
 
-	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 4, 60, 1),
-		"replica count should be 4 after 1 minute")
+	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 4, 60, 3),
+		"replica count should be 4 after 3 minutes")
 }
 
 func testRestore(t *testing.T, kc *kubernetes.Clientset, data templateData) {
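
On the checkMessage rewrite above: a single ReceiveMessages call appears to have been flaky when the event had not arrived yet, so the loop now retries a bounded number of times until the expected CloudEvent shows up. The same pattern as a generic, hypothetical helper (the real test inlines it and loops without an explicit sleep):

    package main

    import (
        "fmt"
        "time"
    )

    // pollUntil retries recv up to tries times, returning true as soon as
    // match accepts one of the received messages.
    func pollUntil(tries int, delay time.Duration, recv func() ([]string, error), match func(string) bool) bool {
        for i := 0; i < tries; i++ {
            if msgs, err := recv(); err == nil {
                for _, m := range msgs {
                    if match(m) {
                        return true
                    }
                }
            }
            time.Sleep(delay)
        }
        return false
    }

    func main() {
        calls := 0
        recv := func() ([]string, error) {
            calls++
            if calls < 3 {
                return nil, nil // nothing delivered yet
            }
            return []string{"expected"}, nil
        }
        ok := pollUntil(3, 10*time.Millisecond, recv, func(m string) bool { return m == "expected" })
        fmt.Println(ok) // true
    }
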
diff --git a/tests/internals/scaled_job_validation/scaled_job_validation_test.go b/tests/internals/scaled_job_validation/scaled_job_validation_test.go
index 217c3cc8c85..6074a0949f7 100644
--- a/tests/internals/scaled_job_validation/scaled_job_validation_test.go
+++ b/tests/internals/scaled_job_validation/scaled_job_validation_test.go
@@ -13,7 +13,7 @@ import (
 )
 
 const (
-	testName = "scaled-object-validation-test"
+	testName = "scaled-job-validation-test"
 )
 
 var (
diff --git a/tests/internals/scaling_strategies/eager_scaling_strategy_test.go b/tests/internals/scaling_strategies/eager_scaling_strategy_test.go
index e05c84c30a0..222f960ef16 100644
--- a/tests/internals/scaling_strategies/eager_scaling_strategy_test.go
+++ b/tests/internals/scaling_strategies/eager_scaling_strategy_test.go
@@ -100,8 +100,11 @@ func TestScalingStrategy(t *testing.T) {
 	})
 
 	RMQInstall(t, kc, rmqNamespace, user, password, vhost, WithoutOAuth())
-	CreateKubernetesResources(t, kc, testNamespace, data, templates)
+	// Publish 0 messages but create the queue
+	RMQPublishMessages(t, rmqNamespace, connectionString, queueName, 0)
+	WaitForAllJobsSuccess(t, kc, rmqNamespace, 60, 1)
 
+	CreateKubernetesResources(t, kc, testNamespace, data, templates)
 	testEagerScaling(t, kc)
 }
 
@@ -121,14 +124,17 @@ func getTemplateData() (templateData, []Template) {
 func testEagerScaling(t *testing.T, kc *kubernetes.Clientset) {
 	iterationCount := 20
 	RMQPublishMessages(t, rmqNamespace, connectionString, queueName, 4)
+	WaitForAllJobsSuccess(t, kc, rmqNamespace, 60, 1)
 	assert.True(t, WaitForScaledJobCount(t, kc, scaledJobName, testNamespace, 4, iterationCount, 1),
 		"job count should be %d after %d iterations", 4, iterationCount)
 
 	RMQPublishMessages(t, rmqNamespace, connectionString, queueName, 4)
+	WaitForAllJobsSuccess(t, kc, rmqNamespace, 60, 1)
 	assert.True(t, WaitForScaledJobCount(t, kc, scaledJobName, testNamespace, 8, iterationCount, 1),
 		"job count should be %d after %d iterations", 8, iterationCount)
 
-	RMQPublishMessages(t, rmqNamespace, connectionString, queueName, 4)
+	RMQPublishMessages(t, rmqNamespace, connectionString, queueName, 8)
+	WaitForAllJobsSuccess(t, kc, rmqNamespace, 60, 1)
 	assert.True(t, WaitForScaledJobCount(t, kc, scaledJobName, testNamespace, 10, iterationCount, 1),
 		"job count should be %d after %d iterations", 10, iterationCount)
 }
diff --git a/tests/internals/value_metric_type/value_metric_type_test.go b/tests/internals/value_metric_type/value_metric_type_test.go
index 06a175f9012..40dd1646dc8 100644
--- a/tests/internals/value_metric_type/value_metric_type_test.go
+++ b/tests/internals/value_metric_type/value_metric_type_test.go
@@ -149,8 +149,8 @@ func testScaleByAverageValue(t *testing.T, kc *kubernetes.Clientset, data templa
 
 	// Metric Value = 8, DesiredAverageMetricValue = 2
 	// should scale in to 8/2 = 4 replicas, irrespective of current replicas
-	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 4, 60, 1),
-		"replica count should be 4 after 1 minute")
+	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 4, 60, 3),
+		"replica count should be 4 after 3 minutes")
 
 	KubectlDeleteWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate)
 }
diff --git a/tests/run-all.go b/tests/run-all.go
index 76134d55e32..c30cbe27f48 100644
--- a/tests/run-all.go
+++ b/tests/run-all.go
@@ -25,11 +25,11 @@ import (
 )
 
 var (
-	concurrentTests        = 15
+	concurrentTests        = 25
 	regularTestsTimeout    = "20m"
 	regularTestsRetries    = 3
 	sequentialTestsTimeout = "20m"
-	sequentialTestsRetries = 1
+	sequentialTestsRetries = 2
 )
 
 type TestResult struct {
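
Worked numbers for the eager-strategy assertions above, assuming the ScaledJob caps at 10 (the final count the test asserts): the eager strategy keeps one job per outstanding message until the cap bites, so publishing 4, then 4, then 8 should yield 4, 8, and min(16, 10) = 10 jobs.

    package main

    import "fmt"

    func expectedJobs(totalQueued, maxJobs int) int {
        if totalQueued > maxJobs {
            return maxJobs
        }
        return totalQueued
    }

    func main() {
        fmt.Println(expectedJobs(4, 10))  // first publish: 4 jobs
        fmt.Println(expectedJobs(8, 10))  // 4 + 4: 8 jobs
        fmt.Println(expectedJobs(16, 10)) // 4 + 4 + 8: capped at 10
    }
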
diff --git a/tests/scalers/artemis/artemis_test.go b/tests/scalers/artemis/artemis_test.go
index 832c978852f..8bfcd61d64a 100644
--- a/tests/scalers/artemis/artemis_test.go
+++ b/tests/scalers/artemis/artemis_test.go
@@ -40,6 +40,7 @@ type templateData struct {
 	SecretName              string
 	ArtemisPasswordBase64   string
 	ArtemisUserBase64       string
+	MessageCount            int
 }
 
 const (
@@ -87,8 +88,8 @@ spec:
     spec:
       containers:
         - name: kedartemis-consumer
-          image: balchu/kedartemis-consumer
-          imagePullPolicy: Always
+          image: ghcr.io/kedacore/tests-artemis
+          args: ["consumer"]
           env:
             - name: ARTEMIS_PASSWORD
               valueFrom:
@@ -100,10 +101,12 @@ spec:
               secretKeyRef:
                 name: {{.SecretName}}
                 key: artemis-username
-            - name: ARTEMIS_HOST
+            - name: ARTEMIS_SERVER_HOST
               value: "artemis-activemq.{{.TestNamespace}}"
-            - name: ARTEMIS_PORT
+            - name: ARTEMIS_SERVER_PORT
               value: "61616"
+            - name: ARTEMIS_MESSAGE_SLEEP_MS
+              value: "70"
 `
 
 	artemisDeploymentTemplate = `apiVersion: apps/v1
@@ -260,7 +263,7 @@ spec:
       managementEndpoint: "artemis-activemq.{{.TestNamespace}}:8161"
       queueName: "test"
      queueLength: "50"
-      activationQueueLength: "1500"
+      activationQueueLength: "5"
       brokerName: "artemis-activemq"
       brokerAddress: "test"
     authenticationRef:
@@ -279,7 +282,8 @@ spec:
     spec:
       containers:
         - name: artemis-producer
-          image: balchu/artemis-producer:0.0.1
+          image: ghcr.io/kedacore/tests-artemis
+          args: ["producer"]
           env:
             - name: ARTEMIS_PASSWORD
               valueFrom:
@@ -295,6 +299,8 @@ spec:
             value: "artemis-activemq.{{.TestNamespace}}"
           - name: ARTEMIS_SERVER_PORT
             value: "61616"
+          - name: ARTEMIS_MESSAGE_COUNT
+            value: "{{.MessageCount}}"
       restartPolicy: Never
   backoffLimit: 4
 `
@@ -321,6 +327,7 @@ func TestArtemisScaler(t *testing.T) {
 
 func testActivation(t *testing.T, kc *kubernetes.Clientset, data templateData) {
 	t.Log("--- testing activation ---")
+	data.MessageCount = 1
 	KubectlReplaceWithTemplate(t, data, "triggerJobTemplate", producerJob)
 
 	AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, minReplicaCount, 60)
@@ -328,6 +335,7 @@ func testActivation(t *testing.T, kc *kubernetes.Clientset, data templateData) {
 }
 
 func testScaleOut(t *testing.T, kc *kubernetes.Clientset, data templateData) {
 	t.Log("--- testing scale out ---")
+	data.MessageCount = 1000
 	KubectlReplaceWithTemplate(t, data, "triggerJobTemplate", producerJob)
 	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, maxReplicaCount, 60, 3),
@@ -349,6 +357,7 @@ func getTemplateData() (templateData, []Template) {
 			SecretName:              secretName,
 			ArtemisPasswordBase64:   base64.StdEncoding.EncodeToString([]byte(artemisPassword)),
 			ArtemisUserBase64:       base64.StdEncoding.EncodeToString([]byte(artemisUser)),
+			MessageCount:            0,
 		}, []Template{
 			{Name: "secretTemplate", Config: secretTemplate},
 			{Name: "triggerAuthenticationTemplate", Config: triggerAuthenticationTemplate},
diff --git a/tests/scalers/azure/azure_event_hub_dapr_wi/azure_event_hub_dapr_wi_test.go b/tests/scalers/azure/azure_event_hub_dapr_wi/azure_event_hub_dapr_wi_test.go
index 13870a40bc4..8eeeae71fae 100644
--- a/tests/scalers/azure/azure_event_hub_dapr_wi/azure_event_hub_dapr_wi_test.go
+++ b/tests/scalers/azure/azure_event_hub_dapr_wi/azure_event_hub_dapr_wi_test.go
@@ -26,7 +26,7 @@ import (
 var _ = godotenv.Load("../../../.env")
 
 const (
-	testName              = "azure-event-hub-dapr"
+	testName              = "azure-event-hub-dapr-wi"
 	eventhubConsumerGroup = "$Default"
 )
 
 var (
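
The artemis changes above parameterize the producer job with a MessageCount so one manifest serves both runs: activation publishes a single message (below the new activationQueueLength of 5) and scale-out publishes 1000. A self-contained sketch of the rendering step, assuming text/template semantics like the e2e template helpers build on:

    package main

    import (
        "os"
        "text/template"
    )

    type templateData struct{ MessageCount int }

    const producerEnv = `- name: ARTEMIS_MESSAGE_COUNT
      value: "{{.MessageCount}}"
    `

    func main() {
        tpl := template.Must(template.New("env").Parse(producerEnv))
        // testActivation sets MessageCount = 1; testScaleOut sets 1000.
        _ = tpl.Execute(os.Stdout, templateData{MessageCount: 1000})
    }
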
diff --git a/tests/scalers/azure/azure_service_bus_queue_regex/azure_service_bus_queue_regex_test.go b/tests/scalers/azure/azure_service_bus_queue_regex/azure_service_bus_queue_regex_test.go
index a8419ecd44b..06c3c3c2db1 100644
--- a/tests/scalers/azure/azure_service_bus_queue_regex/azure_service_bus_queue_regex_test.go
+++ b/tests/scalers/azure/azure_service_bus_queue_regex/azure_service_bus_queue_regex_test.go
@@ -202,8 +202,8 @@ func testScale(t *testing.T, kc *kubernetes.Clientset, client *azservicebus.Clie
 	// check different aggregation operations
 	data.Operation = "max"
 	KubectlApplyWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate)
-	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 4, 60, 1),
-		"replica count should be 4 after 1 minute")
+	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 4, 60, 3),
+		"replica count should be 4 after 3 minutes")
 
 	data.Operation = "avg"
 	KubectlApplyWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate)
diff --git a/tests/scalers/azure/azure_service_bus_topic_regex/azure_service_bus_topic_regex_test.go b/tests/scalers/azure/azure_service_bus_topic_regex/azure_service_bus_topic_regex_test.go
index 07bfdc57572..3227f2fdebd 100644
--- a/tests/scalers/azure/azure_service_bus_topic_regex/azure_service_bus_topic_regex_test.go
+++ b/tests/scalers/azure/azure_service_bus_topic_regex/azure_service_bus_topic_regex_test.go
@@ -225,8 +225,8 @@ func testScale(t *testing.T, kc *kubernetes.Clientset, client *azservicebus.Clie
 	// check different aggregation operations
 	data.Operation = "max"
 	KubectlApplyWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate)
-	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 4, 60, 1),
-		"replica count should be 4 after 1 minute")
+	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 4, 60, 3),
+		"replica count should be 4 after 3 minutes")
 
 	data.Operation = "avg"
 	KubectlApplyWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate)
diff --git a/tests/scalers/elasticsearch/elasticsearch_test.go b/tests/scalers/elasticsearch/elasticsearch_test.go
index ea83f315298..68239c27d64 100644
--- a/tests/scalers/elasticsearch/elasticsearch_test.go
+++ b/tests/scalers/elasticsearch/elasticsearch_test.go
@@ -81,7 +81,7 @@ metadata:
   labels:
     app: {{.DeploymentName}}
 spec:
-  replicas: 1
+  replicas: 0
   selector:
     matchLabels:
       app: {{.DeploymentName}}
@@ -397,7 +397,7 @@ func testElasticsearchScaler(t *testing.T, kc *kubernetes.Clientset) {
 	AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, minReplicaCount, 60)
 
 	t.Log("--- testing scale out ---")
-	addElements(t, 5)
+	addElements(t, 10)
 
 	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, maxReplicaCount, 60, 3),
 		"replica count should be %d after 3 minutes", maxReplicaCount)
"github.com/kedacore/keda/v2/tests/helper" @@ -139,6 +140,9 @@ func TestScaler(t *testing.T) { CreateKubernetesResources(t, kc, testNamespace, data, templates) + require.True(t, WaitForDeploymentReplicaReadyCount(t, kc, scalerName, testNamespace, 1, 60, 1), + "replica count should be 1 after 1 minute") + assert.True(t, WaitForJobCount(t, kc, testNamespace, 0, 60, 1), "job count should be 0 after 1 minute") @@ -184,7 +188,7 @@ func testScaleIn(t *testing.T, kc *kubernetes.Clientset, data templateData) { data.MetricValue = 0 KubectlReplaceWithTemplate(t, data, "updateMetricTemplate", updateMetricTemplate) - assert.True(t, WaitForScaledJobCount(t, kc, scaledJobName, testNamespace, 0, 60, 1), - "job count should be 0 after 1 minute") + assert.True(t, WaitForScaledJobCount(t, kc, scaledJobName, testNamespace, 0, 120, 1), + "job count should be 0 after 2 minute") KubectlDeleteWithTemplate(t, data, "updateMetricTemplate", updateMetricTemplate) } diff --git a/tests/scalers/github_runner/github_runner_test.go b/tests/scalers/github_runner/github_runner_test.go index 97235dfb978..8c64f44be44 100644 --- a/tests/scalers/github_runner/github_runner_test.go +++ b/tests/scalers/github_runner/github_runner_test.go @@ -313,13 +313,11 @@ func TestScaler(t *testing.T) { // test scaling Scaled Job with App KubectlApplyWithTemplate(t, data, "scaledGhaJobTemplate", scaledGhaJobTemplate) - // testActivation(t, kc, client) testJobScaleOut(t, kc, client, ghaWorkflowID) testJobScaleIn(t, kc) // test scaling Scaled Job KubectlApplyWithTemplate(t, data, "scaledJobTemplate", scaledJobTemplate) - // testActivation(t, kc, client) testJobScaleOut(t, kc, client, workflowID) testJobScaleIn(t, kc) diff --git a/tests/scalers/rabbitmq/rabbitmq_queue_amqp/rabbitmq_queue_amqp_test.go b/tests/scalers/rabbitmq/rabbitmq_queue_amqp/rabbitmq_queue_amqp_test.go index 3bc8efa179e..a545ae3be28 100644 --- a/tests/scalers/rabbitmq/rabbitmq_queue_amqp/rabbitmq_queue_amqp_test.go +++ b/tests/scalers/rabbitmq/rabbitmq_queue_amqp/rabbitmq_queue_amqp_test.go @@ -111,8 +111,8 @@ func getTemplateData() (templateData, []Template) { func testScaling(t *testing.T, kc *kubernetes.Clientset) { t.Log("--- testing scale out ---") RMQPublishMessages(t, rmqNamespace, connectionString, queueName, messageCount) - assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 4, 60, 1), - "replica count should be 4 after 1 minute") + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 4, 60, 3), + "replica count should be 4 after 3 minute") t.Log("--- testing scale in ---") assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 0, 60, 1), diff --git a/tests/scalers/rabbitmq/rabbitmq_queue_amqp_auth/rabbitmq_queue_amqp_auth_test.go b/tests/scalers/rabbitmq/rabbitmq_queue_amqp_auth/rabbitmq_queue_amqp_auth_test.go index 47acb3e7144..b73a7ff63d9 100644 --- a/tests/scalers/rabbitmq/rabbitmq_queue_amqp_auth/rabbitmq_queue_amqp_auth_test.go +++ b/tests/scalers/rabbitmq/rabbitmq_queue_amqp_auth/rabbitmq_queue_amqp_auth_test.go @@ -20,7 +20,7 @@ import ( var _ = godotenv.Load("../../../.env") const ( - testName = "rmq-queue-amqp-test" + testName = "rmq-queue-amqp-auth-test" ) var ( diff --git a/tests/scalers/rabbitmq/rabbitmq_queue_amqp_vhost/rabbitmq_queue_amqp_vhost_test.go b/tests/scalers/rabbitmq/rabbitmq_queue_amqp_vhost/rabbitmq_queue_amqp_vhost_test.go index c1d957e103e..915eefa1ff0 100644 --- 
diff --git a/tests/scalers/rabbitmq/rabbitmq_queue_amqp_vhost/rabbitmq_queue_amqp_vhost_test.go b/tests/scalers/rabbitmq/rabbitmq_queue_amqp_vhost/rabbitmq_queue_amqp_vhost_test.go
index c1d957e103e..915eefa1ff0 100644
--- a/tests/scalers/rabbitmq/rabbitmq_queue_amqp_vhost/rabbitmq_queue_amqp_vhost_test.go
+++ b/tests/scalers/rabbitmq/rabbitmq_queue_amqp_vhost/rabbitmq_queue_amqp_vhost_test.go
@@ -111,8 +111,8 @@ func getTemplateData() (templateData, []Template) {
 func testScaling(t *testing.T, kc *kubernetes.Clientset) {
 	t.Log("--- testing scale out ---")
 	RMQPublishMessages(t, rmqNamespace, connectionString, queueName, messageCount)
-	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 4, 60, 1),
-		"replica count should be 4 after 1 minute")
+	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 4, 60, 3),
+		"replica count should be 4 after 3 minutes")
 
 	t.Log("--- testing scale in ---")
 	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 0, 60, 1),
diff --git a/tests/scalers/rabbitmq/rabbitmq_queue_http/rabbitmq_queue_http_test.go b/tests/scalers/rabbitmq/rabbitmq_queue_http/rabbitmq_queue_http_test.go
index ff1f930e7b0..b27e006c106 100644
--- a/tests/scalers/rabbitmq/rabbitmq_queue_http/rabbitmq_queue_http_test.go
+++ b/tests/scalers/rabbitmq/rabbitmq_queue_http/rabbitmq_queue_http_test.go
@@ -110,8 +110,8 @@ func getTemplateData() (templateData, []Template) {
 func testScaling(t *testing.T, kc *kubernetes.Clientset) {
 	t.Log("--- testing scale out ---")
 	RMQPublishMessages(t, rmqNamespace, connectionString, queueName, messageCount)
-	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 4, 60, 1),
-		"replica count should be 4 after 1 minute")
+	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 4, 60, 3),
+		"replica count should be 4 after 3 minutes")
 
 	t.Log("--- testing scale in ---")
 	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 0, 60, 1),
diff --git a/tests/scalers/rabbitmq/rabbitmq_queue_http_aad_wi/rabbitmq_queue_http_aad_wi_test.go b/tests/scalers/rabbitmq/rabbitmq_queue_http_aad_wi/rabbitmq_queue_http_aad_wi_test.go
index bd49396717a..cad93024009 100644
--- a/tests/scalers/rabbitmq/rabbitmq_queue_http_aad_wi/rabbitmq_queue_http_aad_wi_test.go
+++ b/tests/scalers/rabbitmq/rabbitmq_queue_http_aad_wi/rabbitmq_queue_http_aad_wi_test.go
@@ -162,8 +162,8 @@ func getTemplateData() (templateData, []Template) {
 func testScaling(t *testing.T, kc *kubernetes.Clientset) {
 	t.Log("--- testing scale out ---")
 	RMQPublishMessages(t, rmqNamespace, connectionString, queueName, messageCount)
-	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 4, 60, 1),
-		"replica count should be 4 after 1 minute")
+	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 4, 60, 3),
+		"replica count should be 4 after 3 minutes")
 
 	t.Log("--- testing scale in ---")
 	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 0, 60, 1),
diff --git a/tests/scalers/rabbitmq/rabbitmq_queue_http_auth/rabbitmq_queue_http_auth_test.go b/tests/scalers/rabbitmq/rabbitmq_queue_http_auth/rabbitmq_queue_http_auth_test.go
index a95d1924a8e..ed69492645b 100644
--- a/tests/scalers/rabbitmq/rabbitmq_queue_http_auth/rabbitmq_queue_http_auth_test.go
+++ b/tests/scalers/rabbitmq/rabbitmq_queue_http_auth/rabbitmq_queue_http_auth_test.go
@@ -20,7 +20,7 @@ import (
 var _ = godotenv.Load("../../../.env")
 
 const (
-	testName = "rmq-queue-http-test"
+	testName = "rmq-queue-http-test-auth"
 )
 
 var (
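
The testName renames in these rabbitmq files (and the dapr-wi one earlier) are the "fix namespace duplications" item from the commit message: each suite derives its namespaces from testName, so two suites sharing a name collide when the runner executes them concurrently. A sketch of the usual derivation (the exact pattern varies slightly per test file):

    package example_test

    import "fmt"

    var (
        // Previously "rmq-queue-amqp-test", which clashed with the non-auth suite.
        testName      = "rmq-queue-amqp-auth-test"
        testNamespace = fmt.Sprintf("%s-ns", testName)
    )
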
diff --git a/tests/scalers/rabbitmq/rabbitmq_queue_http_vhost/rabbitmq_queue_http_vhost_test.go b/tests/scalers/rabbitmq/rabbitmq_queue_http_vhost/rabbitmq_queue_http_vhost_test.go
index 9dbefd50480..0720b0fe074 100644
--- a/tests/scalers/rabbitmq/rabbitmq_queue_http_vhost/rabbitmq_queue_http_vhost_test.go
+++ b/tests/scalers/rabbitmq/rabbitmq_queue_http_vhost/rabbitmq_queue_http_vhost_test.go
@@ -110,8 +110,8 @@ func getTemplateData() (templateData, []Template) {
 func testScaling(t *testing.T, kc *kubernetes.Clientset) {
 	t.Log("--- testing scale out ---")
 	RMQPublishMessages(t, rmqNamespace, connectionString, queueName, messageCount)
-	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 4, 60, 1),
-		"replica count should be 4 after 1 minute")
+	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 4, 60, 3),
+		"replica count should be 4 after 3 minutes")
 
 	t.Log("--- testing scale in ---")
 	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 0, 60, 1),
diff --git a/tests/scalers/solace/solace_test.go b/tests/scalers/solace/solace_test.go
index 13ead35fa2e..86319017ae3 100644
--- a/tests/scalers/solace/solace_test.go
+++ b/tests/scalers/solace/solace_test.go
@@ -207,16 +207,18 @@ spec:
 func TestStanScaler(t *testing.T) {
 	kc := GetKubernetesClient(t)
 	data, templates := getTemplateData()
+
+	// Create kubernetes resources
+	CreateKubernetesResources(t, kc, testNamespace, data, templates)
+	installSolace(t)
+	KubectlApplyWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate)
+
 	t.Cleanup(func() {
 		KubectlDeleteWithTemplate(t, data, "scaledObjectTemplateRate", scaledObjectTemplateRate)
 		uninstallSolace(t)
 		DeleteKubernetesResources(t, testNamespace, data, templates)
 	})
 
-	// Create kubernetes resources
-	CreateKubernetesResources(t, kc, testNamespace, data, templates)
-	installSolace(t)
-	KubectlApplyWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate)
 	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, minReplicaCount, 60, 1),
 		"replica count should be 0 after 1 minute")
 
@@ -236,11 +238,9 @@ func installSolace(t *testing.T) {
 	require.NoErrorf(t, err, "cannot execute command - %s", err)
 	_, err = ExecuteCommand("helm repo update")
 	require.NoErrorf(t, err, "cannot execute command - %s", err)
-	_, err = ExecuteCommand(fmt.Sprintf(`helm upgrade --install --set solace.usernameAdminPassword=KedaLabAdminPwd1 --set storage.persistent=false,solace.size=dev,nameOverride=pubsubplus-dev,service.type=ClusterIP --namespace %s kedalab solacecharts/pubsubplus`,
+	_, err = ExecuteCommand(fmt.Sprintf(`helm upgrade --install --set solace.usernameAdminPassword=KedaLabAdminPwd1 --set storage.persistent=false,solace.size=dev,nameOverride=pubsubplus-dev,service.type=ClusterIP --wait --namespace %s kedalab solacecharts/pubsubplus`,
 		testNamespace))
 	require.NoErrorf(t, err, "cannot execute command - %s", err)
-	_, err = ExecuteCommand("sleep 60") // there is a bug in the solace helm chart where it is looking for the wrong number of replicas on --wait
-	require.NoErrorf(t, err, "cannot execute command - %s", err)
 	// Create the pubsub broker
 	_, _, err = ExecCommandOnSpecificPod(t, helperName, testNamespace, "./config/config_solace.sh")
 	require.NoErrorf(t, err, "cannot execute command - %s", err)
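
On the solace reshuffle above: t.Cleanup callbacks run after the test finishes, pass or fail, and in LIFO order; the change moves setup ahead of the registration without altering what gets torn down. A tiny reminder of the semantics:

    package example_test

    import (
        "fmt"
        "testing"
    )

    func TestCleanupOrder(t *testing.T) {
        t.Cleanup(func() { fmt.Println("runs second: delete resources") })
        t.Cleanup(func() { fmt.Println("runs first: uninstall broker") })
        // Cleanups execute LIFO after the test body returns, even on failure.
    }
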
diff --git a/tests/scalers/datadog/datadog_dca/datadog_dca_test.go b/tests/sequential/datadog_dca/datadog_dca_test.go
similarity index 97%
rename from tests/scalers/datadog/datadog_dca/datadog_dca_test.go
rename to tests/sequential/datadog_dca/datadog_dca_test.go
index 66512515484..488b6ebc7b4 100644
--- a/tests/scalers/datadog/datadog_dca/datadog_dca_test.go
+++ b/tests/sequential/datadog_dca/datadog_dca_test.go
@@ -1,6 +1,10 @@
 //go:build e2e
 // +build e2e
 
+// Temporarily moved to standalone e2e as I found that the DD Agent autogenerates DatadogMetric from other
+// unrelated HPAs. Until we get a response about how to disable this, the best solution is moving this test
+// to run standalone. We should move it back once this problem is solved.
+
 package datadog_dca_test
 
 import (
diff --git a/tests/utils/helper/helper.go b/tests/utils/helper/helper.go
index 039e5546b8c..3c618a8e4a6 100644
--- a/tests/utils/helper/helper.go
+++ b/tests/utils/helper/helper.go
@@ -51,8 +51,7 @@ image:
   repository: "otel/opentelemetry-collector-contrib"
 config:
   exporters:
-    logging:
-      loglevel: debug
+    debug: {}
     prometheus:
       endpoint: 0.0.0.0:8889
   receivers:
@@ -72,7 +71,7 @@ config:
       receivers:
         - otlp
       exporters:
-        - logging
+        - debug
         - prometheus
       logs: null
 