diff --git a/CHANGELOG.md b/CHANGELOG.md index 08f93217faf..154b5351ea7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -38,7 +38,7 @@ To learn more about active deprecations, we recommend checking [GitHub Discussio ### New - +- **General**: **EXPERIMENTAL** Adding an option to cache metric values for a scaler during the polling interval ([#2282](https://github.com/kedacore/keda/issues/2282)) - **General:** Introduce new CouchDB Scaler ([#3746](https://github.com/kedacore/keda/issues/3746)) - **General**: Introduction deprecation & breaking change policy ([Governance #68](https://github.com/kedacore/governance/issues/68)) - **General**: Consolidate all exposed Prometheus Metrics in KEDA Operator ([#3919](https://github.com/kedacore/keda/issues/3919)) @@ -47,6 +47,7 @@ To learn more about active deprecations, we recommend checking [GitHub Discussio - **General:** Introduce new Loki Scaler ([#3699](https://github.com/kedacore/keda/issues/3699)) - **General:** Introduce new Etcd Scaler ([#3880](https://github.com/kedacore/keda/issues/3880)) - **General**: Introduce rate-limitting parameters to KEDA manager to allow override of client defaults ([#3730](https://github.com/kedacore/keda/issues/3730)) +- **General**: Introduction deprecation & breaking change policy ([Governance #68](https://github.com/kedacore/governance/issues/68)) - **General**: Provide off-the-shelf Grafana dashboard for application autoscaling ([Docs](https://keda.sh/docs/2.9/operate/prometheus/) | [#3911](https://github.com/kedacore/keda/issues/3911)) - **General**: Provide Prometheus metric with indication of total number of custom resources per namespace for each custom resource type (CRD). ([#2637](https://github.com/kedacore/keda/issues/2637)|[#2638](https://github.com/kedacore/keda/issues/2638)|[#2639](https://github.com/kedacore/keda/issues/2639)) - **General**: Provide Prometheus metric with indication of total number of triggers per trigger type in `ScaledJob`/`ScaledObject`. 
([#3663](https://github.com/kedacore/keda/issues/3663)) diff --git a/CREATE-NEW-SCALER.md b/CREATE-NEW-SCALER.md index 3b4b8c89411..1ec9ee828f6 100644 --- a/CREATE-NEW-SCALER.md +++ b/CREATE-NEW-SCALER.md @@ -20,20 +20,42 @@ If you want to deploy locally The scalers in KEDA are implementations of a KEDA `Scaler` Go interface declared in `pkg/scalers/scaler.go`. This documentation describes how scalers work and is targeted towards contributors and maintainers. -### GetMetrics +### GetMetricsAndActivity -This is the key function of a scaler; it returns a value that represents a current state of an external metric (e.g. length of a queue). The return type is an `ExternalMetricValue` struct which has the following fields: -- `MetricName`: this is the name of the metric that we are returning. The name should be unique, to allow setting multiple (even the same type) Triggers in one ScaledObject, but each function call should return the same name. -- `Timestamp`: indicates the time at which the metrics were produced. -- `WindowSeconds`: //TODO -- `Value`: A numerical value that represents the state of the metric. It could be the length of a queue, or it can be the amount of lag in a stream, but it can also be a simple representation of the state. +This is the key function of a scaler; it returns: -Kubernetes HPA (Horizontal Pod Autoscaler) will poll `GetMetrics` regularly through KEDA's metric server (as long as there is at least one pod), and compare the returned value to a configured value in the ScaledObject configuration. Kubernetes will use the following formula to decide whether to scale the pods up and down: +1. A value that represents a current state of an external metric (e.g. length of a queue). The return type is an `ExternalMetricValue` struct which has the following fields: + - `MetricName`: this is the name of the metric that we are returning. 
The name should be unique, to allow setting multiple (even the same type) Triggers in one ScaledObject, but each function call should return the same name. + - `Timestamp`: indicates the time at which the metrics were produced. + - `WindowSeconds`: //TODO + - `Value`: A numerical value that represents the state of the metric. It could be the length of a queue, or it can be the amount of lag in a stream, but it can also be a simple representation of the state. +2. A value that represents an activity of the scaler. The return type is `bool`. + + KEDA polls ScaledObject object according to the `pollingInterval` configured in the ScaledObject; it checks the last time it was polled, it checks if the number of replicas is greater than 0, and if the scaler itself is active. So if the scaler returns false for `IsActive`, and if current number of replicas is greater than 0, and there is no configured minimum pods, then KEDA scales down to 0. + +Kubernetes HPA (Horizontal Pod Autoscaler) will poll `GetMetricsAndActivity` regularly through KEDA's metric server (as long as there is at least one pod), and compare the returned value to a configured value in the ScaledObject configuration. Kubernetes will use the following formula to decide whether to scale the pods up and down: `desiredReplicas = ceil[currentReplicas * ( currentMetricValue / desiredMetricValue )]`. For more details check [Kubernetes HPA documentation](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/). +
The following lines show an example of how to use it: +>```golang +func (s *artemisScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { + messages, err := s.getQueueMessageCount(ctx) + + if err != nil { + s.logger.Error(err, "Unable to access the artemis management endpoint", "managementEndpoint", s.metadata.managementEndpoint) + return []external_metrics.ExternalMetricValue{}, false, err + } + + metric := GenerateMetricInMili(metricName, float64(messages)) + + return []external_metrics.ExternalMetricValue{metric}, messages > s.metadata.activationQueueLength, nil +} +>``` + + ### GetMetricSpecForScaling KEDA works in conjunction with Kubernetes Horizontal Pod Autoscaler (HPA). When KEDA notices a new ScaledObject, it creates an HPA object that has basic information about the metric it needs to poll and scale the pods accordingly. To create this HPA object, KEDA invokes `GetMetricSpecForScaling`. @@ -64,12 +86,6 @@ For example: >``` -### IsActive - -For some reason, the scaler might need to declare itself as in-active, and the way it can do this is through implementing the function `IsActive`. - -KEDA polls ScaledObject object according to the `pollingInterval` configured in the ScaledObject; it checks the last time it was polled, it checks if the number of replicas is greater than 0, and if the scaler itself is active. So if the scaler returns false for `IsActive`, and if current number of replicas is greater than 0, and there is no configured minimum pods, then KEDA scales down to 0. - ### Close After each poll on the scaler to retrieve the metrics, KEDA calls this function for each scaler to give the scaler the opportunity to close any resources, like http clients for example. 
diff --git a/Makefile b/Makefile index 873fc40a215..2e03d01939a 100644 --- a/Makefile +++ b/Makefile @@ -53,7 +53,7 @@ GO_LDFLAGS="-X=github.com/kedacore/keda/v2/version.GitCommit=$(GIT_COMMIT) -X=gi COSIGN_FLAGS ?= -a GIT_HASH=${GIT_COMMIT} -a GIT_VERSION=${VERSION} -a BUILD_DATE=${DATE} # ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by envtest binary. -ENVTEST_K8S_VERSION = 1.24 +ENVTEST_K8S_VERSION = 1.25 # Setting SHELL to bash allows bash commands to be executed by recipes. # This is a requirement for 'setup-envtest.sh' in the test target. @@ -147,10 +147,12 @@ proto-gen: protoc-gen ## Generate Liiklus, ExternalScaler and MetricsService pro PATH="$(LOCALBIN):$(PATH)" protoc -I vendor --proto_path=pkg/metricsservice/api metrics.proto --go_out=pkg/metricsservice/api --go-grpc_out=pkg/metricsservice/api .PHONY: mockgen-gen -mockgen-gen: mockgen pkg/mock/mock_scaling/mock_interface.go pkg/mock/mock_scaler/mock_scaler.go pkg/mock/mock_scale/mock_interfaces.go pkg/mock/mock_client/mock_interfaces.go pkg/scalers/liiklus/mocks/mock_liiklus.go +mockgen-gen: mockgen pkg/mock/mock_scaling/mock_interface.go pkg/mock/mock_scaling/mock_executor/mock_interface.go pkg/mock/mock_scaler/mock_scaler.go pkg/mock/mock_scale/mock_interfaces.go pkg/mock/mock_client/mock_interfaces.go pkg/scalers/liiklus/mocks/mock_liiklus.go pkg/mock/mock_scaling/mock_interface.go: pkg/scaling/scale_handler.go $(MOCKGEN) -destination=$@ -package=mock_scaling -source=$^ +pkg/mock/mock_scaling/mock_executor/mock_interface.go: pkg/scaling/executor/scale_executor.go + $(MOCKGEN) -destination=$@ -package=mock_executor -source=$^ pkg/mock/mock_scaler/mock_scaler.go: pkg/scalers/scaler.go $(MOCKGEN) -destination=$@ -package=mock_scalers -source=$^ pkg/mock/mock_scale/mock_interfaces.go: vendor/k8s.io/client-go/scale/interfaces.go diff --git a/apis/keda/v1alpha1/identifier_test.go b/apis/keda/v1alpha1/identifier_test.go new file mode 100644 index 00000000000..251829c190a 
--- /dev/null +++ b/apis/keda/v1alpha1/identifier_test.go @@ -0,0 +1,81 @@ +package v1alpha1 + +import ( + "testing" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type testData struct { + name string + expectedIdentifier string + soName string + soNamespace string + soKind string +} + +var tests = []testData{ + { + name: "all lowercase", + expectedIdentifier: "scaledobject.namespace.name", + soName: "name", + soNamespace: "namespace", + soKind: "scaledobject", + }, + { + name: "all uppercase", + expectedIdentifier: "scaledobject.namespace.name", + soName: "NAME", + soNamespace: "NAMESPACE", + soKind: "SCALEDOBJECT", + }, + { + name: "camel case", + expectedIdentifier: "scaledobject.namespace.name", + soName: "name", + soNamespace: "namespace", + soKind: "scaledobject", + }, + { + name: "missing namespace", + expectedIdentifier: "scaledobject..name", + soName: "name", + soKind: "scaledobject", + }, +} + +func TestGeneratedIdentifierForScaledObject(t *testing.T) { + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + expectedIdentifier := test.expectedIdentifier + genericIdentifier := GenerateIdentifier(test.soKind, test.soNamespace, test.soName) + + scaledObject := &ScaledObject{ + ObjectMeta: metav1.ObjectMeta{ + Name: test.soName, + Namespace: test.soNamespace, + }, + } + scaledObjectIdentifier := scaledObject.GenerateIdentifier() + + withTriggers, err := AsDuckWithTriggers(scaledObject) + if err != nil { + t.Errorf("got error while converting to WithTriggers object: %s", err) + } + withTriggersIdentifier := withTriggers.GenerateIdentifier() + + if expectedIdentifier != genericIdentifier { + t.Errorf("genericIdentifier=%q doesn't equal the expectedIdentifier=%q", genericIdentifier, expectedIdentifier) + } + + if expectedIdentifier != scaledObjectIdentifier { + t.Errorf("scaledObjectIdentifier=%q doesn't equal the expectedIdentifier=%q", scaledObjectIdentifier, expectedIdentifier) + } + + if expectedIdentifier != 
withTriggersIdentifier { + t.Errorf("withTriggersIdentifier=%q doesn't equal the expectedIdentifier=%q", withTriggersIdentifier, expectedIdentifier) + } + }) + } +} diff --git a/apis/keda/v1alpha1/scaledobject_types.go b/apis/keda/v1alpha1/scaledobject_types.go index e06d49f0425..316b8e5118e 100644 --- a/apis/keda/v1alpha1/scaledobject_types.go +++ b/apis/keda/v1alpha1/scaledobject_types.go @@ -123,7 +123,10 @@ type ScaleTarget struct { type ScaleTriggers struct { Type string `json:"type"` // +optional - Name string `json:"name,omitempty"` + Name string `json:"name,omitempty"` + + UseCachedMetrics bool `json:"useCachedMetrics,omitempty"` + Metadata map[string]string `json:"metadata"` // +optional AuthenticationRef *ScaledObjectAuthRef `json:"authenticationRef,omitempty"` @@ -179,3 +182,8 @@ type ScaledObjectAuthRef struct { func init() { SchemeBuilder.Register(&ScaledObject{}, &ScaledObjectList{}) } + +// GenerateIdentifier returns identifier for the object in for "kind.namespace.name" +func (s *ScaledObject) GenerateIdentifier() string { + return GenerateIdentifier("ScaledObject", s.Namespace, s.Name) +} diff --git a/apis/keda/v1alpha1/withtriggers_types.go b/apis/keda/v1alpha1/withtriggers_types.go index f442c63deb4..5d280499498 100644 --- a/apis/keda/v1alpha1/withtriggers_types.go +++ b/apis/keda/v1alpha1/withtriggers_types.go @@ -17,6 +17,7 @@ limitations under the License. 
package v1alpha1 import ( + "fmt" "time" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -37,7 +38,8 @@ type WithTriggers struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` - Spec WithTriggersSpec `json:"spec"` + InternalKind string `json:"internalKind"` + Spec WithTriggersSpec `json:"spec"` } // WithTriggersSpec is the spec for a an object with triggers resource @@ -89,5 +91,35 @@ func (t *WithTriggers) GetPollingInterval() time.Duration { // GenerateIdentifier returns identifier for the object in for "kind.namespace.name" func (t *WithTriggers) GenerateIdentifier() string { - return GenerateIdentifier(t.Kind, t.Namespace, t.Name) + return GenerateIdentifier(t.InternalKind, t.Namespace, t.Name) +} + +// AsDuckWithTriggers tries to generates WithTriggers object for input object +// returns error if input object is unknown +func AsDuckWithTriggers(scalableObject interface{}) (*WithTriggers, error) { + switch obj := scalableObject.(type) { + case *ScaledObject: + return &WithTriggers{ + TypeMeta: obj.TypeMeta, + ObjectMeta: obj.ObjectMeta, + InternalKind: "ScaledObject", + Spec: WithTriggersSpec{ + PollingInterval: obj.Spec.PollingInterval, + Triggers: obj.Spec.Triggers, + }, + }, nil + case *ScaledJob: + return &WithTriggers{ + TypeMeta: obj.TypeMeta, + ObjectMeta: obj.ObjectMeta, + InternalKind: "ScaledJob", + Spec: WithTriggersSpec{ + PollingInterval: obj.Spec.PollingInterval, + Triggers: obj.Spec.Triggers, + }, + }, nil + default: + // here could be the conversion from unknown Duck type potentially in the future + return nil, fmt.Errorf("unknown scalable object type %v", scalableObject) + } } diff --git a/config/crd/bases/keda.sh_scaledjobs.yaml b/config/crd/bases/keda.sh_scaledjobs.yaml index 4e091d1f888..76df2fb82d9 100644 --- a/config/crd/bases/keda.sh_scaledjobs.yaml +++ b/config/crd/bases/keda.sh_scaledjobs.yaml @@ -7874,6 +7874,8 @@ spec: type: string type: type: string + useCachedMetrics: + type: boolean required: - 
metadata - type diff --git a/config/crd/bases/keda.sh_scaledobjects.yaml b/config/crd/bases/keda.sh_scaledobjects.yaml index 22925dcd715..07dfe5395d2 100644 --- a/config/crd/bases/keda.sh_scaledobjects.yaml +++ b/config/crd/bases/keda.sh_scaledobjects.yaml @@ -276,6 +276,8 @@ spec: type: string type: type: string + useCachedMetrics: + type: boolean required: - metadata - type diff --git a/controllers/keda/hpa_test.go b/controllers/keda/hpa_test.go index 7cef70233db..a6655cae034 100644 --- a/controllers/keda/hpa_test.go +++ b/controllers/keda/hpa_test.go @@ -137,7 +137,6 @@ func setupTest(health map[string]v1alpha1.HealthStatus, scaler *mock_scalers.Moc return scaler, &scalers.ScalerConfig{}, nil }, }}, - Logger: logr.Discard(), Recorder: nil, } metricSpec := v2.MetricSpec{ diff --git a/controllers/keda/scaledjob_controller.go b/controllers/keda/scaledjob_controller.go index e7e11cea143..c8b5d4d8adc 100644 --- a/controllers/keda/scaledjob_controller.go +++ b/controllers/keda/scaledjob_controller.go @@ -173,6 +173,9 @@ func (r *ScaledJobReconciler) reconcileScaledJob(ctx context.Context, logger log } for _, trigger := range scaledJob.Spec.Triggers { + if trigger.UseCachedMetrics { + logger.Info("Warning: property useCachedMetrics is not supported for ScaledJobs.") + } if trigger.MetricType != "" { err := fmt.Errorf("metricType is set in one of the ScaledJob scaler") logger.Error(err, "metricType cannot be set in ScaledJob triggers") diff --git a/controllers/keda/scaledobject_controller.go b/controllers/keda/scaledobject_controller.go index a81dba5ce9f..0b83c4f8378 100644 --- a/controllers/keda/scaledobject_controller.go +++ b/controllers/keda/scaledobject_controller.go @@ -221,7 +221,7 @@ func (r *ScaledObjectReconciler) reconcileScaledObject(ctx context.Context, logg return "ScaledObject doesn't have correct Idle/Min/Max Replica Counts specification", err } - err = r.checkTriggerNamesAreUnique(scaledObject) + err = r.checkTriggers(scaledObject) if err != nil { 
return "ScaledObject doesn't have correct triggers specification", err } @@ -337,14 +337,24 @@ func (r *ScaledObjectReconciler) checkTargetResourceIsScalable(ctx context.Conte return gvkr, nil } -// checkTriggerNamesAreUnique checks that all triggerNames in ScaledObject are unique -func (r *ScaledObjectReconciler) checkTriggerNamesAreUnique(scaledObject *kedav1alpha1.ScaledObject) error { +// checkTriggers checks that general trigger metadata are valid, it checks: +// - triggerNames in ScaledObject are unique +// - useCachedMetrics is defined only for a supported triggers +func (r *ScaledObjectReconciler) checkTriggers(scaledObject *kedav1alpha1.ScaledObject) error { triggersCount := len(scaledObject.Spec.Triggers) if triggersCount > 1 { triggerNames := make(map[string]bool, triggersCount) for i := 0; i < triggersCount; i++ { - name := scaledObject.Spec.Triggers[i].Name + trigger := scaledObject.Spec.Triggers[i] + + if trigger.UseCachedMetrics { + if trigger.Type == "cpu" || trigger.Type == "memory" || trigger.Type == "cron" { + return fmt.Errorf("property \"useCachedMetrics\" is not supported for %q scaler", trigger.Type) + } + } + + name := trigger.Name if name != "" { if _, found := triggerNames[name]; found { // found duplicate name diff --git a/go.mod b/go.mod index bee6250c5a0..2c51ca8dfb5 100644 --- a/go.mod +++ b/go.mod @@ -35,7 +35,6 @@ require ( github.com/gobwas/glob v0.2.3 github.com/gocql/gocql v1.3.0 github.com/golang/mock v1.6.0 - github.com/golang/protobuf v1.5.2 github.com/google/go-cmp v0.5.9 github.com/google/uuid v1.3.0 github.com/gophercloud/gophercloud v1.1.0 @@ -64,6 +63,7 @@ require ( github.com/xdg/scram v1.0.5 github.com/xhit/go-str2duration/v2 v2.0.0 github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a + go.etcd.io/etcd/client/v3 v3.5.4 go.mongodb.org/mongo-driver v1.11.0 golang.org/x/oauth2 v0.2.0 google.golang.org/api v0.103.0 @@ -161,6 +161,7 @@ require ( github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 // indirect 
github.com/golang-sql/sqlexp v0.1.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/protobuf v1.5.2 // indirect github.com/golang/snappy v0.0.4 // indirect github.com/google/gnostic v0.6.9 // indirect github.com/google/go-querystring v1.1.0 // indirect @@ -251,7 +252,6 @@ require ( github.com/xlab/treeprint v1.1.0 // indirect go.etcd.io/etcd/api/v3 v3.5.4 // indirect go.etcd.io/etcd/client/pkg/v3 v3.5.4 // indirect - go.etcd.io/etcd/client/v3 v3.5.4 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/contrib v0.20.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0 // indirect diff --git a/pkg/fallback/fallback_test.go b/pkg/fallback/fallback_test.go index 2103d441ead..c4124b3217b 100644 --- a/pkg/fallback/fallback_test.go +++ b/pkg/fallback/fallback_test.go @@ -76,7 +76,7 @@ var _ = Describe("fallback", func() { metricSpec := createMetricSpec(3) expectStatusPatch(ctrl, client) - metrics, err := scaler.GetMetrics(context.Background(), metricName) + metrics, _, err := scaler.GetMetricsAndActivity(context.Background(), metricName) metrics, err = GetMetricsWithFallback(context.Background(), client, logger, metrics, err, metricName, so, metricSpec) Expect(err).ToNot(HaveOccurred()) @@ -107,7 +107,7 @@ var _ = Describe("fallback", func() { metricSpec := createMetricSpec(3) expectStatusPatch(ctrl, client) - metrics, err := scaler.GetMetrics(context.Background(), metricName) + metrics, _, err := scaler.GetMetricsAndActivity(context.Background(), metricName) metrics, err = GetMetricsWithFallback(context.Background(), client, logger, metrics, err, metricName, so, metricSpec) Expect(err).ToNot(HaveOccurred()) @@ -117,13 +117,13 @@ var _ = Describe("fallback", func() { }) It("should propagate the error when fallback is disabled", func() { - scaler.EXPECT().GetMetrics(gomock.Any(), gomock.Eq(metricName)).Return(nil, errors.New("Some error")) + 
scaler.EXPECT().GetMetricsAndActivity(gomock.Any(), gomock.Eq(metricName)).Return(nil, false, errors.New("Some error")) so := buildScaledObject(nil, nil) metricSpec := createMetricSpec(3) expectStatusPatch(ctrl, client) - metrics, err := scaler.GetMetrics(context.Background(), metricName) + metrics, _, err := scaler.GetMetricsAndActivity(context.Background(), metricName) _, err = GetMetricsWithFallback(context.Background(), client, logger, metrics, err, metricName, so, metricSpec) Expect(err).ShouldNot(BeNil()) @@ -131,7 +131,7 @@ var _ = Describe("fallback", func() { }) It("should bump the number of failures when metrics call fails", func() { - scaler.EXPECT().GetMetrics(gomock.Any(), gomock.Eq(metricName)).Return(nil, errors.New("Some error")) + scaler.EXPECT().GetMetricsAndActivity(gomock.Any(), gomock.Eq(metricName)).Return(nil, false, errors.New("Some error")) startingNumberOfFailures := int32(0) so := buildScaledObject( @@ -152,7 +152,7 @@ var _ = Describe("fallback", func() { metricSpec := createMetricSpec(10) expectStatusPatch(ctrl, client) - metrics, err := scaler.GetMetrics(context.Background(), metricName) + metrics, _, err := scaler.GetMetricsAndActivity(context.Background(), metricName) _, err = GetMetricsWithFallback(context.Background(), client, logger, metrics, err, metricName, so, metricSpec) Expect(err).ShouldNot(BeNil()) @@ -161,7 +161,7 @@ var _ = Describe("fallback", func() { }) It("should return a normalised metric when number of failures are beyond threshold", func() { - scaler.EXPECT().GetMetrics(gomock.Any(), gomock.Eq(metricName)).Return(nil, errors.New("Some error")) + scaler.EXPECT().GetMetricsAndActivity(gomock.Any(), gomock.Eq(metricName)).Return(nil, false, errors.New("Some error")) startingNumberOfFailures := int32(3) expectedMetricValue := int64(100) @@ -182,7 +182,7 @@ var _ = Describe("fallback", func() { metricSpec := createMetricSpec(10) expectStatusPatch(ctrl, client) - metrics, err := scaler.GetMetrics(context.Background(), 
metricName) + metrics, _, err := scaler.GetMetricsAndActivity(context.Background(), metricName) metrics, err = GetMetricsWithFallback(context.Background(), client, logger, metrics, err, metricName, so, metricSpec) Expect(err).ToNot(HaveOccurred()) @@ -214,7 +214,7 @@ var _ = Describe("fallback", func() { }) It("should ignore error if we fail to update kubernetes status", func() { - scaler.EXPECT().GetMetrics(gomock.Any(), gomock.Eq(metricName)).Return(nil, errors.New("Some error")) + scaler.EXPECT().GetMetricsAndActivity(gomock.Any(), gomock.Eq(metricName)).Return(nil, false, errors.New("Some error")) startingNumberOfFailures := int32(3) expectedMetricValue := int64(100) @@ -238,7 +238,7 @@ var _ = Describe("fallback", func() { statusWriter.EXPECT().Patch(gomock.Any(), gomock.Any(), gomock.Any()).Return(errors.New("Some error")) client.EXPECT().Status().Return(statusWriter) - metrics, err := scaler.GetMetrics(context.Background(), metricName) + metrics, _, err := scaler.GetMetricsAndActivity(context.Background(), metricName) metrics, err = GetMetricsWithFallback(context.Background(), client, logger, metrics, err, metricName, so, metricSpec) Expect(err).ToNot(HaveOccurred()) @@ -248,7 +248,7 @@ var _ = Describe("fallback", func() { }) It("should return error when fallback is enabled but scaledobject has invalid parameter", func() { - scaler.EXPECT().GetMetrics(gomock.Any(), gomock.Eq(metricName)).Return(nil, errors.New("Some error")) + scaler.EXPECT().GetMetricsAndActivity(gomock.Any(), gomock.Eq(metricName)).Return(nil, false, errors.New("Some error")) startingNumberOfFailures := int32(3) so := buildScaledObject( @@ -268,7 +268,7 @@ var _ = Describe("fallback", func() { metricSpec := createMetricSpec(10) expectStatusPatch(ctrl, client) - metrics, err := scaler.GetMetrics(context.Background(), metricName) + metrics, _, err := scaler.GetMetricsAndActivity(context.Background(), metricName) _, err = GetMetricsWithFallback(context.Background(), client, logger, metrics, 
err, metricName, so, metricSpec) Expect(err).ShouldNot(BeNil()) @@ -276,7 +276,7 @@ var _ = Describe("fallback", func() { }) It("should set the fallback condition when a fallback exists in the scaled object", func() { - scaler.EXPECT().GetMetrics(gomock.Any(), gomock.Eq(metricName)).Return(nil, errors.New("Some error")) + scaler.EXPECT().GetMetricsAndActivity(gomock.Any(), gomock.Eq(metricName)).Return(nil, false, errors.New("Some error")) startingNumberOfFailures := int32(3) failingNumberOfFailures := int32(6) anotherMetricName := "another metric name" @@ -302,7 +302,7 @@ var _ = Describe("fallback", func() { metricSpec := createMetricSpec(10) expectStatusPatch(ctrl, client) - metrics, err := scaler.GetMetrics(context.Background(), metricName) + metrics, _, err := scaler.GetMetricsAndActivity(context.Background(), metricName) _, err = GetMetricsWithFallback(context.Background(), client, logger, metrics, err, metricName, so, metricSpec) Expect(err).ToNot(HaveOccurred()) condition := so.Status.Conditions.GetFallbackCondition() @@ -310,7 +310,7 @@ var _ = Describe("fallback", func() { }) It("should set the fallback condition to false if the config is invalid", func() { - scaler.EXPECT().GetMetrics(gomock.Any(), gomock.Eq(metricName)).Return(nil, errors.New("Some error")) + scaler.EXPECT().GetMetricsAndActivity(gomock.Any(), gomock.Eq(metricName)).Return(nil, false, errors.New("Some error")) startingNumberOfFailures := int32(3) failingNumberOfFailures := int32(6) anotherMetricName := "another metric name" @@ -336,7 +336,7 @@ var _ = Describe("fallback", func() { metricSpec := createMetricSpec(10) expectStatusPatch(ctrl, client) - metrics, err := scaler.GetMetrics(context.Background(), metricName) + metrics, _, err := scaler.GetMetricsAndActivity(context.Background(), metricName) _, err = GetMetricsWithFallback(context.Background(), client, logger, metrics, err, metricName, so, metricSpec) Expect(err).ShouldNot(BeNil()) Expect(err.Error()).Should(Equal("Some error")) 
@@ -425,7 +425,7 @@ func primeGetMetrics(scaler *mock_scalers.MockScaler, value int64) { Timestamp: metav1.Now(), } - scaler.EXPECT().GetMetrics(gomock.Any(), gomock.Eq(metricName)).Return([]external_metrics.ExternalMetricValue{expectedMetric}, nil) + scaler.EXPECT().GetMetricsAndActivity(gomock.Any(), gomock.Eq(metricName)).Return([]external_metrics.ExternalMetricValue{expectedMetric}, true, nil) } func createMetricSpec(averageValue int) v2.MetricSpec { diff --git a/pkg/metricsservice/client.go b/pkg/metricsservice/client.go index b6d20d4f478..cb96275830d 100644 --- a/pkg/metricsservice/client.go +++ b/pkg/metricsservice/client.go @@ -61,12 +61,16 @@ func NewGrpcClient(url string) (*GrpcClient, error) { func (c *GrpcClient) GetMetrics(ctx context.Context, scaledObjectName, scaledObjectNamespace, metricName string) (*external_metrics.ExternalMetricValueList, *api.PromMetricsMsg, error) { response, err := c.client.GetMetrics(ctx, &api.ScaledObjectRef{Name: scaledObjectName, Namespace: scaledObjectNamespace, MetricName: metricName}) if err != nil { + // in certain cases we would like to get Prometheus metrics even if there's an error + // so we can expose information about the error in the client return nil, response.GetPromMetrics(), err } extMetrics := &external_metrics.ExternalMetricValueList{} err = v1beta1.Convert_v1beta1_ExternalMetricValueList_To_external_metrics_ExternalMetricValueList(response.GetMetrics(), extMetrics, nil) if err != nil { + // in certain cases we would like to get Prometheus metrics even if there's an error + // so we can expose information about the error in the client return nil, response.GetPromMetrics(), fmt.Errorf("error when converting metric values %s", err) } diff --git a/pkg/metricsservice/server.go b/pkg/metricsservice/server.go index 2d6f6325ee0..5ef78fcb262 100644 --- a/pkg/metricsservice/server.go +++ b/pkg/metricsservice/server.go @@ -23,13 +23,13 @@ import ( "google.golang.org/grpc" 
"k8s.io/metrics/pkg/apis/external_metrics/v1beta1" - ctrl "sigs.k8s.io/controller-runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" "github.com/kedacore/keda/v2/pkg/metricsservice/api" "github.com/kedacore/keda/v2/pkg/scaling" ) -var log = ctrl.Log.WithName("grpc_server") +var log = logf.Log.WithName("grpc_server") type GrpcServer struct { server *grpc.Server @@ -40,20 +40,23 @@ type GrpcServer struct { // GetMetrics returns metrics values in form of ExternalMetricValueList for specified ScaledObject reference func (s *GrpcServer) GetMetrics(ctx context.Context, in *api.ScaledObjectRef) (*api.Response, error) { + response := api.Response{} v1beta1ExtMetrics := &v1beta1.ExternalMetricValueList{} - extMetrics, exportedMetrics, err := (*s.scalerHandler).GetExternalMetrics(ctx, in.Name, in.Namespace, in.MetricName) + extMetrics, exportedMetrics, err := (*s.scalerHandler).GetScaledObjectMetrics(ctx, in.Name, in.Namespace, in.MetricName) + response.PromMetrics = exportedMetrics if err != nil { - return nil, fmt.Errorf("error when getting metric values %s", err) + return &response, fmt.Errorf("error when getting metric values %s", err) } err = v1beta1.Convert_external_metrics_ExternalMetricValueList_To_v1beta1_ExternalMetricValueList(extMetrics, v1beta1ExtMetrics, nil) if err != nil { - return nil, fmt.Errorf("error when converting metric values %s", err) + return &response, fmt.Errorf("error when converting metric values %s", err) } log.V(1).WithValues("scaledObjectName", in.Name, "scaledObjectNamespace", in.Namespace, "metrics", v1beta1ExtMetrics).Info("Providing metrics") + response.Metrics = v1beta1ExtMetrics - return &api.Response{Metrics: v1beta1ExtMetrics, PromMetrics: exportedMetrics}, nil + return &response, nil } // NewGrpcServer creates a new instance of GrpcServer diff --git a/pkg/mock/mock_scaler/mock_scaler.go b/pkg/mock/mock_scaler/mock_scaler.go index 7615887ae30..db30ab8403a 100644 --- a/pkg/mock/mock_scaler/mock_scaler.go +++ 
b/pkg/mock/mock_scaler/mock_scaler.go @@ -64,34 +64,20 @@ func (mr *MockScalerMockRecorder) GetMetricSpecForScaling(ctx interface{}) *gomo return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMetricSpecForScaling", reflect.TypeOf((*MockScaler)(nil).GetMetricSpecForScaling), ctx) } -// GetMetrics mocks base method. -func (m *MockScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { +// GetMetricsAndActivity mocks base method. +func (m *MockScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetMetrics", ctx, metricName) + ret := m.ctrl.Call(m, "GetMetricsAndActivity", ctx, metricName) ret0, _ := ret[0].([]external_metrics.ExternalMetricValue) - ret1, _ := ret[1].(error) - return ret0, ret1 + ret1, _ := ret[1].(bool) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 } -// GetMetrics indicates an expected call of GetMetrics. -func (mr *MockScalerMockRecorder) GetMetrics(ctx, metricName interface{}) *gomock.Call { +// GetMetricsAndActivity indicates an expected call of GetMetricsAndActivity. +func (mr *MockScalerMockRecorder) GetMetricsAndActivity(ctx, metricName interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMetrics", reflect.TypeOf((*MockScaler)(nil).GetMetrics), ctx, metricName) -} - -// IsActive mocks base method. -func (m *MockScaler) IsActive(ctx context.Context) (bool, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "IsActive", ctx) - ret0, _ := ret[0].(bool) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// IsActive indicates an expected call of IsActive. 
-func (mr *MockScalerMockRecorder) IsActive(ctx interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsActive", reflect.TypeOf((*MockScaler)(nil).IsActive), ctx) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMetricsAndActivity", reflect.TypeOf((*MockScaler)(nil).GetMetricsAndActivity), ctx, metricName) } // MockPushScaler is a mock of PushScaler interface. @@ -145,34 +131,20 @@ func (mr *MockPushScalerMockRecorder) GetMetricSpecForScaling(ctx interface{}) * return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMetricSpecForScaling", reflect.TypeOf((*MockPushScaler)(nil).GetMetricSpecForScaling), ctx) } -// GetMetrics mocks base method. -func (m *MockPushScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { +// GetMetricsAndActivity mocks base method. +func (m *MockPushScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetMetrics", ctx, metricName) + ret := m.ctrl.Call(m, "GetMetricsAndActivity", ctx, metricName) ret0, _ := ret[0].([]external_metrics.ExternalMetricValue) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetMetrics indicates an expected call of GetMetrics. -func (mr *MockPushScalerMockRecorder) GetMetrics(ctx, metricName interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMetrics", reflect.TypeOf((*MockPushScaler)(nil).GetMetrics), ctx, metricName) -} - -// IsActive mocks base method. -func (m *MockPushScaler) IsActive(ctx context.Context) (bool, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "IsActive", ctx) - ret0, _ := ret[0].(bool) - ret1, _ := ret[1].(error) - return ret0, ret1 + ret1, _ := ret[1].(bool) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 } -// IsActive indicates an expected call of IsActive. 
-func (mr *MockPushScalerMockRecorder) IsActive(ctx interface{}) *gomock.Call { +// GetMetricsAndActivity indicates an expected call of GetMetricsAndActivity. +func (mr *MockPushScalerMockRecorder) GetMetricsAndActivity(ctx, metricName interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsActive", reflect.TypeOf((*MockPushScaler)(nil).IsActive), ctx) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMetricsAndActivity", reflect.TypeOf((*MockPushScaler)(nil).GetMetricsAndActivity), ctx, metricName) } // Run mocks base method. diff --git a/pkg/mock/mock_scaling/mock_executor/mock_interface.go b/pkg/mock/mock_scaling/mock_executor/mock_interface.go new file mode 100644 index 00000000000..40bbfeb1989 --- /dev/null +++ b/pkg/mock/mock_scaling/mock_executor/mock_interface.go @@ -0,0 +1,60 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: pkg/scaling/executor/scale_executor.go + +// Package mock_executor is a generated GoMock package. +package mock_executor + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + v1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1" +) + +// MockScaleExecutor is a mock of ScaleExecutor interface. +type MockScaleExecutor struct { + ctrl *gomock.Controller + recorder *MockScaleExecutorMockRecorder +} + +// MockScaleExecutorMockRecorder is the mock recorder for MockScaleExecutor. +type MockScaleExecutorMockRecorder struct { + mock *MockScaleExecutor +} + +// NewMockScaleExecutor creates a new mock instance. +func NewMockScaleExecutor(ctrl *gomock.Controller) *MockScaleExecutor { + mock := &MockScaleExecutor{ctrl: ctrl} + mock.recorder = &MockScaleExecutorMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockScaleExecutor) EXPECT() *MockScaleExecutorMockRecorder { + return m.recorder +} + +// RequestJobScale mocks base method. 
+func (m *MockScaleExecutor) RequestJobScale(ctx context.Context, scaledJob *v1alpha1.ScaledJob, isActive bool, scaleTo, maxScale int64) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "RequestJobScale", ctx, scaledJob, isActive, scaleTo, maxScale) +} + +// RequestJobScale indicates an expected call of RequestJobScale. +func (mr *MockScaleExecutorMockRecorder) RequestJobScale(ctx, scaledJob, isActive, scaleTo, maxScale interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RequestJobScale", reflect.TypeOf((*MockScaleExecutor)(nil).RequestJobScale), ctx, scaledJob, isActive, scaleTo, maxScale) +} + +// RequestScale mocks base method. +func (m *MockScaleExecutor) RequestScale(ctx context.Context, scaledObject *v1alpha1.ScaledObject, isActive, isError bool) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "RequestScale", ctx, scaledObject, isActive, isError) +} + +// RequestScale indicates an expected call of RequestScale. +func (mr *MockScaleExecutorMockRecorder) RequestScale(ctx, scaledObject, isActive, isError interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RequestScale", reflect.TypeOf((*MockScaleExecutor)(nil).RequestScale), ctx, scaledObject, isActive, isError) +} diff --git a/pkg/mock/mock_scaling/mock_interface.go b/pkg/mock/mock_scaling/mock_interface.go index 8cc1a70b93b..d0579a12bac 100644 --- a/pkg/mock/mock_scaling/mock_interface.go +++ b/pkg/mock/mock_scaling/mock_interface.go @@ -65,20 +65,20 @@ func (mr *MockScaleHandlerMockRecorder) DeleteScalableObject(ctx, scalableObject return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteScalableObject", reflect.TypeOf((*MockScaleHandler)(nil).DeleteScalableObject), ctx, scalableObject) } -// GetExternalMetrics mocks base method. 
-func (m *MockScaleHandler) GetExternalMetrics(ctx context.Context, scaledObjectName, scaledObjectNamespace, metricName string) (*external_metrics.ExternalMetricValueList, *api.PromMetricsMsg, error) { +// GetScaledObjectMetrics mocks base method. +func (m *MockScaleHandler) GetScaledObjectMetrics(ctx context.Context, scaledObjectName, scaledObjectNamespace, metricName string) (*external_metrics.ExternalMetricValueList, *api.PromMetricsMsg, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetExternalMetrics", ctx, scaledObjectName, scaledObjectNamespace, metricName) + ret := m.ctrl.Call(m, "GetScaledObjectMetrics", ctx, scaledObjectName, scaledObjectNamespace, metricName) ret0, _ := ret[0].(*external_metrics.ExternalMetricValueList) ret1, _ := ret[1].(*api.PromMetricsMsg) ret2, _ := ret[2].(error) return ret0, ret1, ret2 } -// GetExternalMetrics indicates an expected call of GetExternalMetrics. -func (mr *MockScaleHandlerMockRecorder) GetExternalMetrics(ctx, scaledObjectName, scaledObjectNamespace, metricName interface{}) *gomock.Call { +// GetScaledObjectMetrics indicates an expected call of GetScaledObjectMetrics. +func (mr *MockScaleHandlerMockRecorder) GetScaledObjectMetrics(ctx, scaledObjectName, scaledObjectNamespace, metricName interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetExternalMetrics", reflect.TypeOf((*MockScaleHandler)(nil).GetExternalMetrics), ctx, scaledObjectName, scaledObjectNamespace, metricName) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetScaledObjectMetrics", reflect.TypeOf((*MockScaleHandler)(nil).GetScaledObjectMetrics), ctx, scaledObjectName, scaledObjectNamespace, metricName) } // GetScalersCache mocks base method. 
diff --git a/pkg/prommetrics/prommetrics.go b/pkg/prommetrics/prommetrics.go index 925f7728173..193128ef29c 100644 --- a/pkg/prommetrics/prommetrics.go +++ b/pkg/prommetrics/prommetrics.go @@ -20,11 +20,11 @@ import ( "strconv" "github.com/prometheus/client_golang/prometheus" - ctrl "sigs.k8s.io/controller-runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/metrics" ) -var log = ctrl.Log.WithName("prometheus_server") +var log = logf.Log.WithName("prometheus_server") const ( ClusterTriggerAuthenticationResource = "cluster_trigger_authentication" diff --git a/pkg/scalers/activemq_scaler.go b/pkg/scalers/activemq_scaler.go index 7fc8a0e6aee..2e07e1984ff 100644 --- a/pkg/scalers/activemq_scaler.go +++ b/pkg/scalers/activemq_scaler.go @@ -166,16 +166,6 @@ func parseActiveMQMetadata(config *ScalerConfig) (*activeMQMetadata, error) { return &meta, nil } -func (s *activeMQScaler) IsActive(ctx context.Context) (bool, error) { - queueSize, err := s.getQueueMessageCount(ctx) - if err != nil { - s.logger.Error(err, "Unable to access activeMQ management endpoint", "managementEndpoint", s.metadata.managementEndpoint) - return false, err - } - - return queueSize > s.metadata.activationTargetQueueSize, nil -} - // getRestAPIParameters parse restAPITemplate to provide managementEndpoint, brokerName, destinationName func getRestAPIParameters(meta activeMQMetadata) (activeMQMetadata, error) { u, err := url.ParseRequestURI(meta.restAPITemplate) @@ -278,15 +268,15 @@ func (s *activeMQScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpe return []v2.MetricSpec{metricSpec} } -func (s *activeMQScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { +func (s *activeMQScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { queueSize, err := s.getQueueMessageCount(ctx) if err != nil { - return nil, fmt.Errorf("error 
inspecting ActiveMQ queue size: %s", err) + return nil, false, fmt.Errorf("error inspecting ActiveMQ queue size: %s", err) } metric := GenerateMetricInMili(metricName, float64(queueSize)) - return []external_metrics.ExternalMetricValue{metric}, nil + return []external_metrics.ExternalMetricValue{metric}, queueSize > s.metadata.activationTargetQueueSize, nil } func (s *activeMQScaler) Close(context.Context) error { diff --git a/pkg/scalers/artemis_scaler.go b/pkg/scalers/artemis_scaler.go index 7ba5e36f1dc..a90239b2d0c 100644 --- a/pkg/scalers/artemis_scaler.go +++ b/pkg/scalers/artemis_scaler.go @@ -176,17 +176,6 @@ func parseArtemisMetadata(config *ScalerConfig) (*artemisMetadata, error) { return &meta, nil } -// IsActive determines if we need to scale from zero -func (s *artemisScaler) IsActive(ctx context.Context) (bool, error) { - messages, err := s.getQueueMessageCount(ctx) - if err != nil { - s.logger.Error(err, "Unable to access the artemis management endpoint", "managementEndpoint", s.metadata.managementEndpoint) - return false, err - } - - return messages > s.metadata.activationQueueLength, nil -} - // getAPIParameters parse restAPITemplate to provide managementEndpoint , brokerName, brokerAddress, queueName func getAPIParameters(meta artemisMetadata) (artemisMetadata, error) { u, err := url.ParseRequestURI(meta.restAPITemplate) @@ -265,7 +254,7 @@ func (s *artemisScaler) getQueueMessageCount(ctx context.Context) (int64, error) return messageCount, nil } -func (s *artemisScaler) GetMetricSpecForScaling(ctx context.Context) []v2.MetricSpec { +func (s *artemisScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec { externalMetric := &v2.ExternalMetricSource{ Metric: v2.MetricIdentifier{ Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, kedautil.NormalizeString(fmt.Sprintf("artemis-%s", s.metadata.queueName))), @@ -276,18 +265,18 @@ func (s *artemisScaler) GetMetricSpecForScaling(ctx context.Context) []v2.Metric return 
[]v2.MetricSpec{metricSpec} } -// GetMetrics returns value for a supported metric and an error if there is a problem getting the metric -func (s *artemisScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { +// GetMetricsAndActivity returns value for a supported metric and an error if there is a problem getting the metric +func (s *artemisScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { messages, err := s.getQueueMessageCount(ctx) if err != nil { s.logger.Error(err, "Unable to access the artemis management endpoint", "managementEndpoint", s.metadata.managementEndpoint) - return []external_metrics.ExternalMetricValue{}, err + return []external_metrics.ExternalMetricValue{}, false, err } metric := GenerateMetricInMili(metricName, float64(messages)) - return append([]external_metrics.ExternalMetricValue{}, metric), nil + return []external_metrics.ExternalMetricValue{metric}, messages > s.metadata.activationQueueLength, nil } // Nothing to close here. 
diff --git a/pkg/scalers/aws_cloudwatch_scaler.go b/pkg/scalers/aws_cloudwatch_scaler.go index 646988995eb..5c448ae6b13 100644 --- a/pkg/scalers/aws_cloudwatch_scaler.go +++ b/pkg/scalers/aws_cloudwatch_scaler.go @@ -273,17 +273,17 @@ func computeQueryWindow(current time.Time, metricPeriodSec, metricEndTimeOffsetS return } -func (s *awsCloudwatchScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { +func (s *awsCloudwatchScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { metricValue, err := s.GetCloudwatchMetrics() if err != nil { s.logger.Error(err, "Error getting metric value") - return []external_metrics.ExternalMetricValue{}, err + return []external_metrics.ExternalMetricValue{}, false, err } metric := GenerateMetricInMili(metricName, metricValue) - return append([]external_metrics.ExternalMetricValue{}, metric), nil + return []external_metrics.ExternalMetricValue{metric}, metricValue > s.metadata.activationTargetMetricValue, nil } func (s *awsCloudwatchScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec { @@ -305,16 +305,6 @@ func (s *awsCloudwatchScaler) GetMetricSpecForScaling(context.Context) []v2.Metr return []v2.MetricSpec{metricSpec} } -func (s *awsCloudwatchScaler) IsActive(ctx context.Context) (bool, error) { - val, err := s.GetCloudwatchMetrics() - - if err != nil { - return false, err - } - - return val > s.metadata.activationTargetMetricValue, nil -} - func (s *awsCloudwatchScaler) Close(context.Context) error { return nil } diff --git a/pkg/scalers/aws_cloudwatch_scaler_test.go b/pkg/scalers/aws_cloudwatch_scaler_test.go index e07153cdff0..9fb09287327 100644 --- a/pkg/scalers/aws_cloudwatch_scaler_test.go +++ b/pkg/scalers/aws_cloudwatch_scaler_test.go @@ -501,7 +501,7 @@ func TestAWSCloudwatchGetMetricSpecForScaling(t *testing.T) { func TestAWSCloudwatchScalerGetMetrics(t *testing.T) { for _, meta := range 
awsCloudwatchGetMetricTestData { mockAWSCloudwatchScaler := awsCloudwatchScaler{"", &meta, &mockCloudwatch{}, logr.Discard()} - value, err := mockAWSCloudwatchScaler.GetMetrics(context.Background(), meta.metricsName) + value, _, err := mockAWSCloudwatchScaler.GetMetricsAndActivity(context.Background(), meta.metricsName) switch meta.metricsName { case testAWSCloudwatchErrorMetric: assert.Error(t, err, "expect error because of cloudwatch api error") diff --git a/pkg/scalers/aws_dynamodb_scaler.go b/pkg/scalers/aws_dynamodb_scaler.go index 935d4316e23..6a5a8022c40 100644 --- a/pkg/scalers/aws_dynamodb_scaler.go +++ b/pkg/scalers/aws_dynamodb_scaler.go @@ -151,16 +151,16 @@ func createDynamoDBClient(metadata *awsDynamoDBMetadata) *dynamodb.DynamoDB { return dynamodb.New(sess, config) } -func (s *awsDynamoDBScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { +func (s *awsDynamoDBScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { metricValue, err := s.GetQueryMetrics() if err != nil { s.logger.Error(err, "Error getting metric value") - return []external_metrics.ExternalMetricValue{}, err + return []external_metrics.ExternalMetricValue{}, false, err } metric := GenerateMetricInMili(metricName, metricValue) - return append([]external_metrics.ExternalMetricValue{}, metric), nil + return []external_metrics.ExternalMetricValue{metric}, metricValue > float64(s.metadata.activationTargetValue), nil } func (s *awsDynamoDBScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec { @@ -177,15 +177,6 @@ func (s *awsDynamoDBScaler) GetMetricSpecForScaling(context.Context) []v2.Metric } } -func (s *awsDynamoDBScaler) IsActive(ctx context.Context) (bool, error) { - messages, err := s.GetQueryMetrics() - if err != nil { - return false, fmt.Errorf("error inspecting aws-dynamodb: %s", err) - } - - return messages > 
float64(s.metadata.activationTargetValue), nil -} - func (s *awsDynamoDBScaler) Close(context.Context) error { return nil } diff --git a/pkg/scalers/aws_dynamodb_scaler_test.go b/pkg/scalers/aws_dynamodb_scaler_test.go index 12ebc276544..b07ccad3684 100644 --- a/pkg/scalers/aws_dynamodb_scaler_test.go +++ b/pkg/scalers/aws_dynamodb_scaler_test.go @@ -305,7 +305,7 @@ func TestDynamoGetMetrics(t *testing.T) { t.Run(meta.tableName, func(t *testing.T) { scaler := awsDynamoDBScaler{"", &meta, &mockDynamoDB{}, logr.Discard()} - value, err := scaler.GetMetrics(context.Background(), "aws-dynamodb") + value, _, err := scaler.GetMetricsAndActivity(context.Background(), "aws-dynamodb") switch meta.tableName { case testAWSDynamoErrorTable: assert.Error(t, err, "expect error because of dynamodb api error") @@ -341,7 +341,7 @@ func TestDynamoIsActive(t *testing.T) { t.Run(meta.tableName, func(t *testing.T) { scaler := awsDynamoDBScaler{"", &meta, &mockDynamoDB{}, logr.Discard()} - value, err := scaler.IsActive(context.Background()) + _, value, err := scaler.GetMetricsAndActivity(context.Background(), "aws-dynamodb") switch meta.tableName { case testAWSDynamoErrorTable: assert.Error(t, err, "expect error because of cloudwatch api error") diff --git a/pkg/scalers/aws_dynamodb_streams_scaler.go b/pkg/scalers/aws_dynamodb_streams_scaler.go index 6f6c06d903c..8f2ae7dc525 100644 --- a/pkg/scalers/aws_dynamodb_streams_scaler.go +++ b/pkg/scalers/aws_dynamodb_streams_scaler.go @@ -11,8 +11,6 @@ import ( "github.com/aws/aws-sdk-go/service/dynamodbstreams/dynamodbstreamsiface" "github.com/go-logr/logr" v2 "k8s.io/api/autoscaling/v2" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/metrics/pkg/apis/external_metrics" kedautil "github.com/kedacore/keda/v2/pkg/util" @@ -148,15 +146,6 @@ func getDynamoDBStreamsArn(ctx context.Context, db dynamodbiface.DynamoDBAPI, ta return tableOutput.Table.LatestStreamArn, nil } -// IsActive determines if we 
need to scale from zero -func (s *awsDynamoDBStreamsScaler) IsActive(ctx context.Context) (bool, error) { - count, err := s.GetDynamoDBStreamShardCount(ctx) - if err != nil { - return false, err - } - return count > s.metadata.activationTargetShardCount, nil -} - func (s *awsDynamoDBStreamsScaler) Close(context.Context) error { return nil } @@ -172,22 +161,18 @@ func (s *awsDynamoDBStreamsScaler) GetMetricSpecForScaling(context.Context) []v2 return []v2.MetricSpec{metricSpec} } -// GetMetrics returns value for a supported metric and an error if there is a problem getting the metric -func (s *awsDynamoDBStreamsScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { +// GetMetricsAndActivity returns value for a supported metric and an error if there is a problem getting the metric +func (s *awsDynamoDBStreamsScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { shardCount, err := s.GetDynamoDBStreamShardCount(ctx) if err != nil { s.logger.Error(err, "error getting shard count") - return []external_metrics.ExternalMetricValue{}, err + return []external_metrics.ExternalMetricValue{}, false, err } - metric := external_metrics.ExternalMetricValue{ - MetricName: metricName, - Value: *resource.NewQuantity(shardCount, resource.DecimalSI), - Timestamp: metav1.Now(), - } + metric := GenerateMetricInMili(metricName, float64(shardCount)) - return append([]external_metrics.ExternalMetricValue{}, metric), nil + return []external_metrics.ExternalMetricValue{metric}, shardCount > s.metadata.activationTargetShardCount, nil } // Get DynamoDB Stream Shard Count diff --git a/pkg/scalers/aws_dynamodb_streams_scaler_test.go b/pkg/scalers/aws_dynamodb_streams_scaler_test.go index c9cf2fed967..c792c8f5299 100644 --- a/pkg/scalers/aws_dynamodb_streams_scaler_test.go +++ b/pkg/scalers/aws_dynamodb_streams_scaler_test.go @@ -421,7 +421,7 @@ func 
TestAwsDynamoDBStreamsScalerGetMetrics(t *testing.T) { streamArn, err = getDynamoDBStreamsArn(ctx, &mockAwsDynamoDB{}, &meta.tableName) if err == nil { scaler := awsDynamoDBStreamsScaler{"", meta, streamArn, &mockAwsDynamoDBStreams{}, logr.Discard()} - value, err = scaler.GetMetrics(context.Background(), "MetricName") + value, _, err = scaler.GetMetricsAndActivity(context.Background(), "MetricName") } switch meta.tableName { case testAWSDynamoDBErrorTable: @@ -445,7 +445,7 @@ func TestAwsDynamoDBStreamsScalerIsActive(t *testing.T) { streamArn, err = getDynamoDBStreamsArn(ctx, &mockAwsDynamoDB{}, &meta.tableName) if err == nil { scaler := awsDynamoDBStreamsScaler{"", meta, streamArn, &mockAwsDynamoDBStreams{}, logr.Discard()} - value, err = scaler.IsActive(context.Background()) + _, value, err = scaler.GetMetricsAndActivity(context.Background(), "MetricName") } switch meta.tableName { case testAWSDynamoDBErrorTable: diff --git a/pkg/scalers/aws_kinesis_stream_scaler.go b/pkg/scalers/aws_kinesis_stream_scaler.go index 99f877d4ca4..fdab9daee36 100644 --- a/pkg/scalers/aws_kinesis_stream_scaler.go +++ b/pkg/scalers/aws_kinesis_stream_scaler.go @@ -118,17 +118,6 @@ func createKinesisClient(metadata *awsKinesisStreamMetadata) *kinesis.Kinesis { return kinesis.New(sess, config) } -// IsActive determines if we need to scale from zero -func (s *awsKinesisStreamScaler) IsActive(ctx context.Context) (bool, error) { - count, err := s.GetAwsKinesisOpenShardCount() - - if err != nil { - return false, err - } - - return count > s.metadata.activationTargetShardCount, nil -} - func (s *awsKinesisStreamScaler) Close(context.Context) error { return nil } @@ -144,18 +133,18 @@ func (s *awsKinesisStreamScaler) GetMetricSpecForScaling(context.Context) []v2.M return []v2.MetricSpec{metricSpec} } -// GetMetrics returns value for a supported metric and an error if there is a problem getting the metric -func (s *awsKinesisStreamScaler) GetMetrics(ctx context.Context, metricName string) 
([]external_metrics.ExternalMetricValue, error) { +// GetMetricsAndActivity returns value for a supported metric and an error if there is a problem getting the metric +func (s *awsKinesisStreamScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { shardCount, err := s.GetAwsKinesisOpenShardCount() if err != nil { s.logger.Error(err, "Error getting shard count") - return []external_metrics.ExternalMetricValue{}, err + return []external_metrics.ExternalMetricValue{}, false, err } metric := GenerateMetricInMili(metricName, float64(shardCount)) - return append([]external_metrics.ExternalMetricValue{}, metric), nil + return []external_metrics.ExternalMetricValue{metric}, shardCount > s.metadata.activationTargetShardCount, nil } // Get Kinesis open shard count diff --git a/pkg/scalers/aws_kinesis_stream_scaler_test.go b/pkg/scalers/aws_kinesis_stream_scaler_test.go index 2422308c309..f857d6701b8 100644 --- a/pkg/scalers/aws_kinesis_stream_scaler_test.go +++ b/pkg/scalers/aws_kinesis_stream_scaler_test.go @@ -351,7 +351,7 @@ func TestAWSKinesisGetMetricSpecForScaling(t *testing.T) { func TestAWSKinesisStreamScalerGetMetrics(t *testing.T) { for _, meta := range awsKinesisGetMetricTestData { scaler := awsKinesisStreamScaler{"", meta, &mockKinesis{}, logr.Discard()} - value, err := scaler.GetMetrics(context.Background(), "MetricName") + value, _, err := scaler.GetMetricsAndActivity(context.Background(), "MetricName") switch meta.streamName { case testAWSKinesisErrorStream: assert.Error(t, err, "expect error because of kinesis api error") diff --git a/pkg/scalers/aws_sqs_queue_scaler.go b/pkg/scalers/aws_sqs_queue_scaler.go index e585514b034..aa15f458148 100644 --- a/pkg/scalers/aws_sqs_queue_scaler.go +++ b/pkg/scalers/aws_sqs_queue_scaler.go @@ -166,17 +166,6 @@ func createSqsClient(metadata *awsSqsQueueMetadata) *sqs.SQS { return sqs.New(sess, config) } -// IsActive determines if we need to scale from 
zero -func (s *awsSqsQueueScaler) IsActive(ctx context.Context) (bool, error) { - length, err := s.getAwsSqsQueueLength() - - if err != nil { - return false, err - } - - return length > s.metadata.activationTargetQueueLength, nil -} - func (s *awsSqsQueueScaler) Close(context.Context) error { return nil } @@ -192,18 +181,18 @@ func (s *awsSqsQueueScaler) GetMetricSpecForScaling(context.Context) []v2.Metric return []v2.MetricSpec{metricSpec} } -// GetMetrics returns value for a supported metric and an error if there is a problem getting the metric -func (s *awsSqsQueueScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { +// GetMetricsAndActivity returns value for a supported metric and an error if there is a problem getting the metric +func (s *awsSqsQueueScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { queuelen, err := s.getAwsSqsQueueLength() if err != nil { s.logger.Error(err, "Error getting queue length") - return []external_metrics.ExternalMetricValue{}, err + return []external_metrics.ExternalMetricValue{}, false, err } metric := GenerateMetricInMili(metricName, float64(queuelen)) - return append([]external_metrics.ExternalMetricValue{}, metric), nil + return []external_metrics.ExternalMetricValue{metric}, queuelen > s.metadata.activationTargetQueueLength, nil } // Get SQS Queue Length diff --git a/pkg/scalers/aws_sqs_queue_scaler_test.go b/pkg/scalers/aws_sqs_queue_scaler_test.go index 35eba831582..8f00bfeed7c 100644 --- a/pkg/scalers/aws_sqs_queue_scaler_test.go +++ b/pkg/scalers/aws_sqs_queue_scaler_test.go @@ -345,7 +345,7 @@ func TestAWSSQSGetMetricSpecForScaling(t *testing.T) { func TestAWSSQSScalerGetMetrics(t *testing.T) { for _, meta := range awsSQSGetMetricTestData { scaler := awsSqsQueueScaler{"", meta, &mockSqs{}, logr.Discard()} - value, err := scaler.GetMetrics(context.Background(), "MetricName") + value, _, err := 
scaler.GetMetricsAndActivity(context.Background(), "MetricName") switch meta.queueURL { case testAWSSQSErrorQueueURL: assert.Error(t, err, "expect error because of sqs api error") diff --git a/pkg/scalers/azure_app_insights_scaler.go b/pkg/scalers/azure_app_insights_scaler.go index 8d12a565897..3eafd4c585e 100644 --- a/pkg/scalers/azure_app_insights_scaler.go +++ b/pkg/scalers/azure_app_insights_scaler.go @@ -163,17 +163,6 @@ func parseAzureAppInsightsMetadata(config *ScalerConfig, logger logr.Logger) (*a return &meta, nil } -// Returns true if the Azure App Insights metric value is greater than the target value -func (s *azureAppInsightsScaler) IsActive(ctx context.Context) (bool, error) { - val, err := azure.GetAzureAppInsightsMetricValue(ctx, s.metadata.azureAppInsightsInfo, s.podIdentity) - if err != nil { - s.logger.Error(err, "error getting azure app insights metric") - return false, err - } - - return val > s.metadata.activationTargetValue, nil -} - func (s *azureAppInsightsScaler) Close(context.Context) error { return nil } @@ -189,15 +178,15 @@ func (s *azureAppInsightsScaler) GetMetricSpecForScaling(context.Context) []v2.M return []v2.MetricSpec{metricSpec} } -// GetMetrics returns value for a supported metric and an error if there is a problem getting the metric -func (s *azureAppInsightsScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { +// GetMetricsAndActivity returns value for a supported metric and an error if there is a problem getting the metric +func (s *azureAppInsightsScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { val, err := azure.GetAzureAppInsightsMetricValue(ctx, s.metadata.azureAppInsightsInfo, s.podIdentity) if err != nil { s.logger.Error(err, "error getting azure app insights metric") - return []external_metrics.ExternalMetricValue{}, err + return []external_metrics.ExternalMetricValue{}, false, err } 
metric := GenerateMetricInMili(metricName, val) - return append([]external_metrics.ExternalMetricValue{}, metric), nil + return []external_metrics.ExternalMetricValue{metric}, val > s.metadata.activationTargetValue, nil } diff --git a/pkg/scalers/azure_blob_scaler.go b/pkg/scalers/azure_blob_scaler.go index 20913d40389..f67e589994c 100644 --- a/pkg/scalers/azure_blob_scaler.go +++ b/pkg/scalers/azure_blob_scaler.go @@ -180,23 +180,6 @@ func parseAzureBlobMetadata(config *ScalerConfig, logger logr.Logger) (*azure.Bl return &meta, config.PodIdentity, nil } -// GetScaleDecision is a func -func (s *azureBlobScaler) IsActive(ctx context.Context) (bool, error) { - length, err := azure.GetAzureBlobListLength( - ctx, - s.httpClient, - s.podIdentity, - s.metadata, - ) - - if err != nil { - s.logger.Error(err, "error") - return false, err - } - - return length > s.metadata.ActivationTargetBlobCount, nil -} - func (s *azureBlobScaler) Close(context.Context) error { return nil } @@ -212,8 +195,8 @@ func (s *azureBlobScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSp return []v2.MetricSpec{metricSpec} } -// GetMetrics returns value for a supported metric and an error if there is a problem getting the metric -func (s *azureBlobScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { +// GetMetricsAndActivity returns value for a supported metric and an error if there is a problem getting the metric +func (s *azureBlobScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { bloblen, err := azure.GetAzureBlobListLength( ctx, s.httpClient, @@ -223,10 +206,10 @@ func (s *azureBlobScaler) GetMetrics(ctx context.Context, metricName string) ([] if err != nil { s.logger.Error(err, "error getting blob list length") - return []external_metrics.ExternalMetricValue{}, err + return []external_metrics.ExternalMetricValue{}, false, err } metric := 
GenerateMetricInMili(metricName, float64(bloblen)) - return append([]external_metrics.ExternalMetricValue{}, metric), nil + return []external_metrics.ExternalMetricValue{metric}, bloblen > s.metadata.ActivationTargetBlobCount, nil } diff --git a/pkg/scalers/azure_data_explorer_scaler.go b/pkg/scalers/azure_data_explorer_scaler.go index a95856b7f88..b75bd005b2b 100644 --- a/pkg/scalers/azure_data_explorer_scaler.go +++ b/pkg/scalers/azure_data_explorer_scaler.go @@ -169,14 +169,15 @@ func parseAzureDataExplorerAuthParams(config *ScalerConfig, logger logr.Logger) return &metadata, nil } -func (s azureDataExplorerScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { +func (s azureDataExplorerScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { metricValue, err := azure.GetAzureDataExplorerMetricValue(ctx, s.client, s.metadata.DatabaseName, s.metadata.Query) if err != nil { - return []external_metrics.ExternalMetricValue{}, fmt.Errorf("failed to get metrics for scaled object %s in namespace %s: %v", s.name, s.namespace, err) + return []external_metrics.ExternalMetricValue{}, false, fmt.Errorf("failed to get metrics for scaled object %s in namespace %s: %v", s.name, s.namespace, err) } metric := GenerateMetricInMili(metricName, metricValue) - return append([]external_metrics.ExternalMetricValue{}, metric), nil + + return []external_metrics.ExternalMetricValue{metric}, metricValue > s.metadata.ActivationThreshold, nil } func (s azureDataExplorerScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec { @@ -190,15 +191,6 @@ func (s azureDataExplorerScaler) GetMetricSpecForScaling(context.Context) []v2.M return []v2.MetricSpec{metricSpec} } -func (s azureDataExplorerScaler) IsActive(ctx context.Context) (bool, error) { - metricValue, err := azure.GetAzureDataExplorerMetricValue(ctx, s.client, s.metadata.DatabaseName, s.metadata.Query) - if 
err != nil { - return false, fmt.Errorf("failed to get azure data explorer metric value: %s", err) - } - - return metricValue > s.metadata.ActivationThreshold, nil -} - func (s azureDataExplorerScaler) Close(context.Context) error { return nil } diff --git a/pkg/scalers/azure_eventhub_scaler.go b/pkg/scalers/azure_eventhub_scaler.go index c96e390c188..e8bc509008e 100644 --- a/pkg/scalers/azure_eventhub_scaler.go +++ b/pkg/scalers/azure_eventhub_scaler.go @@ -431,3 +431,20 @@ func (s *azureEventHubScaler) Close(ctx context.Context) error { return nil } + +// TODO merge isActive() and GetMetrics() +func (s *azureEventHubScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { + metrics, err := s.GetMetrics(ctx, metricName) + if err != nil { + s.logger.Error(err, "error getting metrics") + return []external_metrics.ExternalMetricValue{}, false, err + } + + isActive, err := s.IsActive(ctx) + if err != nil { + s.logger.Error(err, "error getting activity status") + return []external_metrics.ExternalMetricValue{}, false, err + } + + return metrics, isActive, nil +} diff --git a/pkg/scalers/azure_log_analytics_scaler.go b/pkg/scalers/azure_log_analytics_scaler.go index e055e470feb..ed6f6dc8ede 100644 --- a/pkg/scalers/azure_log_analytics_scaler.go +++ b/pkg/scalers/azure_log_analytics_scaler.go @@ -50,7 +50,6 @@ const ( type azureLogAnalyticsScaler struct { metricType v2.MetricTargetType metadata *azureLogAnalyticsMetadata - cache *sessionCache name string namespace string httpClient *http.Client @@ -72,11 +71,6 @@ type azureLogAnalyticsMetadata struct { activeDirectoryEndpoint string } -type sessionCache struct { - metricValue float64 - metricThreshold float64 -} - type tokenData struct { TokenType string `json:"token_type"` ExpiresIn int `json:"expires_in,string"` @@ -130,7 +124,6 @@ func NewAzureLogAnalyticsScaler(config *ScalerConfig) (Scaler, error) { return &azureLogAnalyticsScaler{ metricType: 
metricType, metadata: azureLogAnalyticsMetadata, - cache: &sessionCache{metricValue: -1, metricThreshold: -1}, name: config.ScalableObjectName, namespace: config.ScalableObjectNamespace, httpClient: kedautil.CreateHTTPClient(config.GlobalHTTPTimeout, false), @@ -252,72 +245,34 @@ func getParameterFromConfig(config *ScalerConfig, parameter string, checkAuthPar return "", fmt.Errorf("error parsing metadata. Details: %s was not found in metadata. Check your ScaledObject configuration", parameter) } -// IsActive determines if we need to scale from zero -func (s *azureLogAnalyticsScaler) IsActive(ctx context.Context) (bool, error) { - err := s.updateCache(ctx) - - if err != nil { - return false, fmt.Errorf("failed to execute IsActive function. Scaled object: %s. Namespace: %s. Inner Error: %v", s.name, s.namespace, err) - } - - return s.cache.metricValue > s.metadata.activationThreshold, nil -} - -func (s *azureLogAnalyticsScaler) GetMetricSpecForScaling(ctx context.Context) []v2.MetricSpec { - err := s.updateCache(ctx) - - if err != nil { - s.logger.V(1).Info("failed to get metric spec.", "Scaled object", s.name, "Namespace", s.namespace, "Inner Error", err) - return nil - } - +func (s *azureLogAnalyticsScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec { externalMetric := &v2.ExternalMetricSource{ Metric: v2.MetricIdentifier{ Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, s.metadata.metricName), }, - Target: GetMetricTargetMili(s.metricType, s.cache.metricThreshold), + Target: GetMetricTargetMili(s.metricType, s.metadata.threshold), } metricSpec := v2.MetricSpec{External: externalMetric, Type: externalMetricType} return []v2.MetricSpec{metricSpec} } -// GetMetrics returns value for a supported metric and an error if there is a problem getting the metric -func (s *azureLogAnalyticsScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { +// GetMetricsAndActivity returns value for a supported 
metric and an error if there is a problem getting the metric +func (s *azureLogAnalyticsScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { receivedMetric, err := s.getMetricData(ctx) if err != nil { - return []external_metrics.ExternalMetricValue{}, fmt.Errorf("failed to get metrics. Scaled object: %s. Namespace: %s. Inner Error: %v", s.name, s.namespace, err) + return []external_metrics.ExternalMetricValue{}, false, fmt.Errorf("failed to get metrics. Scaled object: %s. Namespace: %s. Inner Error: %v", s.name, s.namespace, err) } metric := GenerateMetricInMili(metricName, receivedMetric.value) - return append([]external_metrics.ExternalMetricValue{}, metric), nil + return []external_metrics.ExternalMetricValue{metric}, receivedMetric.value > s.metadata.activationThreshold, nil } func (s *azureLogAnalyticsScaler) Close(context.Context) error { return nil } -func (s *azureLogAnalyticsScaler) updateCache(ctx context.Context) error { - if s.cache.metricValue < 0 { - receivedMetric, err := s.getMetricData(ctx) - - if err != nil { - return err - } - - s.cache.metricValue = receivedMetric.value - - if receivedMetric.threshold > 0 { - s.cache.metricThreshold = receivedMetric.threshold - } else { - s.cache.metricThreshold = s.metadata.threshold - } - } - - return nil -} - func (s *azureLogAnalyticsScaler) getMetricData(ctx context.Context) (metricsData, error) { tokenInfo, err := s.getAccessToken(ctx) if err != nil { diff --git a/pkg/scalers/azure_log_analytics_scaler_test.go b/pkg/scalers/azure_log_analytics_scaler_test.go index c17be86921b..4ae0cf6224a 100644 --- a/pkg/scalers/azure_log_analytics_scaler_test.go +++ b/pkg/scalers/azure_log_analytics_scaler_test.go @@ -198,10 +198,8 @@ func TestLogAnalyticsGetMetricSpecForScaling(t *testing.T) { if err != nil { t.Fatal("Could not parse metadata:", err) } - cache := &sessionCache{metricValue: 1, metricThreshold: 2} mockLogAnalyticsScaler := 
azureLogAnalyticsScaler{ metadata: meta, - cache: cache, name: "test-so", namespace: "test-ns", httpClient: http.DefaultClient, diff --git a/pkg/scalers/azure_monitor_scaler.go b/pkg/scalers/azure_monitor_scaler.go index fdb75607a16..1de83010d6a 100644 --- a/pkg/scalers/azure_monitor_scaler.go +++ b/pkg/scalers/azure_monitor_scaler.go @@ -213,17 +213,6 @@ func parseAzurePodIdentityParams(config *ScalerConfig) (clientID string, clientP return clientID, clientPassword, nil } -// Returns true if the Azure Monitor metric value is greater than zero -func (s *azureMonitorScaler) IsActive(ctx context.Context) (bool, error) { - val, err := azure.GetAzureMetricValue(ctx, s.metadata.azureMonitorInfo, s.podIdentity) - if err != nil { - s.logger.Error(err, "error getting azure monitor metric") - return false, err - } - - return val > s.metadata.activationTargetValue, nil -} - func (s *azureMonitorScaler) Close(context.Context) error { return nil } @@ -239,15 +228,15 @@ func (s *azureMonitorScaler) GetMetricSpecForScaling(context.Context) []v2.Metri return []v2.MetricSpec{metricSpec} } -// GetMetrics returns value for a supported metric and an error if there is a problem getting the metric -func (s *azureMonitorScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { +// GetMetricsAndActivity returns value for a supported metric and an error if there is a problem getting the metric +func (s *azureMonitorScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { val, err := azure.GetAzureMetricValue(ctx, s.metadata.azureMonitorInfo, s.podIdentity) if err != nil { s.logger.Error(err, "error getting azure monitor metric") - return []external_metrics.ExternalMetricValue{}, err + return []external_metrics.ExternalMetricValue{}, false, err } metric := GenerateMetricInMili(metricName, val) - return append([]external_metrics.ExternalMetricValue{}, metric), nil + return 
[]external_metrics.ExternalMetricValue{metric}, val > s.metadata.activationTargetValue, nil } diff --git a/pkg/scalers/azure_pipelines_scaler.go b/pkg/scalers/azure_pipelines_scaler.go index 7ec6eed649f..3112f6e8236 100644 --- a/pkg/scalers/azure_pipelines_scaler.go +++ b/pkg/scalers/azure_pipelines_scaler.go @@ -309,19 +309,6 @@ func getAzurePipelineRequest(ctx context.Context, url string, metadata *azurePip return b, nil } -func (s *azurePipelinesScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { - queuelen, err := s.GetAzurePipelinesQueueLength(ctx) - - if err != nil { - s.logger.Error(err, "error getting pipelines queue length") - return []external_metrics.ExternalMetricValue{}, err - } - - metric := GenerateMetricInMili(metricName, float64(queuelen)) - - return append([]external_metrics.ExternalMetricValue{}, metric), nil -} - func (s *azurePipelinesScaler) GetAzurePipelinesQueueLength(ctx context.Context) (int64, error) { url := fmt.Sprintf("%s/_apis/distributedtask/pools/%d/jobrequests", s.metadata.organizationURL, s.metadata.poolID) body, err := getAzurePipelineRequest(ctx, url, s.metadata, s.httpClient) @@ -416,15 +403,17 @@ func (s *azurePipelinesScaler) GetMetricSpecForScaling(context.Context) []v2.Met return []v2.MetricSpec{metricSpec} } -func (s *azurePipelinesScaler) IsActive(ctx context.Context) (bool, error) { +func (s *azurePipelinesScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { queuelen, err := s.GetAzurePipelinesQueueLength(ctx) if err != nil { - s.logger.Error(err, "error)") - return false, err + s.logger.Error(err, "error getting pipelines queue length") + return []external_metrics.ExternalMetricValue{}, false, err } - return queuelen > s.metadata.activationTargetPipelinesQueueLength, nil + metric := GenerateMetricInMili(metricName, float64(queuelen)) + + return []external_metrics.ExternalMetricValue{metric}, 
queuelen > s.metadata.activationTargetPipelinesQueueLength, nil } func (s *azurePipelinesScaler) Close(context.Context) error { diff --git a/pkg/scalers/azure_queue_scaler.go b/pkg/scalers/azure_queue_scaler.go index 307836edaa6..6b208750761 100644 --- a/pkg/scalers/azure_queue_scaler.go +++ b/pkg/scalers/azure_queue_scaler.go @@ -158,26 +158,6 @@ func parseAzureQueueMetadata(config *ScalerConfig, logger logr.Logger) (*azureQu return &meta, config.PodIdentity, nil } -// IsActive determines whether this scaler is currently active -func (s *azureQueueScaler) IsActive(ctx context.Context) (bool, error) { - length, err := azure.GetAzureQueueLength( - ctx, - s.httpClient, - s.podIdentity, - s.metadata.connection, - s.metadata.queueName, - s.metadata.accountName, - s.metadata.endpointSuffix, - ) - - if err != nil { - s.logger.Error(err, "error)") - return false, err - } - - return length > s.metadata.activationTargetQueueLength, nil -} - func (s *azureQueueScaler) Close(context.Context) error { return nil } @@ -193,8 +173,8 @@ func (s *azureQueueScaler) GetMetricSpecForScaling(context.Context) []v2.MetricS return []v2.MetricSpec{metricSpec} } -// GetMetrics returns value for a supported metric and an error if there is a problem getting the metric -func (s *azureQueueScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { +// GetMetricsAndActivity returns value for a supported metric and an error if there is a problem getting the metric +func (s *azureQueueScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { queuelen, err := azure.GetAzureQueueLength( ctx, s.httpClient, @@ -207,10 +187,10 @@ func (s *azureQueueScaler) GetMetrics(ctx context.Context, metricName string) ([ if err != nil { s.logger.Error(err, "error getting queue length") - return []external_metrics.ExternalMetricValue{}, err + return []external_metrics.ExternalMetricValue{}, false, 
err } metric := GenerateMetricInMili(metricName, float64(queuelen)) - return append([]external_metrics.ExternalMetricValue{}, metric), nil + return []external_metrics.ExternalMetricValue{metric}, queuelen > s.metadata.activationTargetQueueLength, nil } diff --git a/pkg/scalers/azure_servicebus_scaler.go b/pkg/scalers/azure_servicebus_scaler.go index 590624de52a..b8da096a39c 100755 --- a/pkg/scalers/azure_servicebus_scaler.go +++ b/pkg/scalers/azure_servicebus_scaler.go @@ -226,17 +226,6 @@ func parseAzureServiceBusMetadata(config *ScalerConfig, logger logr.Logger) (*az return &meta, nil } -// Returns true if the scaler's queue has messages in it, false otherwise -func (s *azureServiceBusScaler) IsActive(ctx context.Context) (bool, error) { - length, err := s.getAzureServiceBusLength(ctx) - if err != nil { - s.logger.Error(err, "error") - return false, err - } - - return length > s.metadata.activationTargetLength, nil -} - // Close - nothing to close for SB func (s *azureServiceBusScaler) Close(context.Context) error { return nil @@ -269,18 +258,18 @@ func (s *azureServiceBusScaler) GetMetricSpecForScaling(context.Context) []v2.Me return []v2.MetricSpec{metricSpec} } -// Returns the current metrics to be served to the HPA -func (s *azureServiceBusScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { +// GetMetricsAndActivity returns the current metrics to be served to the HPA +func (s *azureServiceBusScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { queuelen, err := s.getAzureServiceBusLength(ctx) if err != nil { s.logger.Error(err, "error getting service bus entity length") - return []external_metrics.ExternalMetricValue{}, err + return []external_metrics.ExternalMetricValue{}, false, err } metric := GenerateMetricInMili(metricName, float64(queuelen)) - return append([]external_metrics.ExternalMetricValue{}, metric), nil + return 
[]external_metrics.ExternalMetricValue{metric}, queuelen > s.metadata.activationTargetLength, nil } // Returns the length of the queue or subscription diff --git a/pkg/scalers/cassandra_scaler.go b/pkg/scalers/cassandra_scaler.go index 79a8259ca70..f0a48e080e1 100644 --- a/pkg/scalers/cassandra_scaler.go +++ b/pkg/scalers/cassandra_scaler.go @@ -180,18 +180,8 @@ func newCassandraSession(meta *CassandraMetadata, logger logr.Logger) (*gocql.Se return session, nil } -// IsActive returns true if there are pending events to be processed. -func (s *cassandraScaler) IsActive(ctx context.Context) (bool, error) { - messages, err := s.GetQueryResult(ctx) - if err != nil { - return false, fmt.Errorf("error inspecting cassandra: %s", err) - } - - return messages > s.metadata.activationTargetQueryValue, nil -} - // GetMetricSpecForScaling returns the MetricSpec for the Horizontal Pod Autoscaler. -func (s *cassandraScaler) GetMetricSpecForScaling(ctx context.Context) []v2.MetricSpec { +func (s *cassandraScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec { externalMetric := &v2.ExternalMetricSource{ Metric: v2.MetricIdentifier{ Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, s.metadata.metricName), @@ -205,16 +195,16 @@ func (s *cassandraScaler) GetMetricSpecForScaling(ctx context.Context) []v2.Metr return []v2.MetricSpec{metricSpec} } -// GetMetrics returns a value for a supported metric or an error if there is a problem getting the metric. -func (s *cassandraScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { +// GetMetricsAndActivity returns a value for a supported metric or an error if there is a problem getting the metric. 
+func (s *cassandraScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { num, err := s.GetQueryResult(ctx) if err != nil { - return []external_metrics.ExternalMetricValue{}, fmt.Errorf("error inspecting cassandra: %s", err) + return []external_metrics.ExternalMetricValue{}, false, fmt.Errorf("error inspecting cassandra: %s", err) } metric := GenerateMetricInMili(metricName, float64(num)) - return append([]external_metrics.ExternalMetricValue{}, metric), nil + return []external_metrics.ExternalMetricValue{metric}, num > s.metadata.activationTargetQueryValue, nil } // GetQueryResult returns the result of the scaler query. diff --git a/pkg/scalers/couchdb_scaler.go b/pkg/scalers/couchdb_scaler.go index d10d0d1d87d..c5e903be729 100644 --- a/pkg/scalers/couchdb_scaler.go +++ b/pkg/scalers/couchdb_scaler.go @@ -97,15 +97,6 @@ func (s *couchDBScaler) getQueryResult(ctx context.Context) (int64, error) { return count, nil } -func (s *couchDBScaler) IsActive(ctx context.Context) (bool, error) { - result, err := s.getQueryResult(ctx) - if err != nil { - s.logger.Error(err, fmt.Sprintf("failed to get query result by couchDB, because of %v", err)) - return false, err - } - return result > s.metadata.activationQueryValue, nil -} - func parseCouchDBMetadata(config *ScalerConfig) (*couchDBMetadata, string, error) { var connStr string var err error @@ -227,14 +218,14 @@ func NewCouchDBScaler(ctx context.Context, config *ScalerConfig) (Scaler, error) }, nil } -// GetMetrics query from couchDB,and return to external metrics -func (s *couchDBScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { - num, err := s.getQueryResult(ctx) +// GetMetricsAndActivity queries CouchDB and returns external metrics and activity +func (s *couchDBScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { + 
result, err := s.getQueryResult(ctx) if err != nil { - return []external_metrics.ExternalMetricValue{}, fmt.Errorf("failed to inspect couchdb, because of %v", err) + return []external_metrics.ExternalMetricValue{}, false, fmt.Errorf("failed to inspect couchdb, because of %v", err) } - metric := GenerateMetricInMili(metricName, float64(num)) + metric := GenerateMetricInMili(metricName, float64(result)) - return append([]external_metrics.ExternalMetricValue{}, metric), nil + return append([]external_metrics.ExternalMetricValue{}, metric), result > s.metadata.activationQueryValue, nil } diff --git a/pkg/scalers/cpu_memory_scaler.go b/pkg/scalers/cpu_memory_scaler.go index cfd7db7c84b..fef14738aeb 100644 --- a/pkg/scalers/cpu_memory_scaler.go +++ b/pkg/scalers/cpu_memory_scaler.go @@ -81,11 +81,6 @@ func parseResourceMetadata(config *ScalerConfig, logger logr.Logger) (*cpuMemory return meta, nil } -// IsActive always return true for cpu/memory scaler -func (s *cpuMemoryScaler) IsActive(ctx context.Context) (bool, error) { - return true, nil -} - // Close no need for cpuMemory scaler func (s *cpuMemoryScaler) Close(context.Context) error { return nil @@ -121,7 +116,7 @@ func (s *cpuMemoryScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSp return []v2.MetricSpec{metricSpec} } -// GetMetrics no need for cpu/memory scaler -func (s *cpuMemoryScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { - return nil, nil +// GetMetrics no need for cpu/memory scaler and always active for cpu/memory scaler +func (s *cpuMemoryScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { + return nil, true, nil } diff --git a/pkg/scalers/cron_scaler.go b/pkg/scalers/cron_scaler.go index 1fee018b18b..e1de9c05159 100644 --- a/pkg/scalers/cron_scaler.go +++ b/pkg/scalers/cron_scaler.go @@ -184,5 +184,22 @@ func (s *cronScaler) GetMetrics(ctx context.Context, 
metricName string) ([]exter /*******************************************************************************/ metric := GenerateMetricInMili(metricName, float64(currentReplicas)) - return append([]external_metrics.ExternalMetricValue{}, metric), nil + return []external_metrics.ExternalMetricValue{metric}, nil +} + +// TODO merge isActive() and GetMetrics() +func (s *cronScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { + metrics, err := s.GetMetrics(ctx, metricName) + if err != nil { + s.logger.Error(err, "error getting metrics") + return []external_metrics.ExternalMetricValue{}, false, err + } + + isActive, err := s.IsActive(ctx) + if err != nil { + s.logger.Error(err, "error getting activity status") + return []external_metrics.ExternalMetricValue{}, false, err + } + + return metrics, isActive, nil } diff --git a/pkg/scalers/cron_scaler_test.go b/pkg/scalers/cron_scaler_test.go index 33123800afb..b0643186d65 100644 --- a/pkg/scalers/cron_scaler_test.go +++ b/pkg/scalers/cron_scaler_test.go @@ -73,7 +73,7 @@ func TestCronParseMetadata(t *testing.T) { func TestIsActive(t *testing.T) { scaler, _ := NewCronScaler(&ScalerConfig{TriggerMetadata: validCronMetadata}) - isActive, _ := scaler.IsActive(context.TODO()) + _, isActive, _ := scaler.GetMetricsAndActivity(context.TODO(), "ReplicaCount") if currentDay == "Thursday" { assert.Equal(t, isActive, true) } else { @@ -83,7 +83,7 @@ func TestIsActive(t *testing.T) { func TestIsActiveRange(t *testing.T) { scaler, _ := NewCronScaler(&ScalerConfig{TriggerMetadata: validCronMetadata2}) - isActive, _ := scaler.IsActive(context.TODO()) + _, isActive, _ := scaler.GetMetricsAndActivity(context.TODO(), "ReplicaCount") if currentHour%2 == 0 { assert.Equal(t, isActive, true) } else { @@ -93,7 +93,7 @@ func TestIsActiveRange(t *testing.T) { func TestGetMetrics(t *testing.T) { scaler, _ := NewCronScaler(&ScalerConfig{TriggerMetadata: validCronMetadata}) - 
metrics, _ := scaler.GetMetrics(context.TODO(), "ReplicaCount") + metrics, _, _ := scaler.GetMetricsAndActivity(context.TODO(), "ReplicaCount") assert.Equal(t, metrics[0].MetricName, "ReplicaCount") if currentDay == "Thursday" { assert.Equal(t, metrics[0].Value.Value(), int64(10)) @@ -104,7 +104,7 @@ func TestGetMetrics(t *testing.T) { func TestGetMetricsRange(t *testing.T) { scaler, _ := NewCronScaler(&ScalerConfig{TriggerMetadata: validCronMetadata2}) - metrics, _ := scaler.GetMetrics(context.TODO(), "ReplicaCount") + metrics, _, _ := scaler.GetMetricsAndActivity(context.TODO(), "ReplicaCount") assert.Equal(t, metrics[0].MetricName, "ReplicaCount") if currentHour%2 == 0 { assert.Equal(t, metrics[0].Value.Value(), int64(10)) diff --git a/pkg/scalers/datadog_scaler.go b/pkg/scalers/datadog_scaler.go index ea03a2cba94..ea3c9e59389 100644 --- a/pkg/scalers/datadog_scaler.go +++ b/pkg/scalers/datadog_scaler.go @@ -262,17 +262,6 @@ func (s *datadogScaler) Close(context.Context) error { return nil } -// IsActive checks whether the scaler is active -func (s *datadogScaler) IsActive(ctx context.Context) (bool, error) { - num, err := s.getQueryResult(ctx) - - if err != nil { - return false, err - } - - return num > s.metadata.activationQueryValue, nil -} - // getQueryResult returns result of the scaler query func (s *datadogScaler) getQueryResult(ctx context.Context) (float64, error) { ctx = context.WithValue( @@ -383,17 +372,17 @@ func (s *datadogScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec return []v2.MetricSpec{metricSpec} } -// GetMetrics returns value for a supported metric and an error if there is a problem getting the metric -func (s *datadogScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { +// GetMetricsAndActivity returns value for a supported metric and an error if there is a problem getting the metric +func (s *datadogScaler) GetMetricsAndActivity(ctx context.Context, metricName 
string) ([]external_metrics.ExternalMetricValue, bool, error) { num, err := s.getQueryResult(ctx) if err != nil { s.logger.Error(err, "error getting metrics from Datadog") - return []external_metrics.ExternalMetricValue{}, fmt.Errorf("error getting metrics from Datadog: %s", err) + return []external_metrics.ExternalMetricValue{}, false, fmt.Errorf("error getting metrics from Datadog: %s", err) } metric := GenerateMetricInMili(metricName, num) - return append([]external_metrics.ExternalMetricValue{}, metric), nil + return []external_metrics.ExternalMetricValue{metric}, num > s.metadata.activationQueryValue, nil } // Find the largest value in a slice of floats diff --git a/pkg/scalers/elasticsearch_scaler.go b/pkg/scalers/elasticsearch_scaler.go index c2aeb57af3e..6d106bf510b 100644 --- a/pkg/scalers/elasticsearch_scaler.go +++ b/pkg/scalers/elasticsearch_scaler.go @@ -251,16 +251,6 @@ func (s *elasticsearchScaler) Close(ctx context.Context) error { return nil } -// IsActive returns true if there are pending messages to be processed -func (s *elasticsearchScaler) IsActive(ctx context.Context) (bool, error) { - messages, err := s.getQueryResult(ctx) - if err != nil { - s.logger.Error(err, fmt.Sprintf("Error inspecting elasticsearch: %s", err)) - return false, err - } - return messages > s.metadata.activationTargetValue, nil -} - // getQueryResult returns result of the scaler query func (s *elasticsearchScaler) getQueryResult(ctx context.Context) (float64, error) { // Build the request body. 
@@ -339,16 +329,16 @@ func (s *elasticsearchScaler) GetMetricSpecForScaling(context.Context) []v2.Metr return []v2.MetricSpec{metricSpec} } -// GetMetrics returns value for a supported metric and an error if there is a problem getting the metric -func (s *elasticsearchScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { +// GetMetricsAndActivity returns value for a supported metric and an error if there is a problem getting the metric +func (s *elasticsearchScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { num, err := s.getQueryResult(ctx) if err != nil { - return []external_metrics.ExternalMetricValue{}, fmt.Errorf("error inspecting elasticsearch: %s", err) + return []external_metrics.ExternalMetricValue{}, false, fmt.Errorf("error inspecting elasticsearch: %s", err) } metric := GenerateMetricInMili(metricName, num) - return append([]external_metrics.ExternalMetricValue{}, metric), nil + return []external_metrics.ExternalMetricValue{metric}, num > s.metadata.activationTargetValue, nil } // Splits a string separated by a specified separator and trims space from all the elements. 
diff --git a/pkg/scalers/etcd_scaler.go b/pkg/scalers/etcd_scaler.go index 10b61c992dd..01add40107a 100644 --- a/pkg/scalers/etcd_scaler.go +++ b/pkg/scalers/etcd_scaler.go @@ -169,16 +169,6 @@ func getEtcdClients(metadata *etcdMetadata) (*clientv3.Client, error) { return cli, nil } -// IsActive determines if we need to scale from zero -func (s *etcdScaler) IsActive(ctx context.Context) (bool, error) { - v, err := s.getMetricValue(ctx) - if err != nil { - return false, err - } - - return v > s.metadata.activationValue, nil -} - // Close closes the etcd client func (s *etcdScaler) Close(context.Context) error { if s.client != nil { @@ -187,15 +177,15 @@ func (s *etcdScaler) Close(context.Context) error { return nil } -// GetMetrics returns value for a supported metric and an error if there is a problem getting the metric -func (s *etcdScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { +// GetMetricsAndActivity returns value for a supported metric and an error if there is a problem getting the metric +func (s *etcdScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { v, err := s.getMetricValue(ctx) if err != nil { - return []external_metrics.ExternalMetricValue{}, fmt.Errorf("error getting metric value: %s", err) + return []external_metrics.ExternalMetricValue{}, false, fmt.Errorf("error getting metric value: %s", err) } metric := GenerateMetricInMili(metricName, v) - return append([]external_metrics.ExternalMetricValue{}, metric), nil + return append([]external_metrics.ExternalMetricValue{}, metric), v > s.metadata.activationValue, nil } // GetMetricSpecForScaling returns the metric spec for the HPA. 
diff --git a/pkg/scalers/external_mock_scaler.go b/pkg/scalers/external_mock_scaler.go index 2e880e1b103..ee9a2bbc60a 100644 --- a/pkg/scalers/external_mock_scaler.go +++ b/pkg/scalers/external_mock_scaler.go @@ -30,22 +30,13 @@ func NewExternalMockScaler(config *ScalerConfig) (Scaler, error) { return &externalMockScaler{}, nil } -// IsActive implements Scaler -func (*externalMockScaler) IsActive(ctx context.Context) (bool, error) { - if atomic.LoadInt32(&MockExternalServerStatus) != MockExternalServerStatusOnline { - return false, ErrMock - } - - return true, nil -} - // Close implements Scaler func (*externalMockScaler) Close(ctx context.Context) error { return nil } // GetMetricSpecForScaling implements Scaler -func (*externalMockScaler) GetMetricSpecForScaling(ctx context.Context) []v2.MetricSpec { +func (*externalMockScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec { if atomic.LoadInt32(&MockExternalServerStatus) != MockExternalServerStatusOnline { return nil } @@ -53,13 +44,13 @@ func (*externalMockScaler) GetMetricSpecForScaling(ctx context.Context) []v2.Met return getMockMetricsSpecs() } -// GetMetrics implements Scaler -func (*externalMockScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { +// GetMetricsAndActivity implements Scaler +func (*externalMockScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { if atomic.LoadInt32(&MockExternalServerStatus) != MockExternalServerStatusOnline { - return nil, ErrMock + return nil, false, ErrMock } - return getMockExternalMetricsValue(), nil + return getMockExternalMetricsValue(), true, nil } func getMockMetricsSpecs() []v2.MetricSpec { diff --git a/pkg/scalers/external_scaler.go b/pkg/scalers/external_scaler.go index 2b83ee26cac..b9fb43988c2 100644 --- a/pkg/scalers/external_scaler.go +++ b/pkg/scalers/external_scaler.go @@ -217,6 +217,23 @@ func (s *externalScaler) 
GetMetrics(ctx context.Context, metricName string) ([]e return metrics, nil } +// TODO merge isActive() and GetMetrics() +func (s *externalScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { + metrics, err := s.GetMetrics(ctx, metricName) + if err != nil { + s.logger.Error(err, "error getting metrics") + return []external_metrics.ExternalMetricValue{}, false, err + } + + isActive, err := s.IsActive(ctx) + if err != nil { + s.logger.Error(err, "error getting activity status") + return []external_metrics.ExternalMetricValue{}, false, err + } + + return metrics, isActive, nil +} + // handleIsActiveStream is the only writer to the active channel and will close it on return. func (s *externalPushScaler) Run(ctx context.Context, active chan<- bool) { defer close(active) diff --git a/pkg/scalers/gcp_pubsub_scaler.go b/pkg/scalers/gcp_pubsub_scaler.go index 2149f5ff8c4..7a09b6a40bd 100644 --- a/pkg/scalers/gcp_pubsub_scaler.go +++ b/pkg/scalers/gcp_pubsub_scaler.go @@ -135,28 +135,6 @@ func parsePubSubMetadata(config *ScalerConfig, logger logr.Logger) (*pubsubMetad return &meta, nil } -// IsActive checks if there are any messages in the subscription -func (s *pubsubScaler) IsActive(ctx context.Context) (bool, error) { - switch s.metadata.mode { - case pubsubModeSubscriptionSize: - size, err := s.getMetrics(ctx, pubSubStackDriverSubscriptionSizeMetricName) - if err != nil { - s.logger.Error(err, "error getting Active Status") - return false, err - } - return size > s.metadata.activationValue, nil - case pubsubModeOldestUnackedMessageAge: - delay, err := s.getMetrics(ctx, pubSubStackDriverOldestUnackedMessageAgeMetricName) - if err != nil { - s.logger.Error(err, "error getting Active Status") - return false, err - } - return delay > s.metadata.activationValue, nil - default: - return false, errors.New("unknown mode") - } -} - func (s *pubsubScaler) Close(context.Context) error { if s.client != nil { 
err := s.client.metricsClient.Close() @@ -187,8 +165,8 @@ func (s *pubsubScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec return []v2.MetricSpec{metricSpec} } -// GetMetrics connects to Stack Driver and finds the size of the pub sub subscription -func (s *pubsubScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { +// GetMetricsAndActivity connects to Stack Driver and finds the size of the pub sub subscription +func (s *pubsubScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { var value float64 var err error @@ -197,19 +175,19 @@ func (s *pubsubScaler) GetMetrics(ctx context.Context, metricName string) ([]ext value, err = s.getMetrics(ctx, pubSubStackDriverSubscriptionSizeMetricName) if err != nil { s.logger.Error(err, "error getting subscription size") - return []external_metrics.ExternalMetricValue{}, err + return []external_metrics.ExternalMetricValue{}, false, err } case pubsubModeOldestUnackedMessageAge: value, err = s.getMetrics(ctx, pubSubStackDriverOldestUnackedMessageAgeMetricName) if err != nil { s.logger.Error(err, "error getting oldest unacked message age") - return []external_metrics.ExternalMetricValue{}, err + return []external_metrics.ExternalMetricValue{}, false, err } } metric := GenerateMetricInMili(metricName, value) - return append([]external_metrics.ExternalMetricValue{}, metric), nil + return []external_metrics.ExternalMetricValue{metric}, value > s.metadata.activationValue, nil } func (s *pubsubScaler) setStackdriverClient(ctx context.Context) error { diff --git a/pkg/scalers/gcp_stackdriver_scaler.go b/pkg/scalers/gcp_stackdriver_scaler.go index 6a4d8b83f04..e91b9fbeff7 100644 --- a/pkg/scalers/gcp_stackdriver_scaler.go +++ b/pkg/scalers/gcp_stackdriver_scaler.go @@ -161,15 +161,6 @@ func initializeStackdriverClient(ctx context.Context, gcpAuthorization *gcpAutho return client, nil } -func (s 
*stackdriverScaler) IsActive(ctx context.Context) (bool, error) { - value, err := s.getMetrics(ctx) - if err != nil { - s.logger.Error(err, "error getting metric value") - return false, err - } - return value > s.metadata.activationTargetValue, nil -} - func (s *stackdriverScaler) Close(context.Context) error { if s.client != nil { err := s.client.metricsClient.Close() @@ -200,17 +191,17 @@ func (s *stackdriverScaler) GetMetricSpecForScaling(context.Context) []v2.Metric return []v2.MetricSpec{metricSpec} } -// GetMetrics connects to Stack Driver and retrieves the metric -func (s *stackdriverScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { +// GetMetricsAndActivity connects to Stack Driver and retrieves the metric +func (s *stackdriverScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { value, err := s.getMetrics(ctx) if err != nil { s.logger.Error(err, "error getting metric value") - return []external_metrics.ExternalMetricValue{}, err + return []external_metrics.ExternalMetricValue{}, false, err } metric := GenerateMetricInMili(metricName, value) - return append([]external_metrics.ExternalMetricValue{}, metric), nil + return []external_metrics.ExternalMetricValue{metric}, value > s.metadata.activationTargetValue, nil } // getMetrics gets metric type value from stackdriver api diff --git a/pkg/scalers/gcp_storage_scaler.go b/pkg/scalers/gcp_storage_scaler.go index 5600fc9a2ac..5efe3e77c6a 100644 --- a/pkg/scalers/gcp_storage_scaler.go +++ b/pkg/scalers/gcp_storage_scaler.go @@ -157,16 +157,6 @@ func parseGcsMetadata(config *ScalerConfig, logger logr.Logger) (*gcsMetadata, e return &meta, nil } -// IsActive checks if there are any messages in the subscription -func (s *gcsScaler) IsActive(ctx context.Context) (bool, error) { - items, err := s.getItemCount(ctx, s.metadata.activationTargetObjectCount+1) - if err != nil { - return false, 
err - } - - return items > s.metadata.activationTargetObjectCount, nil -} - func (s *gcsScaler) Close(context.Context) error { if s.client != nil { return s.client.Close() @@ -186,16 +176,16 @@ func (s *gcsScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec { return []v2.MetricSpec{metricSpec} } -// GetMetrics returns the number of items in the bucket (up to s.metadata.maxBucketItemsToScan) -func (s *gcsScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { +// GetMetricsAndActivity returns the number of items in the bucket (up to s.metadata.maxBucketItemsToScan) +func (s *gcsScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { items, err := s.getItemCount(ctx, s.metadata.maxBucketItemsToScan) if err != nil { - return []external_metrics.ExternalMetricValue{}, err + return []external_metrics.ExternalMetricValue{}, false, err } metric := GenerateMetricInMili(metricName, float64(items)) - return append([]external_metrics.ExternalMetricValue{}, metric), nil + return []external_metrics.ExternalMetricValue{metric}, items > s.metadata.activationTargetObjectCount, nil } // getItemCount gets the number of items in the bucket, up to maxCount diff --git a/pkg/scalers/graphite_scaler.go b/pkg/scalers/graphite_scaler.go index 0c14982016f..f9e88620c7e 100644 --- a/pkg/scalers/graphite_scaler.go +++ b/pkg/scalers/graphite_scaler.go @@ -148,16 +148,6 @@ func parseGraphiteMetadata(config *ScalerConfig) (*graphiteMetadata, error) { return &meta, nil } -func (s *graphiteScaler) IsActive(ctx context.Context) (bool, error) { - val, err := s.executeGrapQuery(ctx) - if err != nil { - s.logger.Error(err, "error executing graphite query") - return false, err - } - - return val > s.metadata.activationThreshold, nil -} - func (s *graphiteScaler) Close(context.Context) error { return nil } @@ -223,14 +213,14 @@ func (s *graphiteScaler) 
executeGrapQuery(ctx context.Context) (float64, error) return -1, fmt.Errorf("no valid non-null response in query %s, try increasing your queryTime or check your query", s.metadata.query) } -func (s *graphiteScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { +func (s *graphiteScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { val, err := s.executeGrapQuery(ctx) if err != nil { s.logger.Error(err, "error executing graphite query") - return []external_metrics.ExternalMetricValue{}, err + return []external_metrics.ExternalMetricValue{}, false, err } metric := GenerateMetricInMili(metricName, val) - return append([]external_metrics.ExternalMetricValue{}, metric), nil + return []external_metrics.ExternalMetricValue{metric}, val > s.metadata.activationThreshold, nil } diff --git a/pkg/scalers/huawei_cloudeye_scaler.go b/pkg/scalers/huawei_cloudeye_scaler.go index f80bcc629cc..be9c4810b78 100644 --- a/pkg/scalers/huawei_cloudeye_scaler.go +++ b/pkg/scalers/huawei_cloudeye_scaler.go @@ -240,16 +240,16 @@ func gethuaweiAuthorization(authParams map[string]string) (huaweiAuthorizationMe return meta, nil } -func (s *huaweiCloudeyeScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { +func (s *huaweiCloudeyeScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { metricValue, err := s.GetCloudeyeMetrics() if err != nil { s.logger.Error(err, "Error getting metric value") - return []external_metrics.ExternalMetricValue{}, err + return []external_metrics.ExternalMetricValue{}, false, err } metric := GenerateMetricInMili(metricName, metricValue) - return append([]external_metrics.ExternalMetricValue{}, metric), nil + return []external_metrics.ExternalMetricValue{metric}, metricValue > s.metadata.activationTargetMetricValue, nil } 
func (s *huaweiCloudeyeScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec { @@ -263,16 +263,6 @@ func (s *huaweiCloudeyeScaler) GetMetricSpecForScaling(context.Context) []v2.Met return []v2.MetricSpec{metricSpec} } -func (s *huaweiCloudeyeScaler) IsActive(ctx context.Context) (bool, error) { - val, err := s.GetCloudeyeMetrics() - - if err != nil { - return false, err - } - - return val > s.metadata.activationTargetMetricValue, nil -} - func (s *huaweiCloudeyeScaler) Close(context.Context) error { return nil } diff --git a/pkg/scalers/ibmmq_scaler.go b/pkg/scalers/ibmmq_scaler.go index c6767d31b59..f9cfc88abaa 100644 --- a/pkg/scalers/ibmmq_scaler.go +++ b/pkg/scalers/ibmmq_scaler.go @@ -164,15 +164,6 @@ func parseIBMMQMetadata(config *ScalerConfig) (*IBMMQMetadata, error) { return &meta, nil } -// IsActive returns true if there are messages to be processed/if we need to scale from zero -func (s *IBMMQScaler) IsActive(ctx context.Context) (bool, error) { - queueDepth, err := s.getQueueDepthViaHTTP(ctx) - if err != nil { - return false, fmt.Errorf("error inspecting IBM MQ queue depth: %s", err) - } - return queueDepth > s.metadata.activationQueueDepth, nil -} - // getQueueDepthViaHTTP returns the depth of the MQ Queue from the Admin endpoint func (s *IBMMQScaler) getQueueDepthViaHTTP(ctx context.Context) (int64, error) { queue := s.metadata.queueName @@ -228,14 +219,14 @@ func (s *IBMMQScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec { return []v2.MetricSpec{metricSpec} } -// GetMetrics returns value for a supported metric and an error if there is a problem getting the metric -func (s *IBMMQScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { +// GetMetricsAndActivity returns value for a supported metric and an error if there is a problem getting the metric +func (s *IBMMQScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, 
bool, error) { queueDepth, err := s.getQueueDepthViaHTTP(ctx) if err != nil { - return []external_metrics.ExternalMetricValue{}, fmt.Errorf("error inspecting IBM MQ queue depth: %s", err) + return []external_metrics.ExternalMetricValue{}, false, fmt.Errorf("error inspecting IBM MQ queue depth: %s", err) } metric := GenerateMetricInMili(metricName, float64(queueDepth)) - return append([]external_metrics.ExternalMetricValue{}, metric), nil + return []external_metrics.ExternalMetricValue{metric}, queueDepth > s.metadata.activationQueueDepth, nil } diff --git a/pkg/scalers/influxdb_scaler.go b/pkg/scalers/influxdb_scaler.go index cd1f48b8c5d..1da56243e90 100644 --- a/pkg/scalers/influxdb_scaler.go +++ b/pkg/scalers/influxdb_scaler.go @@ -164,18 +164,6 @@ func parseInfluxDBMetadata(config *ScalerConfig) (*influxDBMetadata, error) { }, nil } -// IsActive returns true if queried value is above the minimum value -func (s *influxDBScaler) IsActive(ctx context.Context) (bool, error) { - queryAPI := s.client.QueryAPI(s.metadata.organizationName) - - value, err := queryInfluxDB(ctx, queryAPI, s.metadata.query) - if err != nil { - return false, err - } - - return value > s.metadata.activationThresholdValue, nil -} - // Close closes the connection of the client to the server func (s *influxDBScaler) Close(context.Context) error { s.client.Close() @@ -206,19 +194,19 @@ func queryInfluxDB(ctx context.Context, queryAPI api.QueryAPI, query string) (fl } } -// GetMetrics connects to influxdb via the client and returns a value based on the query -func (s *influxDBScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { +// GetMetricsAndActivity connects to influxdb via the client and returns a value based on the query +func (s *influxDBScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { // Grab QueryAPI to make queries to influxdb instance queryAPI := 
s.client.QueryAPI(s.metadata.organizationName) value, err := queryInfluxDB(ctx, queryAPI, s.metadata.query) if err != nil { - return []external_metrics.ExternalMetricValue{}, err + return []external_metrics.ExternalMetricValue{}, false, err } metric := GenerateMetricInMili(metricName, value) - return append([]external_metrics.ExternalMetricValue{}, metric), nil + return []external_metrics.ExternalMetricValue{metric}, value > s.metadata.activationThresholdValue, nil } // GetMetricSpecForScaling returns the metric spec for the Horizontal Pod Autoscaler diff --git a/pkg/scalers/kafka_scaler.go b/pkg/scalers/kafka_scaler.go index 8ce8a159694..c76c953b662 100644 --- a/pkg/scalers/kafka_scaler.go +++ b/pkg/scalers/kafka_scaler.go @@ -306,17 +306,6 @@ func parseKafkaMetadata(config *ScalerConfig, logger logr.Logger) (kafkaMetadata return meta, nil } -// IsActive determines if we need to scale from zero -// When replicas is zero, all lag will be deemed as persistent, hence use totalLagWithPersistent to determine scaling. 
-func (s *kafkaScaler) IsActive(ctx context.Context) (bool, error) { - _, totalLagWithPersistent, err := s.getTotalLag() - if err != nil { - return false, err - } - - return totalLagWithPersistent > s.metadata.activationLagThreshold, nil -} - func getKafkaClients(metadata kafkaMetadata) (sarama.Client, sarama.ClusterAdmin, error) { config := sarama.NewConfig() config.Version = metadata.version @@ -574,15 +563,15 @@ func (s *kafkaScaler) getConsumerAndProducerOffsets(topicPartitions map[string][ return consumerRes.consumerOffsets, producerRes.producerOffsets, nil } -// GetMetrics returns value for a supported metric and an error if there is a problem getting the metric -func (s *kafkaScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { - totalLag, _, err := s.getTotalLag() +// GetMetricsAndActivity returns value for a supported metric and an error if there is a problem getting the metric +func (s *kafkaScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { + totalLag, totalLagWithPersistent, err := s.getTotalLag() if err != nil { - return []external_metrics.ExternalMetricValue{}, err + return []external_metrics.ExternalMetricValue{}, false, err } metric := GenerateMetricInMili(metricName, float64(totalLag)) - return append([]external_metrics.ExternalMetricValue{}, metric), nil + return []external_metrics.ExternalMetricValue{metric}, totalLagWithPersistent > s.metadata.activationLagThreshold, nil } // getTotalLag returns totalLag, totalLagWithPersistent, error diff --git a/pkg/scalers/kubernetes_workload_scaler.go b/pkg/scalers/kubernetes_workload_scaler.go index 07a7812655c..a60f6a2eca3 100644 --- a/pkg/scalers/kubernetes_workload_scaler.go +++ b/pkg/scalers/kubernetes_workload_scaler.go @@ -87,17 +87,6 @@ func parseWorkloadMetadata(config *ScalerConfig) (*kubernetesWorkloadMetadata, e return meta, nil } -// IsActive determines if we need to 
scale from zero -func (s *kubernetesWorkloadScaler) IsActive(ctx context.Context) (bool, error) { - pods, err := s.getMetricValue(ctx) - - if err != nil { - return false, err - } - - return float64(pods) > s.metadata.activationValue, nil -} - // Close no need for kubernetes workload scaler func (s *kubernetesWorkloadScaler) Close(context.Context) error { return nil @@ -115,16 +104,16 @@ func (s *kubernetesWorkloadScaler) GetMetricSpecForScaling(context.Context) []v2 return []v2.MetricSpec{metricSpec} } -// GetMetrics returns value for a supported metric -func (s *kubernetesWorkloadScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { +// GetMetricsAndActivity returns value for a supported metric +func (s *kubernetesWorkloadScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { pods, err := s.getMetricValue(ctx) if err != nil { - return []external_metrics.ExternalMetricValue{}, fmt.Errorf("error inspecting kubernetes workload: %s", err) + return []external_metrics.ExternalMetricValue{}, false, fmt.Errorf("error inspecting kubernetes workload: %s", err) } metric := GenerateMetricInMili(metricName, float64(pods)) - return append([]external_metrics.ExternalMetricValue{}, metric), nil + return []external_metrics.ExternalMetricValue{metric}, float64(pods) > s.metadata.activationValue, nil } func (s *kubernetesWorkloadScaler) getMetricValue(ctx context.Context) (int64, error) { diff --git a/pkg/scalers/kubernetes_workload_scaler_test.go b/pkg/scalers/kubernetes_workload_scaler_test.go index d7e26e4da05..a015a77c3ba 100644 --- a/pkg/scalers/kubernetes_workload_scaler_test.go +++ b/pkg/scalers/kubernetes_workload_scaler_test.go @@ -75,7 +75,7 @@ func TestWorkloadIsActive(t *testing.T) { ScalableObjectNamespace: testData.namespace, }, ) - isActive, _ := s.IsActive(context.TODO()) + _, isActive, _ := s.GetMetricsAndActivity(context.TODO(), "Metric") if 
testData.active && !isActive { t.Error("Expected active but got inactive") } @@ -183,7 +183,7 @@ func TestWorkloadPhase(t *testing.T) { if err != nil { t.Errorf("Failed to create test scaler -- %v", err) } - isActive, err := s.IsActive(context.TODO()) + _, isActive, err := s.GetMetricsAndActivity(context.TODO(), "Metric") if err != nil { t.Errorf("Failed to count active -- %v", err) } diff --git a/pkg/scalers/liiklus/LiiklusService.pb.go b/pkg/scalers/liiklus/LiiklusService.pb.go index 18a9b752e4a..b101d7cc6fc 100644 --- a/pkg/scalers/liiklus/LiiklusService.pb.go +++ b/pkg/scalers/liiklus/LiiklusService.pb.go @@ -7,10 +7,10 @@ package liiklus import ( - empty "github.com/golang/protobuf/ptypes/empty" - timestamp "github.com/golang/protobuf/ptypes/timestamp" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" reflect "reflect" sync "sync" ) @@ -326,6 +326,7 @@ type SubscribeReply struct { unknownFields protoimpl.UnknownFields // Types that are assignable to Reply: + // // *SubscribeReply_Assignment Reply isSubscribeReply_Reply `protobuf_oneof:"reply"` } @@ -536,6 +537,7 @@ type ReceiveReply struct { unknownFields protoimpl.UnknownFields // Types that are assignable to Reply: + // // *ReceiveReply_Record_ Reply isReceiveReply_Reply `protobuf_oneof:"reply"` } @@ -805,11 +807,11 @@ type ReceiveReply_Record struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Offset uint64 `protobuf:"varint,1,opt,name=offset,proto3" json:"offset,omitempty"` - Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` - Value []byte `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` - Timestamp *timestamp.Timestamp `protobuf:"bytes,4,opt,name=timestamp,proto3" json:"timestamp,omitempty"` - Replay bool 
`protobuf:"varint,5,opt,name=replay,proto3" json:"replay,omitempty"` + Offset uint64 `protobuf:"varint,1,opt,name=offset,proto3" json:"offset,omitempty"` + Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` + Timestamp *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + Replay bool `protobuf:"varint,5,opt,name=replay,proto3" json:"replay,omitempty"` } func (x *ReceiveReply_Record) Reset() { @@ -865,7 +867,7 @@ func (x *ReceiveReply_Record) GetValue() []byte { return nil } -func (x *ReceiveReply_Record) GetTimestamp() *timestamp.Timestamp { +func (x *ReceiveReply_Record) GetTimestamp() *timestamppb.Timestamp { if x != nil { return x.Timestamp } @@ -1071,8 +1073,8 @@ var file_LiiklusService_proto_goTypes = []interface{}{ (*ReceiveReply_Record)(nil), // 13: com.github.bsideup.liiklus.ReceiveReply.Record nil, // 14: com.github.bsideup.liiklus.GetOffsetsReply.OffsetsEntry nil, // 15: com.github.bsideup.liiklus.GetEndOffsetsReply.OffsetsEntry - (*timestamp.Timestamp)(nil), // 16: google.protobuf.Timestamp - (*empty.Empty)(nil), // 17: google.protobuf.Empty + (*timestamppb.Timestamp)(nil), // 16: google.protobuf.Timestamp + (*emptypb.Empty)(nil), // 17: google.protobuf.Empty } var file_LiiklusService_proto_depIdxs = []int32{ 0, // 0: com.github.bsideup.liiklus.SubscribeRequest.autoOffsetReset:type_name -> com.github.bsideup.liiklus.SubscribeRequest.AutoOffsetReset diff --git a/pkg/scalers/liiklus/LiiklusService_grpc.pb.go b/pkg/scalers/liiklus/LiiklusService_grpc.pb.go index 6fe70e2b323..d05170338af 100644 --- a/pkg/scalers/liiklus/LiiklusService_grpc.pb.go +++ b/pkg/scalers/liiklus/LiiklusService_grpc.pb.go @@ -8,10 +8,10 @@ package liiklus import ( context "context" - empty "github.com/golang/protobuf/ptypes/empty" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" + 
emptypb "google.golang.org/protobuf/types/known/emptypb" ) // This is a compile-time assertion to ensure that this generated file @@ -26,7 +26,7 @@ type LiiklusServiceClient interface { Publish(ctx context.Context, in *PublishRequest, opts ...grpc.CallOption) (*PublishReply, error) Subscribe(ctx context.Context, in *SubscribeRequest, opts ...grpc.CallOption) (LiiklusService_SubscribeClient, error) Receive(ctx context.Context, in *ReceiveRequest, opts ...grpc.CallOption) (LiiklusService_ReceiveClient, error) - Ack(ctx context.Context, in *AckRequest, opts ...grpc.CallOption) (*empty.Empty, error) + Ack(ctx context.Context, in *AckRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) GetOffsets(ctx context.Context, in *GetOffsetsRequest, opts ...grpc.CallOption) (*GetOffsetsReply, error) GetEndOffsets(ctx context.Context, in *GetEndOffsetsRequest, opts ...grpc.CallOption) (*GetEndOffsetsReply, error) } @@ -112,8 +112,8 @@ func (x *liiklusServiceReceiveClient) Recv() (*ReceiveReply, error) { return m, nil } -func (c *liiklusServiceClient) Ack(ctx context.Context, in *AckRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) +func (c *liiklusServiceClient) Ack(ctx context.Context, in *AckRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) err := c.cc.Invoke(ctx, "/com.github.bsideup.liiklus.LiiklusService/Ack", in, out, opts...) 
if err != nil { return nil, err @@ -146,7 +146,7 @@ type LiiklusServiceServer interface { Publish(context.Context, *PublishRequest) (*PublishReply, error) Subscribe(*SubscribeRequest, LiiklusService_SubscribeServer) error Receive(*ReceiveRequest, LiiklusService_ReceiveServer) error - Ack(context.Context, *AckRequest) (*empty.Empty, error) + Ack(context.Context, *AckRequest) (*emptypb.Empty, error) GetOffsets(context.Context, *GetOffsetsRequest) (*GetOffsetsReply, error) GetEndOffsets(context.Context, *GetEndOffsetsRequest) (*GetEndOffsetsReply, error) mustEmbedUnimplementedLiiklusServiceServer() @@ -165,7 +165,7 @@ func (UnimplementedLiiklusServiceServer) Subscribe(*SubscribeRequest, LiiklusSer func (UnimplementedLiiklusServiceServer) Receive(*ReceiveRequest, LiiklusService_ReceiveServer) error { return status.Errorf(codes.Unimplemented, "method Receive not implemented") } -func (UnimplementedLiiklusServiceServer) Ack(context.Context, *AckRequest) (*empty.Empty, error) { +func (UnimplementedLiiklusServiceServer) Ack(context.Context, *AckRequest) (*emptypb.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method Ack not implemented") } func (UnimplementedLiiklusServiceServer) GetOffsets(context.Context, *GetOffsetsRequest) (*GetOffsetsReply, error) { diff --git a/pkg/scalers/liiklus_scaler.go b/pkg/scalers/liiklus_scaler.go index 04198828e0d..f3f8996b945 100644 --- a/pkg/scalers/liiklus_scaler.go +++ b/pkg/scalers/liiklus_scaler.go @@ -74,19 +74,19 @@ func NewLiiklusScaler(config *ScalerConfig) (Scaler, error) { return &scaler, nil } -func (s *liiklusScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { +func (s *liiklusScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { totalLag, lags, err := s.getLag(ctx) if err != nil { - return nil, err + return nil, false, err } if totalLag/uint64(s.metadata.lagThreshold) > 
uint64(len(lags)) { totalLag = uint64(s.metadata.lagThreshold) * uint64(len(lags)) } - return []external_metrics.ExternalMetricValue{ - GenerateMetricInMili(metricName, float64(totalLag)), - }, nil + metric := GenerateMetricInMili(metricName, float64(totalLag)) + + return []external_metrics.ExternalMetricValue{metric}, totalLag > uint64(s.metadata.activationLagThreshold), nil } func (s *liiklusScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec { @@ -108,15 +108,6 @@ func (s *liiklusScaler) Close(context.Context) error { return nil } -// IsActive returns true if there is any lag on any partition. -func (s *liiklusScaler) IsActive(ctx context.Context) (bool, error) { - lag, _, err := s.getLag(ctx) - if err != nil { - return false, err - } - return lag > uint64(s.metadata.activationLagThreshold), nil -} - // getLag returns the total lag, as well as per-partition lag for this scaler. That is, the difference between the // latest offset available on this scaler topic, and the position of the consumer group this scaler is configured for. func (s *liiklusScaler) getLag(ctx context.Context) (uint64, map[uint32]uint64, error) { diff --git a/pkg/scalers/liiklus_scaler_test.go b/pkg/scalers/liiklus_scaler_test.go index 16d95e65fca..b929300a44a 100644 --- a/pkg/scalers/liiklus_scaler_test.go +++ b/pkg/scalers/liiklus_scaler_test.go @@ -96,7 +96,7 @@ func TestLiiklusScalerActiveBehavior(t *testing.T) { GetEndOffsets(gomock.Any(), gomock.Any()). Return(&liiklus.GetEndOffsetsReply{Offsets: map[uint32]uint64{0: 2}}, nil) - active, err := scaler.IsActive(context.Background()) + _, active, err := scaler.GetMetricsAndActivity(context.Background(), "m") if err != nil { t.Errorf("error calling IsActive: %v", err) return @@ -112,7 +112,7 @@ func TestLiiklusScalerActiveBehavior(t *testing.T) { GetEndOffsets(gomock.Any(), gomock.Any()). 
Return(&liiklus.GetEndOffsetsReply{Offsets: map[uint32]uint64{0: 2}}, nil) - active, err = scaler.IsActive(context.Background()) + _, active, err = scaler.GetMetricsAndActivity(context.Background(), "m") if err != nil { t.Errorf("error calling IsActive: %v", err) return @@ -140,7 +140,7 @@ func TestLiiklusScalerGetMetricsBehavior(t *testing.T) { GetEndOffsets(gomock.Any(), gomock.Any()). Return(&liiklus.GetEndOffsetsReply{Offsets: map[uint32]uint64{0: 20, 1: 30}}, nil) - values, err := scaler.GetMetrics(context.Background(), "m") + values, _, err := scaler.GetMetricsAndActivity(context.Background(), "m") if err != nil { t.Errorf("error calling IsActive: %v", err) return @@ -157,7 +157,7 @@ func TestLiiklusScalerGetMetricsBehavior(t *testing.T) { mockClient.EXPECT(). GetEndOffsets(gomock.Any(), gomock.Any()). Return(&liiklus.GetEndOffsetsReply{Offsets: map[uint32]uint64{0: 20, 1: 30}}, nil) - values, err = scaler.GetMetrics(context.Background(), "m") + values, _, err = scaler.GetMetricsAndActivity(context.Background(), "m") if err != nil { t.Errorf("error calling IsActive: %v", err) return diff --git a/pkg/scalers/loki_scaler.go b/pkg/scalers/loki_scaler.go index d5301ee58bc..631ff08c446 100644 --- a/pkg/scalers/loki_scaler.go +++ b/pkg/scalers/loki_scaler.go @@ -158,17 +158,6 @@ func parseLokiMetadata(config *ScalerConfig) (meta *lokiMetadata, err error) { return meta, nil } -// IsActive validates the loki query result against the activation threshold -func (s *lokiScaler) IsActive(ctx context.Context) (bool, error) { - val, err := s.ExecuteLokiQuery(ctx) - if err != nil { - s.logger.Error(err, "error executing loki query") - return false, err - } - - return val > s.metadata.activationThreshold, nil -} - // Close returns a nil error func (s *lokiScaler) Close(context.Context) error { return nil @@ -273,15 +262,15 @@ func (s *lokiScaler) ExecuteLokiQuery(ctx context.Context) (float64, error) { return v, nil } -// GetMetrics returns an external metric value for the 
loki -func (s *lokiScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { +// GetMetricsAndActivity returns an external metric value for the loki +func (s *lokiScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { val, err := s.ExecuteLokiQuery(ctx) if err != nil { s.logger.Error(err, "error executing loki query") - return []external_metrics.ExternalMetricValue{}, err + return []external_metrics.ExternalMetricValue{}, false, err } metric := GenerateMetricInMili(metricName, val) - return append([]external_metrics.ExternalMetricValue{}, metric), nil + return []external_metrics.ExternalMetricValue{metric}, val > s.metadata.activationThreshold, nil } diff --git a/pkg/scalers/metrics_api_scaler.go b/pkg/scalers/metrics_api_scaler.go index 08433269e89..4490802446b 100644 --- a/pkg/scalers/metrics_api_scaler.go +++ b/pkg/scalers/metrics_api_scaler.go @@ -256,17 +256,6 @@ func (s *metricsAPIScaler) Close(context.Context) error { return nil } -// IsActive returns true if there are pending messages to be processed -func (s *metricsAPIScaler) IsActive(ctx context.Context) (bool, error) { - v, err := s.getMetricValue(ctx) - if err != nil { - s.logger.Error(err, fmt.Sprintf("Error when checking metric value: %s", err)) - return false, err - } - - return v > s.metadata.activationTargetValue, nil -} - // GetMetricSpecForScaling returns the MetricSpec for the Horizontal Pod Autoscaler func (s *metricsAPIScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec { externalMetric := &v2.ExternalMetricSource{ @@ -281,16 +270,16 @@ func (s *metricsAPIScaler) GetMetricSpecForScaling(context.Context) []v2.MetricS return []v2.MetricSpec{metricSpec} } -// GetMetrics returns value for a supported metric and an error if there is a problem getting the metric -func (s *metricsAPIScaler) GetMetrics(ctx context.Context, metricName string) 
([]external_metrics.ExternalMetricValue, error) { +// GetMetricsAndActivity returns value for a supported metric and an error if there is a problem getting the metric +func (s *metricsAPIScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { val, err := s.getMetricValue(ctx) if err != nil { - return []external_metrics.ExternalMetricValue{}, fmt.Errorf("error requesting metrics endpoint: %s", err) + return []external_metrics.ExternalMetricValue{}, false, fmt.Errorf("error requesting metrics endpoint: %s", err) } metric := GenerateMetricInMili(metricName, val) - return append([]external_metrics.ExternalMetricValue{}, metric), nil + return []external_metrics.ExternalMetricValue{metric}, val > s.metadata.activationTargetValue, nil } func getMetricAPIServerRequest(ctx context.Context, meta *metricsAPIScalerMetadata) (*http.Request, error) { diff --git a/pkg/scalers/metrics_api_scaler_test.go b/pkg/scalers/metrics_api_scaler_test.go index 7dca8cac2a4..430218d864f 100644 --- a/pkg/scalers/metrics_api_scaler_test.go +++ b/pkg/scalers/metrics_api_scaler_test.go @@ -219,7 +219,7 @@ func TestBearerAuth(t *testing.T) { t.Errorf("Error creating the Scaler") } - _, err = s.GetMetrics(context.TODO(), "test-metric") + _, _, err = s.GetMetricsAndActivity(context.TODO(), "test-metric") if err != nil { t.Errorf("Error getting the metric") } diff --git a/pkg/scalers/mongo_scaler.go b/pkg/scalers/mongo_scaler.go index e1dfac85e40..d3a7b5e5443 100644 --- a/pkg/scalers/mongo_scaler.go +++ b/pkg/scalers/mongo_scaler.go @@ -202,15 +202,6 @@ func parseMongoDBMetadata(config *ScalerConfig) (*mongoDBMetadata, string, error return &meta, connStr, nil } -func (s *mongoDBScaler) IsActive(ctx context.Context) (bool, error) { - result, err := s.getQueryResult(ctx) - if err != nil { - s.logger.Error(err, fmt.Sprintf("failed to get query result by mongoDB, because of %v", err)) - return false, err - } - return result > 
s.metadata.activationQueryValue, nil -} - // Close disposes of mongoDB connections func (s *mongoDBScaler) Close(ctx context.Context) error { if s.client != nil { @@ -244,16 +235,16 @@ func (s *mongoDBScaler) getQueryResult(ctx context.Context) (int64, error) { return docsNum, nil } -// GetMetrics query from mongoDB,and return to external metrics -func (s *mongoDBScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { +// GetMetricsAndActivity query from mongoDB, and return to external metrics +func (s *mongoDBScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { num, err := s.getQueryResult(ctx) if err != nil { - return []external_metrics.ExternalMetricValue{}, fmt.Errorf("failed to inspect momgoDB, because of %v", err) + return []external_metrics.ExternalMetricValue{}, false, fmt.Errorf("failed to inspect mongoDB, because of %v", err) } metric := GenerateMetricInMili(metricName, float64(num)) - return append([]external_metrics.ExternalMetricValue{}, metric), nil + return []external_metrics.ExternalMetricValue{metric}, num > s.metadata.activationQueryValue, nil } // GetMetricSpecForScaling get the query value for scaling diff --git a/pkg/scalers/mssql_scaler.go b/pkg/scalers/mssql_scaler.go index c183cfc691a..bf0f1101717 100644 --- a/pkg/scalers/mssql_scaler.go +++ b/pkg/scalers/mssql_scaler.go @@ -244,16 +244,16 @@ func (s *mssqlScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec { return []v2.MetricSpec{metricSpec} } -// GetMetrics returns a value for a supported metric or an error if there is a problem getting the metric -func (s *mssqlScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { +// GetMetricsAndActivity returns a value for a supported metric or an error if there is a problem getting the metric +func (s *mssqlScaler) GetMetricsAndActivity(ctx context.Context, 
metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { num, err := s.getQueryResult(ctx) if err != nil { - return []external_metrics.ExternalMetricValue{}, fmt.Errorf("error inspecting mssql: %s", err) + return []external_metrics.ExternalMetricValue{}, false, fmt.Errorf("error inspecting mssql: %s", err) } metric := GenerateMetricInMili(metricName, num) - return append([]external_metrics.ExternalMetricValue{}, metric), nil + return []external_metrics.ExternalMetricValue{metric}, num > s.metadata.activationTargetValue, nil } // getQueryResult returns the result of the scaler query @@ -271,16 +271,6 @@ func (s *mssqlScaler) getQueryResult(ctx context.Context) (float64, error) { return value, nil } -// IsActive returns true if there are pending events to be processed -func (s *mssqlScaler) IsActive(ctx context.Context) (bool, error) { - messages, err := s.getQueryResult(ctx) - if err != nil { - return false, fmt.Errorf("error inspecting mssql: %s", err) - } - - return messages > s.metadata.activationTargetValue, nil -} - // Close closes the mssql database connections func (s *mssqlScaler) Close(context.Context) error { err := s.connection.Close() diff --git a/pkg/scalers/mysql_scaler.go b/pkg/scalers/mysql_scaler.go index 250781a48c6..0534af40a4c 100644 --- a/pkg/scalers/mysql_scaler.go +++ b/pkg/scalers/mysql_scaler.go @@ -193,16 +193,6 @@ func (s *mySQLScaler) Close(context.Context) error { return nil } -// IsActive returns true if there are pending messages to be processed -func (s *mySQLScaler) IsActive(ctx context.Context) (bool, error) { - messages, err := s.getQueryResult(ctx) - if err != nil { - s.logger.Error(err, fmt.Sprintf("Error inspecting MySQL: %s", err)) - return false, err - } - return messages > s.metadata.activationQueryValue, nil -} - // getQueryResult returns result of the scaler query func (s *mySQLScaler) getQueryResult(ctx context.Context) (float64, error) { var value float64 @@ -228,14 +218,14 @@ func (s *mySQLScaler) 
GetMetricSpecForScaling(context.Context) []v2.MetricSpec { return []v2.MetricSpec{metricSpec} } -// GetMetrics returns value for a supported metric and an error if there is a problem getting the metric -func (s *mySQLScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { +// GetMetricsAndActivity returns value for a supported metric and an error if there is a problem getting the metric +func (s *mySQLScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { num, err := s.getQueryResult(ctx) if err != nil { - return []external_metrics.ExternalMetricValue{}, fmt.Errorf("error inspecting MySQL: %s", err) + return []external_metrics.ExternalMetricValue{}, false, fmt.Errorf("error inspecting MySQL: %s", err) } metric := GenerateMetricInMili(metricName, num) - return append([]external_metrics.ExternalMetricValue{}, metric), nil + return []external_metrics.ExternalMetricValue{metric}, num > s.metadata.activationQueryValue, nil } diff --git a/pkg/scalers/nats_jetstream_scaler.go b/pkg/scalers/nats_jetstream_scaler.go index 816a46bfbc4..f4694a79a7d 100644 --- a/pkg/scalers/nats_jetstream_scaler.go +++ b/pkg/scalers/nats_jetstream_scaler.go @@ -12,8 +12,6 @@ import ( "github.com/go-logr/logr" v2 "k8s.io/api/autoscaling/v2" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/metrics/pkg/apis/external_metrics" kedautil "github.com/kedacore/keda/v2/pkg/util" @@ -369,19 +367,6 @@ func (s *natsJetStreamScaler) getNATSJetStreamMonitoringNodeURL(node string) (st return fmt.Sprintf("%s://%s.%s%s?%s", jsURL.Scheme, node, jsURL.Host, jsURL.Path, jsURL.RawQuery), nil } -func (s *natsJetStreamScaler) IsActive(ctx context.Context) (bool, error) { - err := s.getNATSJetstreamMonitoringData(ctx, s.metadata.monitoringURL) - if err != nil { - return false, err - } - - if s.stream == nil { - return false, errors.New("stream not 
found") - } - - return s.getMaxMsgLag() > s.metadata.activationLagThreshold, nil -} - func (s *natsJetStreamScaler) getMaxMsgLag() int64 { consumerName := s.metadata.consumer @@ -408,25 +393,22 @@ func (s *natsJetStreamScaler) GetMetricSpecForScaling(context.Context) []v2.Metr return []v2.MetricSpec{metricSpec} } -func (s *natsJetStreamScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { +func (s *natsJetStreamScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { err := s.getNATSJetstreamMonitoringData(ctx, s.metadata.monitoringURL) if err != nil { - return []external_metrics.ExternalMetricValue{}, err + return []external_metrics.ExternalMetricValue{}, false, err } if s.stream == nil { - return []external_metrics.ExternalMetricValue{}, errors.New("stream not found") + return []external_metrics.ExternalMetricValue{}, false, errors.New("stream not found") } totalLag := s.getMaxMsgLag() s.logger.V(1).Info("NATS JetStream Scaler: Providing metrics based on totalLag, threshold", "totalLag", totalLag, "lagThreshold", s.metadata.lagThreshold) - metric := external_metrics.ExternalMetricValue{ - MetricName: metricName, - Value: *resource.NewQuantity(totalLag, resource.DecimalSI), - Timestamp: metav1.Now(), - } - return append([]external_metrics.ExternalMetricValue{}, metric), nil + metric := GenerateMetricInMili(metricName, float64(totalLag)) + + return []external_metrics.ExternalMetricValue{metric}, totalLag > s.metadata.activationLagThreshold, nil } func (s *natsJetStreamScaler) Close(context.Context) error { diff --git a/pkg/scalers/nats_jetstream_scaler_test.go b/pkg/scalers/nats_jetstream_scaler_test.go index 1d0d59d9494..eeafe020d23 100644 --- a/pkg/scalers/nats_jetstream_scaler_test.go +++ b/pkg/scalers/nats_jetstream_scaler_test.go @@ -255,7 +255,7 @@ func TestNATSJetStreamIsActive(t *testing.T) { logger: 
InitializeLogger(&ScalerConfig{TriggerMetadata: mockResponse.metadata.metadataTestData.metadata, ScalerIndex: mockResponse.metadata.scalerIndex}, "nats_jetstream_scaler"), } - isActive, err := mockJetStreamScaler.IsActive(ctx) + _, isActive, err := mockJetStreamScaler.GetMetricsAndActivity(ctx, "metric_name") if err != nil && !mockResponse.isError { t.Errorf("Expected success for '%s' but got error %s", mockResponse.name, err) } else if mockResponse.isError && err == nil { @@ -310,7 +310,7 @@ func TestNATSJetStreamGetMetrics(t *testing.T) { logger: InitializeLogger(&ScalerConfig{TriggerMetadata: mockResponse.metadata.metadataTestData.metadata, ScalerIndex: mockResponse.metadata.scalerIndex}, "nats_jetstream_scaler"), } - _, err = mockJetStreamScaler.GetMetrics(ctx, "metric_name") + _, _, err = mockJetStreamScaler.GetMetricsAndActivity(ctx, "metric_name") if err != nil && !mockResponse.isError { t.Errorf("Expected success for '%s' but got error %s", mockResponse.name, err) diff --git a/pkg/scalers/newrelic_scaler.go b/pkg/scalers/newrelic_scaler.go index 7e5fa565625..11813fea29f 100644 --- a/pkg/scalers/newrelic_scaler.go +++ b/pkg/scalers/newrelic_scaler.go @@ -141,15 +141,6 @@ func parseNewRelicMetadata(config *ScalerConfig, logger logr.Logger) (*newrelicM return &meta, nil } -func (s *newrelicScaler) IsActive(ctx context.Context) (bool, error) { - val, err := s.executeNewRelicQuery(ctx) - if err != nil { - s.logger.Error(err, "error executing NRQL") - return false, err - } - return val > s.metadata.activationThreshold, nil -} - func (s *newrelicScaler) Close(context.Context) error { return nil } @@ -173,16 +164,16 @@ func (s *newrelicScaler) executeNewRelicQuery(ctx context.Context) (float64, err return 0, nil } -func (s *newrelicScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { +func (s *newrelicScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, 
bool, error) { val, err := s.executeNewRelicQuery(ctx) if err != nil { s.logger.Error(err, "error executing NRQL query") - return []external_metrics.ExternalMetricValue{}, err + return []external_metrics.ExternalMetricValue{}, false, err } metric := GenerateMetricInMili(metricName, val) - return append([]external_metrics.ExternalMetricValue{}, metric), nil + return []external_metrics.ExternalMetricValue{metric}, val > s.metadata.activationThreshold, nil } func (s *newrelicScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec { diff --git a/pkg/scalers/openstack_metrics_scaler.go b/pkg/scalers/openstack_metrics_scaler.go index e026948a445..039c9446d03 100644 --- a/pkg/scalers/openstack_metrics_scaler.go +++ b/pkg/scalers/openstack_metrics_scaler.go @@ -232,27 +232,17 @@ func (s *openstackMetricScaler) GetMetricSpecForScaling(context.Context) []v2.Me return []v2.MetricSpec{metricSpec} } -func (s *openstackMetricScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { +func (s *openstackMetricScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { val, err := s.readOpenstackMetrics(ctx) if err != nil { s.logger.Error(err, "Error collecting metric value") - return []external_metrics.ExternalMetricValue{}, err + return []external_metrics.ExternalMetricValue{}, false, err } metric := GenerateMetricInMili(metricName, val) - return append([]external_metrics.ExternalMetricValue{}, metric), nil -} - -func (s *openstackMetricScaler) IsActive(ctx context.Context) (bool, error) { - val, err := s.readOpenstackMetrics(ctx) - - if err != nil { - return false, err - } - - return val > s.metadata.activationThreshold, nil + return []external_metrics.ExternalMetricValue{metric}, val > s.metadata.activationThreshold, nil } func (s *openstackMetricScaler) Close(context.Context) error { diff --git a/pkg/scalers/openstack_swift_scaler.go 
b/pkg/scalers/openstack_swift_scaler.go index 3f0927e75ab..3c1e3ba2905 100644 --- a/pkg/scalers/openstack_swift_scaler.go +++ b/pkg/scalers/openstack_swift_scaler.go @@ -368,31 +368,21 @@ func parseOpenstackSwiftAuthenticationMetadata(config *ScalerConfig) (*openstack return &authMeta, nil } -func (s *openstackSwiftScaler) IsActive(ctx context.Context) (bool, error) { - objectCount, err := s.getOpenstackSwiftContainerObjectCount(ctx) - - if err != nil { - return false, err - } - - return objectCount > s.metadata.activationObjectCount, nil -} - func (s *openstackSwiftScaler) Close(context.Context) error { return nil } -func (s *openstackSwiftScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { +func (s *openstackSwiftScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { objectCount, err := s.getOpenstackSwiftContainerObjectCount(ctx) if err != nil { s.logger.Error(err, "error getting objectCount") - return []external_metrics.ExternalMetricValue{}, err + return []external_metrics.ExternalMetricValue{}, false, err } metric := GenerateMetricInMili(metricName, float64(objectCount)) - return append([]external_metrics.ExternalMetricValue{}, metric), nil + return []external_metrics.ExternalMetricValue{metric}, objectCount > s.metadata.activationObjectCount, nil } func (s *openstackSwiftScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec { diff --git a/pkg/scalers/postgresql_scaler.go b/pkg/scalers/postgresql_scaler.go index 6bd4203b05d..00f2b2048d3 100644 --- a/pkg/scalers/postgresql_scaler.go +++ b/pkg/scalers/postgresql_scaler.go @@ -167,16 +167,6 @@ func (s *postgreSQLScaler) Close(context.Context) error { return nil } -// IsActive returns true if there are pending messages to be processed -func (s *postgreSQLScaler) IsActive(ctx context.Context) (bool, error) { - messages, err := s.getActiveNumber(ctx) - if err != nil { - return 
false, fmt.Errorf("error inspecting postgreSQL: %s", err) - } - - return messages > s.metadata.activationTargetQueryValue, nil -} - func (s *postgreSQLScaler) getActiveNumber(ctx context.Context) (float64, error) { var id float64 err := s.connection.QueryRowContext(ctx, s.metadata.query).Scan(&id) @@ -201,14 +191,14 @@ func (s *postgreSQLScaler) GetMetricSpecForScaling(context.Context) []v2.MetricS return []v2.MetricSpec{metricSpec} } -// GetMetrics returns value for a supported metric and an error if there is a problem getting the metric -func (s *postgreSQLScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { +// GetMetricsAndActivity returns value for a supported metric and an error if there is a problem getting the metric +func (s *postgreSQLScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { num, err := s.getActiveNumber(ctx) if err != nil { - return []external_metrics.ExternalMetricValue{}, fmt.Errorf("error inspecting postgreSQL: %s", err) + return []external_metrics.ExternalMetricValue{}, false, fmt.Errorf("error inspecting postgreSQL: %s", err) } metric := GenerateMetricInMili(metricName, num) - return append([]external_metrics.ExternalMetricValue{}, metric), nil + return []external_metrics.ExternalMetricValue{metric}, num > s.metadata.activationTargetQueryValue, nil } diff --git a/pkg/scalers/predictkube_scaler.go b/pkg/scalers/predictkube_scaler.go index 07b4bc08ed3..b163124129e 100644 --- a/pkg/scalers/predictkube_scaler.go +++ b/pkg/scalers/predictkube_scaler.go @@ -23,10 +23,8 @@ import ( "github.com/prometheus/common/model" "github.com/xhit/go-str2duration/v2" "google.golang.org/grpc" - "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" health "google.golang.org/grpc/health/grpc_health_v1" - "google.golang.org/grpc/status" v2 "k8s.io/api/autoscaling/v2" "k8s.io/metrics/pkg/apis/external_metrics" @@ -173,31 
+171,6 @@ func NewPredictKubeScaler(ctx context.Context, config *ScalerConfig) (*PredictKu return s, nil } -// IsActive returns true if we are able to get metrics from PredictKube -func (s *PredictKubeScaler) IsActive(ctx context.Context) (bool, error) { - results, err := s.doQuery(ctx) - if err != nil { - return false, err - } - - resp, err := s.healthClient.Check(ctx, &health.HealthCheckRequest{}) - - if resp == nil { - return false, fmt.Errorf("can't connect grpc server: empty server response, code: %v", codes.Unknown) - } - - if err != nil { - return false, fmt.Errorf("can't connect grpc server: %v, code: %v", err, status.Code(err)) - } - - var y float64 - if len(results) > 0 { - y = results[len(results)-1].Value - } - - return y > s.metadata.activationThreshold, nil -} - func (s *PredictKubeScaler) Close(_ context.Context) error { if s != nil && s.grpcConn != nil { return s.grpcConn.Close() @@ -220,30 +193,29 @@ func (s *PredictKubeScaler) GetMetricSpecForScaling(context.Context) []v2.Metric return []v2.MetricSpec{metricSpec} } -func (s *PredictKubeScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { - value, err := s.doPredictRequest(ctx) +func (s *PredictKubeScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { + value, activationValue, err := s.doPredictRequest(ctx) if err != nil { s.logger.Error(err, "error executing query to predict controller service") - return []external_metrics.ExternalMetricValue{}, err + return []external_metrics.ExternalMetricValue{}, false, err } if value == 0 { - err = errors.New("empty response after predict request") - s.logger.Error(err, "") - return nil, err + s.logger.V(1).Info("empty response after predict request") + return []external_metrics.ExternalMetricValue{}, false, nil } s.logger.V(1).Info(fmt.Sprintf("predict value is: %f", value)) metric := GenerateMetricInMili(metricName, value) - return 
append([]external_metrics.ExternalMetricValue{}, metric), nil + return []external_metrics.ExternalMetricValue{metric}, activationValue > s.metadata.activationThreshold, nil } -func (s *PredictKubeScaler) doPredictRequest(ctx context.Context) (float64, error) { +func (s *PredictKubeScaler) doPredictRequest(ctx context.Context) (float64, float64, error) { results, err := s.doQuery(ctx) if err != nil { - return 0, err + return 0, 0, err } resp, err := s.grpcClient.GetPredictMetric(ctx, &pb.ReqGetPredictMetric{ @@ -252,7 +224,7 @@ func (s *PredictKubeScaler) doPredictRequest(ctx context.Context) (float64, erro }) if err != nil { - return 0, err + return 0, 0, err } var y float64 @@ -267,7 +239,7 @@ func (s *PredictKubeScaler) doPredictRequest(ctx context.Context) (float64, erro return y } return x - }(x, y), nil + }(x, y), y, nil } func (s *PredictKubeScaler) doQuery(ctx context.Context) ([]*commonproto.Item, error) { diff --git a/pkg/scalers/predictkube_scaler_test.go b/pkg/scalers/predictkube_scaler_test.go index 97c072a604c..af727de1ffc 100644 --- a/pkg/scalers/predictkube_scaler_test.go +++ b/pkg/scalers/predictkube_scaler_test.go @@ -217,7 +217,7 @@ func TestPredictKubeGetMetrics(t *testing.T) { ) assert.NoError(t, err) - result, err := mockPredictKubeScaler.GetMetrics(context.Background(), predictKubeMetricPrefix) + result, _, err := mockPredictKubeScaler.GetMetricsAndActivity(context.Background(), predictKubeMetricPrefix) assert.NoError(t, err) assert.Equal(t, len(result), 1) assert.Equal(t, result[0].Value, *resource.NewMilliQuantity(mockPredictServer.val*1000, resource.DecimalSI)) diff --git a/pkg/scalers/prometheus_scaler.go b/pkg/scalers/prometheus_scaler.go index c44acc5f812..6a455bcda3b 100644 --- a/pkg/scalers/prometheus_scaler.go +++ b/pkg/scalers/prometheus_scaler.go @@ -189,16 +189,6 @@ func parsePrometheusMetadata(config *ScalerConfig) (meta *prometheusMetadata, er return meta, nil } -func (s *prometheusScaler) IsActive(ctx context.Context) (bool, 
error) { - val, err := s.ExecutePromQuery(ctx) - if err != nil { - s.logger.Error(err, "error executing prometheus query") - return false, err - } - - return val > s.metadata.activationThreshold, nil -} - func (s *prometheusScaler) Close(context.Context) error { return nil } @@ -309,14 +299,14 @@ func (s *prometheusScaler) ExecutePromQuery(ctx context.Context) (float64, error return v, nil } -func (s *prometheusScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { +func (s *prometheusScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { val, err := s.ExecutePromQuery(ctx) if err != nil { s.logger.Error(err, "error executing prometheus query") - return []external_metrics.ExternalMetricValue{}, err + return []external_metrics.ExternalMetricValue{}, false, err } metric := GenerateMetricInMili(metricName, val) - return append([]external_metrics.ExternalMetricValue{}, metric), nil + return []external_metrics.ExternalMetricValue{metric}, val > s.metadata.activationThreshold, nil } diff --git a/pkg/scalers/pulsar_scaler.go b/pkg/scalers/pulsar_scaler.go index cd68b58d344..f814f6db991 100644 --- a/pkg/scalers/pulsar_scaler.go +++ b/pkg/scalers/pulsar_scaler.go @@ -254,35 +254,20 @@ func (s *pulsarScaler) getMsgBackLog(ctx context.Context) (int64, bool, error) { return v.Msgbacklog, found, nil } -// IsActive determines if we need to scale from zero -func (s *pulsarScaler) IsActive(ctx context.Context) (bool, error) { - msgBackLog, found, err := s.getMsgBackLog(ctx) - if err != nil { - return false, err - } - - if !found { - s.logger.Info("Pulsar subscription is not active, either no subscription found or no backlog detected") - return false, nil - } - - return msgBackLog > s.metadata.activationMsgBacklogThreshold, nil -} - -// GetMetrics returns value for a supported metric and an error if there is a problem getting the metric -func (s 
*pulsarScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { +// GetMetricsAndActivity returns value for a supported metric and an error if there is a problem getting the metric +func (s *pulsarScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { msgBacklog, found, err := s.getMsgBackLog(ctx) if err != nil { - return nil, fmt.Errorf("error requesting stats from url: %s", err) + return nil, false, fmt.Errorf("error requesting stats from url: %s", err) } if !found { - return nil, fmt.Errorf("have not subscription found! %s", s.metadata.subscription) + return nil, false, fmt.Errorf("no subscription found: %s", s.metadata.subscription) } metric := GenerateMetricInMili(metricName, float64(msgBacklog)) - return append([]external_metrics.ExternalMetricValue{}, metric), nil + return []external_metrics.ExternalMetricValue{metric}, msgBacklog > s.metadata.activationMsgBacklogThreshold, nil } func (s *pulsarScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec { diff --git a/pkg/scalers/pulsar_scaler_test.go b/pkg/scalers/pulsar_scaler_test.go index 2146c85e9e5..549546ef795 100644 --- a/pkg/scalers/pulsar_scaler_test.go +++ b/pkg/scalers/pulsar_scaler_test.go @@ -231,7 +231,10 @@ func TestPulsarIsActive(t *testing.T) { t.Fatal("Failed:", err) } - active, err := mockPulsarScaler.IsActive(context.TODO()) + metricSpec := mockPulsarScaler.GetMetricSpecForScaling(context.TODO()) + metricName := metricSpec[0].External.Metric.Name + + _, active, err := mockPulsarScaler.GetMetricsAndActivity(context.TODO(), metricName) if err != nil { t.Fatal("Failed:", err) } @@ -252,7 +255,10 @@ func TestPulsarIsActiveWithAuthParams(t *testing.T) { t.Fatal("Failed:", err) } - active, err := mockPulsarScaler.IsActive(context.TODO()) + metricSpec := mockPulsarScaler.GetMetricSpecForScaling(context.TODO()) + metricName := 
metricSpec[0].External.Metric.Name + + _, active, err := mockPulsarScaler.GetMetricsAndActivity(context.TODO(), metricName) if err != nil && !testData.metadataTestData.isError { t.Fatal("Failed:", err) } @@ -276,7 +282,7 @@ func TestPulsarGetMetric(t *testing.T) { metricSpec := mockPulsarScaler.GetMetricSpecForScaling(context.TODO()) metricName := metricSpec[0].External.Metric.Name - metric, err := mockPulsarScaler.GetMetrics(context.TODO(), metricName) + metric, _, err := mockPulsarScaler.GetMetricsAndActivity(context.TODO(), metricName) if err != nil { t.Fatal("Failed:", err) } diff --git a/pkg/scalers/rabbitmq_scaler.go b/pkg/scalers/rabbitmq_scaler.go index a449f060ae4..eb937516343 100644 --- a/pkg/scalers/rabbitmq_scaler.go +++ b/pkg/scalers/rabbitmq_scaler.go @@ -380,19 +380,6 @@ func (s *rabbitMQScaler) Close(context.Context) error { return nil } -// IsActive returns true if there are pending messages to be processed -func (s *rabbitMQScaler) IsActive(ctx context.Context) (bool, error) { - messages, publishRate, err := s.getQueueStatus() - if err != nil { - return false, s.anonimizeRabbitMQError(err) - } - - if s.metadata.mode == rabbitModeQueueLength { - return float64(messages) > s.metadata.activationValue, nil - } - return publishRate > s.metadata.activationValue || float64(messages) > s.metadata.activationValue, nil -} - func (s *rabbitMQScaler) getQueueStatus() (int64, float64, error) { if s.metadata.protocol == httpProtocol { info, err := s.getQueueInfoViaHTTP() @@ -499,21 +486,24 @@ func (s *rabbitMQScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpe return []v2.MetricSpec{metricSpec} } -// GetMetrics returns value for a supported metric and an error if there is a problem getting the metric -func (s *rabbitMQScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { +// GetMetricsAndActivity returns value for a supported metric and an error if there is a problem getting the metric +func (s 
*rabbitMQScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { messages, publishRate, err := s.getQueueStatus() if err != nil { - return []external_metrics.ExternalMetricValue{}, s.anonimizeRabbitMQError(err) + return []external_metrics.ExternalMetricValue{}, false, s.anonimizeRabbitMQError(err) } var metric external_metrics.ExternalMetricValue + var isActive bool if s.metadata.mode == rabbitModeQueueLength { metric = GenerateMetricInMili(metricName, float64(messages)) + isActive = float64(messages) > s.metadata.activationValue } else { metric = GenerateMetricInMili(metricName, publishRate) + isActive = publishRate > s.metadata.activationValue || float64(messages) > s.metadata.activationValue } - return append([]external_metrics.ExternalMetricValue{}, metric), nil + return []external_metrics.ExternalMetricValue{metric}, isActive, nil } func getComposedQueue(s *rabbitMQScaler, q []queueInfo) (queueInfo, error) { diff --git a/pkg/scalers/rabbitmq_scaler_test.go b/pkg/scalers/rabbitmq_scaler_test.go index 7bc7f74d8eb..3cb8a8b491e 100644 --- a/pkg/scalers/rabbitmq_scaler_test.go +++ b/pkg/scalers/rabbitmq_scaler_test.go @@ -263,7 +263,7 @@ func TestGetQueueInfo(t *testing.T) { } ctx := context.TODO() - active, err := s.IsActive(ctx) + _, active, err := s.GetMetricsAndActivity(ctx, "Metric") if testData.responseStatus == http.StatusOK { if err != nil { @@ -400,7 +400,7 @@ func TestGetQueueInfoWithRegex(t *testing.T) { } ctx := context.TODO() - active, err := s.IsActive(ctx) + _, active, err := s.GetMetricsAndActivity(ctx, "Metric") if testData.responseStatus == http.StatusOK { if err != nil { @@ -479,7 +479,7 @@ func TestGetPageSizeWithRegex(t *testing.T) { } ctx := context.TODO() - active, err := s.IsActive(ctx) + _, active, err := s.GetMetricsAndActivity(ctx, "Metric") if err != nil { t.Error("Expect success", err) @@ -600,7 +600,7 @@ func TestRegexQueueMissingError(t *testing.T) { } ctx := 
context.TODO() - _, err = s.IsActive(ctx) + _, _, err = s.GetMetricsAndActivity(ctx, "Metric") if err != nil && !testData.isError { t.Error("Expected success but got error", err) } diff --git a/pkg/scalers/redis_scaler.go b/pkg/scalers/redis_scaler.go index b8f6120bce5..f2cb4dc293d 100644 --- a/pkg/scalers/redis_scaler.go +++ b/pkg/scalers/redis_scaler.go @@ -219,18 +219,6 @@ func parseRedisMetadata(config *ScalerConfig, parserFn redisAddressParser) (*red return &meta, nil } -// IsActive checks if there is any element in the Redis list -func (s *redisScaler) IsActive(ctx context.Context) (bool, error) { - length, err := s.getListLengthFn(ctx) - - if err != nil { - s.logger.Error(err, "error") - return false, err - } - - return length > s.metadata.activationListLength, nil -} - func (s *redisScaler) Close(context.Context) error { return s.closeFn() } @@ -250,18 +238,18 @@ func (s *redisScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec { return []v2.MetricSpec{metricSpec} } -// GetMetrics connects to Redis and finds the length of the list -func (s *redisScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { +// GetMetricsAndActivity connects to Redis and finds the length of the list +func (s *redisScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { listLen, err := s.getListLengthFn(ctx) if err != nil { s.logger.Error(err, "error getting list length") - return []external_metrics.ExternalMetricValue{}, err + return []external_metrics.ExternalMetricValue{}, false, err } metric := GenerateMetricInMili(metricName, float64(listLen)) - return append([]external_metrics.ExternalMetricValue{}, metric), nil + return []external_metrics.ExternalMetricValue{metric}, listLen > s.metadata.activationListLength, nil } func parseRedisAddress(metadata, resolvedEnv, authParams map[string]string) (redisConnectionInfo, error) { diff --git 
a/pkg/scalers/redis_streams_scaler.go b/pkg/scalers/redis_streams_scaler.go index 349a731cc9e..fad5782c1ae 100644 --- a/pkg/scalers/redis_streams_scaler.go +++ b/pkg/scalers/redis_streams_scaler.go @@ -193,18 +193,6 @@ func parseRedisStreamsMetadata(config *ScalerConfig, parseFn redisAddressParser) return &meta, nil } -// IsActive checks if there are pending entries in the 'Pending Entries List' for consumer group of a stream -func (s *redisStreamsScaler) IsActive(ctx context.Context) (bool, error) { - count, err := s.getPendingEntriesCountFn(ctx) - - if err != nil { - s.logger.Error(err, "error") - return false, err - } - - return count > 0, nil -} - func (s *redisStreamsScaler) Close(context.Context) error { return s.closeFn() } @@ -221,15 +209,16 @@ func (s *redisStreamsScaler) GetMetricSpecForScaling(context.Context) []v2.Metri return []v2.MetricSpec{metricSpec} } -// GetMetrics fetches the number of pending entries for a consumer group in a stream -func (s *redisStreamsScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { +// GetMetricsAndActivity fetches the number of pending entries for a consumer group in a stream +func (s *redisStreamsScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { pendingEntriesCount, err := s.getPendingEntriesCountFn(ctx) if err != nil { s.logger.Error(err, "error fetching pending entries count") - return []external_metrics.ExternalMetricValue{}, err + return []external_metrics.ExternalMetricValue{}, false, err } metric := GenerateMetricInMili(metricName, float64(pendingEntriesCount)) - return append([]external_metrics.ExternalMetricValue{}, metric), nil + + return []external_metrics.ExternalMetricValue{metric}, pendingEntriesCount > 0, nil } diff --git a/pkg/scalers/scaler.go b/pkg/scalers/scaler.go index 8f8554df7c5..c8dad8e0a90 100644 --- a/pkg/scalers/scaler.go +++ b/pkg/scalers/scaler.go @@ -41,16 
+41,13 @@ func init() { // Scaler interface type Scaler interface { - - // The scaler returns the metric values for a metric Name and criteria matching the selector - GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) + // The scaler returns the metric values and activity for a metric Name + GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) // Returns the metrics based on which this scaler determines that the ScaleTarget scales. This is used to construct the HPA spec that is created for // this scaled object. The labels used should match the selectors used in GetMetrics GetMetricSpecForScaling(ctx context.Context) []v2.MetricSpec - IsActive(ctx context.Context) (bool, error) - // Close any resources that need disposing when scaler is no longer used or destroyed Close(ctx context.Context) error } @@ -80,6 +77,10 @@ type ScalerConfig struct { // Name of the trigger TriggerName string + // Marks whether we should query metrics only during the polling interval + // Any requests for metrics in between are read from the cache + TriggerUseCachedMetrics bool + // TriggerMetadata TriggerMetadata map[string]string diff --git a/pkg/scalers/selenium_grid_scaler.go b/pkg/scalers/selenium_grid_scaler.go index 7273d572ac2..32a71398e0d 100644 --- a/pkg/scalers/selenium_grid_scaler.go +++ b/pkg/scalers/selenium_grid_scaler.go @@ -152,15 +152,15 @@ func (s *seleniumGridScaler) Close(context.Context) error { return nil } -func (s *seleniumGridScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { +func (s *seleniumGridScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { sessions, err := s.getSessionsCount(ctx, s.logger) if err != nil { - return []external_metrics.ExternalMetricValue{}, fmt.Errorf("error requesting selenium grid endpoint: %s", err) 
+ return []external_metrics.ExternalMetricValue{}, false, fmt.Errorf("error requesting selenium grid endpoint: %s", err) } metric := GenerateMetricInMili(metricName, float64(sessions)) - return append([]external_metrics.ExternalMetricValue{}, metric), nil + return []external_metrics.ExternalMetricValue{metric}, sessions > s.metadata.activationThreshold, nil } func (s *seleniumGridScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec { @@ -177,15 +177,6 @@ func (s *seleniumGridScaler) GetMetricSpecForScaling(context.Context) []v2.Metri return []v2.MetricSpec{metricSpec} } -func (s *seleniumGridScaler) IsActive(ctx context.Context) (bool, error) { - v, err := s.getSessionsCount(ctx, s.logger) - if err != nil { - return false, err - } - - return v > s.metadata.activationThreshold, nil -} - func (s *seleniumGridScaler) getSessionsCount(ctx context.Context, logger logr.Logger) (int64, error) { body, err := json.Marshal(map[string]string{ "query": "{ grid { maxSession, nodeCount }, sessionsInfo { sessionQueueRequests, sessions { id, capabilities, nodeId } } }", diff --git a/pkg/scalers/solace_scaler.go b/pkg/scalers/solace_scaler.go index 91e67744a13..36a7c0207e8 100644 --- a/pkg/scalers/solace_scaler.go +++ b/pkg/scalers/solace_scaler.go @@ -347,12 +347,13 @@ func (s *SolaceScaler) getSolaceQueueMetricsFromSEMP(ctx context.Context) (Solac // INTERFACE METHOD // Call SEMP API to retrieve metrics // returns value for named metric -func (s *SolaceScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { +// returns true if queue messageCount > 0 || msgSpoolUsage > 0 +func (s *SolaceScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { var metricValues, mv SolaceMetricValues var mve error if mv, mve = s.getSolaceQueueMetricsFromSEMP(ctx); mve != nil { s.logger.Error(mve, "call to semp endpoint failed") - return 
[]external_metrics.ExternalMetricValue{}, mve + return []external_metrics.ExternalMetricValue{}, false, mve } metricValues = mv @@ -366,21 +367,9 @@ func (s *SolaceScaler) GetMetrics(ctx context.Context, metricName string) ([]ext // Should never end up here err := fmt.Errorf("unidentified metric: %s", metricName) s.logger.Error(err, "returning error to calling app") - return []external_metrics.ExternalMetricValue{}, err + return []external_metrics.ExternalMetricValue{}, false, err } - return append([]external_metrics.ExternalMetricValue{}, metric), nil -} - -// INTERFACE METHOD -// Call SEMP API to retrieve metrics -// IsActive returns true if queue messageCount > 0 || msgSpoolUsage > 0 -func (s *SolaceScaler) IsActive(ctx context.Context) (bool, error) { - metricValues, err := s.getSolaceQueueMetricsFromSEMP(ctx) - if err != nil { - s.logger.Error(err, "call to semp endpoint failed") - return false, err - } - return (metricValues.msgCount > s.metadata.activationMsgCountTarget || metricValues.msgSpoolUsage > s.metadata.activationMsgSpoolUsageTarget), nil + return []external_metrics.ExternalMetricValue{metric}, (metricValues.msgCount > s.metadata.activationMsgCountTarget || metricValues.msgSpoolUsage > s.metadata.activationMsgSpoolUsageTarget), nil } // Do Nothing - Satisfies Interface diff --git a/pkg/scalers/stan_scaler.go b/pkg/scalers/stan_scaler.go index 5dae5fe33d0..a4f9b0c7787 100644 --- a/pkg/scalers/stan_scaler.go +++ b/pkg/scalers/stan_scaler.go @@ -139,45 +139,6 @@ func parseStanMetadata(config *ScalerConfig) (stanMetadata, error) { return meta, nil } -// IsActive determines if we need to scale from zero -func (s *stanScaler) IsActive(ctx context.Context) (bool, error) { - req, err := http.NewRequestWithContext(ctx, "GET", s.metadata.monitoringEndpoint, nil) - if err != nil { - return false, err - } - resp, err := s.httpClient.Do(req) - if err != nil { - s.logger.Error(err, "Unable to access the nats streaming broker monitoring endpoint", 
"natsServerMonitoringEndpoint", s.metadata.monitoringEndpoint) - return false, err - } - - if resp.StatusCode == 404 { - req, err := http.NewRequestWithContext(ctx, "GET", s.metadata.stanChannelsEndpoint, nil) - if err != nil { - return false, err - } - baseResp, err := s.httpClient.Do(req) - if err != nil { - return false, err - } - defer baseResp.Body.Close() - if baseResp.StatusCode == 404 { - s.logger.Info("Streaming broker endpoint returned 404. Please ensure it has been created", "url", s.metadata.monitoringEndpoint, "channelName", s.metadata.subject) - } else { - s.logger.Info("Unable to connect to STAN. Please ensure you have configured the ScaledObject with the correct endpoint.", "baseResp.StatusCode", baseResp.StatusCode, "monitoringEndpoint", s.metadata.monitoringEndpoint) - } - - return false, err - } - - defer resp.Body.Close() - if err := json.NewDecoder(resp.Body).Decode(&s.channelInfo); err != nil { - s.logger.Error(err, "Unable to decode channel info as %v", err) - return false, err - } - return s.hasPendingMessage() || s.getMaxMsgLag() > s.metadata.activationLagThreshold, nil -} - func getSTANChannelsEndpoint(useHTTPS bool, natsServerEndpoint string) string { protocol := natsStreamingHTTPProtocol if useHTTPS { @@ -240,28 +201,49 @@ func (s *stanScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec { return []v2.MetricSpec{metricSpec} } -// GetMetrics returns value for a supported metric and an error if there is a problem getting the metric -func (s *stanScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { +// GetMetricsAndActivity returns value for a supported metric and an error if there is a problem getting the metric +func (s *stanScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { req, err := http.NewRequestWithContext(ctx, "GET", s.metadata.monitoringEndpoint, nil) if err != nil { - return nil, err + 
return []external_metrics.ExternalMetricValue{}, false, err } resp, err := s.httpClient.Do(req) if err != nil { s.logger.Error(err, "Unable to access the nats streaming broker monitoring endpoint", "monitoringEndpoint", s.metadata.monitoringEndpoint) - return []external_metrics.ExternalMetricValue{}, err + return []external_metrics.ExternalMetricValue{}, false, err + } + + if resp.StatusCode == 404 { + req, err := http.NewRequestWithContext(ctx, "GET", s.metadata.stanChannelsEndpoint, nil) + if err != nil { + return []external_metrics.ExternalMetricValue{}, false, err + } + baseResp, err := s.httpClient.Do(req) + if err != nil { + return []external_metrics.ExternalMetricValue{}, false, err + } + defer baseResp.Body.Close() + if baseResp.StatusCode == 404 { + s.logger.Info("Streaming broker endpoint returned 404. Please ensure it has been created", "url", s.metadata.monitoringEndpoint, "channelName", s.metadata.subject) + } else { + s.logger.Info("Unable to connect to STAN. Please ensure you have configured the ScaledObject with the correct endpoint.", "baseResp.StatusCode", baseResp.StatusCode, "monitoringEndpoint", s.metadata.monitoringEndpoint) + } + + return []external_metrics.ExternalMetricValue{}, false, err } defer resp.Body.Close() if err := json.NewDecoder(resp.Body).Decode(&s.channelInfo); err != nil { s.logger.Error(err, "Unable to decode channel info as %v", err) - return []external_metrics.ExternalMetricValue{}, err + return []external_metrics.ExternalMetricValue{}, false, err } totalLag := s.getMaxMsgLag() s.logger.V(1).Info("Stan scaler: Providing metrics based on totalLag, threshold", "totalLag", totalLag, "lagThreshold", s.metadata.lagThreshold) + metric := GenerateMetricInMili(metricName, float64(totalLag)) - return append([]external_metrics.ExternalMetricValue{}, metric), nil + + return []external_metrics.ExternalMetricValue{metric}, s.hasPendingMessage() || totalLag > s.metadata.activationLagThreshold, nil } // Nothing to close here. 
diff --git a/pkg/scaling/cache/metricscache/metrics_record.go b/pkg/scaling/cache/metricscache/metrics_record.go new file mode 100644 index 00000000000..d640b8281a3 --- /dev/null +++ b/pkg/scaling/cache/metricscache/metrics_record.go @@ -0,0 +1,45 @@ +package metricscache + +import ( + "sync" + + "k8s.io/metrics/pkg/apis/external_metrics" +) + +type MetricsRecord struct { + IsActive bool + Metric []external_metrics.ExternalMetricValue + ScalerError error +} + +type MetricsCache struct { + metricRecords map[string]map[string]MetricsRecord + lock *sync.RWMutex +} + +func NewMetricsCache() MetricsCache { + return MetricsCache{ + metricRecords: map[string]map[string]MetricsRecord{}, + lock: &sync.RWMutex{}, + } +} + +func (mc *MetricsCache) ReadRecord(scaledObjectIdentifier, metricName string) (MetricsRecord, bool) { + mc.lock.RLock() + defer mc.lock.RUnlock() + record, ok := mc.metricRecords[scaledObjectIdentifier][metricName] + + return record, ok +} + +func (mc *MetricsCache) StoreRecords(scaledObjectIdentifier string, metricsRecords map[string]MetricsRecord) { + mc.lock.Lock() + defer mc.lock.Unlock() + mc.metricRecords[scaledObjectIdentifier] = metricsRecords +} + +func (mc *MetricsCache) Delete(scaledObjectIdentifier string) { + mc.lock.Lock() + defer mc.lock.Unlock() + delete(mc.metricRecords, scaledObjectIdentifier) +} diff --git a/pkg/scaling/cache/scalers_cache.go b/pkg/scaling/cache/scalers_cache.go index d7d3b33a275..3059fb7e33d 100644 --- a/pkg/scaling/cache/scalers_cache.go +++ b/pkg/scaling/cache/scalers_cache.go @@ -21,7 +21,6 @@ import ( "fmt" "math" - "github.com/go-logr/logr" v2 "k8s.io/api/autoscaling/v2" corev1 "k8s.io/api/core/v1" "k8s.io/client-go/tools/record" @@ -31,13 +30,14 @@ import ( kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1" "github.com/kedacore/keda/v2/pkg/eventreason" "github.com/kedacore/keda/v2/pkg/scalers" + "github.com/kedacore/keda/v2/pkg/scaling/cache/metricscache" ) +var log = logf.Log.WithName("scalers_cache") 
+ type ScalersCache struct { ScaledObject *kedav1alpha1.ScaledObject - Generation int64 Scalers []ScalerBuilder - Logger logr.Logger Recorder record.EventRecorder } @@ -68,56 +68,104 @@ func (c *ScalersCache) GetPushScalers() []scalers.PushScaler { return result } -func (c *ScalersCache) GetMetricsForScaler(ctx context.Context, id int, metricName string) ([]external_metrics.ExternalMetricValue, error) { - if id < 0 || id >= len(c.Scalers) { - return nil, fmt.Errorf("scaler with id %d not found. Len = %d", id, len(c.Scalers)) +// GetMetricsForScaler returns metric value for a scaler identified by the metric name +// and by the input index (from the list of scalers in this ScaledObject) +func (c *ScalersCache) GetMetricsForScaler(ctx context.Context, index int, metricName string) ([]external_metrics.ExternalMetricValue, error) { + if index < 0 || index >= len(c.Scalers) { + return nil, fmt.Errorf("scaler with id %d not found. Len = %d", index, len(c.Scalers)) } - m, err := c.Scalers[id].Scaler.GetMetrics(ctx, metricName) + m, _, err := c.Scalers[index].Scaler.GetMetricsAndActivity(ctx, metricName) if err == nil { return m, nil } - ns, err := c.refreshScaler(ctx, id) + ns, err := c.refreshScaler(ctx, index) if err != nil { return nil, err } - return ns.GetMetrics(ctx, metricName) + m, _, err = ns.GetMetricsAndActivity(ctx, metricName) + return m, err } -func (c *ScalersCache) IsScaledObjectActive(ctx context.Context, scaledObject *kedav1alpha1.ScaledObject) (bool, bool, []external_metrics.ExternalMetricValue) { - isActive := false +// GetScaledObjectState returns whether the input ScaledObject is active as the first parameter, +// the second parameter indicates whether there was any error during querying scalers, +// the third parameter returns a map of metrics records - a metric value for each scaler and its metric +func (c *ScalersCache) GetScaledObjectState(ctx context.Context, scaledObject *kedav1alpha1.ScaledObject) (bool, bool, map[string]metricscache.MetricsRecord)
{ + logger := log.WithValues("scaledobject.Name", scaledObject.Name, "scaledObject.Namespace", scaledObject.Namespace, "scaleTarget.Name", scaledObject.Spec.ScaleTargetRef.Name) + + isScaledObjectActive := false isError := false + metricsRecord := map[string]metricscache.MetricsRecord{} + // Let's collect status of all scalers, no matter if any scaler raises error or is active for i, s := range c.Scalers { - isTriggerActive, err := s.Scaler.IsActive(ctx) - if err != nil { + metricSpec := s.Scaler.GetMetricSpecForScaling(ctx) + + // no metric spec returned for a scaler -> this could signal error during connection to the scaler + // usually in case this is an external scaler + // let's try to refresh the scaler and query metrics spec again + if len(metricSpec) < 1 { + var err error var ns scalers.Scaler + ns, err = c.refreshScaler(ctx, i) if err == nil { - isTriggerActive, err = ns.IsActive(ctx) + metricSpec = ns.GetMetricSpecForScaling(ctx) + if len(metricSpec) < 1 { + isError = true + err = fmt.Errorf("error getting metrics spec") + logger.Error(err, "error getting metric spec for the scaler", "scaler", s.ScalerConfig.TriggerName) + c.Recorder.Event(scaledObject, corev1.EventTypeWarning, eventreason.KEDAScalerFailed, err.Error()) + } + } else { + isError = true + logger.Error(err, "error getting metric spec for the scaler", "scaler", s.ScalerConfig.TriggerName) + c.Recorder.Event(scaledObject, corev1.EventTypeWarning, eventreason.KEDAScalerFailed, err.Error()) } } - logger := c.Logger.WithValues("scaledobject.Name", scaledObject.Name, "scaledObject.Namespace", scaledObject.Namespace, - "scaleTarget.Name", scaledObject.Spec.ScaleTargetRef.Name) + for _, spec := range metricSpec { + // skip cpu/memory resource scaler, these scalers are also always Active + if spec.External == nil { + isScaledObjectActive = true + continue + } - if err != nil { - isError = true - logger.Error(err, "Error getting scale decision") - c.Recorder.Event(scaledObject, 
corev1.EventTypeWarning, eventreason.KEDAScalerFailed, err.Error()) - } else if isTriggerActive { - isActive = true - if externalMetricsSpec := s.Scaler.GetMetricSpecForScaling(ctx)[0].External; externalMetricsSpec != nil { - logger.V(1).Info("Scaler for scaledObject is active", "Metrics Name", externalMetricsSpec.Metric.Name) + metric, isMetricActive, err := s.Scaler.GetMetricsAndActivity(ctx, spec.External.Metric.Name) + if err != nil { + var ns scalers.Scaler + ns, err = c.refreshScaler(ctx, i) + if err == nil { + metric, isMetricActive, err = ns.GetMetricsAndActivity(ctx, spec.External.Metric.Name) + } } - if resourceMetricsSpec := s.Scaler.GetMetricSpecForScaling(ctx)[0].Resource; resourceMetricsSpec != nil { - logger.V(1).Info("Scaler for scaledObject is active", "Metrics Name", resourceMetricsSpec.Name) + + if s.ScalerConfig.TriggerUseCachedMetrics { + metricsRecord[spec.External.Metric.Name] = metricscache.MetricsRecord{ + IsActive: isMetricActive, + Metric: metric, + ScalerError: err, + } + } + + if err != nil { + isError = true + logger.Error(err, "error getting scale decision") + c.Recorder.Event(scaledObject, corev1.EventTypeWarning, eventreason.KEDAScalerFailed, err.Error()) + } else if isMetricActive { + isScaledObjectActive = true + if spec.External != nil { + logger.V(1).Info("Scaler for scaledObject is active", "Metrics Name", spec.External.Metric.Name) + } + if spec.Resource != nil { + logger.V(1).Info("Scaler for scaledObject is active", "Metrics Name", spec.Resource.Name) + } } } } - return isActive, isError, []external_metrics.ExternalMetricValue{} + return isScaledObjectActive, isError, metricsRecord } func (c *ScalersCache) IsScaledJobActive(ctx context.Context, scaledJob *kedav1alpha1.ScaledJob) (bool, int64, int64) { @@ -180,27 +228,6 @@ func (c *ScalersCache) IsScaledJobActive(ctx context.Context, scaledJob *kedav1a return isActive, ceilToInt64(queueLength), ceilToInt64(maxValue) } -// TODO this is probably not needed, revisit whole 
package -func (c *ScalersCache) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { - var metrics []external_metrics.ExternalMetricValue - for i, s := range c.Scalers { - m, err := s.Scaler.GetMetrics(ctx, metricName) - if err != nil { - ns, err := c.refreshScaler(ctx, i) - if err != nil { - return metrics, err - } - m, err = ns.GetMetrics(ctx, metricName) - if err != nil { - return metrics, err - } - } - metrics = append(metrics, m...) - } - - return metrics, nil -} - func (c *ScalersCache) refreshScaler(ctx context.Context, id int) (scalers.Scaler, error) { if id < 0 || id >= len(c.Scalers) { return nil, fmt.Errorf("scaler with id %d not found. Len = %d", id, len(c.Scalers)) @@ -236,7 +263,7 @@ func (c *ScalersCache) Close(ctx context.Context) { for _, s := range scalers { err := s.Scaler.Close(ctx) if err != nil { - c.Logger.Error(err, "error closing scaler", "scaler", s) + log.Error(err, "error closing scaler", "scaler", s) } } } @@ -258,7 +285,7 @@ func (c *ScalersCache) getScaledJobMetrics(ctx context.Context, scaledJob *kedav maxValue := float64(0) scalerType := fmt.Sprintf("%T:", s) - scalerLogger := c.Logger.WithValues("ScaledJob", scaledJob.Name, "Scaler", scalerType) + scalerLogger := log.WithValues("ScaledJob", scaledJob.Name, "Scaler", scalerType) metricSpecs := s.Scaler.GetMetricSpecForScaling(ctx) @@ -268,33 +295,26 @@ func (c *ScalersCache) getScaledJobMetrics(ctx context.Context, scaledJob *kedav continue } - isTriggerActive, err := s.Scaler.IsActive(ctx) + // TODO here we should probably loop through all metrics in a Scaler + // as it is done for ScaledObject + metrics, isTriggerActive, err := s.Scaler.GetMetricsAndActivity(ctx, metricSpecs[0].External.Metric.Name) if err != nil { var ns scalers.Scaler ns, err = c.refreshScaler(ctx, i) if err == nil { - isTriggerActive, err = ns.IsActive(ctx) + metrics, isTriggerActive, err = ns.GetMetricsAndActivity(ctx, metricSpecs[0].External.Metric.Name) } } if 
err != nil { - scalerLogger.V(1).Info("Error getting scaler.IsActive, but continue", "Error", err) + scalerLogger.V(1).Info("Error getting scaler metrics and activity, but continue", "error", err) c.Recorder.Event(scaledJob, corev1.EventTypeWarning, eventreason.KEDAScalerFailed, err.Error()) continue } targetAverageValue = getTargetAverageValue(metricSpecs) - // TODO this should probably be `cache.GetMetricsForScaler(ctx, scalerIndex, metricName)` - metrics, err := s.Scaler.GetMetrics(ctx, metricSpecs[0].External.Metric.Name) - if err != nil { - scalerLogger.V(1).Info("Error getting scaler metrics, but continue", "Error", err) - c.Recorder.Event(scaledJob, corev1.EventTypeWarning, eventreason.KEDAScalerFailed, err.Error()) - continue - } - var metricValue float64 - for _, m := range metrics { if m.MetricName == metricSpecs[0].External.Metric.Name { metricValue = m.Value.AsApproximateFloat64() diff --git a/pkg/scaling/cache/scalers_cache_test.go b/pkg/scaling/cache/scalers_cache_test.go index fd73a8a01ab..a8559288d7f 100644 --- a/pkg/scaling/cache/scalers_cache_test.go +++ b/pkg/scaling/cache/scalers_cache_test.go @@ -5,7 +5,6 @@ import ( "fmt" "testing" - "github.com/go-logr/logr" "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" v2 "k8s.io/api/autoscaling/v2" @@ -70,7 +69,7 @@ func TestIsScaledJobActive(t *testing.T) { recorder := record.NewFakeRecorder(1) // Keep the current behavior // Assme 1 trigger only - scaledJobSingle := createScaledObject(0, 100, "") // testing default = max + scaledJobSingle := createScaledJob(0, 100, "") // testing default = max scalerSingle := []ScalerBuilder{{ Scaler: createScaler(ctrl, int64(20), int64(2), true, metricName), Factory: func() (scalers.Scaler, *scalers.ScalerConfig, error) { @@ -80,7 +79,6 @@ func TestIsScaledJobActive(t *testing.T) { cache := ScalersCache{ Scalers: scalerSingle, - Logger: logr.Discard(), Recorder: recorder, } @@ -100,7 +98,6 @@ func TestIsScaledJobActive(t *testing.T) { cache = 
ScalersCache{ Scalers: scalerSingle, - Logger: logr.Discard(), Recorder: recorder, } @@ -120,7 +117,7 @@ func TestIsScaledJobActive(t *testing.T) { } for index, scalerTestData := range scalerTestDatam { - scaledJob := createScaledObject(scalerTestData.MinReplicaCount, scalerTestData.MaxReplicaCount, scalerTestData.MultipleScalersCalculation) + scaledJob := createScaledJob(scalerTestData.MinReplicaCount, scalerTestData.MaxReplicaCount, scalerTestData.MultipleScalersCalculation) scalersToTest := []ScalerBuilder{{ Scaler: createScaler(ctrl, scalerTestData.Scaler1QueueLength, scalerTestData.Scaler1AverageValue, scalerTestData.Scaler1IsActive, scalerTestData.MetricName), Factory: func() (scalers.Scaler, *scalers.ScalerConfig, error) { @@ -145,7 +142,6 @@ func TestIsScaledJobActive(t *testing.T) { cache = ScalersCache{ Scalers: scalersToTest, - Logger: logr.Discard(), Recorder: recorder, } fmt.Printf("index: %d", index) @@ -164,7 +160,7 @@ func TestIsScaledJobActiveIfQueueEmptyButMinReplicaCountGreaterZero(t *testing.T recorder := record.NewFakeRecorder(1) // Keep the current behavior // Assme 1 trigger only - scaledJobSingle := createScaledObject(1, 100, "") // testing default = max + scaledJobSingle := createScaledJob(1, 100, "") // testing default = max scalerSingle := []ScalerBuilder{{ Scaler: createScaler(ctrl, int64(0), int64(1), true, metricName), Factory: func() (scalers.Scaler, *scalers.ScalerConfig, error) { @@ -174,7 +170,6 @@ func TestIsScaledJobActiveIfQueueEmptyButMinReplicaCountGreaterZero(t *testing.T cache := ScalersCache{ Scalers: scalerSingle, - Logger: logr.Discard(), Recorder: recorder, } @@ -248,7 +243,7 @@ type scalerTestData struct { MinReplicaCount int32 } -func createScaledObject(minReplicaCount int32, maxReplicaCount int32, multipleScalersCalculation string) *kedav1alpha1.ScaledJob { +func createScaledJob(minReplicaCount int32, maxReplicaCount int32, multipleScalersCalculation string) *kedav1alpha1.ScaledJob { if multipleScalersCalculation != 
"" { return &kedav1alpha1.ScaledJob{ Spec: kedav1alpha1.ScaledJobSpec{ @@ -278,9 +273,8 @@ func createScaler(ctrl *gomock.Controller, queueLength int64, averageValue int64 Value: *resource.NewQuantity(queueLength, resource.DecimalSI), }, } - scaler.EXPECT().IsActive(gomock.Any()).Return(isActive, nil) scaler.EXPECT().GetMetricSpecForScaling(gomock.Any()).Return(metricsSpecs) - scaler.EXPECT().GetMetrics(gomock.Any(), gomock.Any()).Return(metrics, nil) + scaler.EXPECT().GetMetricsAndActivity(gomock.Any(), gomock.Any()).Return(metrics, isActive, nil) scaler.EXPECT().Close(gomock.Any()) return scaler } diff --git a/pkg/scaling/scale_handler.go b/pkg/scaling/scale_handler.go index 2d7159dc661..fa0ca9da37e 100644 --- a/pkg/scaling/scale_handler.go +++ b/pkg/scaling/scale_handler.go @@ -41,6 +41,7 @@ import ( "github.com/kedacore/keda/v2/pkg/prommetrics" "github.com/kedacore/keda/v2/pkg/scalers" "github.com/kedacore/keda/v2/pkg/scaling/cache" + "github.com/kedacore/keda/v2/pkg/scaling/cache/metricscache" "github.com/kedacore/keda/v2/pkg/scaling/executor" "github.com/kedacore/keda/v2/pkg/scaling/resolver" ) @@ -53,38 +54,40 @@ type ScaleHandler interface { GetScalersCache(ctx context.Context, scalableObject interface{}) (*cache.ScalersCache, error) ClearScalersCache(ctx context.Context, scalableObject interface{}) error - GetExternalMetrics(ctx context.Context, scaledObjectName, scaledObjectNamespace, metricName string) (*external_metrics.ExternalMetricValueList, *metricsserviceapi.PromMetricsMsg, error) + GetScaledObjectMetrics(ctx context.Context, scaledObjectName, scaledObjectNamespace, metricName string) (*external_metrics.ExternalMetricValueList, *metricsserviceapi.PromMetricsMsg, error) } type scaleHandler struct { - client client.Client - logger logr.Logger - scaleLoopContexts *sync.Map - scaleExecutor executor.ScaleExecutor - globalHTTPTimeout time.Duration - recorder record.EventRecorder - scalerCaches map[string]*cache.ScalersCache - lock *sync.RWMutex - 
secretsLister corev1listers.SecretLister + client client.Client + logger logr.Logger + scaleLoopContexts *sync.Map + scaleExecutor executor.ScaleExecutor + globalHTTPTimeout time.Duration + recorder record.EventRecorder + scalerCaches map[string]*cache.ScalersCache + scalerCachesLock *sync.RWMutex + scaledObjectsMetricCache metricscache.MetricsCache + secretsLister corev1listers.SecretLister } // NewScaleHandler creates a ScaleHandler object func NewScaleHandler(client client.Client, scaleClient scale.ScalesGetter, reconcilerScheme *runtime.Scheme, globalHTTPTimeout time.Duration, recorder record.EventRecorder, secretsLister corev1listers.SecretLister) ScaleHandler { return &scaleHandler{ - client: client, - logger: logf.Log.WithName("scalehandler"), - scaleLoopContexts: &sync.Map{}, - scaleExecutor: executor.NewScaleExecutor(client, scaleClient, reconcilerScheme, recorder), - globalHTTPTimeout: globalHTTPTimeout, - recorder: recorder, - scalerCaches: map[string]*cache.ScalersCache{}, - lock: &sync.RWMutex{}, - secretsLister: secretsLister, + client: client, + logger: logf.Log.WithName("scalehandler"), + scaleLoopContexts: &sync.Map{}, + scaleExecutor: executor.NewScaleExecutor(client, scaleClient, reconcilerScheme, recorder), + globalHTTPTimeout: globalHTTPTimeout, + recorder: recorder, + scalerCaches: map[string]*cache.ScalersCache{}, + scalerCachesLock: &sync.RWMutex{}, + scaledObjectsMetricCache: metricscache.NewMetricsCache(), + secretsLister: secretsLister, } } func (h *scaleHandler) HandleScalableObject(ctx context.Context, scalableObject interface{}) error { - withTriggers, err := asDuckWithTriggers(scalableObject) + withTriggers, err := kedav1alpha1.AsDuckWithTriggers(scalableObject) if err != nil { h.logger.Error(err, "error duck typing object into withTrigger") return err @@ -121,7 +124,7 @@ func (h *scaleHandler) HandleScalableObject(ctx context.Context, scalableObject } func (h *scaleHandler) DeleteScalableObject(ctx context.Context, scalableObject 
interface{}) error { - withTriggers, err := asDuckWithTriggers(scalableObject) + withTriggers, err := kedav1alpha1.AsDuckWithTriggers(scalableObject) if err != nil { h.logger.Error(err, "error duck typing object into withTrigger") return err @@ -173,76 +176,86 @@ } } -func (h *scaleHandler) getScalersCacheForScaledObject(ctx context.Context, scaledObjectName, scaledObjectNamespace string) (*cache.ScalersCache, error) { - key := kedav1alpha1.GenerateIdentifier("ScaledObject", scaledObjectNamespace, scaledObjectName) - - h.lock.RLock() - if cache, ok := h.scalerCaches[key]; ok { - h.lock.RUnlock() - return cache, nil - } - h.lock.RUnlock() - - h.lock.Lock() - defer h.lock.Unlock() - if cache, ok := h.scalerCaches[key]; ok { - return cache, nil - } - - scaledObject := &kedav1alpha1.ScaledObject{} - err := h.client.Get(ctx, types.NamespacedName{Name: scaledObjectName, Namespace: scaledObjectNamespace}, scaledObject) - if err != nil { - h.logger.Error(err, "failed to get ScaledObject", "name", scaledObjectName, "namespace", scaledObjectNamespace) - return nil, err - } - - podTemplateSpec, containerName, err := resolver.ResolveScaleTargetPodSpec(ctx, h.client, h.logger, scaledObject) - if err != nil { - return nil, err - } - - withTriggers, err := asDuckWithTriggers(scaledObject) +// GetScalersCache returns cache for input scalableObject, if the object is not found in the cache, it returns a new one +// if the input object is ScaledObject, it also compares the Generation of the input object with the one stored in the cache, +// this is needed for out of scalerLoop invocations of this method (in package `controllers/keda`).
+func (h *scaleHandler) GetScalersCache(ctx context.Context, scalableObject interface{}) (*cache.ScalersCache, error) { + withTriggers, err := kedav1alpha1.AsDuckWithTriggers(scalableObject) if err != nil { return nil, err } + key := withTriggers.GenerateIdentifier() + generation := withTriggers.Generation - scalers, err := h.buildScalers(ctx, withTriggers, podTemplateSpec, containerName) - if err != nil { - return nil, err - } + return h.performGetScalersCache(ctx, key, scalableObject, generation, "", "", "") +} - h.scalerCaches[key] = &cache.ScalersCache{ - ScaledObject: scaledObject, - Scalers: scalers, - Logger: h.logger, - Recorder: h.recorder, - } +// getScalersCacheForScaledObject returns cache for input ScaledObject, referenced by name and namespace +// we don't need to compare the Generation, because this method should be called only inside scale loop, where we have up to date object. +func (h *scaleHandler) getScalersCacheForScaledObject(ctx context.Context, scaledObjectName, scaledObjectNamespace string) (*cache.ScalersCache, error) { + key := kedav1alpha1.GenerateIdentifier("ScaledObject", scaledObjectNamespace, scaledObjectName) - return h.scalerCaches[key], nil + return h.performGetScalersCache(ctx, key, nil, -1, "ScaledObject", scaledObjectNamespace, scaledObjectName) } -// TODO refactor this together with GetScalersCacheForScaledObject() -func (h *scaleHandler) GetScalersCache(ctx context.Context, scalableObject interface{}) (*cache.ScalersCache, error) { - withTriggers, err := asDuckWithTriggers(scalableObject) - if err != nil { - return nil, err +// performGetScalersCache returns cache for input scalableObject, it is common code used by GetScalersCache() and getScalersCacheForScaledObject() methods +func (h *scaleHandler) performGetScalersCache(ctx context.Context, key string, scalableObject interface{}, scalableObjectGeneration int64, scalableObjectKind, scalableObjectNamespace, scalableObjectName string) (*cache.ScalersCache, error) { + 
h.scalerCachesLock.RLock() + if cache, ok := h.scalerCaches[key]; ok { + // generation was specified -> let's include it in the check as well + if scalableObjectGeneration != -1 { + if scalableObject != nil && cache.ScaledObject != nil && cache.ScaledObject.Generation == scalableObjectGeneration { + h.scalerCachesLock.RUnlock() + return cache, nil + } + } else { + h.scalerCachesLock.RUnlock() + return cache, nil + } } + h.scalerCachesLock.RUnlock() - key := withTriggers.GenerateIdentifier() + h.scalerCachesLock.Lock() + defer h.scalerCachesLock.Unlock() + if cache, ok := h.scalerCaches[key]; ok { + // generation was specified -> let's include it in the check as well + if scalableObjectGeneration != -1 { + if scalableObject != nil && cache.ScaledObject != nil && cache.ScaledObject.Generation == scalableObjectGeneration { + return cache, nil + } + } else { + return cache, nil + } + } - h.lock.RLock() - if cache, ok := h.scalerCaches[key]; ok && cache.Generation == withTriggers.Generation { - h.lock.RUnlock() - return cache, nil + if scalableObject == nil { + switch scalableObjectKind { + case "ScaledObject": + scaledObject := &kedav1alpha1.ScaledObject{} + err := h.client.Get(ctx, types.NamespacedName{Name: scalableObjectName, Namespace: scalableObjectNamespace}, scaledObject) + if err != nil { + h.logger.Error(err, "failed to get ScaledObject", "name", scalableObjectName, "namespace", scalableObjectNamespace) + return nil, err + } + scalableObject = scaledObject + case "ScaledJob": + scaledJob := &kedav1alpha1.ScaledJob{} + err := h.client.Get(ctx, types.NamespacedName{Name: scalableObjectName, Namespace: scalableObjectNamespace}, scaledJob) + if err != nil { + h.logger.Error(err, "failed to get ScaledJob", "name", scalableObjectName, "namespace", scalableObjectNamespace) + return nil, err + } + scalableObject = scaledJob + default: + err := fmt.Errorf("unknown ScalableObjectKind, got=%q", scalableObjectKind) + h.logger.Error(err, "unknown kind", "name", 
scalableObjectName, "namespace", scalableObjectNamespace) + return nil, err + } } - h.lock.RUnlock() - h.lock.Lock() - defer h.lock.Unlock() - if cache, ok := h.scalerCaches[key]; ok && cache.Generation == withTriggers.Generation { - return cache, nil - } else if ok { - cache.Close(ctx) + withTriggers, err := kedav1alpha1.AsDuckWithTriggers(scalableObject) + if err != nil { + return nil, err } podTemplateSpec, containerName, err := resolver.ResolveScaleTargetPodSpec(ctx, h.client, h.logger, scalableObject) @@ -256,10 +269,8 @@ func (h *scaleHandler) GetScalersCache(ctx context.Context, scalableObject inter } newCache := &cache.ScalersCache{ - Generation: withTriggers.Generation, - Scalers: scalers, - Logger: h.logger, - Recorder: h.recorder, + Scalers: scalers, + Recorder: h.recorder, } switch obj := scalableObject.(type) { case *kedav1alpha1.ScaledObject: @@ -273,15 +284,17 @@ func (h *scaleHandler) GetScalersCache(ctx context.Context, scalableObject inter } func (h *scaleHandler) ClearScalersCache(ctx context.Context, scalableObject interface{}) error { - withTriggers, err := asDuckWithTriggers(scalableObject) + withTriggers, err := kedav1alpha1.AsDuckWithTriggers(scalableObject) if err != nil { return err } key := withTriggers.GenerateIdentifier() - h.lock.Lock() - defer h.lock.Unlock() + go h.scaledObjectsMetricCache.Delete(key) + + h.scalerCachesLock.Lock() + defer h.scalerCachesLock.Unlock() if cache, ok := h.scalerCaches[key]; ok { h.logger.V(1).WithValues("key", key).Info("Removing entry from ScalersCache") cache.Close(ctx) @@ -340,8 +353,12 @@ func (h *scaleHandler) checkScalers(ctx context.Context, scalableObject interfac h.logger.Error(err, "Error getting scaledObject", "object", scalableObject) return } - isActive, isError, _ := cache.IsScaledObjectActive(ctx, obj) + isActive, isError, metricsRecords := cache.GetScaledObjectState(ctx, obj) h.scaleExecutor.RequestScale(ctx, obj, isActive, isError) + if len(metricsRecords) > 0 { + 
h.logger.V(1).Info("Storing metrics to cache", "scaledObject.Namespace", obj.Namespace, "scaledObject.Name", obj.Name, "metricsRecords", metricsRecords) + h.scaledObjectsMetricCache.StoreRecords(obj.GenerateIdentifier(), metricsRecords) + } case *kedav1alpha1.ScaledJob: err = h.client.Get(ctx, types.NamespacedName{Name: obj.Name, Namespace: obj.Namespace}, obj) if err != nil { @@ -353,7 +370,10 @@ func (h *scaleHandler) checkScalers(ctx context.Context, scalableObject interfac } } -func (h *scaleHandler) GetExternalMetrics(ctx context.Context, scaledObjectName, scaledObjectNamespace, metricName string) (*external_metrics.ExternalMetricValueList, *metricsserviceapi.PromMetricsMsg, error) { +// GetScaledObjectMetrics returns metrics for specified metric name for a ScaledObject identified by its name and namespace. +// The second return value is Prometheus metrics that need to be exposed (used by DEPRECATED Prometheus Server on KEDA Metrics Server) +// It could either query the metric value directly from the scaler or from a cache that's being stored for the scaler.
+func (h *scaleHandler) GetScaledObjectMetrics(ctx context.Context, scaledObjectName, scaledObjectNamespace, metricName string) (*external_metrics.ExternalMetricValueList, *metricsserviceapi.PromMetricsMsg, error) { var matchingMetrics []external_metrics.ExternalMetricValue exportedPromMetrics := metricsserviceapi.PromMetricsMsg{ @@ -381,14 +401,14 @@ func (h *scaleHandler) GetExternalMetrics(ctx context.Context, scaledObjectName, return nil, &exportedPromMetrics, err } - // let's check metrics for all scalers in a ScaledObject scalerError := false - scalers, scalerConfigs := cache.GetScalers() - - h.logger.V(1).WithValues("name", scaledObjectName, "namespace", scaledObjectNamespace, "metricName", metricName, "scalers", scalers).Info("Getting metric value") + scaledObjectIdentifier := scaledObject.GenerateIdentifier() + // let's check metrics for all scalers in a ScaledObject + scalers, scalerConfigs := cache.GetScalers() for scalerIndex := 0; scalerIndex < len(scalers); scalerIndex++ { metricSpecs := scalers[scalerIndex].GetMetricSpecForScaling(ctx) + scalerName := strings.Replace(fmt.Sprintf("%T", scalers[scalerIndex]), "*scalers.", "", 1) if scalerConfigs[scalerIndex].TriggerName != "" { scalerName = scalerConfigs[scalerIndex].TriggerName @@ -402,7 +422,23 @@ func (h *scaleHandler) GetExternalMetrics(ctx context.Context, scaledObjectName, // Filter only the desired metric if strings.EqualFold(metricSpec.External.Metric.Name, metricName) { - metrics, err := cache.GetMetricsForScaler(ctx, scalerIndex, metricName) + var metrics []external_metrics.ExternalMetricValue + + // if cache is defined for this scaler/metric, let's try to hit it first + metricsFoundInCache := false + if scalerConfigs[scalerIndex].TriggerUseCachedMetrics { + var metricsRecord metricscache.MetricsRecord + if metricsRecord, metricsFoundInCache = h.scaledObjectsMetricCache.ReadRecord(scaledObjectIdentifier, metricSpec.External.Metric.Name); metricsFoundInCache { + h.logger.V(1).Info("Reading 
metrics from cache", "scaledObject.Namespace", scaledObjectNamespace, "scaledObject.Name", scaledObjectName, "scaler", scalerName, "metricName", metricSpec.External.Metric.Name, "metricsRecord", metricsRecord) + metrics = metricsRecord.Metric + err = metricsRecord.ScalerError + } + } + + if !metricsFoundInCache { + metrics, err = cache.GetMetricsForScaler(ctx, scalerIndex, metricName) + h.logger.V(1).Info("Getting metrics from scaler", "scaledObject.Namespace", scaledObjectNamespace, "scaledObject.Name", scaledObjectName, "scaler", scalerName, "metricName", metricSpec.External.Metric.Name, "metrics", metrics, "scalerError", err) + } metrics, err = fallback.GetMetricsWithFallback(ctx, h.client, h.logger, metrics, err, metricName, scaledObject, metricSpec) if err != nil { scalerError = true @@ -479,6 +515,7 @@ func (h *scaleHandler) buildScalers(ctx context.Context, withTriggers *kedav1alp ScalableObjectType: withTriggers.Kind, TriggerName: trigger.Name, TriggerMetadata: trigger.Metadata, + TriggerUseCachedMetrics: trigger.UseCachedMetrics, ResolvedEnv: resolvedEnv, AuthParams: make(map[string]string), GlobalHTTPTimeout: h.globalHTTPTimeout, @@ -647,29 +684,3 @@ func buildScaler(ctx context.Context, client client.Client, triggerType string, } // TRIGGERS-END } - -func asDuckWithTriggers(scalableObject interface{}) (*kedav1alpha1.WithTriggers, error) { - switch obj := scalableObject.(type) { - case *kedav1alpha1.ScaledObject: - return &kedav1alpha1.WithTriggers{ - TypeMeta: obj.TypeMeta, - ObjectMeta: obj.ObjectMeta, - Spec: kedav1alpha1.WithTriggersSpec{ - PollingInterval: obj.Spec.PollingInterval, - Triggers: obj.Spec.Triggers, - }, - }, nil - case *kedav1alpha1.ScaledJob: - return &kedav1alpha1.WithTriggers{ - TypeMeta: obj.TypeMeta, - ObjectMeta: obj.ObjectMeta, - Spec: kedav1alpha1.WithTriggersSpec{ - PollingInterval: obj.Spec.PollingInterval, - Triggers: obj.Spec.Triggers, - }, - }, nil - default: - // here could be the conversion from unknown Duck type 
potentially in the future - return nil, fmt.Errorf("unknown scalable object type %v", scalableObject) - } -} diff --git a/pkg/scaling/scale_handler_test.go b/pkg/scaling/scale_handler_test.go index 2acc0e2903b..44d6275aaa2 100644 --- a/pkg/scaling/scale_handler_test.go +++ b/pkg/scaling/scale_handler_test.go @@ -19,34 +19,230 @@ package scaling import ( "context" "errors" + "sync" "testing" + "time" + "github.com/go-logr/logr" "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" v2 "k8s.io/api/autoscaling/v2" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/record" - logf "sigs.k8s.io/controller-runtime/pkg/log" + "k8s.io/metrics/pkg/apis/external_metrics" kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1" + "github.com/kedacore/keda/v2/pkg/mock/mock_client" mock_scalers "github.com/kedacore/keda/v2/pkg/mock/mock_scaler" + "github.com/kedacore/keda/v2/pkg/mock/mock_scaling/mock_executor" "github.com/kedacore/keda/v2/pkg/scalers" "github.com/kedacore/keda/v2/pkg/scaling/cache" + "github.com/kedacore/keda/v2/pkg/scaling/cache/metricscache" ) +func TestGetScaledObjectMetrics_DirectCall(t *testing.T) { + scaledObjectName := "testName" + scaledObjectNamespace := "testNamespace" + metricName := "test-metric-name" + longPollingInterval := int32(300) + + ctrl := gomock.NewController(t) + recorder := record.NewFakeRecorder(1) + mockClient := mock_client.NewMockClient(ctrl) + mockExecutor := mock_executor.NewMockScaleExecutor(ctrl) + mockStatusWriter := mock_client.NewMockStatusWriter(ctrl) + + metricsSpecs := []v2.MetricSpec{createMetricSpec(10, metricName)} + metricValue := scalers.GenerateMetricInMili(metricName, float64(10)) + + metricsRecord := map[string]metricscache.MetricsRecord{} + metricsRecord[metricName] = metricscache.MetricsRecord{ + IsActive: true, + Metric: []external_metrics.ExternalMetricValue{metricValue}, + ScalerError: nil, + } + + scaler := 
mock_scalers.NewMockScaler(ctrl) + // we are going to query metrics directly + scalerConfig := scalers.ScalerConfig{TriggerUseCachedMetrics: false} + factory := func() (scalers.Scaler, *scalers.ScalerConfig, error) { + return scaler, &scalerConfig, nil + } + + scaledObject := kedav1alpha1.ScaledObject{ + ObjectMeta: metav1.ObjectMeta{ + Name: scaledObjectName, + Namespace: scaledObjectNamespace, + }, + Spec: kedav1alpha1.ScaledObjectSpec{ + ScaleTargetRef: &kedav1alpha1.ScaleTarget{ + Name: "test", + }, + PollingInterval: &longPollingInterval, + }, + Status: kedav1alpha1.ScaledObjectStatus{ + ScaleTargetGVKR: &kedav1alpha1.GroupVersionKindResource{ + Group: "apps", + Kind: "Deployment", + }, + }, + } + + scalerCache := cache.ScalersCache{ + ScaledObject: &scaledObject, + Scalers: []cache.ScalerBuilder{{ + Scaler: scaler, + ScalerConfig: scalerConfig, + Factory: factory, + }}, + Recorder: recorder, + } + + caches := map[string]*cache.ScalersCache{} + caches[scaledObject.GenerateIdentifier()] = &scalerCache + + sh := scaleHandler{ + client: mockClient, + logger: logr.Discard(), + scaleLoopContexts: &sync.Map{}, + scaleExecutor: mockExecutor, + globalHTTPTimeout: time.Duration(1000), + recorder: recorder, + scalerCaches: caches, + scalerCachesLock: &sync.RWMutex{}, + scaledObjectsMetricCache: metricscache.NewMetricsCache(), + } + + mockClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) + scaler.EXPECT().GetMetricSpecForScaling(gomock.Any()).Return(metricsSpecs) + scaler.EXPECT().GetMetricsAndActivity(gomock.Any(), gomock.Any()).Return([]external_metrics.ExternalMetricValue{metricValue}, true, nil) + mockExecutor.EXPECT().RequestScale(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()) + sh.checkScalers(context.TODO(), &scaledObject, &sync.RWMutex{}) + + mockClient.EXPECT().Status().Return(mockStatusWriter) + mockStatusWriter.EXPECT().Patch(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) + 
scaler.EXPECT().GetMetricSpecForScaling(gomock.Any()).Return(metricsSpecs) + // hitting directly GetMetricsAndActivity() + scaler.EXPECT().GetMetricsAndActivity(gomock.Any(), gomock.Any()).Return([]external_metrics.ExternalMetricValue{metricValue}, true, nil) + metrics, promMsg, err := sh.GetScaledObjectMetrics(context.TODO(), scaledObjectName, scaledObjectNamespace, metricName) + assert.NotNil(t, metrics) + assert.NotNil(t, promMsg) + assert.Nil(t, err) + + scaler.EXPECT().Close(gomock.Any()) + scalerCache.Close(context.Background()) +} + +func TestGetScaledObjectMetrics_FromCache(t *testing.T) { + scaledObjectName := "testName2" + scaledObjectNamespace := "testNamespace2" + metricName := "test-metric-name2" + longPollingInterval := int32(300) + + ctrl := gomock.NewController(t) + recorder := record.NewFakeRecorder(1) + mockClient := mock_client.NewMockClient(ctrl) + mockExecutor := mock_executor.NewMockScaleExecutor(ctrl) + mockStatusWriter := mock_client.NewMockStatusWriter(ctrl) + + metricsSpecs := []v2.MetricSpec{createMetricSpec(10, metricName)} + metricValue := scalers.GenerateMetricInMili(metricName, float64(10)) + + metricsRecord := map[string]metricscache.MetricsRecord{} + metricsRecord[metricName] = metricscache.MetricsRecord{ + IsActive: true, + Metric: []external_metrics.ExternalMetricValue{metricValue}, + ScalerError: nil, + } + + scaler := mock_scalers.NewMockScaler(ctrl) + // we are going to use cache for metrics values + scalerConfig := scalers.ScalerConfig{TriggerUseCachedMetrics: true} + factory := func() (scalers.Scaler, *scalers.ScalerConfig, error) { + return scaler, &scalerConfig, nil + } + + scaledObject := kedav1alpha1.ScaledObject{ + ObjectMeta: metav1.ObjectMeta{ + Name: scaledObjectName, + Namespace: scaledObjectNamespace, + }, + Spec: kedav1alpha1.ScaledObjectSpec{ + ScaleTargetRef: &kedav1alpha1.ScaleTarget{ + Name: "test", + }, + PollingInterval: &longPollingInterval, + }, + Status: kedav1alpha1.ScaledObjectStatus{ + ScaleTargetGVKR: 
&kedav1alpha1.GroupVersionKindResource{ + Group: "apps", + Kind: "Deployment", + }, + }, + } + + scalerCache := cache.ScalersCache{ + ScaledObject: &scaledObject, + Scalers: []cache.ScalerBuilder{{ + Scaler: scaler, + ScalerConfig: scalerConfig, + Factory: factory, + }}, + Recorder: recorder, + } + + caches := map[string]*cache.ScalersCache{} + caches[scaledObject.GenerateIdentifier()] = &scalerCache + + sh := scaleHandler{ + client: mockClient, + logger: logr.Discard(), + scaleLoopContexts: &sync.Map{}, + scaleExecutor: mockExecutor, + globalHTTPTimeout: time.Duration(1000), + recorder: recorder, + scalerCaches: caches, + scalerCachesLock: &sync.RWMutex{}, + scaledObjectsMetricCache: metricscache.NewMetricsCache(), + } + + mockClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) + scaler.EXPECT().GetMetricSpecForScaling(gomock.Any()).Return(metricsSpecs) + scaler.EXPECT().GetMetricsAndActivity(gomock.Any(), gomock.Any()).Return([]external_metrics.ExternalMetricValue{metricValue}, true, nil) + mockExecutor.EXPECT().RequestScale(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()) + sh.checkScalers(context.TODO(), &scaledObject, &sync.RWMutex{}) + + mockClient.EXPECT().Status().Return(mockStatusWriter) + mockStatusWriter.EXPECT().Patch(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) + scaler.EXPECT().GetMetricSpecForScaling(gomock.Any()).Return(metricsSpecs) + // hitting cache here instead of calling GetMetricsAndActivity() + metrics, promMsg, err := sh.GetScaledObjectMetrics(context.TODO(), scaledObjectName, scaledObjectNamespace, metricName) + assert.NotNil(t, metrics) + assert.NotNil(t, promMsg) + assert.Nil(t, err) + + scaler.EXPECT().Close(gomock.Any()) + scalerCache.Close(context.Background()) +} + func TestCheckScaledObjectScalersWithError(t *testing.T) { ctrl := gomock.NewController(t) recorder := record.NewFakeRecorder(1) + metricsSpecs := []v2.MetricSpec{createMetricSpec(1, "metric-name")} + + scaler := 
mock_scalers.NewMockScaler(ctrl) + scaler.EXPECT().GetMetricSpecForScaling(gomock.Any()).Return(metricsSpecs) + scaler.EXPECT().GetMetricsAndActivity(gomock.Any(), gomock.Any()).Return([]external_metrics.ExternalMetricValue{}, false, errors.New("some error")) + scaler.EXPECT().Close(gomock.Any()) + factory := func() (scalers.Scaler, *scalers.ScalerConfig, error) { scaler := mock_scalers.NewMockScaler(ctrl) - scaler.EXPECT().IsActive(gomock.Any()).Return(false, errors.New("some error")) + scaler.EXPECT().GetMetricsAndActivity(gomock.Any(), gomock.Any()).Return([]external_metrics.ExternalMetricValue{}, false, errors.New("some error")) scaler.EXPECT().Close(gomock.Any()) return scaler, &scalers.ScalerConfig{}, nil } - scaler, _, err := factory() - assert.Nil(t, err) scaledObject := kedav1alpha1.ScaledObject{ ObjectMeta: metav1.ObjectMeta{ @@ -65,11 +261,10 @@ func TestCheckScaledObjectScalersWithError(t *testing.T) { Scaler: scaler, Factory: factory, }}, - Logger: logf.Log.WithName("scalehandler"), Recorder: recorder, } - isActive, isError, _ := cache.IsScaledObjectActive(context.TODO(), &scaledObject) + isActive, isError, _ := cache.GetScaledObjectState(context.TODO(), &scaledObject) cache.Close(context.Background()) assert.Equal(t, false, isActive) @@ -80,12 +275,12 @@ func TestCheckScaledObjectFindFirstActiveNotIgnoreOthers(t *testing.T) { ctrl := gomock.NewController(t) recorder := record.NewFakeRecorder(1) - metricsSpecs := []v2.MetricSpec{createMetricSpec(1)} + metricsSpecs := []v2.MetricSpec{createMetricSpec(1, "metric-name")} activeFactory := func() (scalers.Scaler, *scalers.ScalerConfig, error) { scaler := mock_scalers.NewMockScaler(ctrl) - scaler.EXPECT().IsActive(gomock.Any()).Return(true, nil) - scaler.EXPECT().GetMetricSpecForScaling(gomock.Any()).Times(2).Return(metricsSpecs) + scaler.EXPECT().GetMetricSpecForScaling(gomock.Any()).Return(metricsSpecs) + scaler.EXPECT().GetMetricsAndActivity(gomock.Any(), 
gomock.Any()).Return([]external_metrics.ExternalMetricValue{}, true, nil) scaler.EXPECT().Close(gomock.Any()) return scaler, &scalers.ScalerConfig{}, nil } @@ -94,12 +289,14 @@ func TestCheckScaledObjectFindFirstActiveNotIgnoreOthers(t *testing.T) { failingFactory := func() (scalers.Scaler, *scalers.ScalerConfig, error) { scaler := mock_scalers.NewMockScaler(ctrl) - scaler.EXPECT().IsActive(gomock.Any()).Return(false, errors.New("some error")) + scaler.EXPECT().GetMetricsAndActivity(gomock.Any(), gomock.Any()).Return([]external_metrics.ExternalMetricValue{}, false, errors.New("some error")) scaler.EXPECT().Close(gomock.Any()) return scaler, &scalers.ScalerConfig{}, nil } - failingScaler, _, err := failingFactory() - assert.Nil(t, err) + failingScaler := mock_scalers.NewMockScaler(ctrl) + failingScaler.EXPECT().GetMetricSpecForScaling(gomock.Any()).Return(metricsSpecs) + failingScaler.EXPECT().GetMetricsAndActivity(gomock.Any(), gomock.Any()).Return([]external_metrics.ExternalMetricValue{}, false, errors.New("some error")) + failingScaler.EXPECT().Close(gomock.Any()) scaledObject := &kedav1alpha1.ScaledObject{ ObjectMeta: metav1.ObjectMeta{ @@ -123,24 +320,26 @@ func TestCheckScaledObjectFindFirstActiveNotIgnoreOthers(t *testing.T) { scalersCache := cache.ScalersCache{ Scalers: scalers, - Logger: logf.Log.WithName("scalercache"), Recorder: recorder, } - isActive, isError, _ := scalersCache.IsScaledObjectActive(context.TODO(), scaledObject) + isActive, isError, _ := scalersCache.GetScaledObjectState(context.TODO(), scaledObject) scalersCache.Close(context.Background()) assert.Equal(t, true, isActive) assert.Equal(t, true, isError) } -func createMetricSpec(averageValue int64) v2.MetricSpec { +func createMetricSpec(averageValue int64, metricName string) v2.MetricSpec { qty := resource.NewQuantity(averageValue, resource.DecimalSI) return v2.MetricSpec{ External: &v2.ExternalMetricSource{ Target: v2.MetricTarget{ AverageValue: qty, }, + Metric: v2.MetricIdentifier{ + 
Name: metricName, + }, }, } } diff --git a/tests/internals/cache_metrics/cache_metrics_test.go b/tests/internals/cache_metrics/cache_metrics_test.go new file mode 100644 index 00000000000..5486bae6663 --- /dev/null +++ b/tests/internals/cache_metrics/cache_metrics_test.go @@ -0,0 +1,212 @@ +//go:build e2e +// +build e2e + +package cache_metrics_test + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "k8s.io/client-go/kubernetes" + + . "github.com/kedacore/keda/v2/tests/helper" +) + +const ( + testName = "cache-metrics-test" +) + +var ( + testNamespace = fmt.Sprintf("%s-ns", testName) + deploymentName = fmt.Sprintf("%s-deployment", testName) + monitoredDeploymentName = fmt.Sprintf("%s-monitored", testName) + scaledObjectName = fmt.Sprintf("%s-so", testName) + defaultMonitoredDeploymentReplicas = 8 +) + +type templateData struct { + TestNamespace string + DeploymentName string + ScaledObjectName string + MonitoredDeploymentName string + MonitoredDeploymentReplicas int + EnableUseCachedMetrics bool +} + +const ( + monitoredDeploymentTemplate = ` +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{.MonitoredDeploymentName}} + namespace: {{.TestNamespace}} + labels: + app: {{.MonitoredDeploymentName}} +spec: + replicas: {{.MonitoredDeploymentReplicas}} + selector: + matchLabels: + app: {{.MonitoredDeploymentName}} + template: + metadata: + labels: + app: {{.MonitoredDeploymentName}} + spec: + containers: + - name: {{.MonitoredDeploymentName}} + image: nginx +` + + deploymentTemplate = ` +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{.DeploymentName}} + namespace: {{.TestNamespace}} + labels: + app: {{.DeploymentName}} +spec: + replicas: 1 + selector: + matchLabels: + app: {{.DeploymentName}} + template: + metadata: + labels: + app: {{.DeploymentName}} + spec: + containers: + - name: {{.DeploymentName}} + image: nginx +` + + scaledObjectTemplate = ` +apiVersion: keda.sh/v1alpha1 +kind: ScaledObject +metadata: + name: 
{{.ScaledObjectName}} + namespace: {{.TestNamespace}} +spec: + advanced: + horizontalPodAutoscalerConfig: + behavior: + scaleDown: + stabilizationWindowSeconds: 1 + scaleTargetRef: + name: {{.DeploymentName}} + pollingInterval: 1000 + minReplicaCount: 0 + maxReplicaCount: 10 + cooldownPeriod: 0 + triggers: + - type: kubernetes-workload + useCachedMetrics: {{.EnableUseCachedMetrics}} + metadata: + podSelector: 'app={{.MonitoredDeploymentName}}' + value: '2' +` +) + +func TestScaler(t *testing.T) { + // setup + t.Log("--- setting up ---") + // Create kubernetes resources + kc := GetKubernetesClient(t) + data, templates := getTemplateData() + + CreateKubernetesResources(t, kc, testNamespace, data, templates) + + // test direct metrics query (the standard behavior) + testDirectQuery(t, kc, data) + + // test querying metrics on polling interval + testCacheMetricsOnPollingInterval(t, kc, data) + + // cleanup + DeleteKubernetesResources(t, kc, testNamespace, data, templates) +} + +func getTemplateData() (templateData, []Template) { + return templateData{ + TestNamespace: testNamespace, + DeploymentName: deploymentName, + ScaledObjectName: scaledObjectName, + MonitoredDeploymentName: monitoredDeploymentName, + MonitoredDeploymentReplicas: defaultMonitoredDeploymentReplicas, + }, []Template{ + {Name: "deploymentTemplate", Config: deploymentTemplate}, + {Name: "monitoredDeploymentTemplate", Config: monitoredDeploymentTemplate}, + } +} + +func testCacheMetricsOnPollingInterval(t *testing.T, kc *kubernetes.Clientset, data templateData) { + t.Log("--- testing caching metrics on polling interval ---") + + KubectlApplyWithTemplate(t, data, "deploymentTemplate", deploymentTemplate) + data.MonitoredDeploymentReplicas = defaultMonitoredDeploymentReplicas + KubectlApplyWithTemplate(t, data, "monitoredDeploymentTemplate", monitoredDeploymentTemplate) + + // initial replica count for a deployment + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 
1, 60, 1), + "replica count should be 1 after 1 minute") + + // initial replica count for a monitored deployment + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, monitoredDeploymentName, testNamespace, defaultMonitoredDeploymentReplicas, 60, 1), + fmt.Sprintf("replica count should be %d after 1 minute", defaultMonitoredDeploymentReplicas)) + + data.EnableUseCachedMetrics = true + KubectlApplyWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate) + + // Metric Value = 8, DesiredAverageMetricValue = 2 + // should scale in to 8/2 = 4 replicas, irrespective of current replicas + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 4, 60, 1), + "replica count should be 4 after 1 minute") + + // Changing Metric Value to 4, but because we have a long polling interval, the replicas number should remain the same + data.MonitoredDeploymentReplicas = 4 + KubectlApplyWithTemplate(t, data, "monitoredDeploymentTemplate", monitoredDeploymentTemplate) + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, monitoredDeploymentName, testNamespace, 4, 60, 1), + fmt.Sprintf("replica count should be %d after 1 minute", defaultMonitoredDeploymentReplicas)) + + // Let's wait at least 60s + // the usual setting for `horizontal-pod-autoscaler-sync-period` is 15s) + AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, 4, 60) + + KubectlDeleteWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate) +} + +func testDirectQuery(t *testing.T, kc *kubernetes.Clientset, data templateData) { + t.Log("--- testing query metrics directly ---") + + KubectlApplyWithTemplate(t, data, "deploymentTemplate", deploymentTemplate) + data.MonitoredDeploymentReplicas = defaultMonitoredDeploymentReplicas + KubectlApplyWithTemplate(t, data, "monitoredDeploymentTemplate", monitoredDeploymentTemplate) + + // initial replica count for a deployment + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, 
deploymentName, testNamespace, 1, 60, 1), + "replica count should be 1 after 1 minute") + + // initial replica count for a monitored deployment + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, monitoredDeploymentName, testNamespace, defaultMonitoredDeploymentReplicas, 60, 1), + fmt.Sprintf("replica count should be %d after 1 minute", defaultMonitoredDeploymentReplicas)) + + data.EnableUseCachedMetrics = false + KubectlApplyWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate) + + // Metric Value = 8, DesiredAverageMetricValue = 2 + // should scale in to 8/2 = 4 replicas, irrespective of current replicas + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 4, 60, 1), + "replica count should be 4 after 1 minute") + + // Changing Metric Value to 4, deployment should scale to 2 + data.MonitoredDeploymentReplicas = 4 + KubectlApplyWithTemplate(t, data, "monitoredDeploymentTemplate", monitoredDeploymentTemplate) + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, monitoredDeploymentName, testNamespace, 4, 60, 1), + fmt.Sprintf("replica count should be %d after 1 minute", defaultMonitoredDeploymentReplicas)) + + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 2, 120, 1), + "replica count should be 2 after 2 minutes") + + KubectlDeleteWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate) +} diff --git a/tests/scalers/external_scaler_so/external_scaler_so_test.go b/tests/scalers/external_scaler_so/external_scaler_so_test.go index 4ae10e40618..3fc38ae9ba4 100644 --- a/tests/scalers/external_scaler_so/external_scaler_so_test.go +++ b/tests/scalers/external_scaler_so/external_scaler_so_test.go @@ -138,6 +138,9 @@ func TestScaler(t *testing.T) { assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 0, 60, 1), "replica count should be 0 after 1 minute") + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, scalerName, 
testNamespace, 1, 60, 1), + "replica count should be 1 after 1 minute") + // test scaling testScaleOut(t, kc, data) testScaleIn(t, kc, data) @@ -177,8 +180,8 @@ func testScaleOut(t *testing.T, kc *kubernetes.Clientset, data templateData) { data.MetricValue = data.MetricThreshold * 2 KubectlApplyWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate) - assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 2, 60, 1), - "replica count should be 2 after 1 minute") + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 2, 60, 2), + "replica count should be 2 after 2 minutes") } func testScaleIn(t *testing.T, kc *kubernetes.Clientset, data templateData) { @@ -188,6 +191,6 @@ func testScaleIn(t *testing.T, kc *kubernetes.Clientset, data templateData) { data.MetricValue = 0 KubectlApplyWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate) - assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 0, 60, 1), - "replica count should be 0 after 1 minute") + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 0, 60, 2), + "replica count should be 0 after 2 minutes") }