diff --git a/CHANGELOG.md b/CHANGELOG.md index 91445f13e5f..993702132d4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -38,6 +38,7 @@ To learn more about active deprecations, we recommend checking [GitHub Discussio ### New +- **General**: Consolidate all exposed Prometheus Metrics in KEDA Operator ([#3919](https://github.com/kedacore/keda/issues/3919)) - **General**: Disable response compression for k8s restAPI in client-go ([#3863](https://github.com/kedacore/keda/issues/3863)). Kubernetes issue for reference (https://github.com/kubernetes/kubernetes/issues/112296) - **General**: Expand Prometheus metric with label "ScalerName" to distinguish different triggers. The scaleName is defined per Trigger.Name ([#3588](https://github.com/kedacore/keda/issues/3588)) - **General:** Introduce new Loki Scaler ([#3699](https://github.com/kedacore/keda/issues/3699)) @@ -91,6 +92,7 @@ To learn more about active deprecations, we recommend checking [GitHub Discussio - **General**: Bump Golang to 1.18.6 ([#3205](https://github.com/kedacore/keda/issues/3205)) - **General**: Bump `github.com/Azure/azure-event-hubs-go/v3` ([#2986](https://github.com/kedacore/keda/issues/2986)) - **General**: Migrate from `azure-service-bus-go` to `azservicebus` ([#3394](https://github.com/kedacore/keda/issues/3394)) +- **General**: Metrics Server: use gRPC connection to get metrics from Operator ([#3920](https://github.com/kedacore/keda/issues/3920)) - **General**: Metrics Server: use OpenAPI definitions served by custom-metrics-apiserver ([#3929](https://github.com/kedacore/keda/issues/3929)) - **Azure EventHub**: Add e2e tests ([#2792](https://github.com/kedacore/keda/issues/2792)) diff --git a/CREATE-NEW-SCALER.md b/CREATE-NEW-SCALER.md index 03a396ffd02..3b4b8c89411 100644 --- a/CREATE-NEW-SCALER.md +++ b/CREATE-NEW-SCALER.md @@ -40,7 +40,6 @@ KEDA works in conjunction with Kubernetes Horizontal Pod Autoscaler (HPA). 
When The return type of this function is `MetricSpec`, but in KEDA's case we will mostly write External metrics. So the property that should be filled is `ExternalMetricSource`, where the: - `MetricName`: the name of our metric we are returning in this scaler. The name should be unique, to allow setting multiple (even the same type) Triggers in one ScaledObject, but each function call should return the same name. -- `MetricSelector`: //TODO - `TargetValue`: is the value of the metric we want to reach at all times at all costs. As long as the current metric doesn't match TargetValue, HPA will increase the number of the pods until it reaches the maximum number of pods allowed to scale to. - `TargetAverageValue`: the value of the metric for which we require one pod to handle. e.g. if we are have a scaler based on the length of a message queue, and we specificy 10 for `TargetAverageValue`, we are saying that each pod will handle 10 messages. So if the length of the queue becomes 30, we expect that we have 3 pods in our cluster. (`TargetAverage` and `TargetValue` are mutually exclusive) diff --git a/Makefile b/Makefile index 795ff2b8374..f39e33c70c9 100644 --- a/Makefile +++ b/Makefile @@ -141,6 +141,7 @@ clientset-generate: ## Generate client-go clientset, listers and informers. 
proto-gen: protoc-gen ## Generate Liiklus, ExternalScaler and MetricsService proto PATH="$(LOCALBIN):$(PATH)" protoc -I vendor --proto_path=hack LiiklusService.proto --go_out=pkg/scalers/liiklus --go-grpc_out=pkg/scalers/liiklus PATH="$(LOCALBIN):$(PATH)" protoc -I vendor --proto_path=pkg/scalers/externalscaler externalscaler.proto --go_out=pkg/scalers/externalscaler --go-grpc_out=pkg/scalers/externalscaler + PATH="$(LOCALBIN):$(PATH)" protoc -I vendor --proto_path=pkg/metricsservice/api metrics.proto --go_out=pkg/metricsservice/api --go-grpc_out=pkg/metricsservice/api .PHONY: mockgen-gen mockgen-gen: mockgen pkg/mock/mock_scaling/mock_interface.go pkg/mock/mock_scaler/mock_scaler.go pkg/mock/mock_scale/mock_interfaces.go pkg/mock/mock_client/mock_interfaces.go pkg/scalers/liiklus/mocks/mock_liiklus.go diff --git a/adapter/main.go b/adapter/main.go index f0664fdb44c..28056d0a646 100644 --- a/adapter/main.go +++ b/adapter/main.go @@ -41,7 +41,8 @@ import ( kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1" kedacontrollers "github.com/kedacore/keda/v2/controllers/keda" - "github.com/kedacore/keda/v2/pkg/prommetrics" + "github.com/kedacore/keda/v2/pkg/metricsservice" + prommetrics "github.com/kedacore/keda/v2/pkg/prommetrics/adapter" kedaprovider "github.com/kedacore/keda/v2/pkg/provider" "github.com/kedacore/keda/v2/pkg/scaling" kedautil "github.com/kedacore/keda/v2/pkg/util" @@ -65,6 +66,7 @@ var ( adapterClientRequestBurst int metricsAPIServerPort int disableCompression bool + metricsServiceAddr string ) func (a *Adapter) makeProvider(ctx context.Context, globalHTTPTimeout time.Duration, maxConcurrentReconciles int) (provider.MetricsProvider, <-chan struct{}, error) { @@ -115,6 +117,12 @@ func (a *Adapter) makeProvider(ctx context.Context, globalHTTPTimeout time.Durat return nil, nil, fmt.Errorf("invalid KEDA_METRICS_LEADER_ELECTION_RETRY_PERIOD (%s)", err) } + useMetricsServiceGrpc, err := kedautil.ResolveOsEnvBool("KEDA_USE_METRICS_SERVICE_GRPC", true) 
+ if err != nil { + logger.Error(err, "Invalid KEDA_USE_METRICS_SERVICE_GRPC") + return nil, nil, fmt.Errorf("invalid KEDA_USE_METRICS_SERVICE_GRPC (%s)", err) + } + metricsBindAddress := fmt.Sprintf(":%v", metricsAPIServerPort) mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ MetricsBindAddress: metricsBindAddress, @@ -137,13 +145,19 @@ func (a *Adapter) makeProvider(ctx context.Context, globalHTTPTimeout time.Durat prometheusServer := &prommetrics.PrometheusMetricServer{} go func() { prometheusServer.NewServer(fmt.Sprintf(":%v", prometheusMetricsPort), prometheusMetricsPath) }() - stopCh := make(chan struct{}) - if err := runScaledObjectController(ctx, mgr, handler, logger, externalMetricsInfo, externalMetricsInfoLock, maxConcurrentReconciles, stopCh); err != nil { + logger.Info("Connecting Metrics Service gRPC client to the server", "address", metricsServiceAddr) + grpcClient, err := metricsservice.NewGrpcClient(metricsServiceAddr) + if err != nil { + logger.Error(err, "error connecting Metrics Service gRPC client to the server", "address", metricsServiceAddr) return nil, nil, err } - return kedaprovider.NewProvider(ctx, logger, handler, mgr.GetClient(), namespace, externalMetricsInfo, externalMetricsInfoLock), stopCh, nil + stopCh := make(chan struct{}) + if err := runScaledObjectController(ctx, mgr, handler, logger, externalMetricsInfo, externalMetricsInfoLock, maxConcurrentReconciles, stopCh); err != nil { + return nil, nil, err + } + return kedaprovider.NewProvider(ctx, logger, handler, mgr.GetClient(), *grpcClient, useMetricsServiceGrpc, namespace, externalMetricsInfo, externalMetricsInfoLock), stopCh, nil } func runScaledObjectController(ctx context.Context, mgr manager.Manager, scaleHandler scaling.ScaleHandler, logger logr.Logger, externalMetricsInfo *[]provider.ExternalMetricInfo, externalMetricsInfoLock *sync.RWMutex, maxConcurrentReconciles int, stopCh chan<- struct{}) error { @@ -167,6 +181,19 @@ func runScaledObjectController(ctx 
context.Context, mgr manager.Manager, scaleHa return nil } +// generateDefaultMetricsServiceAddr generates default Metrics Service gRPC Server address based on the current Namespace. +// By default the Metrics Service gRPC Server runs in the same namespace on the keda-operator pod. +func generateDefaultMetricsServiceAddr() string { + const defaultNamespace = "keda" + podNamespace := os.Getenv("POD_NAMESPACE") + + if podNamespace == "" { + podNamespace = defaultNamespace + } + + return fmt.Sprintf("keda-operator.%s.svc.cluster.local:9666", podNamespace) +} + func printVersion() { logger.Info(fmt.Sprintf("KEDA Version: %s", version.Version)) logger.Info(fmt.Sprintf("KEDA Commit: %s", version.GitCommit)) @@ -204,6 +231,7 @@ func main() { cmd.Flags().IntVar(&metricsAPIServerPort, "port", 8080, "Set the port for the metrics API server") cmd.Flags().IntVar(&prometheusMetricsPort, "metrics-port", 9022, "Set the port to expose prometheus metrics") cmd.Flags().StringVar(&prometheusMetricsPath, "metrics-path", "/metrics", "Set the path for the prometheus metrics endpoint") + cmd.Flags().StringVar(&metricsServiceAddr, "metrics-service-address", generateDefaultMetricsServiceAddr(), "The address of the gRPC Metrics Service Server.") cmd.Flags().Float32Var(&adapterClientRequestQPS, "kube-api-qps", 20.0, "Set the QPS rate for throttling requests sent to the apiserver") cmd.Flags().IntVar(&adapterClientRequestBurst, "kube-api-burst", 30, "Set the burst for throttling requests sent to the apiserver") cmd.Flags().BoolVar(&disableCompression, "disable-compression", true, "Disable response compression for k8s restAPI in client-go. 
") diff --git a/apis/keda/v1alpha1/indentifier.go b/apis/keda/v1alpha1/indentifier.go new file mode 100644 index 00000000000..62eed83b599 --- /dev/null +++ b/apis/keda/v1alpha1/indentifier.go @@ -0,0 +1,27 @@ +/* +Copyright 2022 The KEDA Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "fmt" + "strings" +) + +// GenerateIdentifier returns identifier for the object in form "kind.namespace.name" (lowercase) +func GenerateIdentifier(kind, namespace, name string) string { + return strings.ToLower(fmt.Sprintf("%s.%s.%s", kind, namespace, name)) +} diff --git a/apis/keda/v1alpha1/withtriggers_types.go b/apis/keda/v1alpha1/withtriggers_types.go index 00b9821cff7..f442c63deb4 100644 --- a/apis/keda/v1alpha1/withtriggers_types.go +++ b/apis/keda/v1alpha1/withtriggers_types.go @@ -17,8 +17,6 @@ limitations under the License. 
package v1alpha1 import ( - "fmt" - "strings" "time" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -89,7 +87,7 @@ func (t *WithTriggers) GetPollingInterval() time.Duration { return time.Second * time.Duration(defaultPollingInterval) } -// GenerateIdenitifier returns identifier for the object in for "kind.namespace.name" -func (t *WithTriggers) GenerateIdenitifier() string { - return strings.ToLower(fmt.Sprintf("%s.%s.%s", t.Kind, t.Namespace, t.Name)) +// GenerateIdentifier returns identifier for the object in form "kind.namespace.name" +func (t *WithTriggers) GenerateIdentifier() string { + return GenerateIdentifier(t.Kind, t.Namespace, t.Name) } diff --git a/config/crd/bases/keda.sh_scaledjobs.yaml b/config/crd/bases/keda.sh_scaledjobs.yaml index fffed9cd72e..4e091d1f888 100644 --- a/config/crd/bases/keda.sh_scaledjobs.yaml +++ b/config/crd/bases/keda.sh_scaledjobs.yaml @@ -128,6 +128,128 @@ spec: left to do is less than max parallelism. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/' format: int32 type: integer + podFailurePolicy: + description: "Specifies the policy of handling failed pods. In + particular, it allows to specify the set of actions and conditions + which need to be satisfied to take the associated action. If + empty, the default behaviour applies - the counter of failed + pods, represented by the jobs's .status.failed field, is incremented + and it is checked against the backoffLimit. This field cannot + be used in combination with restartPolicy=OnFailure. \n This + field is alpha-level. To use this field, you must enable the + `JobPodFailurePolicy` feature gate (disabled by default)." + properties: + rules: + description: A list of pod failure policy rules. The rules + are evaluated in order. Once a rule matches a Pod failure, + the remaining of the rules are ignored. 
When no rule matches + the Pod failure, the default handling applies - the counter + of pod failures is incremented and it is checked against + the backoffLimit. At most 20 elements are allowed. + items: + description: PodFailurePolicyRule describes how a pod failure + is handled when the requirements are met. One of OnExitCodes + and onPodConditions, but not both, can be used in each + rule. + properties: + action: + description: 'Specifies the action taken on a pod failure + when the requirements are satisfied. Possible values + are: - FailJob: indicates that the pod''s job is marked + as Failed and all running pods are terminated. - Ignore: + indicates that the counter towards the .backoffLimit + is not incremented and a replacement pod is created. + - Count: indicates that the pod is handled in the + default way - the counter towards the .backoffLimit + is incremented. Additional values are considered to + be added in the future. Clients should react to an + unknown action by skipping the rule.' + type: string + onExitCodes: + description: Represents the requirement on the container + exit codes. + properties: + containerName: + description: Restricts the check for exit codes + to the container with the specified name. When + null, the rule applies to all containers. When + specified, it should match one the container or + initContainer names in the pod template. + type: string + operator: + description: 'Represents the relationship between + the container exit code(s) and the specified values. + Containers completed with success (exit code 0) + are excluded from the requirement check. Possible + values are: - In: the requirement is satisfied + if at least one container exit code (might be + multiple if there are multiple containers not + restricted by the ''containerName'' field) is + in the set of specified values. 
- NotIn: the requirement + is satisfied if at least one container exit code + (might be multiple if there are multiple containers + not restricted by the ''containerName'' field) + is not in the set of specified values. Additional + values are considered to be added in the future. + Clients should react to an unknown operator by + assuming the requirement is not satisfied.' + type: string + values: + description: Specifies the set of values. Each returned + container exit code (might be multiple in case + of multiple containers) is checked against this + set of values with respect to the operator. The + list of values must be ordered and must not contain + duplicates. Value '0' cannot be used for the In + operator. At least one element is required. At + most 255 elements are allowed. + items: + format: int32 + type: integer + type: array + x-kubernetes-list-type: set + required: + - operator + - values + type: object + onPodConditions: + description: Represents the requirement on the pod conditions. + The requirement is represented as a list of pod condition + patterns. The requirement is satisfied if at least + one pattern matches an actual pod condition. At most + 20 elements are allowed. + items: + description: PodFailurePolicyOnPodConditionsPattern + describes a pattern for matching an actual pod condition + type. + properties: + status: + description: Specifies the required Pod condition + status. To match a pod condition it is required + that the specified status equals the pod condition + status. Defaults to True. + type: string + type: + description: Specifies the required Pod condition + type. To match a pod condition it is required + that specified type equals the pod condition + type. 
+ type: string + required: + - status + - type + type: object + type: array + x-kubernetes-list-type: atomic + required: + - action + - onPodConditions + type: object + type: array + x-kubernetes-list-type: atomic + required: + - rules + type: object selector: description: 'A label query over pods that should match the pod count. Normally, the system sets this field for you. More info: @@ -1774,13 +1896,13 @@ spec: type: string ports: description: List of ports to expose from the container. - Exposing a port here gives the system additional - information about the network connections a container - uses, but is primarily informational. Not specifying - a port here DOES NOT prevent that port from being - exposed. Any port which is listening on the default - "0.0.0.0" address inside a container will be accessible - from the network. Cannot be updated. + Not specifying a port here DOES NOT prevent that + port from being exposed. Any port which is listening + on the default "0.0.0.0" address inside a container + will be accessible from the network. Modifying + this array with strategic merge patch may corrupt + the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. items: description: ContainerPort represents a network port in a single container. @@ -2568,9 +2690,7 @@ spec: This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use - the pod's ephemeralcontainers subresource. This field - is beta-level and available on clusters that haven't - disabled the EphemeralContainers feature gate. + the pod's ephemeralcontainers subresource. items: description: "An EphemeralContainer is a temporary container that you may add to an existing Pod for user-initiated @@ -2581,9 +2701,7 @@ spec: if an ephemeral container causes the Pod to exceed its resource allocation. 
\n To add an ephemeral container, use the ephemeralcontainers subresource of an existing - Pod. Ephemeral containers may not be removed or restarted. - \n This is a beta feature available on clusters that - haven't disabled the EphemeralContainers feature gate." + Pod. Ephemeral containers may not be removed or restarted." properties: args: description: 'Arguments to the entrypoint. The image''s @@ -3955,6 +4073,20 @@ spec: description: 'Use the host''s pid namespace. Optional: Default to false.' type: boolean + hostUsers: + description: 'Use the host''s user namespace. Optional: + Default to true. If set to true or not present, the + pod will be run in the host user namespace, useful for + when the pod needs a feature only available to the host + user namespace, such as loading a kernel module with + CAP_SYS_MODULE. When set to false, a new userns is created + for the pod. Setting false is useful for mitigating + container breakout vulnerabilities even allowing users + to run their containers as root without actually having + root privileges on the host. This field is alpha-level + and is only honored by servers that enable the UserNamespacesSupport + feature.' + type: boolean hostname: description: Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined @@ -4606,13 +4738,13 @@ spec: type: string ports: description: List of ports to expose from the container. - Exposing a port here gives the system additional - information about the network connections a container - uses, but is primarily informational. Not specifying - a port here DOES NOT prevent that port from being - exposed. Any port which is listening on the default - "0.0.0.0" address inside a container will be accessible - from the network. Cannot be updated. + Not specifying a port here DOES NOT prevent that + port from being exposed. Any port which is listening + on the default "0.0.0.0" address inside a container + will be accessible from the network. 
Modifying + this array with strategic merge patch may corrupt + the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. items: description: ContainerPort represents a network port in a single container. @@ -5362,19 +5494,18 @@ spec: this is set. \n If the OS field is set to linux, the following fields must be unset: -securityContext.windowsOptions \n If the OS field is set to windows, following fields - must be unset: - spec.hostPID - spec.hostIPC - spec.securityContext.seLinuxOptions - - spec.securityContext.seccompProfile - spec.securityContext.fsGroup - - spec.securityContext.fsGroupChangePolicy - spec.securityContext.sysctls - - spec.shareProcessNamespace - spec.securityContext.runAsUser - - spec.securityContext.runAsGroup - spec.securityContext.supplementalGroups - - spec.containers[*].securityContext.seLinuxOptions + must be unset: - spec.hostPID - spec.hostIPC - spec.hostUsers + - spec.securityContext.seLinuxOptions - spec.securityContext.seccompProfile + - spec.securityContext.fsGroup - spec.securityContext.fsGroupChangePolicy + - spec.securityContext.sysctls - spec.shareProcessNamespace + - spec.securityContext.runAsUser - spec.securityContext.runAsGroup + - spec.securityContext.supplementalGroups - spec.containers[*].securityContext.seLinuxOptions - spec.containers[*].securityContext.seccompProfile - spec.containers[*].securityContext.capabilities - spec.containers[*].securityContext.readOnlyRootFilesystem - spec.containers[*].securityContext.privileged - spec.containers[*].securityContext.allowPrivilegeEscalation - spec.containers[*].securityContext.procMount - spec.containers[*].securityContext.runAsUser - - spec.containers[*].securityContext.runAsGroup This - is a beta field and requires the IdentifyPodOS feature" + - spec.containers[*].securityContext.runAsGroup" properties: name: description: 'Name is the name of the operating system. @@ -5810,6 +5941,21 @@ spec: The requirements are ANDed. 
type: object type: object + matchLabelKeys: + description: MatchLabelKeys is a set of pod label + keys to select the pods over which spreading will + be calculated. The keys are used to lookup values + from the incoming pod labels, those key-value + labels are ANDed with labelSelector to select + the group of existing pods over which spreading + will be calculated for the incoming pod. Keys + that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match + against labelSelector. + items: + type: string + type: array + x-kubernetes-list-type: atomic maxSkew: description: 'MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, @@ -5858,11 +6004,35 @@ spec: new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three - zones, it will violate MaxSkew. \n This is an - alpha field and requires enabling MinDomainsInPodTopologySpread - feature gate." + zones, it will violate MaxSkew. \n This is a beta + field and requires the MinDomainsInPodTopologySpread + feature gate to be enabled (enabled by default)." format: int32 type: integer + nodeAffinityPolicy: + description: "NodeAffinityPolicy indicates how we + will treat Pod's nodeAffinity/nodeSelector when + calculating pod topology spread skew. Options + are: - Honor: only nodes matching nodeAffinity/nodeSelector + are included in the calculations. - Ignore: nodeAffinity/nodeSelector + are ignored. All nodes are included in the calculations. + \n If this value is nil, the behavior is equivalent + to the Honor policy. This is a alpha-level feature + enabled by the NodeInclusionPolicyInPodTopologySpread + feature flag." + type: string + nodeTaintsPolicy: + description: "NodeTaintsPolicy indicates how we + will treat node taints when calculating pod topology + spread skew. 
Options are: - Honor: nodes without + taints, along with tainted nodes for which the + incoming pod has a toleration, are included. - + Ignore: node taints are ignored. All nodes are + included. \n If this value is nil, the behavior + is equivalent to the Ignore policy. This is a + alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread + feature flag." + type: string topologyKey: description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical @@ -5871,12 +6041,12 @@ spec: try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain - as a domain whose nodes match the node selector. - e.g. If TopologyKey is "kubernetes.io/hostname", - each Node is a domain of that topology. And, if - TopologyKey is "topology.kubernetes.io/zone", - each zone is a domain of that topology. It's a - required field. + as a domain whose nodes meet the requirements + of nodeAffinityPolicy and nodeTaintsPolicy. e.g. + If TopologyKey is "kubernetes.io/hostname", each + Node is a domain of that topology. And, if TopologyKey + is "topology.kubernetes.io/zone", each zone is + a domain of that topology. It's a required field. type: string whenUnsatisfiable: description: 'WhenUnsatisfiable indicates how to diff --git a/config/crd/bases/keda.sh_scaledobjects.yaml b/config/crd/bases/keda.sh_scaledobjects.yaml index 69d36068457..22925dcd715 100644 --- a/config/crd/bases/keda.sh_scaledobjects.yaml +++ b/config/crd/bases/keda.sh_scaledobjects.yaml @@ -82,7 +82,7 @@ spec: properties: scaleDown: description: scaleDown is scaling policy for scaling Down. - If not set, the default value is to allow to scale in + If not set, the default value is to allow to scale down to minReplicas pods, with a 300 second stabilization window (i.e., the highest recommendation for the last 300sec is used). 
@@ -132,8 +132,8 @@ spec: StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - - For scale out: 0 (i.e. no stabilization is done). - - For scale in: 300 (i.e. the stabilization window + - For scale up: 0 (i.e. no stabilization is done). + - For scale down: 300 (i.e. the stabilization window is 300 seconds long).' format: int32 type: integer @@ -189,8 +189,8 @@ spec: StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - - For scale out: 0 (i.e. no stabilization is done). - - For scale in: 300 (i.e. the stabilization window + - For scale up: 0 (i.e. no stabilization is done). + - For scale down: 300 (i.e. the stabilization window is 300 seconds long).' format: int32 type: integer diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml index 77c0cd34eaf..1ca9f662392 100644 --- a/config/manager/kustomization.yaml +++ b/config/manager/kustomization.yaml @@ -1,5 +1,6 @@ resources: - manager.yaml +- service.yaml apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization diff --git a/config/manager/service.yaml b/config/manager/service.yaml new file mode 100644 index 00000000000..ca7887769c5 --- /dev/null +++ b/config/manager/service.yaml @@ -0,0 +1,20 @@ +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/name: keda-operator + app.kubernetes.io/version: latest + app.kubernetes.io/part-of: keda-operator + name: keda-operator + namespace: keda +spec: + ports: + - name: metricsservice + port: 9666 + targetPort: 9666 + - name: metrics + port: 8080 + targetPort: 8080 + selector: + app: keda-operator diff --git a/config/metrics-server/deployment.yaml b/config/metrics-server/deployment.yaml index f18fcccd3cf..42016989d61 100644 --- a/config/metrics-server/deployment.yaml +++ b/config/metrics-server/deployment.yaml @@ -48,6 +48,10 @@ spec: 
env: - name: WATCH_NAMESPACE value: "" + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace - name: KEDA_HTTP_DEFAULT_TIMEOUT value: "" args: diff --git a/controllers/keda/hpa.go b/controllers/keda/hpa.go index 3832cca665e..85e1f37ab56 100644 --- a/controllers/keda/hpa.go +++ b/controllers/keda/hpa.go @@ -197,7 +197,7 @@ func (r *ScaledObjectReconciler) getScaledObjectMetricSpecs(ctx context.Context, var externalMetricNames []string var resourceMetricNames []string - cache, err := r.scaleHandler.GetScalersCache(ctx, scaledObject) + cache, err := r.ScaleHandler.GetScalersCache(ctx, scaledObject) if err != nil { logger.Error(err, "Error getting scalers") return nil, err diff --git a/controllers/keda/hpa_test.go b/controllers/keda/hpa_test.go index 898587eb8cd..7cef70233db 100644 --- a/controllers/keda/hpa_test.go +++ b/controllers/keda/hpa_test.go @@ -54,7 +54,7 @@ var _ = Describe("hpa", func() { logger = logr.Discard() reconciler = ScaledObjectReconciler{ Client: client, - scaleHandler: scaleHandler, + ScaleHandler: scaleHandler, } }) diff --git a/controllers/keda/scaledobject_controller.go b/controllers/keda/scaledobject_controller.go index 1a928a533d0..24a4b4390f6 100644 --- a/controllers/keda/scaledobject_controller.go +++ b/controllers/keda/scaledobject_controller.go @@ -20,7 +20,6 @@ import ( "context" "fmt" "sync" - "time" "github.com/go-logr/logr" autoscalingv1 "k8s.io/api/autoscaling/v1" @@ -32,8 +31,6 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/discovery" - "k8s.io/client-go/dynamic" "k8s.io/client-go/scale" "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" @@ -42,7 +39,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/predicate" 
kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1" @@ -65,16 +61,14 @@ import ( // ScaledObjectReconciler reconciles a ScaledObject object type ScaledObjectReconciler struct { - Client client.Client - Scheme *runtime.Scheme - GlobalHTTPTimeout time.Duration - Recorder record.EventRecorder + Client client.Client + Scheme *runtime.Scheme + Recorder record.EventRecorder + ScaleClient scale.ScalesGetter + ScaleHandler scaling.ScaleHandler - scaleClient scale.ScalesGetter restMapper meta.RESTMapper scaledObjectsGenerations *sync.Map - scaleHandler scaling.ScaleHandler - kubeVersion kedautil.K8sVersion } type scaledObjectMetricsData struct { @@ -102,33 +96,24 @@ func init() { // SetupWithManager initializes the ScaledObjectReconciler instance and starts a new controller managed by the passed Manager instance. func (r *ScaledObjectReconciler) SetupWithManager(mgr ctrl.Manager, options controller.Options) error { - setupLog := log.Log.WithName("setup") + r.restMapper = mgr.GetRESTMapper() + r.scaledObjectsGenerations = &sync.Map{} - // create Discovery clientset - // TODO If we need to increase the QPS of scaling API calls, copy and tweak this RESTConfig. 
- clientset, err := discovery.NewDiscoveryClientForConfig(mgr.GetConfig()) - if err != nil { - setupLog.Error(err, "Not able to create Discovery clientset") - return err + if r.ScaleHandler == nil { + return fmt.Errorf("ScaledObjectReconciler.ScaleHandler is not initialized") } - - // Find out Kubernetes version - version, err := clientset.ServerVersion() - if err == nil { - r.kubeVersion = kedautil.NewK8sVersion(version) - setupLog.Info("Running on Kubernetes "+r.kubeVersion.PrettyVersion, "version", version) - } else { - setupLog.Error(err, "Not able to get Kubernetes version") + if r.Client == nil { + return fmt.Errorf("ScaledObjectReconciler.Client is not initialized") + } + if r.ScaleClient == nil { + return fmt.Errorf("ScaledObjectReconciler.ScaleClient is not initialized") + } + if r.Scheme == nil { + return fmt.Errorf("ScaledObjectReconciler.Scheme is not initialized") + } + if r.Recorder == nil { + return fmt.Errorf("ScaledObjectReconciler.Recorder is not initialized") } - - // Create Scale Client - scaleClient := initScaleClient(mgr, clientset) - r.scaleClient = scaleClient - - // Init the rest of ScaledObjectReconciler - r.restMapper = mgr.GetRESTMapper() - r.scaledObjectsGenerations = &sync.Map{} - r.scaleHandler = scaling.NewScaleHandler(mgr.GetClient(), r.scaleClient, mgr.GetScheme(), r.GlobalHTTPTimeout, r.Recorder) // Start controller return ctrl.NewControllerManagedBy(mgr). @@ -147,15 +132,6 @@ func (r *ScaledObjectReconciler) SetupWithManager(mgr ctrl.Manager, options cont Complete(r) } -func initScaleClient(mgr manager.Manager, clientset *discovery.DiscoveryClient) scale.ScalesGetter { - scaleKindResolver := scale.NewDiscoveryScaleKindResolver(clientset) - return scale.New( - clientset.RESTClient(), mgr.GetRESTMapper(), - dynamic.LegacyAPIPathResolverFunc, - scaleKindResolver, - ) -} - // Reconcile performs reconciliation on the identified ScaledObject resource based on the request information passed, returns the result and an error (if any). 
func (r *ScaledObjectReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { reqLogger := log.FromContext(ctx) @@ -319,7 +295,7 @@ func (r *ScaledObjectReconciler) checkTargetResourceIsScalable(ctx context.Conte // not cached, let's try to detect /scale subresource // also rechecks when we need to update the status. var errScale error - scale, errScale = (r.scaleClient).Scales(scaledObject.Namespace).Get(ctx, gr, scaledObject.Spec.ScaleTargetRef.Name, metav1.GetOptions{}) + scale, errScale = (r.ScaleClient).Scales(scaledObject.Namespace).Get(ctx, gr, scaledObject.Spec.ScaleTargetRef.Name, metav1.GetOptions{}) if errScale != nil { // not able to get /scale subresource -> let's check if the resource even exist in the cluster unstruct := &unstructured.Unstructured{} @@ -467,7 +443,7 @@ func (r *ScaledObjectReconciler) requestScaleLoop(ctx context.Context, logger lo return err } - if err = r.scaleHandler.HandleScalableObject(ctx, scaledObject); err != nil { + if err = r.ScaleHandler.HandleScalableObject(ctx, scaledObject); err != nil { return err } @@ -485,7 +461,7 @@ func (r *ScaledObjectReconciler) stopScaleLoop(ctx context.Context, logger logr. 
return err } - if err := r.scaleHandler.DeleteScalableObject(ctx, scaledObject); err != nil { + if err := r.ScaleHandler.DeleteScalableObject(ctx, scaledObject); err != nil { return err } // delete ScaledObject's current Generation diff --git a/controllers/keda/scaledobject_controller_test.go b/controllers/keda/scaledobject_controller_test.go index c5a24d41639..746697f69e5 100644 --- a/controllers/keda/scaledobject_controller_test.go +++ b/controllers/keda/scaledobject_controller_test.go @@ -76,7 +76,7 @@ var _ = Describe("ScaledObjectController", func() { mockStatusWriter = mock_client.NewMockStatusWriter(ctrl) metricNameTestReconciler = ScaledObjectReconciler{ - scaleHandler: mockScaleHandler, + ScaleHandler: mockScaleHandler, Client: mockClient, } }) diff --git a/controllers/keda/scaledobject_finalizer.go b/controllers/keda/scaledobject_finalizer.go index c0fa1aeab62..4c26caaa844 100644 --- a/controllers/keda/scaledobject_finalizer.go +++ b/controllers/keda/scaledobject_finalizer.go @@ -53,7 +53,7 @@ func (r *ScaledObjectReconciler) finalizeScaledObject(ctx context.Context, logge logger.V(1).Info("Failed to restore scaleTarget's replica count back to the original, the scaling haven't been probably initialized yet.") } else { // We have enough information about the scaleTarget, let's proceed. 
- scale, err := r.scaleClient.Scales(scaledObject.Namespace).Get(ctx, scaledObject.Status.ScaleTargetGVKR.GroupResource(), scaledObject.Spec.ScaleTargetRef.Name, metav1.GetOptions{}) + scale, err := r.ScaleClient.Scales(scaledObject.Namespace).Get(ctx, scaledObject.Status.ScaleTargetGVKR.GroupResource(), scaledObject.Spec.ScaleTargetRef.Name, metav1.GetOptions{}) if err != nil { if errors.IsNotFound(err) { logger.V(1).Info("Failed to get scaleTarget's scale status, because it was probably deleted", "error", err) @@ -62,7 +62,7 @@ func (r *ScaledObjectReconciler) finalizeScaledObject(ctx context.Context, logge } } else { scale.Spec.Replicas = *scaledObject.Status.OriginalReplicaCount - _, err = r.scaleClient.Scales(scaledObject.Namespace).Update(ctx, scaledObject.Status.ScaleTargetGVKR.GroupResource(), scale, metav1.UpdateOptions{}) + _, err = r.ScaleClient.Scales(scaledObject.Namespace).Update(ctx, scaledObject.Status.ScaleTargetGVKR.GroupResource(), scale, metav1.UpdateOptions{}) if err != nil { logger.Error(err, "Failed to restore scaleTarget's replica count back to the original", "finalizer", scaledObjectFinalizer) } diff --git a/controllers/keda/suite_test.go b/controllers/keda/suite_test.go index 62ecb1a60cb..be88c4d53d3 100644 --- a/controllers/keda/suite_test.go +++ b/controllers/keda/suite_test.go @@ -20,6 +20,7 @@ import ( "context" "path/filepath" "testing" + "time" . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" @@ -34,6 +35,8 @@ import ( "sigs.k8s.io/controller-runtime/pkg/log/zap" kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1" + "github.com/kedacore/keda/v2/pkg/k8s" + "github.com/kedacore/keda/v2/pkg/scaling" // +kubebuilder:scaffold:imports ) @@ -81,10 +84,15 @@ var _ = BeforeSuite(func(done Done) { }) Expect(err).ToNot(HaveOccurred()) + scaleClient, _, err := k8s.InitScaleClient(k8sManager) + Expect(err).ToNot(HaveOccurred()) + err = (&ScaledObjectReconciler{ - Client: k8sManager.GetClient(), - Scheme: k8sManager.GetScheme(), - Recorder: k8sManager.GetEventRecorderFor("keda-operator"), + Client: k8sManager.GetClient(), + Scheme: k8sManager.GetScheme(), + Recorder: k8sManager.GetEventRecorderFor("keda-operator"), + ScaleHandler: scaling.NewScaleHandler(k8sManager.GetClient(), scaleClient, k8sManager.GetScheme(), time.Duration(10), k8sManager.GetEventRecorderFor("keda-operator")), + ScaleClient: scaleClient, }).SetupWithManager(k8sManager, controller.Options{}) Expect(err).ToNot(HaveOccurred()) diff --git a/main.go b/main.go index e7ba811375e..93d719885ad 100644 --- a/main.go +++ b/main.go @@ -35,6 +35,9 @@ import ( kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1" kedacontrollers "github.com/kedacore/keda/v2/controllers/keda" + "github.com/kedacore/keda/v2/pkg/k8s" + "github.com/kedacore/keda/v2/pkg/metricsservice" + "github.com/kedacore/keda/v2/pkg/scaling" kedautil "github.com/kedacore/keda/v2/pkg/util" "github.com/kedacore/keda/v2/version" //+kubebuilder:scaffold:imports @@ -64,23 +67,25 @@ func getWatchNamespace() (string, error) { func main() { var metricsAddr string - var enableLeaderElection bool var probeAddr string + var metricsServiceAddr string + var enableLeaderElection bool var adapterClientRequestQPS float32 var adapterClientRequestBurst int var disableCompression bool pflag.StringVar(&metricsAddr, "metrics-bind-address", ":8080", "The address the metric endpoint binds to.") pflag.StringVar(&probeAddr, 
"health-probe-bind-address", ":8081", "The address the probe endpoint binds to.") + pflag.StringVar(&metricsServiceAddr, "metrics-service-bind-address", ":9666", "The address the gRPRC Metrics Service endpoint binds to.") pflag.BoolVar(&enableLeaderElection, "leader-elect", false, "Enable leader election for controller manager. "+ "Enabling this will ensure there is only one active controller manager.") pflag.Float32Var(&adapterClientRequestQPS, "kube-api-qps", 20.0, "Set the QPS rate for throttling requests sent to the apiserver") pflag.IntVar(&adapterClientRequestBurst, "kube-api-burst", 30, "Set the burst for throttling requests sent to the apiserver") pflag.BoolVar(&disableCompression, "disable-compression", true, "Disable response compression for k8s restAPI in client-go. ") + opts := zap.Options{} opts.BindFlags(flag.CommandLine) pflag.CommandLine.AddGoFlagSet(flag.CommandLine) - pflag.Parse() ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts))) @@ -134,30 +139,39 @@ func main() { // default to 3 seconds if they don't pass the env var globalHTTPTimeoutMS, err := kedautil.ResolveOsEnvInt("KEDA_HTTP_DEFAULT_TIMEOUT", 3000) if err != nil { - setupLog.Error(err, "Invalid KEDA_HTTP_DEFAULT_TIMEOUT") + setupLog.Error(err, "invalid KEDA_HTTP_DEFAULT_TIMEOUT") os.Exit(1) } scaledObjectMaxReconciles, err := kedautil.ResolveOsEnvInt("KEDA_SCALEDOBJECT_CTRL_MAX_RECONCILES", 5) if err != nil { - setupLog.Error(err, "Invalid KEDA_SCALEDOBJECT_CTRL_MAX_RECONCILES") + setupLog.Error(err, "invalid KEDA_SCALEDOBJECT_CTRL_MAX_RECONCILES") os.Exit(1) } scaledJobMaxReconciles, err := kedautil.ResolveOsEnvInt("KEDA_SCALEDJOB_CTRL_MAX_RECONCILES", 1) if err != nil { - setupLog.Error(err, "Invalid KEDA_SCALEDJOB_CTRL_MAX_RECONCILES") + setupLog.Error(err, "invalid KEDA_SCALEDJOB_CTRL_MAX_RECONCILES") os.Exit(1) } globalHTTPTimeout := time.Duration(globalHTTPTimeoutMS) * time.Millisecond eventRecorder := mgr.GetEventRecorderFor("keda-operator") + scaleClient, kubeVersion, err := 
k8s.InitScaleClient(mgr) + if err != nil { + setupLog.Error(err, "unable to init scale client") + os.Exit(1) + } + + scaledHandler := scaling.NewScaleHandler(mgr.GetClient(), scaleClient, mgr.GetScheme(), globalHTTPTimeout, eventRecorder) + if err = (&kedacontrollers.ScaledObjectReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - GlobalHTTPTimeout: globalHTTPTimeout, - Recorder: eventRecorder, + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Recorder: eventRecorder, + ScaleClient: scaleClient, + ScaleHandler: scaledHandler, }).SetupWithManager(mgr, controller.Options{MaxConcurrentReconciles: scaledObjectMaxReconciles}); err != nil { setupLog.Error(err, "unable to create controller", "controller", "ScaledObject") os.Exit(1) @@ -196,11 +210,18 @@ func main() { os.Exit(1) } + grpcServer := metricsservice.NewGrpcServer(&scaledHandler, metricsServiceAddr) + if err := mgr.Add(&grpcServer); err != nil { + setupLog.Error(err, "unable to set up Metrics Service gRPC server") + os.Exit(1) + } + setupLog.Info("Starting manager") setupLog.Info(fmt.Sprintf("KEDA Version: %s", version.Version)) setupLog.Info(fmt.Sprintf("Git Commit: %s", version.GitCommit)) setupLog.Info(fmt.Sprintf("Go Version: %s", runtime.Version())) setupLog.Info(fmt.Sprintf("Go OS/Arch: %s/%s", runtime.GOOS, runtime.GOARCH)) + setupLog.Info(fmt.Sprintf("Running on Kubernetes %s", kubeVersion.PrettyVersion), "version", kubeVersion.Version) if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { setupLog.Error(err, "problem running manager") diff --git a/pkg/provider/fallback.go b/pkg/fallback/fallback.go similarity index 72% rename from pkg/provider/fallback.go rename to pkg/fallback/fallback.go index ba25facca04..2cc3b740f2d 100644 --- a/pkg/provider/fallback.go +++ b/pkg/fallback/fallback.go @@ -1,5 +1,5 @@ /* -Copyright 2021 The KEDA Authors +Copyright 2022 The KEDA Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in 
compliance with the License. @@ -14,12 +14,13 @@ See the License for the specific language governing permissions and limitations under the License. */ -package provider +package fallback import ( "context" "fmt" + "github.com/go-logr/logr" v2 "k8s.io/api/autoscaling/v2" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -29,7 +30,7 @@ import ( kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1" ) -func isFallbackEnabled(scaledObject *kedav1alpha1.ScaledObject, metricSpec v2.MetricSpec) bool { +func isFallbackEnabled(logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject, metricSpec v2.MetricSpec) bool { if scaledObject.Spec.Fallback == nil { return false } @@ -42,7 +43,7 @@ func isFallbackEnabled(scaledObject *kedav1alpha1.ScaledObject, metricSpec v2.Me return true } -func (p *KedaProvider) getMetricsWithFallback(ctx context.Context, metrics []external_metrics.ExternalMetricValue, suppressedError error, metricName string, scaledObject *kedav1alpha1.ScaledObject, metricSpec v2.MetricSpec) ([]external_metrics.ExternalMetricValue, error) { +func GetMetricsWithFallback(ctx context.Context, client runtimeclient.Client, logger logr.Logger, metrics []external_metrics.ExternalMetricValue, suppressedError error, metricName string, scaledObject *kedav1alpha1.ScaledObject, metricSpec v2.MetricSpec) ([]external_metrics.ExternalMetricValue, error) { status := scaledObject.Status.DeepCopy() initHealthStatus(status) @@ -54,7 +55,7 @@ func (p *KedaProvider) getMetricsWithFallback(ctx context.Context, metrics []ext healthStatus.Status = kedav1alpha1.HealthStatusHappy status.Health[metricName] = *healthStatus - p.updateStatus(ctx, scaledObject, status, metricSpec) + updateStatus(ctx, client, logger, scaledObject, status, metricSpec) return metrics, nil } @@ -62,23 +63,23 @@ func (p *KedaProvider) getMetricsWithFallback(ctx context.Context, metrics []ext *healthStatus.NumberOfFailures++ status.Health[metricName] = *healthStatus - 
p.updateStatus(ctx, scaledObject, status, metricSpec) + updateStatus(ctx, client, logger, scaledObject, status, metricSpec) switch { - case !isFallbackEnabled(scaledObject, metricSpec): + case !isFallbackEnabled(logger, scaledObject, metricSpec): return nil, suppressedError case !validateFallback(scaledObject): logger.Info("Failed to validate ScaledObject Spec. Please check that parameters are positive integers") return nil, suppressedError case *healthStatus.NumberOfFailures > scaledObject.Spec.Fallback.FailureThreshold: - return doFallback(scaledObject, metricSpec, metricName, suppressedError), nil + return doFallback(logger, scaledObject, metricSpec, metricName, suppressedError), nil default: return nil, suppressedError } } -func fallbackExistsInScaledObject(scaledObject *kedav1alpha1.ScaledObject, metricSpec v2.MetricSpec) bool { - if !isFallbackEnabled(scaledObject, metricSpec) || !validateFallback(scaledObject) { +func fallbackExistsInScaledObject(logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject, metricSpec v2.MetricSpec) bool { + if !isFallbackEnabled(logger, scaledObject, metricSpec) || !validateFallback(scaledObject) { return false } @@ -96,7 +97,7 @@ func validateFallback(scaledObject *kedav1alpha1.ScaledObject) bool { scaledObject.Spec.Fallback.Replicas >= 0 } -func doFallback(scaledObject *kedav1alpha1.ScaledObject, metricSpec v2.MetricSpec, metricName string, suppressedError error) []external_metrics.ExternalMetricValue { +func doFallback(logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject, metricSpec v2.MetricSpec, metricName string, suppressedError error) []external_metrics.ExternalMetricValue { replicas := int64(scaledObject.Spec.Fallback.Replicas) normalisationValue, _ := metricSpec.External.Target.AverageValue.AsInt64() metric := external_metrics.ExternalMetricValue{ @@ -110,17 +111,17 @@ func doFallback(scaledObject *kedav1alpha1.ScaledObject, metricSpec v2.MetricSpe return fallbackMetrics } -func (p *KedaProvider) 
updateStatus(ctx context.Context, scaledObject *kedav1alpha1.ScaledObject, status *kedav1alpha1.ScaledObjectStatus, metricSpec v2.MetricSpec) { +func updateStatus(ctx context.Context, client runtimeclient.Client, logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject, status *kedav1alpha1.ScaledObjectStatus, metricSpec v2.MetricSpec) { patch := runtimeclient.MergeFrom(scaledObject.DeepCopy()) - if fallbackExistsInScaledObject(scaledObject, metricSpec) { + if fallbackExistsInScaledObject(logger, scaledObject, metricSpec) { status.Conditions.SetFallbackCondition(metav1.ConditionTrue, "FallbackExists", "At least one trigger is falling back on this scaled object") } else { status.Conditions.SetFallbackCondition(metav1.ConditionFalse, "NoFallbackFound", "No fallbacks are active on this scaled object") } scaledObject.Status = *status - err := p.client.Status().Patch(ctx, scaledObject, patch) + err := client.Status().Patch(ctx, scaledObject, patch) if err != nil { logger.Error(err, "Failed to patch ScaledObjects Status") } diff --git a/pkg/provider/fallback_test.go b/pkg/fallback/fallback_test.go similarity index 85% rename from pkg/provider/fallback_test.go rename to pkg/fallback/fallback_test.go index 9f9aa262416..2103d441ead 100644 --- a/pkg/provider/fallback_test.go +++ b/pkg/fallback/fallback_test.go @@ -1,5 +1,5 @@ /* -Copyright 2021 The KEDA Authors +Copyright 2022 The KEDA Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package provider +package fallback import ( "context" @@ -36,7 +36,6 @@ import ( kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1" "github.com/kedacore/keda/v2/pkg/mock/mock_client" mock_scalers "github.com/kedacore/keda/v2/pkg/mock/mock_scaler" - "github.com/kedacore/keda/v2/pkg/mock/mock_scaling" ) const metricName = "some_metric_name" @@ -51,22 +50,15 @@ func TestFallback(t *testing.T) { var _ = Describe("fallback", func() { var ( - scaleHandler *mock_scaling.MockScaleHandler - client *mock_client.MockClient - providerUnderTest *KedaProvider - scaler *mock_scalers.MockScaler - ctrl *gomock.Controller + client *mock_client.MockClient + scaler *mock_scalers.MockScaler + ctrl *gomock.Controller + logger logr.Logger ) BeforeEach(func() { ctrl = gomock.NewController(GinkgoT()) - scaleHandler = mock_scaling.NewMockScaleHandler(ctrl) client = mock_client.NewMockClient(ctrl) - providerUnderTest = &KedaProvider{ - client: client, - scaleHandler: scaleHandler, - watchedNamespace: "", - } scaler = mock_scalers.NewMockScaler(ctrl) logger = logr.Discard() @@ -84,8 +76,8 @@ var _ = Describe("fallback", func() { metricSpec := createMetricSpec(3) expectStatusPatch(ctrl, client) - metrics, err := scaler.GetMetrics(context.Background(), metricName, nil) - metrics, err = providerUnderTest.getMetricsWithFallback(context.Background(), metrics, err, metricName, so, metricSpec) + metrics, err := scaler.GetMetrics(context.Background(), metricName) + metrics, err = GetMetricsWithFallback(context.Background(), client, logger, metrics, err, metricName, so, metricSpec) Expect(err).ToNot(HaveOccurred()) value, _ := metrics[0].Value.AsInt64() @@ -115,8 +107,8 @@ var _ = Describe("fallback", func() { metricSpec := createMetricSpec(3) expectStatusPatch(ctrl, client) - metrics, err := scaler.GetMetrics(context.Background(), metricName, nil) - metrics, err = providerUnderTest.getMetricsWithFallback(context.Background(), metrics, err, metricName, so, metricSpec) + metrics, err := 
scaler.GetMetrics(context.Background(), metricName) + metrics, err = GetMetricsWithFallback(context.Background(), client, logger, metrics, err, metricName, so, metricSpec) Expect(err).ToNot(HaveOccurred()) value, _ := metrics[0].Value.AsInt64() @@ -125,21 +117,21 @@ var _ = Describe("fallback", func() { }) It("should propagate the error when fallback is disabled", func() { - scaler.EXPECT().GetMetrics(gomock.Any(), gomock.Eq(metricName), gomock.Any()).Return(nil, errors.New("Some error")) + scaler.EXPECT().GetMetrics(gomock.Any(), gomock.Eq(metricName)).Return(nil, errors.New("Some error")) so := buildScaledObject(nil, nil) metricSpec := createMetricSpec(3) expectStatusPatch(ctrl, client) - metrics, err := scaler.GetMetrics(context.Background(), metricName, nil) - _, err = providerUnderTest.getMetricsWithFallback(context.Background(), metrics, err, metricName, so, metricSpec) + metrics, err := scaler.GetMetrics(context.Background(), metricName) + _, err = GetMetricsWithFallback(context.Background(), client, logger, metrics, err, metricName, so, metricSpec) Expect(err).ShouldNot(BeNil()) Expect(err.Error()).Should(Equal("Some error")) }) It("should bump the number of failures when metrics call fails", func() { - scaler.EXPECT().GetMetrics(gomock.Any(), gomock.Eq(metricName), gomock.Any()).Return(nil, errors.New("Some error")) + scaler.EXPECT().GetMetrics(gomock.Any(), gomock.Eq(metricName)).Return(nil, errors.New("Some error")) startingNumberOfFailures := int32(0) so := buildScaledObject( @@ -160,8 +152,8 @@ var _ = Describe("fallback", func() { metricSpec := createMetricSpec(10) expectStatusPatch(ctrl, client) - metrics, err := scaler.GetMetrics(context.Background(), metricName, nil) - _, err = providerUnderTest.getMetricsWithFallback(context.Background(), metrics, err, metricName, so, metricSpec) + metrics, err := scaler.GetMetrics(context.Background(), metricName) + _, err = GetMetricsWithFallback(context.Background(), client, logger, metrics, err, metricName, 
so, metricSpec) Expect(err).ShouldNot(BeNil()) Expect(err.Error()).Should(Equal("Some error")) @@ -169,7 +161,7 @@ var _ = Describe("fallback", func() { }) It("should return a normalised metric when number of failures are beyond threshold", func() { - scaler.EXPECT().GetMetrics(gomock.Any(), gomock.Eq(metricName), gomock.Any()).Return(nil, errors.New("Some error")) + scaler.EXPECT().GetMetrics(gomock.Any(), gomock.Eq(metricName)).Return(nil, errors.New("Some error")) startingNumberOfFailures := int32(3) expectedMetricValue := int64(100) @@ -190,8 +182,8 @@ var _ = Describe("fallback", func() { metricSpec := createMetricSpec(10) expectStatusPatch(ctrl, client) - metrics, err := scaler.GetMetrics(context.Background(), metricName, nil) - metrics, err = providerUnderTest.getMetricsWithFallback(context.Background(), metrics, err, metricName, so, metricSpec) + metrics, err := scaler.GetMetrics(context.Background(), metricName) + metrics, err = GetMetricsWithFallback(context.Background(), client, logger, metrics, err, metricName, so, metricSpec) Expect(err).ToNot(HaveOccurred()) value, _ := metrics[0].Value.AsInt64() @@ -217,12 +209,12 @@ var _ = Describe("fallback", func() { }, } - isEnabled := isFallbackEnabled(so, metricsSpec) + isEnabled := isFallbackEnabled(logger, so, metricsSpec) Expect(isEnabled).Should(BeFalse()) }) It("should ignore error if we fail to update kubernetes status", func() { - scaler.EXPECT().GetMetrics(gomock.Any(), gomock.Eq(metricName), gomock.Any()).Return(nil, errors.New("Some error")) + scaler.EXPECT().GetMetrics(gomock.Any(), gomock.Eq(metricName)).Return(nil, errors.New("Some error")) startingNumberOfFailures := int32(3) expectedMetricValue := int64(100) @@ -246,8 +238,8 @@ var _ = Describe("fallback", func() { statusWriter.EXPECT().Patch(gomock.Any(), gomock.Any(), gomock.Any()).Return(errors.New("Some error")) client.EXPECT().Status().Return(statusWriter) - metrics, err := scaler.GetMetrics(context.Background(), metricName, nil) - metrics, 
err = providerUnderTest.getMetricsWithFallback(context.Background(), metrics, err, metricName, so, metricSpec) + metrics, err := scaler.GetMetrics(context.Background(), metricName) + metrics, err = GetMetricsWithFallback(context.Background(), client, logger, metrics, err, metricName, so, metricSpec) Expect(err).ToNot(HaveOccurred()) value, _ := metrics[0].Value.AsInt64() @@ -256,7 +248,7 @@ var _ = Describe("fallback", func() { }) It("should return error when fallback is enabled but scaledobject has invalid parameter", func() { - scaler.EXPECT().GetMetrics(gomock.Any(), gomock.Eq(metricName), gomock.Any()).Return(nil, errors.New("Some error")) + scaler.EXPECT().GetMetrics(gomock.Any(), gomock.Eq(metricName)).Return(nil, errors.New("Some error")) startingNumberOfFailures := int32(3) so := buildScaledObject( @@ -276,15 +268,15 @@ var _ = Describe("fallback", func() { metricSpec := createMetricSpec(10) expectStatusPatch(ctrl, client) - metrics, err := scaler.GetMetrics(context.Background(), metricName, nil) - _, err = providerUnderTest.getMetricsWithFallback(context.Background(), metrics, err, metricName, so, metricSpec) + metrics, err := scaler.GetMetrics(context.Background(), metricName) + _, err = GetMetricsWithFallback(context.Background(), client, logger, metrics, err, metricName, so, metricSpec) Expect(err).ShouldNot(BeNil()) Expect(err.Error()).Should(Equal("Some error")) }) It("should set the fallback condition when a fallback exists in the scaled object", func() { - scaler.EXPECT().GetMetrics(gomock.Any(), gomock.Eq(metricName), gomock.Any()).Return(nil, errors.New("Some error")) + scaler.EXPECT().GetMetrics(gomock.Any(), gomock.Eq(metricName)).Return(nil, errors.New("Some error")) startingNumberOfFailures := int32(3) failingNumberOfFailures := int32(6) anotherMetricName := "another metric name" @@ -310,15 +302,15 @@ var _ = Describe("fallback", func() { metricSpec := createMetricSpec(10) expectStatusPatch(ctrl, client) - metrics, err := 
scaler.GetMetrics(context.Background(), metricName, nil) - _, err = providerUnderTest.getMetricsWithFallback(context.Background(), metrics, err, metricName, so, metricSpec) + metrics, err := scaler.GetMetrics(context.Background(), metricName) + _, err = GetMetricsWithFallback(context.Background(), client, logger, metrics, err, metricName, so, metricSpec) Expect(err).ToNot(HaveOccurred()) condition := so.Status.Conditions.GetFallbackCondition() Expect(condition.IsTrue()).Should(BeTrue()) }) It("should set the fallback condition to false if the config is invalid", func() { - scaler.EXPECT().GetMetrics(gomock.Any(), gomock.Eq(metricName), gomock.Any()).Return(nil, errors.New("Some error")) + scaler.EXPECT().GetMetrics(gomock.Any(), gomock.Eq(metricName)).Return(nil, errors.New("Some error")) startingNumberOfFailures := int32(3) failingNumberOfFailures := int32(6) anotherMetricName := "another metric name" @@ -344,8 +336,8 @@ var _ = Describe("fallback", func() { metricSpec := createMetricSpec(10) expectStatusPatch(ctrl, client) - metrics, err := scaler.GetMetrics(context.Background(), metricName, nil) - _, err = providerUnderTest.getMetricsWithFallback(context.Background(), metrics, err, metricName, so, metricSpec) + metrics, err := scaler.GetMetrics(context.Background(), metricName) + _, err = GetMetricsWithFallback(context.Background(), client, logger, metrics, err, metricName, so, metricSpec) Expect(err).ShouldNot(BeNil()) Expect(err.Error()).Should(Equal("Some error")) condition := so.Status.Conditions.GetFallbackCondition() @@ -433,7 +425,7 @@ func primeGetMetrics(scaler *mock_scalers.MockScaler, value int64) { Timestamp: metav1.Now(), } - scaler.EXPECT().GetMetrics(gomock.Any(), gomock.Eq(metricName), gomock.Any()).Return([]external_metrics.ExternalMetricValue{expectedMetric}, nil) + scaler.EXPECT().GetMetrics(gomock.Any(), gomock.Eq(metricName)).Return([]external_metrics.ExternalMetricValue{expectedMetric}, nil) } func createMetricSpec(averageValue int) 
v2.MetricSpec { diff --git a/pkg/k8s/scaleclient.go b/pkg/k8s/scaleclient.go new file mode 100644 index 00000000000..8564ff4255f --- /dev/null +++ b/pkg/k8s/scaleclient.go @@ -0,0 +1,56 @@ +/* +Copyright 2022 The KEDA Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package k8s + +import ( + "k8s.io/client-go/discovery" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/scale" + ctrl "sigs.k8s.io/controller-runtime" + + kedautil "github.com/kedacore/keda/v2/pkg/util" +) + +var log = ctrl.Log.WithName("scaleclient") + +// InitScaleClient initializes scale client and returns k8s version +func InitScaleClient(mgr ctrl.Manager) (scale.ScalesGetter, kedautil.K8sVersion, error) { + kubeVersion := kedautil.K8sVersion{} + + // create Discovery clientset + // TODO If we need to increase the QPS of scaling API calls, copy and tweak this RESTConfig. 
+ clientset, err := discovery.NewDiscoveryClientForConfig(mgr.GetConfig()) + if err != nil { + log.Error(err, "not able to create Discovery clientset") + return nil, kubeVersion, err + } + + // Find out Kubernetes version + version, err := clientset.ServerVersion() + if err == nil { + kubeVersion = kedautil.NewK8sVersion(version) + } else { + log.Error(err, "not able to get Kubernetes version") + return nil, kubeVersion, err + } + + return scale.New( + clientset.RESTClient(), mgr.GetRESTMapper(), + dynamic.LegacyAPIPathResolverFunc, + scale.NewDiscoveryScaleKindResolver(clientset), + ), kubeVersion, nil +} diff --git a/pkg/metricsservice/api/metrics.pb.go b/pkg/metricsservice/api/metrics.pb.go new file mode 100644 index 00000000000..2ec2d937352 --- /dev/null +++ b/pkg/metricsservice/api/metrics.pb.go @@ -0,0 +1,545 @@ +// +//Copyright 2022 The KEDA Authors +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.9 +// source: metrics.proto + +package api + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + v1beta1 "k8s.io/metrics/pkg/apis/external_metrics/v1beta1" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ScaledObjectRef struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` + MetricName string `protobuf:"bytes,3,opt,name=metricName,proto3" json:"metricName,omitempty"` +} + +func (x *ScaledObjectRef) Reset() { + *x = ScaledObjectRef{} + if protoimpl.UnsafeEnabled { + mi := &file_metrics_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ScaledObjectRef) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ScaledObjectRef) ProtoMessage() {} + +func (x *ScaledObjectRef) ProtoReflect() protoreflect.Message { + mi := &file_metrics_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ScaledObjectRef.ProtoReflect.Descriptor instead. 
+func (*ScaledObjectRef) Descriptor() ([]byte, []int) { + return file_metrics_proto_rawDescGZIP(), []int{0} +} + +func (x *ScaledObjectRef) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ScaledObjectRef) GetNamespace() string { + if x != nil { + return x.Namespace + } + return "" +} + +func (x *ScaledObjectRef) GetMetricName() string { + if x != nil { + return x.MetricName + } + return "" +} + +type Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Metrics *v1beta1.ExternalMetricValueList `protobuf:"bytes,1,opt,name=metrics,proto3" json:"metrics,omitempty"` + PromMetrics *PromMetricsMsg `protobuf:"bytes,2,opt,name=promMetrics,proto3" json:"promMetrics,omitempty"` +} + +func (x *Response) Reset() { + *x = Response{} + if protoimpl.UnsafeEnabled { + mi := &file_metrics_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Response) ProtoMessage() {} + +func (x *Response) ProtoReflect() protoreflect.Message { + mi := &file_metrics_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Response.ProtoReflect.Descriptor instead. 
+func (*Response) Descriptor() ([]byte, []int) { + return file_metrics_proto_rawDescGZIP(), []int{1} +} + +func (x *Response) GetMetrics() *v1beta1.ExternalMetricValueList { + if x != nil { + return x.Metrics + } + return nil +} + +func (x *Response) GetPromMetrics() *PromMetricsMsg { + if x != nil { + return x.PromMetrics + } + return nil +} + +// [DEPRECATED] PromMetricsMsg provides metrics for deprecated Prometheus Metrics in Metrics Server +type PromMetricsMsg struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ScaledObjectErr bool `protobuf:"varint,1,opt,name=scaledObjectErr,proto3" json:"scaledObjectErr,omitempty"` + ScalerMetric []*ScalerMetricMsg `protobuf:"bytes,2,rep,name=scalerMetric,proto3" json:"scalerMetric,omitempty"` + ScalerError []*ScalerErrorMsg `protobuf:"bytes,3,rep,name=scalerError,proto3" json:"scalerError,omitempty"` +} + +func (x *PromMetricsMsg) Reset() { + *x = PromMetricsMsg{} + if protoimpl.UnsafeEnabled { + mi := &file_metrics_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PromMetricsMsg) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PromMetricsMsg) ProtoMessage() {} + +func (x *PromMetricsMsg) ProtoReflect() protoreflect.Message { + mi := &file_metrics_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PromMetricsMsg.ProtoReflect.Descriptor instead. 
+func (*PromMetricsMsg) Descriptor() ([]byte, []int) { + return file_metrics_proto_rawDescGZIP(), []int{2} +} + +func (x *PromMetricsMsg) GetScaledObjectErr() bool { + if x != nil { + return x.ScaledObjectErr + } + return false +} + +func (x *PromMetricsMsg) GetScalerMetric() []*ScalerMetricMsg { + if x != nil { + return x.ScalerMetric + } + return nil +} + +func (x *PromMetricsMsg) GetScalerError() []*ScalerErrorMsg { + if x != nil { + return x.ScalerError + } + return nil +} + +type ScalerMetricMsg struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ScalerName string `protobuf:"bytes,1,opt,name=scalerName,proto3" json:"scalerName,omitempty"` + ScalerIndex int32 `protobuf:"varint,2,opt,name=scalerIndex,proto3" json:"scalerIndex,omitempty"` + MetricName string `protobuf:"bytes,3,opt,name=metricName,proto3" json:"metricName,omitempty"` + MetricValue float32 `protobuf:"fixed32,4,opt,name=metricValue,proto3" json:"metricValue,omitempty"` +} + +func (x *ScalerMetricMsg) Reset() { + *x = ScalerMetricMsg{} + if protoimpl.UnsafeEnabled { + mi := &file_metrics_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ScalerMetricMsg) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ScalerMetricMsg) ProtoMessage() {} + +func (x *ScalerMetricMsg) ProtoReflect() protoreflect.Message { + mi := &file_metrics_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ScalerMetricMsg.ProtoReflect.Descriptor instead. 
+func (*ScalerMetricMsg) Descriptor() ([]byte, []int) { + return file_metrics_proto_rawDescGZIP(), []int{3} +} + +func (x *ScalerMetricMsg) GetScalerName() string { + if x != nil { + return x.ScalerName + } + return "" +} + +func (x *ScalerMetricMsg) GetScalerIndex() int32 { + if x != nil { + return x.ScalerIndex + } + return 0 +} + +func (x *ScalerMetricMsg) GetMetricName() string { + if x != nil { + return x.MetricName + } + return "" +} + +func (x *ScalerMetricMsg) GetMetricValue() float32 { + if x != nil { + return x.MetricValue + } + return 0 +} + +type ScalerErrorMsg struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ScalerName string `protobuf:"bytes,1,opt,name=scalerName,proto3" json:"scalerName,omitempty"` + ScalerIndex int32 `protobuf:"varint,2,opt,name=scalerIndex,proto3" json:"scalerIndex,omitempty"` + MetricName string `protobuf:"bytes,3,opt,name=metricName,proto3" json:"metricName,omitempty"` + Error bool `protobuf:"varint,4,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *ScalerErrorMsg) Reset() { + *x = ScalerErrorMsg{} + if protoimpl.UnsafeEnabled { + mi := &file_metrics_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ScalerErrorMsg) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ScalerErrorMsg) ProtoMessage() {} + +func (x *ScalerErrorMsg) ProtoReflect() protoreflect.Message { + mi := &file_metrics_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ScalerErrorMsg.ProtoReflect.Descriptor instead. 
+func (*ScalerErrorMsg) Descriptor() ([]byte, []int) { + return file_metrics_proto_rawDescGZIP(), []int{4} +} + +func (x *ScalerErrorMsg) GetScalerName() string { + if x != nil { + return x.ScalerName + } + return "" +} + +func (x *ScalerErrorMsg) GetScalerIndex() int32 { + if x != nil { + return x.ScalerIndex + } + return 0 +} + +func (x *ScalerErrorMsg) GetMetricName() string { + if x != nil { + return x.MetricName + } + return "" +} + +func (x *ScalerErrorMsg) GetError() bool { + if x != nil { + return x.Error + } + return false +} + +var File_metrics_proto protoreflect.FileDescriptor + +var file_metrics_proto_rawDesc = []byte{ + 0x0a, 0x0d, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x03, 0x61, 0x70, 0x69, 0x1a, 0x40, 0x6b, 0x38, 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x6d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x73, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x65, 0x78, + 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2f, 0x76, + 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x63, 0x0a, 0x0f, 0x53, 0x63, 0x61, 0x6c, 0x65, 0x64, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x66, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, + 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x6d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0a, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0xa6, 0x01, 0x0a, 0x08, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x63, 0x0a, 0x07, 0x6d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x49, 0x2e, 0x6b, 0x38, 
0x73, 0x2e, + 0x69, 0x6f, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x61, + 0x70, 0x69, 0x73, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x6d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x74, + 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x4c, 0x69, 0x73, 0x74, 0x52, 0x07, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x35, 0x0a, + 0x0b, 0x70, 0x72, 0x6f, 0x6d, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x72, 0x6f, 0x6d, 0x4d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x73, 0x4d, 0x73, 0x67, 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x6d, 0x4d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x73, 0x22, 0xab, 0x01, 0x0a, 0x0e, 0x50, 0x72, 0x6f, 0x6d, 0x4d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x73, 0x4d, 0x73, 0x67, 0x12, 0x28, 0x0a, 0x0f, 0x73, 0x63, 0x61, 0x6c, 0x65, + 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x45, 0x72, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0f, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x45, 0x72, + 0x72, 0x12, 0x38, 0x0a, 0x0c, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53, 0x63, + 0x61, 0x6c, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x4d, 0x73, 0x67, 0x52, 0x0c, 0x73, + 0x63, 0x61, 0x6c, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x35, 0x0a, 0x0b, 0x73, + 0x63, 0x61, 0x6c, 0x65, 0x72, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x13, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53, 0x63, 0x61, 0x6c, 0x65, 0x72, 0x45, 0x72, 0x72, + 0x6f, 0x72, 0x4d, 0x73, 0x67, 0x52, 0x0b, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x72, 0x45, 0x72, 0x72, + 0x6f, 0x72, 0x22, 0x95, 0x01, 0x0a, 0x0f, 0x53, 0x63, 0x61, 0x6c, 0x65, 0x72, 0x4d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x4d, 
0x73, 0x67, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x72, + 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x63, 0x61, 0x6c, + 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x72, + 0x49, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0b, 0x73, 0x63, 0x61, + 0x6c, 0x65, 0x72, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1e, 0x0a, 0x0a, 0x6d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6d, 0x65, + 0x74, 0x72, 0x69, 0x63, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x6d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0b, 0x6d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x88, 0x01, 0x0a, 0x0e, 0x53, + 0x63, 0x61, 0x6c, 0x65, 0x72, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x73, 0x67, 0x12, 0x1e, 0x0a, + 0x0a, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0a, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, + 0x0b, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x72, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x0b, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x72, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, + 0x1e, 0x0a, 0x0a, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x4e, 0x61, 0x6d, 0x65, 0x12, + 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x32, 0x45, 0x0a, 0x0e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x33, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x4d, 0x65, + 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x14, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53, 0x63, 0x61, 0x6c, + 0x65, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 
0x66, 0x1a, 0x0d, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x07, 0x5a, 0x05, + 0x2e, 0x3b, 0x61, 0x70, 0x69, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_metrics_proto_rawDescOnce sync.Once + file_metrics_proto_rawDescData = file_metrics_proto_rawDesc +) + +func file_metrics_proto_rawDescGZIP() []byte { + file_metrics_proto_rawDescOnce.Do(func() { + file_metrics_proto_rawDescData = protoimpl.X.CompressGZIP(file_metrics_proto_rawDescData) + }) + return file_metrics_proto_rawDescData +} + +var file_metrics_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_metrics_proto_goTypes = []interface{}{ + (*ScaledObjectRef)(nil), // 0: api.ScaledObjectRef + (*Response)(nil), // 1: api.Response + (*PromMetricsMsg)(nil), // 2: api.PromMetricsMsg + (*ScalerMetricMsg)(nil), // 3: api.ScalerMetricMsg + (*ScalerErrorMsg)(nil), // 4: api.ScalerErrorMsg + (*v1beta1.ExternalMetricValueList)(nil), // 5: k8s.io.metrics.pkg.apis.external_metrics.v1beta1.ExternalMetricValueList +} +var file_metrics_proto_depIdxs = []int32{ + 5, // 0: api.Response.metrics:type_name -> k8s.io.metrics.pkg.apis.external_metrics.v1beta1.ExternalMetricValueList + 2, // 1: api.Response.promMetrics:type_name -> api.PromMetricsMsg + 3, // 2: api.PromMetricsMsg.scalerMetric:type_name -> api.ScalerMetricMsg + 4, // 3: api.PromMetricsMsg.scalerError:type_name -> api.ScalerErrorMsg + 0, // 4: api.MetricsService.GetMetrics:input_type -> api.ScaledObjectRef + 1, // 5: api.MetricsService.GetMetrics:output_type -> api.Response + 5, // [5:6] is the sub-list for method output_type + 4, // [4:5] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name +} + +func init() { file_metrics_proto_init() } +func file_metrics_proto_init() { + if File_metrics_proto != nil { + return + } + if 
!protoimpl.UnsafeEnabled { + file_metrics_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ScaledObjectRef); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_metrics_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_metrics_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PromMetricsMsg); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_metrics_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ScalerMetricMsg); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_metrics_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ScalerErrorMsg); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_metrics_proto_rawDesc, + NumEnums: 0, + NumMessages: 5, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_metrics_proto_goTypes, + DependencyIndexes: file_metrics_proto_depIdxs, + MessageInfos: file_metrics_proto_msgTypes, + }.Build() + File_metrics_proto = out.File + file_metrics_proto_rawDesc = nil + file_metrics_proto_goTypes = nil + file_metrics_proto_depIdxs = nil +} diff --git a/pkg/metricsservice/api/metrics.proto b/pkg/metricsservice/api/metrics.proto new file mode 100644 index 00000000000..59226eeeed1 --- /dev/null +++ 
b/pkg/metricsservice/api/metrics.proto @@ -0,0 +1,59 @@ +/* +Copyright 2022 The KEDA Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +syntax = "proto3"; + +package api; +option go_package = ".;api"; + +import "k8s.io/metrics/pkg/apis/external_metrics/v1beta1/generated.proto"; + +service MetricsService { + rpc GetMetrics (ScaledObjectRef) returns (Response) {}; +} + +message ScaledObjectRef { + string name = 1; + string namespace = 2; + string metricName = 3; +} + + +message Response { + k8s.io.metrics.pkg.apis.external_metrics.v1beta1.ExternalMetricValueList metrics = 1; + PromMetricsMsg promMetrics = 2; +} + +// [DEPRECATED] PromMetricsMsg provides metrics for deprecated Prometheus Metrics in Metrics Server +message PromMetricsMsg { + bool scaledObjectErr = 1; + repeated ScalerMetricMsg scalerMetric = 2; + repeated ScalerErrorMsg scalerError = 3; +} + +message ScalerMetricMsg { + string scalerName = 1; + int32 scalerIndex = 2; + string metricName = 3; + float metricValue = 4; +} + +message ScalerErrorMsg { + string scalerName = 1; + int32 scalerIndex = 2; + string metricName = 3; + bool error = 4; +} diff --git a/pkg/metricsservice/api/metrics_grpc.pb.go b/pkg/metricsservice/api/metrics_grpc.pb.go new file mode 100644 index 00000000000..01c0490e7fa --- /dev/null +++ b/pkg/metricsservice/api/metrics_grpc.pb.go @@ -0,0 +1,105 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
+// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc v3.21.9 +// source: metrics.proto + +package api + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// MetricsServiceClient is the client API for MetricsService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type MetricsServiceClient interface { + GetMetrics(ctx context.Context, in *ScaledObjectRef, opts ...grpc.CallOption) (*Response, error) +} + +type metricsServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewMetricsServiceClient(cc grpc.ClientConnInterface) MetricsServiceClient { + return &metricsServiceClient{cc} +} + +func (c *metricsServiceClient) GetMetrics(ctx context.Context, in *ScaledObjectRef, opts ...grpc.CallOption) (*Response, error) { + out := new(Response) + err := c.cc.Invoke(ctx, "/api.MetricsService/GetMetrics", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// MetricsServiceServer is the server API for MetricsService service. +// All implementations must embed UnimplementedMetricsServiceServer +// for forward compatibility +type MetricsServiceServer interface { + GetMetrics(context.Context, *ScaledObjectRef) (*Response, error) + mustEmbedUnimplementedMetricsServiceServer() +} + +// UnimplementedMetricsServiceServer must be embedded to have forward compatible implementations. 
+type UnimplementedMetricsServiceServer struct { +} + +func (UnimplementedMetricsServiceServer) GetMetrics(context.Context, *ScaledObjectRef) (*Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetMetrics not implemented") +} +func (UnimplementedMetricsServiceServer) mustEmbedUnimplementedMetricsServiceServer() {} + +// UnsafeMetricsServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to MetricsServiceServer will +// result in compilation errors. +type UnsafeMetricsServiceServer interface { + mustEmbedUnimplementedMetricsServiceServer() +} + +func RegisterMetricsServiceServer(s grpc.ServiceRegistrar, srv MetricsServiceServer) { + s.RegisterService(&MetricsService_ServiceDesc, srv) +} + +func _MetricsService_GetMetrics_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ScaledObjectRef) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetricsServiceServer).GetMetrics(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/api.MetricsService/GetMetrics", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricsServiceServer).GetMetrics(ctx, req.(*ScaledObjectRef)) + } + return interceptor(ctx, in, info, handler) +} + +// MetricsService_ServiceDesc is the grpc.ServiceDesc for MetricsService service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var MetricsService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "api.MetricsService", + HandlerType: (*MetricsServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetMetrics", + Handler: _MetricsService_GetMetrics_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "metrics.proto", +} diff --git a/pkg/metricsservice/client.go b/pkg/metricsservice/client.go new file mode 100644 index 00000000000..61d75bc538e --- /dev/null +++ b/pkg/metricsservice/client.go @@ -0,0 +1,70 @@ +/* +Copyright 2022 The KEDA Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package metricsservice + +import ( + "context" + "fmt" + + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + "k8s.io/metrics/pkg/apis/external_metrics" + "k8s.io/metrics/pkg/apis/external_metrics/v1beta1" + + "github.com/kedacore/keda/v2/pkg/metricsservice/api" +) + +type GrpcClient struct { + client api.MetricsServiceClient +} + +func NewGrpcClient(url string) (*GrpcClient, error) { + retryPolicy := `{ + "methodConfig": [{ + "timeout": "3s", + "waitForReady": true, + "retryPolicy": { + "InitialBackoff": ".25s", + "MaxBackoff": "2.0s", + "BackoffMultiplier": 2, + "RetryableStatusCodes": [ "UNAVAILABLE" ] + } + }]}` + + // TODO fix Transport layer - use TLS + conn, err := grpc.Dial(url, grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithDefaultServiceConfig(retryPolicy)) + if err != nil { + return nil, err + } + + return &GrpcClient{client: api.NewMetricsServiceClient(conn)}, nil +} + +func (c *GrpcClient) GetMetrics(ctx context.Context, scaledObjectName, scaledObjectNamespace, metricName string) (*external_metrics.ExternalMetricValueList, *api.PromMetricsMsg, error) { + response, err := c.client.GetMetrics(ctx, &api.ScaledObjectRef{Name: scaledObjectName, Namespace: scaledObjectNamespace, MetricName: metricName}) + if err != nil { + return nil, response.GetPromMetrics(), err + } + + extMetrics := &external_metrics.ExternalMetricValueList{} + err = v1beta1.Convert_v1beta1_ExternalMetricValueList_To_external_metrics_ExternalMetricValueList(response.GetMetrics(), extMetrics, nil) + if err != nil { + return nil, response.GetPromMetrics(), fmt.Errorf("error when converting metric values %s", err) + } + + return extMetrics, response.GetPromMetrics(), nil +} diff --git a/pkg/metricsservice/server.go b/pkg/metricsservice/server.go new file mode 100644 index 00000000000..2d6f6325ee0 --- /dev/null +++ b/pkg/metricsservice/server.go @@ -0,0 +1,112 @@ +/* +Copyright 2022 The KEDA Authors + +Licensed under the Apache License, 
Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metricsservice + +import ( + "context" + "fmt" + "net" + + "google.golang.org/grpc" + "k8s.io/metrics/pkg/apis/external_metrics/v1beta1" + ctrl "sigs.k8s.io/controller-runtime" + + "github.com/kedacore/keda/v2/pkg/metricsservice/api" + "github.com/kedacore/keda/v2/pkg/scaling" +) + +var log = ctrl.Log.WithName("grpc_server") + +type GrpcServer struct { + server *grpc.Server + address string + scalerHandler *scaling.ScaleHandler + api.UnimplementedMetricsServiceServer +} + +// GetMetrics returns metrics values in form of ExternalMetricValueList for specified ScaledObject reference +func (s *GrpcServer) GetMetrics(ctx context.Context, in *api.ScaledObjectRef) (*api.Response, error) { + v1beta1ExtMetrics := &v1beta1.ExternalMetricValueList{} + extMetrics, exportedMetrics, err := (*s.scalerHandler).GetExternalMetrics(ctx, in.Name, in.Namespace, in.MetricName) + if err != nil { + return nil, fmt.Errorf("error when getting metric values %s", err) + } + + err = v1beta1.Convert_external_metrics_ExternalMetricValueList_To_v1beta1_ExternalMetricValueList(extMetrics, v1beta1ExtMetrics, nil) + if err != nil { + return nil, fmt.Errorf("error when converting metric values %s", err) + } + + log.V(1).WithValues("scaledObjectName", in.Name, "scaledObjectNamespace", in.Namespace, "metrics", v1beta1ExtMetrics).Info("Providing metrics") + + return &api.Response{Metrics: v1beta1ExtMetrics, PromMetrics: exportedMetrics}, nil +} + +// NewGrpcServer creates a new instance of GrpcServer 
+func NewGrpcServer(scaleHandler *scaling.ScaleHandler, address string) GrpcServer { + gsrv := grpc.NewServer() + srv := GrpcServer{ + server: gsrv, + address: address, + scalerHandler: scaleHandler, + } + + api.RegisterMetricsServiceServer(gsrv, &srv) + return srv +} + +func (s *GrpcServer) startServer() error { + lis, err := net.Listen("tcp", s.address) + if err != nil { + return fmt.Errorf("failed to listen: %v", err) + } + + if err := s.server.Serve(lis); err != nil { + return fmt.Errorf("failed to serve: %v", err) + } + + return nil +} + +// Start starts a new gRPC Metrics Service, this implements Runnable interface +// of controller-runtime Manager, so we can use mgr.Add() to start this component. +func (s *GrpcServer) Start(ctx context.Context) error { + errChan := make(chan error) + + go func() { + log.Info("Starting Metrics Service gRPC Server", "address", s.address) + if err := s.startServer(); err != nil { + err := fmt.Errorf("unable to start Metrics Service gRPC server on address %s, error: %w", s.address, err) + log.Error(err, "error starting Metrics Service gRPC server") + errChan <- err + } + }() + + select { + case err := <-errChan: + return err + case <-ctx.Done(): + return nil + } +} + +// NeedLeaderElection is needed to implement LeaderElectionRunnable interface +// of controller-runtime. This ensures that the component is started/stopped +// when this particular instance is selected/deselected as a leader. 
+func (s *GrpcServer) NeedLeaderElection() bool { + return true +} diff --git a/pkg/mock/mock_scaler/mock_scaler.go b/pkg/mock/mock_scaler/mock_scaler.go index 7a0a4993584..7615887ae30 100644 --- a/pkg/mock/mock_scaler/mock_scaler.go +++ b/pkg/mock/mock_scaler/mock_scaler.go @@ -10,7 +10,6 @@ import ( gomock "github.com/golang/mock/gomock" v2 "k8s.io/api/autoscaling/v2" - labels "k8s.io/apimachinery/pkg/labels" external_metrics "k8s.io/metrics/pkg/apis/external_metrics" ) @@ -66,18 +65,18 @@ func (mr *MockScalerMockRecorder) GetMetricSpecForScaling(ctx interface{}) *gomo } // GetMetrics mocks base method. -func (m *MockScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) { +func (m *MockScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetMetrics", ctx, metricName, metricSelector) + ret := m.ctrl.Call(m, "GetMetrics", ctx, metricName) ret0, _ := ret[0].([]external_metrics.ExternalMetricValue) ret1, _ := ret[1].(error) return ret0, ret1 } // GetMetrics indicates an expected call of GetMetrics. -func (mr *MockScalerMockRecorder) GetMetrics(ctx, metricName, metricSelector interface{}) *gomock.Call { +func (mr *MockScalerMockRecorder) GetMetrics(ctx, metricName interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMetrics", reflect.TypeOf((*MockScaler)(nil).GetMetrics), ctx, metricName, metricSelector) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMetrics", reflect.TypeOf((*MockScaler)(nil).GetMetrics), ctx, metricName) } // IsActive mocks base method. @@ -147,18 +146,18 @@ func (mr *MockPushScalerMockRecorder) GetMetricSpecForScaling(ctx interface{}) * } // GetMetrics mocks base method. 
-func (m *MockPushScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) { +func (m *MockPushScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetMetrics", ctx, metricName, metricSelector) + ret := m.ctrl.Call(m, "GetMetrics", ctx, metricName) ret0, _ := ret[0].([]external_metrics.ExternalMetricValue) ret1, _ := ret[1].(error) return ret0, ret1 } // GetMetrics indicates an expected call of GetMetrics. -func (mr *MockPushScalerMockRecorder) GetMetrics(ctx, metricName, metricSelector interface{}) *gomock.Call { +func (mr *MockPushScalerMockRecorder) GetMetrics(ctx, metricName interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMetrics", reflect.TypeOf((*MockPushScaler)(nil).GetMetrics), ctx, metricName, metricSelector) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMetrics", reflect.TypeOf((*MockPushScaler)(nil).GetMetrics), ctx, metricName) } // IsActive mocks base method. diff --git a/pkg/mock/mock_scaling/mock_interface.go b/pkg/mock/mock_scaling/mock_interface.go index 3b46f343d62..8cc1a70b93b 100644 --- a/pkg/mock/mock_scaling/mock_interface.go +++ b/pkg/mock/mock_scaling/mock_interface.go @@ -9,7 +9,9 @@ import ( reflect "reflect" gomock "github.com/golang/mock/gomock" + api "github.com/kedacore/keda/v2/pkg/metricsservice/api" cache "github.com/kedacore/keda/v2/pkg/scaling/cache" + external_metrics "k8s.io/metrics/pkg/apis/external_metrics" ) // MockScaleHandler is a mock of ScaleHandler interface. @@ -63,6 +65,22 @@ func (mr *MockScaleHandlerMockRecorder) DeleteScalableObject(ctx, scalableObject return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteScalableObject", reflect.TypeOf((*MockScaleHandler)(nil).DeleteScalableObject), ctx, scalableObject) } +// GetExternalMetrics mocks base method. 
+func (m *MockScaleHandler) GetExternalMetrics(ctx context.Context, scaledObjectName, scaledObjectNamespace, metricName string) (*external_metrics.ExternalMetricValueList, *api.PromMetricsMsg, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetExternalMetrics", ctx, scaledObjectName, scaledObjectNamespace, metricName) + ret0, _ := ret[0].(*external_metrics.ExternalMetricValueList) + ret1, _ := ret[1].(*api.PromMetricsMsg) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// GetExternalMetrics indicates an expected call of GetExternalMetrics. +func (mr *MockScaleHandlerMockRecorder) GetExternalMetrics(ctx, scaledObjectName, scaledObjectNamespace, metricName interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetExternalMetrics", reflect.TypeOf((*MockScaleHandler)(nil).GetExternalMetrics), ctx, scaledObjectName, scaledObjectNamespace, metricName) +} + // GetScalersCache mocks base method. func (m *MockScaleHandler) GetScalersCache(ctx context.Context, scalableObject interface{}) (*cache.ScalersCache, error) { m.ctrl.T.Helper() diff --git a/pkg/prommetrics/adapter_prommetrics.go b/pkg/prommetrics/adapter/adapter_prommetrics.go similarity index 94% rename from pkg/prommetrics/adapter_prommetrics.go rename to pkg/prommetrics/adapter/adapter_prommetrics.go index 5015cb612ab..fddcc7066e5 100644 --- a/pkg/prommetrics/adapter_prommetrics.go +++ b/pkg/prommetrics/adapter/adapter_prommetrics.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package prommetrics +package adapter import ( "log" @@ -108,8 +108,7 @@ func (metricsServer PrometheusMetricServer) RecordHPAScalerMetric(namespace stri func (metricsServer PrometheusMetricServer) RecordHPAScalerError(namespace string, scaledObject string, scaler string, scalerIndex int, metric string, err error) { if err != nil { scalerErrors.With(getLabels(namespace, scaledObject, scaler, scalerIndex, metric)).Inc() - // scaledObjectErrors.With(prometheus.Labels{"namespace": namespace, "scaledObject": scaledObject}).Inc() - metricsServer.RecordScalerObjectError(namespace, scaledObject, err) + metricsServer.RecordScaledObjectError(namespace, scaledObject, err) scalerErrorsTotal.With(prometheus.Labels{}).Inc() return } @@ -121,7 +120,7 @@ func (metricsServer PrometheusMetricServer) RecordHPAScalerError(namespace strin } // RecordScalerObjectError counts the number of errors with the scaled object -func (metricsServer PrometheusMetricServer) RecordScalerObjectError(namespace string, scaledObject string, err error) { +func (metricsServer PrometheusMetricServer) RecordScaledObjectError(namespace string, scaledObject string, err error) { labels := prometheus.Labels{"namespace": namespace, "scaledObject": scaledObject} if err != nil { scaledObjectErrors.With(labels).Inc() diff --git a/pkg/prommetrics/operator_prommetrics.go b/pkg/prommetrics/operator_prommetrics.go deleted file mode 100644 index 7e03e3adfbe..00000000000 --- a/pkg/prommetrics/operator_prommetrics.go +++ /dev/null @@ -1,82 +0,0 @@ -/* -Copyright 2022 The KEDA Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package prommetrics - -import ( - "github.com/prometheus/client_golang/prometheus" - "sigs.k8s.io/controller-runtime/pkg/metrics" -) - -const ( - ClusterTriggerAuthenticationResource = "cluster_trigger_authentication" - TriggerAuthenticationResource = "trigger_authentication" - ScaledObjectResource = "scaled_object" - ScaledJobResource = "scaled_job" -) - -var ( - triggerTotalsGaugeVec = prometheus.NewGaugeVec( - prometheus.GaugeOpts{ - Namespace: "keda_operator", - Subsystem: "trigger", - Name: "totals", - }, - []string{"type"}, - ) - - crdTotalsGaugeVec = prometheus.NewGaugeVec( - prometheus.GaugeOpts{ - Namespace: "keda_operator", - Subsystem: "resource", - Name: "totals", - }, - []string{"type", "namespace"}, - ) -) - -func init() { - metrics.Registry.MustRegister(triggerTotalsGaugeVec) - metrics.Registry.MustRegister(crdTotalsGaugeVec) -} - -func IncrementTriggerTotal(triggerType string) { - if triggerType != "" { - triggerTotalsGaugeVec.WithLabelValues(triggerType).Inc() - } -} - -func DecrementTriggerTotal(triggerType string) { - if triggerType != "" { - triggerTotalsGaugeVec.WithLabelValues(triggerType).Dec() - } -} - -func IncrementCRDTotal(crdType, namespace string) { - if namespace == "" { - namespace = "default" - } - - crdTotalsGaugeVec.WithLabelValues(crdType, namespace).Inc() -} - -func DecrementCRDTotal(crdType, namespace string) { - if namespace == "" { - namespace = "default" - } - - crdTotalsGaugeVec.WithLabelValues(crdType, namespace).Dec() -} diff --git a/pkg/prommetrics/prommetrics.go b/pkg/prommetrics/prommetrics.go index 8b86fd24017..925f7728173 100644 --- a/pkg/prommetrics/prommetrics.go +++ b/pkg/prommetrics/prommetrics.go @@ -1,5 +1,5 @@ /* -Copyright 2021 The KEDA Authors +Copyright 2022 The KEDA Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -16,10 +16,156 @@ limitations under the License. package prommetrics -// Server an HTTP serving instance to track metrics -type Server interface { - NewServer(address string, pattern string) - RecordScalerError(namespace string, scaledObject string, scaler string, scalerIndex int, metric string, err error) - RecordScalerMetric(namespace string, scaledObject string, scaler string, scalerIndex int, metric string, value int64) - RecordScalerObjectError(namespace string, scaledObject string, err error) +import ( + "strconv" + + "github.com/prometheus/client_golang/prometheus" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/metrics" +) + +var log = ctrl.Log.WithName("prometheus_server") + +const ( + ClusterTriggerAuthenticationResource = "cluster_trigger_authentication" + TriggerAuthenticationResource = "trigger_authentication" + ScaledObjectResource = "scaled_object" + ScaledJobResource = "scaled_job" + + DefaultPromMetricsNamespace = "keda" +) + +var ( + metricLabels = []string{"namespace", "metric", "scaledObject", "scaler", "scalerIndex"} + scalerErrorsTotal = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: DefaultPromMetricsNamespace, + Subsystem: "scaler", + Name: "errors_total", + Help: "Total number of errors for all scalers", + }, + []string{}, + ) + scalerMetricsValue = prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: DefaultPromMetricsNamespace, + Subsystem: "scaler", + Name: "metrics_value", + Help: "Metric Value used for HPA", + }, + metricLabels, + ) + scalerErrors = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: DefaultPromMetricsNamespace, + Subsystem: "scaler", + Name: "errors", + Help: "Number of scaler errors", + }, + metricLabels, + ) + scaledObjectErrors = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: DefaultPromMetricsNamespace, + Subsystem: "scaled", + Name: "errors", + Help: "Number of scaled object errors", + }, + []string{"namespace", 
"scaledObject"}, + ) + + triggerTotalsGaugeVec = prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: DefaultPromMetricsNamespace, + Subsystem: "trigger", + Name: "totals", + }, + []string{"type"}, + ) + + crdTotalsGaugeVec = prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: DefaultPromMetricsNamespace, + Subsystem: "resource", + Name: "totals", + }, + []string{"type", "namespace"}, + ) +) + +func init() { + metrics.Registry.MustRegister(scalerErrorsTotal) + metrics.Registry.MustRegister(scalerMetricsValue) + metrics.Registry.MustRegister(scalerErrors) + metrics.Registry.MustRegister(scaledObjectErrors) + + metrics.Registry.MustRegister(triggerTotalsGaugeVec) + metrics.Registry.MustRegister(crdTotalsGaugeVec) +} + +// RecordScalerMetric create a measurement of the external metric used by the HPA +func RecordScalerMetric(namespace string, scaledObject string, scaler string, scalerIndex int, metric string, value float64) { + scalerMetricsValue.With(getLabels(namespace, scaledObject, scaler, scalerIndex, metric)).Set(value) +} + +// RecordScalerError counts the number of errors occurred in trying get an external metric used by the HPA +func RecordScalerError(namespace string, scaledObject string, scaler string, scalerIndex int, metric string, err error) { + if err != nil { + scalerErrors.With(getLabels(namespace, scaledObject, scaler, scalerIndex, metric)).Inc() + RecordScaledObjectError(namespace, scaledObject, err) + scalerErrorsTotal.With(prometheus.Labels{}).Inc() + return + } + // initialize metric with 0 if not already set + _, errscaler := scalerErrors.GetMetricWith(getLabels(namespace, scaledObject, scaler, scalerIndex, metric)) + if errscaler != nil { + log.Error(errscaler, "Unable to write to metrics to Prometheus Server: %v") + } +} + +// RecordScaleObjectError counts the number of errors with the scaled object +func RecordScaledObjectError(namespace string, scaledObject string, err error) { + labels := prometheus.Labels{"namespace": 
namespace, "scaledObject": scaledObject} + if err != nil { + scaledObjectErrors.With(labels).Inc() + return + } + // initialize metric with 0 if not already set + _, errscaledobject := scaledObjectErrors.GetMetricWith(labels) + if errscaledobject != nil { + log.Error(errscaledobject, "Unable to write to metrics to Prometheus Server: %v") + return + } +} + +func getLabels(namespace string, scaledObject string, scaler string, scalerIndex int, metric string) prometheus.Labels { + return prometheus.Labels{"namespace": namespace, "scaledObject": scaledObject, "scaler": scaler, "scalerIndex": strconv.Itoa(scalerIndex), "metric": metric} +} + +func IncrementTriggerTotal(triggerType string) { + if triggerType != "" { + triggerTotalsGaugeVec.WithLabelValues(triggerType).Inc() + } +} + +func DecrementTriggerTotal(triggerType string) { + if triggerType != "" { + triggerTotalsGaugeVec.WithLabelValues(triggerType).Dec() + } +} + +func IncrementCRDTotal(crdType, namespace string) { + if namespace == "" { + namespace = "default" + } + + crdTotalsGaugeVec.WithLabelValues(crdType, namespace).Inc() +} + +func DecrementCRDTotal(crdType, namespace string) { + if namespace == "" { + namespace = "default" + } + + crdTotalsGaugeVec.WithLabelValues(crdType, namespace).Dec() } diff --git a/pkg/provider/provider.go b/pkg/provider/provider.go index c1ae91303e5..e0e71340509 100644 --- a/pkg/provider/provider.go +++ b/pkg/provider/provider.go @@ -32,10 +32,14 @@ import ( "sigs.k8s.io/custom-metrics-apiserver/pkg/provider" kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1" - "github.com/kedacore/keda/v2/pkg/prommetrics" + "github.com/kedacore/keda/v2/pkg/fallback" + "github.com/kedacore/keda/v2/pkg/metricsservice" + prommetrics "github.com/kedacore/keda/v2/pkg/prommetrics/adapter" "github.com/kedacore/keda/v2/pkg/scaling" ) +// prommetrics "github.com/kedacore/keda/v2/pkg/prommetrics/adapter" + // KedaProvider implements External Metrics Provider type KedaProvider struct { client 
client.Client @@ -44,15 +48,18 @@ type KedaProvider struct { ctx context.Context externalMetricsInfo *[]provider.ExternalMetricInfo externalMetricsInfoLock *sync.RWMutex + + grpcClient metricsservice.GrpcClient + useMetricsServiceGrpc bool } var ( - logger logr.Logger - metricsServer prommetrics.PrometheusMetricServer + logger logr.Logger + promMetricsServer prommetrics.PrometheusMetricServer ) // NewProvider returns an instance of KedaProvider -func NewProvider(ctx context.Context, adapterLogger logr.Logger, scaleHandler scaling.ScaleHandler, client client.Client, watchedNamespace string, externalMetricsInfo *[]provider.ExternalMetricInfo, externalMetricsInfoLock *sync.RWMutex) provider.MetricsProvider { +func NewProvider(ctx context.Context, adapterLogger logr.Logger, scaleHandler scaling.ScaleHandler, client client.Client, grpcClient metricsservice.GrpcClient, useMetricsServiceGrpc bool, watchedNamespace string, externalMetricsInfo *[]provider.ExternalMetricInfo, externalMetricsInfoLock *sync.RWMutex) provider.MetricsProvider { provider := &KedaProvider{ client: client, scaleHandler: scaleHandler, @@ -60,6 +67,8 @@ func NewProvider(ctx context.Context, adapterLogger logr.Logger, scaleHandler sc ctx: ctx, externalMetricsInfo: externalMetricsInfo, externalMetricsInfoLock: externalMetricsInfoLock, + grpcClient: grpcClient, + useMetricsServiceGrpc: useMetricsServiceGrpc, } logger = adapterLogger.WithName("provider") logger.Info("starting") @@ -81,7 +90,37 @@ func (p *KedaProvider) GetExternalMetric(ctx context.Context, namespace string, return nil, err } - // get the scaled objects matching namespace and labels + // Get Metrics from Metrics Service gRPC Server + if p.useMetricsServiceGrpc { + // selector is in form: `scaledobject.keda.sh/name: scaledobject-name` + scaledObjectName := selector.Get("scaledobject.keda.sh/name") + + metrics, promMetrics, err := p.grpcClient.GetMetrics(ctx, scaledObjectName, namespace, info.Metric) + 
logger.V(1).WithValues("scaledObjectName", scaledObjectName, "scaledObjectNamespace", namespace, "metrics", metrics).Info("Receiving metrics") + + // [DEPRECATED] handle exporting Prometheus metrics from Operator to Metrics Server + var scaledObjectErr error + if promMetrics.ScaledObjectErr { + scaledObjectErr = fmt.Errorf("scaledObject error") + } + promMetricsServer.RecordScaledObjectError(namespace, scaledObjectName, scaledObjectErr) + for _, scalerMetric := range promMetrics.ScalerMetric { + promMetricsServer.RecordHPAScalerMetric(namespace, scaledObjectName, scalerMetric.ScalerName, int(scalerMetric.ScalerIndex), scalerMetric.MetricName, float64(scalerMetric.MetricValue)) + } + for _, scalerError := range promMetrics.ScalerError { + var scalerErr error + if scalerError.Error { + scalerErr = fmt.Errorf("scaler error") + } + promMetricsServer.RecordHPAScalerError(namespace, scaledObjectName, scalerError.ScalerName, int(scalerError.ScalerIndex), scalerError.MetricName, scalerErr) + } + + return metrics, err + } + + // ------ Deprecated way of getting metric directly from MS ------ // + // --------------------------------------------------------------- // + // Get Metrics by querying directly the external service scaledObjects := &kedav1alpha1.ScaledObjectList{} opts := []client.ListOption{ client.InNamespace(namespace), @@ -98,7 +137,7 @@ func (p *KedaProvider) GetExternalMetric(ctx context.Context, namespace string, var matchingMetrics []external_metrics.ExternalMetricValue cache, err := p.scaleHandler.GetScalersCache(ctx, scaledObject) - metricsServer.RecordScalerObjectError(scaledObject.Namespace, scaledObject.Name, err) + promMetricsServer.RecordScaledObjectError(scaledObject.Namespace, scaledObject.Name, err) if err != nil { return nil, fmt.Errorf("error when getting scalers %s", err) } @@ -120,19 +159,19 @@ func (p *KedaProvider) GetExternalMetric(ctx context.Context, namespace string, } // Filter only the desired metric if 
strings.EqualFold(metricSpec.External.Metric.Name, info.Metric) { - metrics, err := cache.GetMetricsForScaler(ctx, scalerIndex, info.Metric, metricSelector) - metrics, err = p.getMetricsWithFallback(ctx, metrics, err, info.Metric, scaledObject, metricSpec) + metrics, err := cache.GetMetricsForScaler(ctx, scalerIndex, info.Metric) + metrics, err = fallback.GetMetricsWithFallback(ctx, p.client, logger, metrics, err, info.Metric, scaledObject, metricSpec) if err != nil { scalerError = true logger.Error(err, "error getting metric for scaler", "scaledObject.Namespace", scaledObject.Namespace, "scaledObject.Name", scaledObject.Name, "scaler", scalerName) } else { for _, metric := range metrics { metricValue := metric.Value.AsApproximateFloat64() - metricsServer.RecordHPAScalerMetric(namespace, scaledObject.Name, scalerName, scalerIndex, metric.MetricName, metricValue) + promMetricsServer.RecordHPAScalerMetric(namespace, scaledObject.Name, scalerName, scalerIndex, metric.MetricName, metricValue) } matchingMetrics = append(matchingMetrics, metrics...) 
} - metricsServer.RecordHPAScalerError(namespace, scaledObject.Name, scalerName, scalerIndex, info.Metric, err) + promMetricsServer.RecordHPAScalerError(namespace, scaledObject.Name, scalerName, scalerIndex, info.Metric, err) } } } diff --git a/pkg/scalers/activemq_scaler.go b/pkg/scalers/activemq_scaler.go index b7172d2d071..6bd22c72d25 100644 --- a/pkg/scalers/activemq_scaler.go +++ b/pkg/scalers/activemq_scaler.go @@ -14,7 +14,6 @@ import ( "github.com/go-logr/logr" v2 "k8s.io/api/autoscaling/v2" - "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" kedautil "github.com/kedacore/keda/v2/pkg/util" @@ -279,7 +278,7 @@ func (s *activeMQScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpe return []v2.MetricSpec{metricSpec} } -func (s *activeMQScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) { +func (s *activeMQScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { queueSize, err := s.getQueueMessageCount(ctx) if err != nil { return nil, fmt.Errorf("error inspecting ActiveMQ queue size: %s", err) diff --git a/pkg/scalers/artemis_scaler.go b/pkg/scalers/artemis_scaler.go index b2ea3acea70..df8d9a1480a 100644 --- a/pkg/scalers/artemis_scaler.go +++ b/pkg/scalers/artemis_scaler.go @@ -12,7 +12,6 @@ import ( "github.com/go-logr/logr" v2 "k8s.io/api/autoscaling/v2" - "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" kedautil "github.com/kedacore/keda/v2/pkg/util" @@ -279,7 +278,7 @@ func (s *artemisScaler) GetMetricSpecForScaling(ctx context.Context) []v2.Metric } // GetMetrics returns value for a supported metric and an error if there is a problem getting the metric -func (s *artemisScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) { +func (s *artemisScaler) GetMetrics(ctx 
context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { messages, err := s.getQueueMessageCount(ctx) if err != nil { diff --git a/pkg/scalers/aws_cloudwatch_scaler.go b/pkg/scalers/aws_cloudwatch_scaler.go index f79dbf789d8..646988995eb 100644 --- a/pkg/scalers/aws_cloudwatch_scaler.go +++ b/pkg/scalers/aws_cloudwatch_scaler.go @@ -12,7 +12,6 @@ import ( "github.com/aws/aws-sdk-go/service/cloudwatch/cloudwatchiface" "github.com/go-logr/logr" v2 "k8s.io/api/autoscaling/v2" - "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" kedautil "github.com/kedacore/keda/v2/pkg/util" @@ -274,7 +273,7 @@ func computeQueryWindow(current time.Time, metricPeriodSec, metricEndTimeOffsetS return } -func (s *awsCloudwatchScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) { +func (s *awsCloudwatchScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { metricValue, err := s.GetCloudwatchMetrics() if err != nil { diff --git a/pkg/scalers/aws_cloudwatch_scaler_test.go b/pkg/scalers/aws_cloudwatch_scaler_test.go index 9e262f94842..e07153cdff0 100644 --- a/pkg/scalers/aws_cloudwatch_scaler_test.go +++ b/pkg/scalers/aws_cloudwatch_scaler_test.go @@ -11,7 +11,6 @@ import ( "github.com/aws/aws-sdk-go/service/cloudwatch/cloudwatchiface" "github.com/go-logr/logr" "github.com/stretchr/testify/assert" - "k8s.io/apimachinery/pkg/labels" ) const ( @@ -500,10 +499,9 @@ func TestAWSCloudwatchGetMetricSpecForScaling(t *testing.T) { } func TestAWSCloudwatchScalerGetMetrics(t *testing.T) { - var selector labels.Selector for _, meta := range awsCloudwatchGetMetricTestData { mockAWSCloudwatchScaler := awsCloudwatchScaler{"", &meta, &mockCloudwatch{}, logr.Discard()} - value, err := mockAWSCloudwatchScaler.GetMetrics(context.Background(), meta.metricsName, selector) + value, err := 
mockAWSCloudwatchScaler.GetMetrics(context.Background(), meta.metricsName) switch meta.metricsName { case testAWSCloudwatchErrorMetric: assert.Error(t, err, "expect error because of cloudwatch api error") diff --git a/pkg/scalers/aws_dynamodb_scaler.go b/pkg/scalers/aws_dynamodb_scaler.go index 246924b5c97..935d4316e23 100644 --- a/pkg/scalers/aws_dynamodb_scaler.go +++ b/pkg/scalers/aws_dynamodb_scaler.go @@ -13,7 +13,6 @@ import ( "github.com/go-logr/logr" "go.mongodb.org/mongo-driver/bson" v2 "k8s.io/api/autoscaling/v2" - "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" kedautil "github.com/kedacore/keda/v2/pkg/util" @@ -152,7 +151,7 @@ func createDynamoDBClient(metadata *awsDynamoDBMetadata) *dynamodb.DynamoDB { return dynamodb.New(sess, config) } -func (s *awsDynamoDBScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) { +func (s *awsDynamoDBScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { metricValue, err := s.GetQueryMetrics() if err != nil { s.logger.Error(err, "Error getting metric value") diff --git a/pkg/scalers/aws_dynamodb_scaler_test.go b/pkg/scalers/aws_dynamodb_scaler_test.go index c8be74c6215..12ebc276544 100644 --- a/pkg/scalers/aws_dynamodb_scaler_test.go +++ b/pkg/scalers/aws_dynamodb_scaler_test.go @@ -10,7 +10,6 @@ import ( "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface" "github.com/go-logr/logr" "github.com/stretchr/testify/assert" - "k8s.io/apimachinery/pkg/labels" ) const ( @@ -302,13 +301,11 @@ var awsDynamoDBGetMetricTestData = []awsDynamoDBMetadata{ } func TestDynamoGetMetrics(t *testing.T) { - var selector labels.Selector - for _, meta := range awsDynamoDBGetMetricTestData { t.Run(meta.tableName, func(t *testing.T) { scaler := awsDynamoDBScaler{"", &meta, &mockDynamoDB{}, logr.Discard()} - value, err := scaler.GetMetrics(context.Background(), 
"aws-dynamodb", selector) + value, err := scaler.GetMetrics(context.Background(), "aws-dynamodb") switch meta.tableName { case testAWSDynamoErrorTable: assert.Error(t, err, "expect error because of dynamodb api error") diff --git a/pkg/scalers/aws_dynamodb_streams_scaler.go b/pkg/scalers/aws_dynamodb_streams_scaler.go index 5efbc939c1e..6f6c06d903c 100644 --- a/pkg/scalers/aws_dynamodb_streams_scaler.go +++ b/pkg/scalers/aws_dynamodb_streams_scaler.go @@ -13,7 +13,6 @@ import ( v2 "k8s.io/api/autoscaling/v2" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" kedautil "github.com/kedacore/keda/v2/pkg/util" @@ -174,7 +173,7 @@ func (s *awsDynamoDBStreamsScaler) GetMetricSpecForScaling(context.Context) []v2 } // GetMetrics returns value for a supported metric and an error if there is a problem getting the metric -func (s *awsDynamoDBStreamsScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) { +func (s *awsDynamoDBStreamsScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { shardCount, err := s.GetDynamoDBStreamShardCount(ctx) if err != nil { diff --git a/pkg/scalers/aws_dynamodb_streams_scaler_test.go b/pkg/scalers/aws_dynamodb_streams_scaler_test.go index 011bfbbfb50..c9cf2fed967 100644 --- a/pkg/scalers/aws_dynamodb_streams_scaler_test.go +++ b/pkg/scalers/aws_dynamodb_streams_scaler_test.go @@ -15,7 +15,6 @@ import ( "github.com/aws/aws-sdk-go/service/dynamodbstreams/dynamodbstreamsiface" "github.com/go-logr/logr" "github.com/stretchr/testify/assert" - "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" ) @@ -414,7 +413,6 @@ func TestAwsDynamoDBStreamsGetMetricSpecForScaling(t *testing.T) { } func TestAwsDynamoDBStreamsScalerGetMetrics(t *testing.T) { - var selector labels.Selector for _, 
meta := range awsDynamoDBStreamsGetMetricTestData { var value []external_metrics.ExternalMetricValue var err error @@ -423,7 +421,7 @@ func TestAwsDynamoDBStreamsScalerGetMetrics(t *testing.T) { streamArn, err = getDynamoDBStreamsArn(ctx, &mockAwsDynamoDB{}, &meta.tableName) if err == nil { scaler := awsDynamoDBStreamsScaler{"", meta, streamArn, &mockAwsDynamoDBStreams{}, logr.Discard()} - value, err = scaler.GetMetrics(context.Background(), "MetricName", selector) + value, err = scaler.GetMetrics(context.Background(), "MetricName") } switch meta.tableName { case testAWSDynamoDBErrorTable: diff --git a/pkg/scalers/aws_kinesis_stream_scaler.go b/pkg/scalers/aws_kinesis_stream_scaler.go index 906c5bef80b..99f877d4ca4 100644 --- a/pkg/scalers/aws_kinesis_stream_scaler.go +++ b/pkg/scalers/aws_kinesis_stream_scaler.go @@ -9,7 +9,6 @@ import ( "github.com/aws/aws-sdk-go/service/kinesis/kinesisiface" "github.com/go-logr/logr" v2 "k8s.io/api/autoscaling/v2" - "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" kedautil "github.com/kedacore/keda/v2/pkg/util" @@ -146,7 +145,7 @@ func (s *awsKinesisStreamScaler) GetMetricSpecForScaling(context.Context) []v2.M } // GetMetrics returns value for a supported metric and an error if there is a problem getting the metric -func (s *awsKinesisStreamScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) { +func (s *awsKinesisStreamScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { shardCount, err := s.GetAwsKinesisOpenShardCount() if err != nil { diff --git a/pkg/scalers/aws_kinesis_stream_scaler_test.go b/pkg/scalers/aws_kinesis_stream_scaler_test.go index 05c5630bb30..2422308c309 100644 --- a/pkg/scalers/aws_kinesis_stream_scaler_test.go +++ b/pkg/scalers/aws_kinesis_stream_scaler_test.go @@ -11,7 +11,6 @@ import ( 
"github.com/aws/aws-sdk-go/service/kinesis/kinesisiface" "github.com/go-logr/logr" "github.com/stretchr/testify/assert" - "k8s.io/apimachinery/pkg/labels" ) const ( @@ -350,10 +349,9 @@ func TestAWSKinesisGetMetricSpecForScaling(t *testing.T) { } func TestAWSKinesisStreamScalerGetMetrics(t *testing.T) { - var selector labels.Selector for _, meta := range awsKinesisGetMetricTestData { scaler := awsKinesisStreamScaler{"", meta, &mockKinesis{}, logr.Discard()} - value, err := scaler.GetMetrics(context.Background(), "MetricName", selector) + value, err := scaler.GetMetrics(context.Background(), "MetricName") switch meta.streamName { case testAWSKinesisErrorStream: assert.Error(t, err, "expect error because of kinesis api error") diff --git a/pkg/scalers/aws_sqs_queue_scaler.go b/pkg/scalers/aws_sqs_queue_scaler.go index 2bec847f11f..e585514b034 100644 --- a/pkg/scalers/aws_sqs_queue_scaler.go +++ b/pkg/scalers/aws_sqs_queue_scaler.go @@ -12,7 +12,6 @@ import ( "github.com/aws/aws-sdk-go/service/sqs/sqsiface" "github.com/go-logr/logr" v2 "k8s.io/api/autoscaling/v2" - "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" kedautil "github.com/kedacore/keda/v2/pkg/util" @@ -194,7 +193,7 @@ func (s *awsSqsQueueScaler) GetMetricSpecForScaling(context.Context) []v2.Metric } // GetMetrics returns value for a supported metric and an error if there is a problem getting the metric -func (s *awsSqsQueueScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) { +func (s *awsSqsQueueScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { queuelen, err := s.getAwsSqsQueueLength() if err != nil { diff --git a/pkg/scalers/aws_sqs_queue_scaler_test.go b/pkg/scalers/aws_sqs_queue_scaler_test.go index 6881ef43317..35eba831582 100644 --- a/pkg/scalers/aws_sqs_queue_scaler_test.go +++ b/pkg/scalers/aws_sqs_queue_scaler_test.go @@ -10,7 
+10,6 @@ import ( "github.com/aws/aws-sdk-go/service/sqs/sqsiface" "github.com/go-logr/logr" "github.com/stretchr/testify/assert" - "k8s.io/apimachinery/pkg/labels" ) const ( @@ -344,10 +343,9 @@ func TestAWSSQSGetMetricSpecForScaling(t *testing.T) { } func TestAWSSQSScalerGetMetrics(t *testing.T) { - var selector labels.Selector for _, meta := range awsSQSGetMetricTestData { scaler := awsSqsQueueScaler{"", meta, &mockSqs{}, logr.Discard()} - value, err := scaler.GetMetrics(context.Background(), "MetricName", selector) + value, err := scaler.GetMetrics(context.Background(), "MetricName") switch meta.queueURL { case testAWSSQSErrorQueueURL: assert.Error(t, err, "expect error because of sqs api error") diff --git a/pkg/scalers/azure_app_insights_scaler.go b/pkg/scalers/azure_app_insights_scaler.go index badc4f4bf1b..8d12a565897 100644 --- a/pkg/scalers/azure_app_insights_scaler.go +++ b/pkg/scalers/azure_app_insights_scaler.go @@ -8,7 +8,6 @@ import ( "github.com/go-logr/logr" v2 "k8s.io/api/autoscaling/v2" - "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1" @@ -191,7 +190,7 @@ func (s *azureAppInsightsScaler) GetMetricSpecForScaling(context.Context) []v2.M } // GetMetrics returns value for a supported metric and an error if there is a problem getting the metric -func (s *azureAppInsightsScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) { +func (s *azureAppInsightsScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { val, err := azure.GetAzureAppInsightsMetricValue(ctx, s.metadata.azureAppInsightsInfo, s.podIdentity) if err != nil { s.logger.Error(err, "error getting azure app insights metric") diff --git a/pkg/scalers/azure_blob_scaler.go b/pkg/scalers/azure_blob_scaler.go index d3e3781ebd7..20913d40389 100644 --- 
a/pkg/scalers/azure_blob_scaler.go +++ b/pkg/scalers/azure_blob_scaler.go @@ -25,7 +25,6 @@ import ( "github.com/go-logr/logr" "github.com/gobwas/glob" v2 "k8s.io/api/autoscaling/v2" - "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1" @@ -214,7 +213,7 @@ func (s *azureBlobScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSp } // GetMetrics returns value for a supported metric and an error if there is a problem getting the metric -func (s *azureBlobScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) { +func (s *azureBlobScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { bloblen, err := azure.GetAzureBlobListLength( ctx, s.httpClient, diff --git a/pkg/scalers/azure_data_explorer_scaler.go b/pkg/scalers/azure_data_explorer_scaler.go index c8bd6d2efcb..a95856b7f88 100644 --- a/pkg/scalers/azure_data_explorer_scaler.go +++ b/pkg/scalers/azure_data_explorer_scaler.go @@ -24,7 +24,6 @@ import ( "github.com/Azure/azure-kusto-go/kusto" "github.com/go-logr/logr" v2 "k8s.io/api/autoscaling/v2" - "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1" @@ -170,7 +169,7 @@ func parseAzureDataExplorerAuthParams(config *ScalerConfig, logger logr.Logger) return &metadata, nil } -func (s azureDataExplorerScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) { +func (s azureDataExplorerScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { metricValue, err := azure.GetAzureDataExplorerMetricValue(ctx, s.client, s.metadata.DatabaseName, s.metadata.Query) if err != nil { return []external_metrics.ExternalMetricValue{}, 
fmt.Errorf("failed to get metrics for scaled object %s in namespace %s: %v", s.name, s.namespace, err) diff --git a/pkg/scalers/azure_eventhub_scaler.go b/pkg/scalers/azure_eventhub_scaler.go index fe21add22e0..8c98835b0fe 100644 --- a/pkg/scalers/azure_eventhub_scaler.go +++ b/pkg/scalers/azure_eventhub_scaler.go @@ -30,7 +30,6 @@ import ( az "github.com/Azure/go-autorest/autorest/azure" "github.com/go-logr/logr" v2 "k8s.io/api/autoscaling/v2" - "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" "github.com/kedacore/keda/v2/apis/keda/v1alpha1" @@ -373,7 +372,7 @@ func (s *azureEventHubScaler) GetMetricSpecForScaling(context.Context) []v2.Metr } // GetMetrics returns metric using total number of unprocessed events in event hub -func (s *azureEventHubScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) { +func (s *azureEventHubScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { totalUnprocessedEventCount := int64(0) runtimeInfo, err := s.client.GetRuntimeInformation(ctx) if err != nil { diff --git a/pkg/scalers/azure_log_analytics_scaler.go b/pkg/scalers/azure_log_analytics_scaler.go index 2979b08593c..e055e470feb 100644 --- a/pkg/scalers/azure_log_analytics_scaler.go +++ b/pkg/scalers/azure_log_analytics_scaler.go @@ -34,7 +34,6 @@ import ( "github.com/Azure/azure-amqp-common-go/v3/auth" "github.com/go-logr/logr" v2 "k8s.io/api/autoscaling/v2" - "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1" @@ -283,7 +282,7 @@ func (s *azureLogAnalyticsScaler) GetMetricSpecForScaling(ctx context.Context) [ } // GetMetrics returns value for a supported metric and an error if there is a problem getting the metric -func (s *azureLogAnalyticsScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) 
([]external_metrics.ExternalMetricValue, error) { +func (s *azureLogAnalyticsScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { receivedMetric, err := s.getMetricData(ctx) if err != nil { diff --git a/pkg/scalers/azure_monitor_scaler.go b/pkg/scalers/azure_monitor_scaler.go index eef54965f6e..fdb75607a16 100644 --- a/pkg/scalers/azure_monitor_scaler.go +++ b/pkg/scalers/azure_monitor_scaler.go @@ -25,7 +25,6 @@ import ( az "github.com/Azure/go-autorest/autorest/azure" "github.com/go-logr/logr" v2 "k8s.io/api/autoscaling/v2" - "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1" @@ -241,7 +240,7 @@ func (s *azureMonitorScaler) GetMetricSpecForScaling(context.Context) []v2.Metri } // GetMetrics returns value for a supported metric and an error if there is a problem getting the metric -func (s *azureMonitorScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) { +func (s *azureMonitorScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { val, err := azure.GetAzureMetricValue(ctx, s.metadata.azureMonitorInfo, s.podIdentity) if err != nil { s.logger.Error(err, "error getting azure monitor metric") diff --git a/pkg/scalers/azure_pipelines_scaler.go b/pkg/scalers/azure_pipelines_scaler.go index 5b4bc6a9ae1..7ec6eed649f 100644 --- a/pkg/scalers/azure_pipelines_scaler.go +++ b/pkg/scalers/azure_pipelines_scaler.go @@ -12,7 +12,6 @@ import ( "github.com/go-logr/logr" v2 "k8s.io/api/autoscaling/v2" - "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" kedautil "github.com/kedacore/keda/v2/pkg/util" @@ -310,7 +309,7 @@ func getAzurePipelineRequest(ctx context.Context, url string, metadata *azurePip return b, nil } -func (s *azurePipelinesScaler) GetMetrics(ctx context.Context, 
metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) { +func (s *azurePipelinesScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { queuelen, err := s.GetAzurePipelinesQueueLength(ctx) if err != nil { diff --git a/pkg/scalers/azure_queue_scaler.go b/pkg/scalers/azure_queue_scaler.go index 78d43e8044d..307836edaa6 100644 --- a/pkg/scalers/azure_queue_scaler.go +++ b/pkg/scalers/azure_queue_scaler.go @@ -24,7 +24,6 @@ import ( "github.com/go-logr/logr" v2 "k8s.io/api/autoscaling/v2" - "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1" @@ -195,7 +194,7 @@ func (s *azureQueueScaler) GetMetricSpecForScaling(context.Context) []v2.MetricS } // GetMetrics returns value for a supported metric and an error if there is a problem getting the metric -func (s *azureQueueScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) { +func (s *azureQueueScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { queuelen, err := azure.GetAzureQueueLength( ctx, s.httpClient, diff --git a/pkg/scalers/azure_servicebus_scaler.go b/pkg/scalers/azure_servicebus_scaler.go index a34fcb6d721..590624de52a 100755 --- a/pkg/scalers/azure_servicebus_scaler.go +++ b/pkg/scalers/azure_servicebus_scaler.go @@ -28,7 +28,6 @@ import ( az "github.com/Azure/go-autorest/autorest/azure" "github.com/go-logr/logr" v2 "k8s.io/api/autoscaling/v2" - "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1" @@ -271,7 +270,7 @@ func (s *azureServiceBusScaler) GetMetricSpecForScaling(context.Context) []v2.Me } // Returns the current metrics to be served to the HPA -func (s *azureServiceBusScaler) GetMetrics(ctx 
context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) { +func (s *azureServiceBusScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { queuelen, err := s.getAzureServiceBusLength(ctx) if err != nil { diff --git a/pkg/scalers/cassandra_scaler.go b/pkg/scalers/cassandra_scaler.go index d07dfc8283c..88f4776c749 100644 --- a/pkg/scalers/cassandra_scaler.go +++ b/pkg/scalers/cassandra_scaler.go @@ -9,7 +9,6 @@ import ( "github.com/go-logr/logr" "github.com/gocql/gocql" v2 "k8s.io/api/autoscaling/v2" - "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" kedautil "github.com/kedacore/keda/v2/pkg/util" @@ -206,7 +205,7 @@ func (s *cassandraScaler) GetMetricSpecForScaling(ctx context.Context) []v2.Metr } // GetMetrics returns a value for a supported metric or an error if there is a problem getting the metric. -func (s *cassandraScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) { +func (s *cassandraScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { num, err := s.GetQueryResult(ctx) if err != nil { return []external_metrics.ExternalMetricValue{}, fmt.Errorf("error inspecting cassandra: %s", err) diff --git a/pkg/scalers/cpu_memory_scaler.go b/pkg/scalers/cpu_memory_scaler.go index d403dd4159d..cfd7db7c84b 100644 --- a/pkg/scalers/cpu_memory_scaler.go +++ b/pkg/scalers/cpu_memory_scaler.go @@ -9,7 +9,6 @@ import ( v2 "k8s.io/api/autoscaling/v2" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" - "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" ) @@ -123,6 +122,6 @@ func (s *cpuMemoryScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSp } // GetMetrics no need for cpu/memory scaler -func (s *cpuMemoryScaler) GetMetrics(ctx context.Context, 
metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) { +func (s *cpuMemoryScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { return nil, nil } diff --git a/pkg/scalers/cron_scaler.go b/pkg/scalers/cron_scaler.go index 472605f2350..df48185b923 100644 --- a/pkg/scalers/cron_scaler.go +++ b/pkg/scalers/cron_scaler.go @@ -10,7 +10,6 @@ import ( "github.com/go-logr/logr" "github.com/robfig/cron/v3" v2 "k8s.io/api/autoscaling/v2" - "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" kedautil "github.com/kedacore/keda/v2/pkg/util" @@ -170,7 +169,7 @@ func (s *cronScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec { } // GetMetrics finds the current value of the metric -func (s *cronScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) { +func (s *cronScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { var currentReplicas = int64(defaultDesiredReplicas) isActive, err := s.IsActive(ctx) if err != nil { diff --git a/pkg/scalers/cron_scaler_test.go b/pkg/scalers/cron_scaler_test.go index e763956668c..33123800afb 100644 --- a/pkg/scalers/cron_scaler_test.go +++ b/pkg/scalers/cron_scaler_test.go @@ -93,7 +93,7 @@ func TestIsActiveRange(t *testing.T) { func TestGetMetrics(t *testing.T) { scaler, _ := NewCronScaler(&ScalerConfig{TriggerMetadata: validCronMetadata}) - metrics, _ := scaler.GetMetrics(context.TODO(), "ReplicaCount", nil) + metrics, _ := scaler.GetMetrics(context.TODO(), "ReplicaCount") assert.Equal(t, metrics[0].MetricName, "ReplicaCount") if currentDay == "Thursday" { assert.Equal(t, metrics[0].Value.Value(), int64(10)) @@ -104,7 +104,7 @@ func TestGetMetrics(t *testing.T) { func TestGetMetricsRange(t *testing.T) { scaler, _ := NewCronScaler(&ScalerConfig{TriggerMetadata: 
validCronMetadata2}) - metrics, _ := scaler.GetMetrics(context.TODO(), "ReplicaCount", nil) + metrics, _ := scaler.GetMetrics(context.TODO(), "ReplicaCount") assert.Equal(t, metrics[0].MetricName, "ReplicaCount") if currentHour%2 == 0 { assert.Equal(t, metrics[0].Value.Value(), int64(10)) diff --git a/pkg/scalers/datadog_scaler.go b/pkg/scalers/datadog_scaler.go index 216f1a27ca5..ffb6940f10c 100644 --- a/pkg/scalers/datadog_scaler.go +++ b/pkg/scalers/datadog_scaler.go @@ -11,7 +11,6 @@ import ( datadog "github.com/DataDog/datadog-api-client-go/api/v1/datadog" "github.com/go-logr/logr" v2 "k8s.io/api/autoscaling/v2" - "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" kedautil "github.com/kedacore/keda/v2/pkg/util" @@ -339,7 +338,7 @@ func (s *datadogScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec } // GetMetrics returns value for a supported metric and an error if there is a problem getting the metric -func (s *datadogScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) { +func (s *datadogScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { num, err := s.getQueryResult(ctx) if err != nil { s.logger.Error(err, "error getting metrics from Datadog") diff --git a/pkg/scalers/elasticsearch_scaler.go b/pkg/scalers/elasticsearch_scaler.go index 88832c558e5..c2aeb57af3e 100644 --- a/pkg/scalers/elasticsearch_scaler.go +++ b/pkg/scalers/elasticsearch_scaler.go @@ -15,7 +15,6 @@ import ( "github.com/go-logr/logr" "github.com/tidwall/gjson" v2 "k8s.io/api/autoscaling/v2" - "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" kedautil "github.com/kedacore/keda/v2/pkg/util" @@ -341,7 +340,7 @@ func (s *elasticsearchScaler) GetMetricSpecForScaling(context.Context) []v2.Metr } // GetMetrics returns value for a supported metric and an error if there is a problem getting the 
metric -func (s *elasticsearchScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) { +func (s *elasticsearchScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { num, err := s.getQueryResult(ctx) if err != nil { return []external_metrics.ExternalMetricValue{}, fmt.Errorf("error inspecting elasticsearch: %s", err) diff --git a/pkg/scalers/external_mock_scaler.go b/pkg/scalers/external_mock_scaler.go index 982be7bf8ef..2e880e1b103 100644 --- a/pkg/scalers/external_mock_scaler.go +++ b/pkg/scalers/external_mock_scaler.go @@ -8,7 +8,6 @@ import ( v2 "k8s.io/api/autoscaling/v2" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" ) @@ -55,7 +54,7 @@ func (*externalMockScaler) GetMetricSpecForScaling(ctx context.Context) []v2.Met } // GetMetrics implements Scaler -func (*externalMockScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) { +func (*externalMockScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { if atomic.LoadInt32(&MockExternalServerStatus) != MockExternalServerStatusOnline { return nil, ErrMock } diff --git a/pkg/scalers/external_scaler.go b/pkg/scalers/external_scaler.go index fa89d80f6ec..2b83ee26cac 100644 --- a/pkg/scalers/external_scaler.go +++ b/pkg/scalers/external_scaler.go @@ -14,7 +14,6 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/insecure" v2 "k8s.io/api/autoscaling/v2" - "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" pb "github.com/kedacore/keda/v2/pkg/scalers/externalscaler" @@ -186,7 +185,7 @@ func (s *externalScaler) GetMetricSpecForScaling(ctx context.Context) []v2.Metri } // GetMetrics 
connects calls the gRPC interface to get the metrics with a specific name -func (s *externalScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) { +func (s *externalScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { var metrics []external_metrics.ExternalMetricValue grpcClient, err := getClientForConnectionPool(s.metadata) if err != nil { diff --git a/pkg/scalers/gcp_pubsub_scaler.go b/pkg/scalers/gcp_pubsub_scaler.go index 3f081a17e5b..1e9571f092c 100644 --- a/pkg/scalers/gcp_pubsub_scaler.go +++ b/pkg/scalers/gcp_pubsub_scaler.go @@ -10,7 +10,6 @@ import ( "github.com/go-logr/logr" v2 "k8s.io/api/autoscaling/v2" - "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" kedautil "github.com/kedacore/keda/v2/pkg/util" @@ -189,7 +188,7 @@ func (s *pubsubScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec } // GetMetrics connects to Stack Driver and finds the size of the pub sub subscription -func (s *pubsubScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) { +func (s *pubsubScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { var value int64 var err error diff --git a/pkg/scalers/gcp_stackdriver_scaler.go b/pkg/scalers/gcp_stackdriver_scaler.go index 2280ad6f557..efb6e70593e 100644 --- a/pkg/scalers/gcp_stackdriver_scaler.go +++ b/pkg/scalers/gcp_stackdriver_scaler.go @@ -8,7 +8,6 @@ import ( monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb" "github.com/go-logr/logr" v2 "k8s.io/api/autoscaling/v2" - "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" kedautil "github.com/kedacore/keda/v2/pkg/util" @@ -202,7 +201,7 @@ func (s *stackdriverScaler) GetMetricSpecForScaling(context.Context) []v2.Metric } // 
GetMetrics connects to Stack Driver and retrieves the metric -func (s *stackdriverScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) { +func (s *stackdriverScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { value, err := s.getMetrics(ctx) if err != nil { s.logger.Error(err, "error getting metric value") diff --git a/pkg/scalers/gcp_storage_scaler.go b/pkg/scalers/gcp_storage_scaler.go index 004451710d7..5600fc9a2ac 100644 --- a/pkg/scalers/gcp_storage_scaler.go +++ b/pkg/scalers/gcp_storage_scaler.go @@ -11,7 +11,6 @@ import ( "google.golang.org/api/iterator" option "google.golang.org/api/option" v2 "k8s.io/api/autoscaling/v2" - "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" kedautil "github.com/kedacore/keda/v2/pkg/util" @@ -188,7 +187,7 @@ func (s *gcsScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec { } // GetMetrics returns the number of items in the bucket (up to s.metadata.maxBucketItemsToScan) -func (s *gcsScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) { +func (s *gcsScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { items, err := s.getItemCount(ctx, s.metadata.maxBucketItemsToScan) if err != nil { return []external_metrics.ExternalMetricValue{}, err diff --git a/pkg/scalers/graphite_scaler.go b/pkg/scalers/graphite_scaler.go index 1990e4ae257..0c14982016f 100644 --- a/pkg/scalers/graphite_scaler.go +++ b/pkg/scalers/graphite_scaler.go @@ -11,7 +11,6 @@ import ( "github.com/go-logr/logr" v2 "k8s.io/api/autoscaling/v2" - "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" kedautil "github.com/kedacore/keda/v2/pkg/util" @@ -224,7 +223,7 @@ func (s *graphiteScaler) executeGrapQuery(ctx context.Context) 
(float64, error) return -1, fmt.Errorf("no valid non-null response in query %s, try increasing your queryTime or check your query", s.metadata.query) } -func (s *graphiteScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) { +func (s *graphiteScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { val, err := s.executeGrapQuery(ctx) if err != nil { s.logger.Error(err, "error executing graphite query") diff --git a/pkg/scalers/huawei_cloudeye_scaler.go b/pkg/scalers/huawei_cloudeye_scaler.go index 2b6e6d1fc62..f80bcc629cc 100644 --- a/pkg/scalers/huawei_cloudeye_scaler.go +++ b/pkg/scalers/huawei_cloudeye_scaler.go @@ -12,7 +12,6 @@ import ( "github.com/Huawei/gophercloud/openstack/ces/v1/metricdata" "github.com/go-logr/logr" v2 "k8s.io/api/autoscaling/v2" - "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" kedautil "github.com/kedacore/keda/v2/pkg/util" @@ -241,7 +240,7 @@ func gethuaweiAuthorization(authParams map[string]string) (huaweiAuthorizationMe return meta, nil } -func (s *huaweiCloudeyeScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) { +func (s *huaweiCloudeyeScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { metricValue, err := s.GetCloudeyeMetrics() if err != nil { diff --git a/pkg/scalers/ibmmq_scaler.go b/pkg/scalers/ibmmq_scaler.go index db2a5fa1fd9..c6767d31b59 100644 --- a/pkg/scalers/ibmmq_scaler.go +++ b/pkg/scalers/ibmmq_scaler.go @@ -14,7 +14,6 @@ import ( "github.com/go-logr/logr" v2 "k8s.io/api/autoscaling/v2" - "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" kedautil "github.com/kedacore/keda/v2/pkg/util" @@ -230,7 +229,7 @@ func (s *IBMMQScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec { } 
// GetMetrics returns value for a supported metric and an error if there is a problem getting the metric -func (s *IBMMQScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) { +func (s *IBMMQScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { queueDepth, err := s.getQueueDepthViaHTTP(ctx) if err != nil { return []external_metrics.ExternalMetricValue{}, fmt.Errorf("error inspecting IBM MQ queue depth: %s", err) diff --git a/pkg/scalers/influxdb_scaler.go b/pkg/scalers/influxdb_scaler.go index 25a380a7c27..cd1f48b8c5d 100644 --- a/pkg/scalers/influxdb_scaler.go +++ b/pkg/scalers/influxdb_scaler.go @@ -10,7 +10,6 @@ import ( influxdb2 "github.com/influxdata/influxdb-client-go/v2" api "github.com/influxdata/influxdb-client-go/v2/api" v2 "k8s.io/api/autoscaling/v2" - "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" kedautil "github.com/kedacore/keda/v2/pkg/util" @@ -208,7 +207,7 @@ func queryInfluxDB(ctx context.Context, queryAPI api.QueryAPI, query string) (fl } // GetMetrics connects to influxdb via the client and returns a value based on the query -func (s *influxDBScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) { +func (s *influxDBScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { // Grab QueryAPI to make queries to influxdb instance queryAPI := s.client.QueryAPI(s.metadata.organizationName) diff --git a/pkg/scalers/kafka_scaler.go b/pkg/scalers/kafka_scaler.go index 9899c41c2e8..09fdb684030 100644 --- a/pkg/scalers/kafka_scaler.go +++ b/pkg/scalers/kafka_scaler.go @@ -11,7 +11,6 @@ import ( "github.com/Shopify/sarama" "github.com/go-logr/logr" v2 "k8s.io/api/autoscaling/v2" - "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" 
kedautil "github.com/kedacore/keda/v2/pkg/util" @@ -501,7 +500,7 @@ func (s *kafkaScaler) getConsumerAndProducerOffsets(topicPartitions map[string][ } // GetMetrics returns value for a supported metric and an error if there is a problem getting the metric -func (s *kafkaScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) { +func (s *kafkaScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { totalLag, err := s.getTotalLag() if err != nil { return []external_metrics.ExternalMetricValue{}, err diff --git a/pkg/scalers/kubernetes_workload_scaler.go b/pkg/scalers/kubernetes_workload_scaler.go index 25ac5de95ea..07a7812655c 100644 --- a/pkg/scalers/kubernetes_workload_scaler.go +++ b/pkg/scalers/kubernetes_workload_scaler.go @@ -116,7 +116,7 @@ func (s *kubernetesWorkloadScaler) GetMetricSpecForScaling(context.Context) []v2 } // GetMetrics returns value for a supported metric -func (s *kubernetesWorkloadScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) { +func (s *kubernetesWorkloadScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { pods, err := s.getMetricValue(ctx) if err != nil { return []external_metrics.ExternalMetricValue{}, fmt.Errorf("error inspecting kubernetes workload: %s", err) diff --git a/pkg/scalers/liiklus_scaler.go b/pkg/scalers/liiklus_scaler.go index 9cca22035d8..04198828e0d 100644 --- a/pkg/scalers/liiklus_scaler.go +++ b/pkg/scalers/liiklus_scaler.go @@ -11,7 +11,6 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" v2 "k8s.io/api/autoscaling/v2" - "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" liiklus_service "github.com/kedacore/keda/v2/pkg/scalers/liiklus" @@ -75,7 +74,7 @@ func NewLiiklusScaler(config 
*ScalerConfig) (Scaler, error) { return &scaler, nil } -func (s *liiklusScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) { +func (s *liiklusScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { totalLag, lags, err := s.getLag(ctx) if err != nil { return nil, err diff --git a/pkg/scalers/liiklus_scaler_test.go b/pkg/scalers/liiklus_scaler_test.go index f793d8c6611..16d95e65fca 100644 --- a/pkg/scalers/liiklus_scaler_test.go +++ b/pkg/scalers/liiklus_scaler_test.go @@ -140,7 +140,7 @@ func TestLiiklusScalerGetMetricsBehavior(t *testing.T) { GetEndOffsets(gomock.Any(), gomock.Any()). Return(&liiklus.GetEndOffsetsReply{Offsets: map[uint32]uint64{0: 20, 1: 30}}, nil) - values, err := scaler.GetMetrics(context.Background(), "m", nil) + values, err := scaler.GetMetrics(context.Background(), "m") if err != nil { t.Errorf("error calling IsActive: %v", err) return @@ -157,7 +157,7 @@ func TestLiiklusScalerGetMetricsBehavior(t *testing.T) { mockClient.EXPECT(). GetEndOffsets(gomock.Any(), gomock.Any()). 
Return(&liiklus.GetEndOffsetsReply{Offsets: map[uint32]uint64{0: 20, 1: 30}}, nil) - values, err = scaler.GetMetrics(context.Background(), "m", nil) + values, err = scaler.GetMetrics(context.Background(), "m") if err != nil { t.Errorf("error calling IsActive: %v", err) return diff --git a/pkg/scalers/loki_scaler.go b/pkg/scalers/loki_scaler.go index 0ad0fc41656..d5301ee58bc 100644 --- a/pkg/scalers/loki_scaler.go +++ b/pkg/scalers/loki_scaler.go @@ -11,7 +11,6 @@ import ( "github.com/go-logr/logr" v2 "k8s.io/api/autoscaling/v2" - "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" "github.com/kedacore/keda/v2/pkg/scalers/authentication" @@ -275,7 +274,7 @@ func (s *lokiScaler) ExecuteLokiQuery(ctx context.Context) (float64, error) { } // GetMetrics returns an external metric value for the loki -func (s *lokiScaler) GetMetrics(ctx context.Context, metricName string, _ labels.Selector) ([]external_metrics.ExternalMetricValue, error) { +func (s *lokiScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { val, err := s.ExecuteLokiQuery(ctx) if err != nil { s.logger.Error(err, "error executing loki query") diff --git a/pkg/scalers/metrics_api_scaler.go b/pkg/scalers/metrics_api_scaler.go index f8e7fa33532..08433269e89 100644 --- a/pkg/scalers/metrics_api_scaler.go +++ b/pkg/scalers/metrics_api_scaler.go @@ -14,7 +14,6 @@ import ( "github.com/tidwall/gjson" v2 "k8s.io/api/autoscaling/v2" "k8s.io/apimachinery/pkg/api/resource" - "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" "github.com/kedacore/keda/v2/pkg/scalers/authentication" @@ -283,7 +282,7 @@ func (s *metricsAPIScaler) GetMetricSpecForScaling(context.Context) []v2.MetricS } // GetMetrics returns value for a supported metric and an error if there is a problem getting the metric -func (s *metricsAPIScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) 
([]external_metrics.ExternalMetricValue, error) { +func (s *metricsAPIScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { val, err := s.getMetricValue(ctx) if err != nil { return []external_metrics.ExternalMetricValue{}, fmt.Errorf("error requesting metrics endpoint: %s", err) diff --git a/pkg/scalers/metrics_api_scaler_test.go b/pkg/scalers/metrics_api_scaler_test.go index badefdfe1c9..7dca8cac2a4 100644 --- a/pkg/scalers/metrics_api_scaler_test.go +++ b/pkg/scalers/metrics_api_scaler_test.go @@ -219,7 +219,7 @@ func TestBearerAuth(t *testing.T) { t.Errorf("Error creating the Scaler") } - _, err = s.GetMetrics(context.TODO(), "test-metric", nil) + _, err = s.GetMetrics(context.TODO(), "test-metric") if err != nil { t.Errorf("Error getting the metric") } diff --git a/pkg/scalers/mongo_scaler.go b/pkg/scalers/mongo_scaler.go index 2c9a868fa79..1fb0b63a044 100644 --- a/pkg/scalers/mongo_scaler.go +++ b/pkg/scalers/mongo_scaler.go @@ -14,7 +14,6 @@ import ( "go.mongodb.org/mongo-driver/mongo/readpref" "go.mongodb.org/mongo-driver/x/bsonx" v2 "k8s.io/api/autoscaling/v2" - "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" kedautil "github.com/kedacore/keda/v2/pkg/util" @@ -245,7 +244,7 @@ func (s *mongoDBScaler) getQueryResult(ctx context.Context) (int64, error) { } // GetMetrics query from mongoDB,and return to external metrics -func (s *mongoDBScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) { +func (s *mongoDBScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { num, err := s.getQueryResult(ctx) if err != nil { return []external_metrics.ExternalMetricValue{}, fmt.Errorf("failed to inspect momgoDB, because of %v", err) diff --git a/pkg/scalers/mssql_scaler.go b/pkg/scalers/mssql_scaler.go index 5e5a82cc086..9bebcf3c521 100644 --- 
a/pkg/scalers/mssql_scaler.go +++ b/pkg/scalers/mssql_scaler.go @@ -11,7 +11,6 @@ import ( _ "github.com/denisenkom/go-mssqldb" "github.com/go-logr/logr" v2 "k8s.io/api/autoscaling/v2" - "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" kedautil "github.com/kedacore/keda/v2/pkg/util" @@ -245,7 +244,7 @@ func (s *mssqlScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec { } // GetMetrics returns a value for a supported metric or an error if there is a problem getting the metric -func (s *mssqlScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) { +func (s *mssqlScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { num, err := s.getQueryResult(ctx) if err != nil { return []external_metrics.ExternalMetricValue{}, fmt.Errorf("error inspecting mssql: %s", err) diff --git a/pkg/scalers/mysql_scaler.go b/pkg/scalers/mysql_scaler.go index ac3e857c372..019e2bf3db3 100644 --- a/pkg/scalers/mysql_scaler.go +++ b/pkg/scalers/mysql_scaler.go @@ -10,7 +10,6 @@ import ( "github.com/go-logr/logr" "github.com/go-sql-driver/mysql" v2 "k8s.io/api/autoscaling/v2" - "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" kedautil "github.com/kedacore/keda/v2/pkg/util" @@ -229,7 +228,7 @@ func (s *mySQLScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec { } // GetMetrics returns value for a supported metric and an error if there is a problem getting the metric -func (s *mySQLScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) { +func (s *mySQLScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { num, err := s.getQueryResult(ctx) if err != nil { return []external_metrics.ExternalMetricValue{}, fmt.Errorf("error inspecting MySQL: %s", err) 
diff --git a/pkg/scalers/nats_jetstream_scaler.go b/pkg/scalers/nats_jetstream_scaler.go index b038551bd95..df25fe3a239 100644 --- a/pkg/scalers/nats_jetstream_scaler.go +++ b/pkg/scalers/nats_jetstream_scaler.go @@ -12,7 +12,6 @@ import ( v2 "k8s.io/api/autoscaling/v2" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" kedautil "github.com/kedacore/keda/v2/pkg/util" @@ -229,7 +228,7 @@ func (s *natsJetStreamScaler) GetMetricSpecForScaling(context.Context) []v2.Metr return []v2.MetricSpec{metricSpec} } -func (s *natsJetStreamScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) { +func (s *natsJetStreamScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { req, err := http.NewRequestWithContext(ctx, http.MethodGet, s.metadata.monitoringEndpoint, nil) if err != nil { return nil, err diff --git a/pkg/scalers/newrelic_scaler.go b/pkg/scalers/newrelic_scaler.go index 8ff30837278..cf2fd78bebb 100644 --- a/pkg/scalers/newrelic_scaler.go +++ b/pkg/scalers/newrelic_scaler.go @@ -10,7 +10,6 @@ import ( "github.com/newrelic/newrelic-client-go/newrelic" "github.com/newrelic/newrelic-client-go/pkg/nrdb" v2 "k8s.io/api/autoscaling/v2" - "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" kedautil "github.com/kedacore/keda/v2/pkg/util" @@ -173,7 +172,7 @@ func (s *newrelicScaler) executeNewRelicQuery(ctx context.Context) (float64, err return 0, nil } -func (s *newrelicScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) { +func (s *newrelicScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { val, err := s.executeNewRelicQuery(ctx) if err != nil { s.logger.Error(err, 
"error executing NRQL query") diff --git a/pkg/scalers/openstack_metrics_scaler.go b/pkg/scalers/openstack_metrics_scaler.go index 17f82a4255f..e026948a445 100644 --- a/pkg/scalers/openstack_metrics_scaler.go +++ b/pkg/scalers/openstack_metrics_scaler.go @@ -13,7 +13,6 @@ import ( "github.com/go-logr/logr" v2 "k8s.io/api/autoscaling/v2" - "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" "github.com/kedacore/keda/v2/pkg/scalers/openstack" @@ -233,7 +232,7 @@ func (s *openstackMetricScaler) GetMetricSpecForScaling(context.Context) []v2.Me return []v2.MetricSpec{metricSpec} } -func (s *openstackMetricScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) { +func (s *openstackMetricScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { val, err := s.readOpenstackMetrics(ctx) if err != nil { diff --git a/pkg/scalers/openstack_swift_scaler.go b/pkg/scalers/openstack_swift_scaler.go index 2725443e3f9..3f0927e75ab 100644 --- a/pkg/scalers/openstack_swift_scaler.go +++ b/pkg/scalers/openstack_swift_scaler.go @@ -12,7 +12,6 @@ import ( "github.com/go-logr/logr" v2 "k8s.io/api/autoscaling/v2" - "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" "github.com/kedacore/keda/v2/pkg/scalers/openstack" @@ -383,7 +382,7 @@ func (s *openstackSwiftScaler) Close(context.Context) error { return nil } -func (s *openstackSwiftScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) { +func (s *openstackSwiftScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { objectCount, err := s.getOpenstackSwiftContainerObjectCount(ctx) if err != nil { diff --git a/pkg/scalers/postgresql_scaler.go b/pkg/scalers/postgresql_scaler.go index ca95ee07b46..6bd4203b05d 100644 --- 
a/pkg/scalers/postgresql_scaler.go +++ b/pkg/scalers/postgresql_scaler.go @@ -10,7 +10,6 @@ import ( // PostreSQL drive required for this scaler _ "github.com/lib/pq" v2 "k8s.io/api/autoscaling/v2" - "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" kedautil "github.com/kedacore/keda/v2/pkg/util" @@ -203,7 +202,7 @@ func (s *postgreSQLScaler) GetMetricSpecForScaling(context.Context) []v2.MetricS } // GetMetrics returns value for a supported metric and an error if there is a problem getting the metric -func (s *postgreSQLScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) { +func (s *postgreSQLScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { num, err := s.getActiveNumber(ctx) if err != nil { return []external_metrics.ExternalMetricValue{}, fmt.Errorf("error inspecting postgreSQL: %s", err) diff --git a/pkg/scalers/predictkube_scaler.go b/pkg/scalers/predictkube_scaler.go index b13df42e741..1436185aeb6 100644 --- a/pkg/scalers/predictkube_scaler.go +++ b/pkg/scalers/predictkube_scaler.go @@ -27,7 +27,6 @@ import ( health "google.golang.org/grpc/health/grpc_health_v1" "google.golang.org/grpc/status" v2 "k8s.io/api/autoscaling/v2" - "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" "github.com/kedacore/keda/v2/pkg/scalers/authentication" @@ -220,7 +219,7 @@ func (s *PredictKubeScaler) GetMetricSpecForScaling(context.Context) []v2.Metric return []v2.MetricSpec{metricSpec} } -func (s *PredictKubeScaler) GetMetrics(ctx context.Context, metricName string, _ labels.Selector) ([]external_metrics.ExternalMetricValue, error) { +func (s *PredictKubeScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { value, err := s.doPredictRequest(ctx) if err != nil { s.logger.Error(err, "error executing query to predict controller service") 
diff --git a/pkg/scalers/predictkube_scaler_test.go b/pkg/scalers/predictkube_scaler_test.go index a2ee3825a9a..97c072a604c 100644 --- a/pkg/scalers/predictkube_scaler_test.go +++ b/pkg/scalers/predictkube_scaler_test.go @@ -217,7 +217,7 @@ func TestPredictKubeGetMetrics(t *testing.T) { ) assert.NoError(t, err) - result, err := mockPredictKubeScaler.GetMetrics(context.Background(), predictKubeMetricPrefix, nil) + result, err := mockPredictKubeScaler.GetMetrics(context.Background(), predictKubeMetricPrefix) assert.NoError(t, err) assert.Equal(t, len(result), 1) assert.Equal(t, result[0].Value, *resource.NewMilliQuantity(mockPredictServer.val*1000, resource.DecimalSI)) diff --git a/pkg/scalers/prometheus_scaler.go b/pkg/scalers/prometheus_scaler.go index 38cacc0fc1b..c44acc5f812 100644 --- a/pkg/scalers/prometheus_scaler.go +++ b/pkg/scalers/prometheus_scaler.go @@ -13,7 +13,6 @@ import ( "github.com/go-logr/logr" v2 "k8s.io/api/autoscaling/v2" - "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" "github.com/kedacore/keda/v2/pkg/scalers/authentication" @@ -310,7 +309,7 @@ func (s *prometheusScaler) ExecutePromQuery(ctx context.Context) (float64, error return v, nil } -func (s *prometheusScaler) GetMetrics(ctx context.Context, metricName string, _ labels.Selector) ([]external_metrics.ExternalMetricValue, error) { +func (s *prometheusScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { val, err := s.ExecutePromQuery(ctx) if err != nil { s.logger.Error(err, "error executing prometheus query") diff --git a/pkg/scalers/pulsar_scaler.go b/pkg/scalers/pulsar_scaler.go index a0286bdd91b..cd68b58d344 100644 --- a/pkg/scalers/pulsar_scaler.go +++ b/pkg/scalers/pulsar_scaler.go @@ -13,7 +13,6 @@ import ( "github.com/go-logr/logr" v2 "k8s.io/api/autoscaling/v2" "k8s.io/apimachinery/pkg/api/resource" - "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" 
"github.com/kedacore/keda/v2/pkg/scalers/authentication" @@ -271,7 +270,7 @@ func (s *pulsarScaler) IsActive(ctx context.Context) (bool, error) { } // GetMetrics returns value for a supported metric and an error if there is a problem getting the metric -func (s *pulsarScaler) GetMetrics(ctx context.Context, metricName string, _ labels.Selector) ([]external_metrics.ExternalMetricValue, error) { +func (s *pulsarScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { msgBacklog, found, err := s.getMsgBackLog(ctx) if err != nil { return nil, fmt.Errorf("error requesting stats from url: %s", err) diff --git a/pkg/scalers/pulsar_scaler_test.go b/pkg/scalers/pulsar_scaler_test.go index 7e3f000f6a9..2146c85e9e5 100644 --- a/pkg/scalers/pulsar_scaler_test.go +++ b/pkg/scalers/pulsar_scaler_test.go @@ -276,7 +276,7 @@ func TestPulsarGetMetric(t *testing.T) { metricSpec := mockPulsarScaler.GetMetricSpecForScaling(context.TODO()) metricName := metricSpec[0].External.Metric.Name - metric, err := mockPulsarScaler.GetMetrics(context.TODO(), metricName, nil) + metric, err := mockPulsarScaler.GetMetrics(context.TODO(), metricName) if err != nil { t.Fatal("Failed:", err) } diff --git a/pkg/scalers/rabbitmq_scaler.go b/pkg/scalers/rabbitmq_scaler.go index dcff282cdc5..a449f060ae4 100644 --- a/pkg/scalers/rabbitmq_scaler.go +++ b/pkg/scalers/rabbitmq_scaler.go @@ -14,7 +14,6 @@ import ( "github.com/go-logr/logr" "github.com/streadway/amqp" v2 "k8s.io/api/autoscaling/v2" - "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" kedautil "github.com/kedacore/keda/v2/pkg/util" @@ -501,7 +500,7 @@ func (s *rabbitMQScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpe } // GetMetrics returns value for a supported metric and an error if there is a problem getting the metric -func (s *rabbitMQScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) 
([]external_metrics.ExternalMetricValue, error) { +func (s *rabbitMQScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { messages, publishRate, err := s.getQueueStatus() if err != nil { return []external_metrics.ExternalMetricValue{}, s.anonimizeRabbitMQError(err) diff --git a/pkg/scalers/redis_scaler.go b/pkg/scalers/redis_scaler.go index 8f576156bbd..50a740da864 100644 --- a/pkg/scalers/redis_scaler.go +++ b/pkg/scalers/redis_scaler.go @@ -10,7 +10,6 @@ import ( "github.com/go-logr/logr" "github.com/go-redis/redis/v8" v2 "k8s.io/api/autoscaling/v2" - "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" kedautil "github.com/kedacore/keda/v2/pkg/util" @@ -251,7 +250,7 @@ func (s *redisScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec { } // GetMetrics connects to Redis and finds the length of the list -func (s *redisScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) { +func (s *redisScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { listLen, err := s.getListLengthFn(ctx) if err != nil { diff --git a/pkg/scalers/redis_streams_scaler.go b/pkg/scalers/redis_streams_scaler.go index 6609a61bb7f..349a731cc9e 100644 --- a/pkg/scalers/redis_streams_scaler.go +++ b/pkg/scalers/redis_streams_scaler.go @@ -8,7 +8,6 @@ import ( "github.com/go-logr/logr" "github.com/go-redis/redis/v8" v2 "k8s.io/api/autoscaling/v2" - "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" kedautil "github.com/kedacore/keda/v2/pkg/util" @@ -223,7 +222,7 @@ func (s *redisStreamsScaler) GetMetricSpecForScaling(context.Context) []v2.Metri } // GetMetrics fetches the number of pending entries for a consumer group in a stream -func (s *redisStreamsScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) 
([]external_metrics.ExternalMetricValue, error) { +func (s *redisStreamsScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { pendingEntriesCount, err := s.getPendingEntriesCountFn(ctx) if err != nil { diff --git a/pkg/scalers/scaler.go b/pkg/scalers/scaler.go index 210e6c6195a..8f8554df7c5 100644 --- a/pkg/scalers/scaler.go +++ b/pkg/scalers/scaler.go @@ -27,7 +27,6 @@ import ( v2 "k8s.io/api/autoscaling/v2" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" logf "sigs.k8s.io/controller-runtime/pkg/log" @@ -44,7 +43,7 @@ func init() { type Scaler interface { // The scaler returns the metric values for a metric Name and criteria matching the selector - GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) + GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) // Returns the metrics based on which this scaler determines that the ScaleTarget scales. This is used to construct the HPA spec that is created for // this scaled object. 
The labels used should match the selectors used in GetMetrics diff --git a/pkg/scalers/selenium_grid_scaler.go b/pkg/scalers/selenium_grid_scaler.go index e0bf4fda887..7273d572ac2 100644 --- a/pkg/scalers/selenium_grid_scaler.go +++ b/pkg/scalers/selenium_grid_scaler.go @@ -14,7 +14,6 @@ import ( "github.com/go-logr/logr" v2 "k8s.io/api/autoscaling/v2" - "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" kedautil "github.com/kedacore/keda/v2/pkg/util" @@ -153,7 +152,7 @@ func (s *seleniumGridScaler) Close(context.Context) error { return nil } -func (s *seleniumGridScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) { +func (s *seleniumGridScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { sessions, err := s.getSessionsCount(ctx, s.logger) if err != nil { return []external_metrics.ExternalMetricValue{}, fmt.Errorf("error requesting selenium grid endpoint: %s", err) diff --git a/pkg/scalers/solace_scaler.go b/pkg/scalers/solace_scaler.go index 54ff215c0a9..91e67744a13 100644 --- a/pkg/scalers/solace_scaler.go +++ b/pkg/scalers/solace_scaler.go @@ -10,7 +10,6 @@ import ( "github.com/go-logr/logr" v2 "k8s.io/api/autoscaling/v2" - "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" kedautil "github.com/kedacore/keda/v2/pkg/util" @@ -348,7 +347,7 @@ func (s *SolaceScaler) getSolaceQueueMetricsFromSEMP(ctx context.Context) (Solac // INTERFACE METHOD // Call SEMP API to retrieve metrics // returns value for named metric -func (s *SolaceScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) { +func (s *SolaceScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { var metricValues, mv SolaceMetricValues var mve error if mv, mve = 
s.getSolaceQueueMetricsFromSEMP(ctx); mve != nil { diff --git a/pkg/scalers/stan_scaler.go b/pkg/scalers/stan_scaler.go index fc8cf697bb4..5dae5fe33d0 100644 --- a/pkg/scalers/stan_scaler.go +++ b/pkg/scalers/stan_scaler.go @@ -10,7 +10,6 @@ import ( "github.com/go-logr/logr" v2 "k8s.io/api/autoscaling/v2" - "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" kedautil "github.com/kedacore/keda/v2/pkg/util" @@ -242,7 +241,7 @@ func (s *stanScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec { } // GetMetrics returns value for a supported metric and an error if there is a problem getting the metric -func (s *stanScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) { +func (s *stanScaler) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { req, err := http.NewRequestWithContext(ctx, "GET", s.metadata.monitoringEndpoint, nil) if err != nil { return nil, err diff --git a/pkg/scaling/cache/scalers_cache.go b/pkg/scaling/cache/scalers_cache.go index 314c361fa12..d7d3b33a275 100644 --- a/pkg/scaling/cache/scalers_cache.go +++ b/pkg/scaling/cache/scalers_cache.go @@ -24,7 +24,6 @@ import ( "github.com/go-logr/logr" v2 "k8s.io/api/autoscaling/v2" corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/record" "k8s.io/metrics/pkg/apis/external_metrics" logf "sigs.k8s.io/controller-runtime/pkg/log" @@ -35,10 +34,11 @@ import ( ) type ScalersCache struct { - Generation int64 - Scalers []ScalerBuilder - Logger logr.Logger - Recorder record.EventRecorder + ScaledObject *kedav1alpha1.ScaledObject + Generation int64 + Scalers []ScalerBuilder + Logger logr.Logger + Recorder record.EventRecorder } type ScalerBuilder struct { @@ -68,11 +68,11 @@ func (c *ScalersCache) GetPushScalers() []scalers.PushScaler { return result } -func (c *ScalersCache) GetMetricsForScaler(ctx 
context.Context, id int, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) { +func (c *ScalersCache) GetMetricsForScaler(ctx context.Context, id int, metricName string) ([]external_metrics.ExternalMetricValue, error) { if id < 0 || id >= len(c.Scalers) { return nil, fmt.Errorf("scaler with id %d not found. Len = %d", id, len(c.Scalers)) } - m, err := c.Scalers[id].Scaler.GetMetrics(ctx, metricName, metricSelector) + m, err := c.Scalers[id].Scaler.GetMetrics(ctx, metricName) if err == nil { return m, nil } @@ -82,7 +82,7 @@ func (c *ScalersCache) GetMetricsForScaler(ctx context.Context, id int, metricNa return nil, err } - return ns.GetMetrics(ctx, metricName, metricSelector) + return ns.GetMetrics(ctx, metricName) } func (c *ScalersCache) IsScaledObjectActive(ctx context.Context, scaledObject *kedav1alpha1.ScaledObject) (bool, bool, []external_metrics.ExternalMetricValue) { @@ -180,16 +180,17 @@ func (c *ScalersCache) IsScaledJobActive(ctx context.Context, scaledJob *kedav1a return isActive, ceilToInt64(queueLength), ceilToInt64(maxValue) } -func (c *ScalersCache) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) { +// TODO this is probably not needed, revisit whole package +func (c *ScalersCache) GetMetrics(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, error) { var metrics []external_metrics.ExternalMetricValue for i, s := range c.Scalers { - m, err := s.Scaler.GetMetrics(ctx, metricName, metricSelector) + m, err := s.Scaler.GetMetrics(ctx, metricName) if err != nil { ns, err := c.refreshScaler(ctx, i) if err != nil { return metrics, err } - m, err = ns.GetMetrics(ctx, metricName, metricSelector) + m, err = ns.GetMetrics(ctx, metricName) if err != nil { return metrics, err } @@ -246,7 +247,9 @@ type scalerMetrics struct { isActive bool } +// TODO needs refactor func (c *ScalersCache) 
getScaledJobMetrics(ctx context.Context, scaledJob *kedav1alpha1.ScaledJob) []scalerMetrics { + // TODO this loop should be probably done similar way the ScaledObject loop is done var scalersMetrics []scalerMetrics for i, s := range c.Scalers { var queueLength float64 @@ -282,7 +285,8 @@ func (c *ScalersCache) getScaledJobMetrics(ctx context.Context, scaledJob *kedav targetAverageValue = getTargetAverageValue(metricSpecs) - metrics, err := s.Scaler.GetMetrics(ctx, metricSpecs[0].External.Metric.Name, nil) + // TODO this should probably be `cache.GetMetricsForScaler(ctx, scalerIndex, metricName)` + metrics, err := s.Scaler.GetMetrics(ctx, metricSpecs[0].External.Metric.Name) if err != nil { scalerLogger.V(1).Info("Error getting scaler metrics, but continue", "Error", err) c.Recorder.Event(scaledJob, corev1.EventTypeWarning, eventreason.KEDAScalerFailed, err.Error()) diff --git a/pkg/scaling/cache/scalers_cache_test.go b/pkg/scaling/cache/scalers_cache_test.go index 10d651f46ac..fd73a8a01ab 100644 --- a/pkg/scaling/cache/scalers_cache_test.go +++ b/pkg/scaling/cache/scalers_cache_test.go @@ -280,7 +280,7 @@ func createScaler(ctrl *gomock.Controller, queueLength int64, averageValue int64 } scaler.EXPECT().IsActive(gomock.Any()).Return(isActive, nil) scaler.EXPECT().GetMetricSpecForScaling(gomock.Any()).Return(metricsSpecs) - scaler.EXPECT().GetMetrics(gomock.Any(), gomock.Any(), nil).Return(metrics, nil) + scaler.EXPECT().GetMetrics(gomock.Any(), gomock.Any()).Return(metrics, nil) scaler.EXPECT().Close(gomock.Any()) return scaler } diff --git a/pkg/scaling/scale_handler.go b/pkg/scaling/scale_handler.go index 4bde8291454..cb057644346 100644 --- a/pkg/scaling/scale_handler.go +++ b/pkg/scaling/scale_handler.go @@ -19,6 +19,7 @@ package scaling import ( "context" "fmt" + "strings" "sync" "time" @@ -28,11 +29,15 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/scale" "k8s.io/client-go/tools/record" + "k8s.io/metrics/pkg/apis/external_metrics" 
"sigs.k8s.io/controller-runtime/pkg/client" logf "sigs.k8s.io/controller-runtime/pkg/log" kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1" "github.com/kedacore/keda/v2/pkg/eventreason" + "github.com/kedacore/keda/v2/pkg/fallback" + metricsserviceapi "github.com/kedacore/keda/v2/pkg/metricsservice/api" + "github.com/kedacore/keda/v2/pkg/prommetrics" "github.com/kedacore/keda/v2/pkg/scalers" "github.com/kedacore/keda/v2/pkg/scaling/cache" "github.com/kedacore/keda/v2/pkg/scaling/executor" @@ -46,6 +51,8 @@ type ScaleHandler interface { DeleteScalableObject(ctx context.Context, scalableObject interface{}) error GetScalersCache(ctx context.Context, scalableObject interface{}) (*cache.ScalersCache, error) ClearScalersCache(ctx context.Context, scalableObject interface{}) error + + GetExternalMetrics(ctx context.Context, scaledObjectName, scaledObjectNamespace, metricName string) (*external_metrics.ExternalMetricValueList, *metricsserviceapi.PromMetricsMsg, error) } type scaleHandler struct { @@ -80,7 +87,7 @@ func (h *scaleHandler) HandleScalableObject(ctx context.Context, scalableObject return err } - key := withTriggers.GenerateIdenitifier() + key := withTriggers.GenerateIdentifier() ctx, cancel := context.WithCancel(ctx) // cancel the outdated ScaleLoop for the same ScaledObject (if exists) @@ -117,7 +124,7 @@ func (h *scaleHandler) DeleteScalableObject(ctx context.Context, scalableObject return err } - key := withTriggers.GenerateIdenitifier() + key := withTriggers.GenerateIdentifier() result, ok := h.scaleLoopContexts.Load(key) if ok { cancel, ok := result.(context.CancelFunc) @@ -163,13 +170,62 @@ func (h *scaleHandler) startScaleLoop(ctx context.Context, withTriggers *kedav1a } } +func (h *scaleHandler) getScalersCacheForScaledObject(ctx context.Context, scaledObjectName, scaledObjectNamespace string) (*cache.ScalersCache, error) { + key := kedav1alpha1.GenerateIdentifier("ScaledObject", scaledObjectNamespace, scaledObjectName) + + h.lock.RLock() + 
if cache, ok := h.scalerCaches[key]; ok { + h.lock.RUnlock() + return cache, nil + } + h.lock.RUnlock() + + h.lock.Lock() + defer h.lock.Unlock() + if cache, ok := h.scalerCaches[key]; ok { + return cache, nil + } + + scaledObject := &kedav1alpha1.ScaledObject{} + err := h.client.Get(ctx, types.NamespacedName{Name: scaledObjectName, Namespace: scaledObjectNamespace}, scaledObject) + if err != nil { + h.logger.Error(err, "failed to get ScaledObject", "name", scaledObjectName, "namespace", scaledObjectNamespace) + return nil, err + } + + podTemplateSpec, containerName, err := resolver.ResolveScaleTargetPodSpec(ctx, h.client, h.logger, scaledObject) + if err != nil { + return nil, err + } + + withTriggers, err := asDuckWithTriggers(scaledObject) + if err != nil { + return nil, err + } + + scalers, err := h.buildScalers(ctx, withTriggers, podTemplateSpec, containerName) + if err != nil { + return nil, err + } + + h.scalerCaches[key] = &cache.ScalersCache{ + ScaledObject: scaledObject, + Scalers: scalers, + Logger: h.logger, + Recorder: h.recorder, + } + + return h.scalerCaches[key], nil +} + +// TODO refactor this together with GetScalersCacheForScaledObject() func (h *scaleHandler) GetScalersCache(ctx context.Context, scalableObject interface{}) (*cache.ScalersCache, error) { withTriggers, err := asDuckWithTriggers(scalableObject) if err != nil { return nil, err } - key := withTriggers.GenerateIdenitifier() + key := withTriggers.GenerateIdentifier() h.lock.RLock() if cache, ok := h.scalerCaches[key]; ok && cache.Generation == withTriggers.Generation { @@ -196,12 +252,19 @@ func (h *scaleHandler) GetScalersCache(ctx context.Context, scalableObject inter return nil, err } - h.scalerCaches[key] = &cache.ScalersCache{ + newCache := &cache.ScalersCache{ Generation: withTriggers.Generation, Scalers: scalers, Logger: h.logger, Recorder: h.recorder, } + switch obj := scalableObject.(type) { + case *kedav1alpha1.ScaledObject: + newCache.ScaledObject = obj + default: + } + + 
h.scalerCaches[key] = newCache return h.scalerCaches[key], nil } @@ -212,12 +275,12 @@ func (h *scaleHandler) ClearScalersCache(ctx context.Context, scalableObject int return err } - key := withTriggers.GenerateIdenitifier() + key := withTriggers.GenerateIdentifier() h.lock.Lock() defer h.lock.Unlock() - if cache, ok := h.scalerCaches[key]; ok { + h.logger.V(1).WithValues("key", key).Info("Removing entry from ScalersCache") cache.Close(ctx) delete(h.scalerCaches, key) } @@ -288,6 +351,109 @@ func (h *scaleHandler) checkScalers(ctx context.Context, scalableObject interfac } } +func (h *scaleHandler) GetExternalMetrics(ctx context.Context, scaledObjectName, scaledObjectNamespace, metricName string) (*external_metrics.ExternalMetricValueList, *metricsserviceapi.PromMetricsMsg, error) { + var matchingMetrics []external_metrics.ExternalMetricValue + + exportedPromMetrics := metricsserviceapi.PromMetricsMsg{ + ScaledObjectErr: false, + ScalerMetric: []*metricsserviceapi.ScalerMetricMsg{}, + ScalerError: []*metricsserviceapi.ScalerErrorMsg{}, + } + + cache, err := h.getScalersCacheForScaledObject(ctx, scaledObjectName, scaledObjectNamespace) + prommetrics.RecordScaledObjectError(scaledObjectNamespace, scaledObjectName, err) + + // [DEPRECATED] handle exporting Prometheus metrics from Operator to Metrics Server + exportedPromMetrics.ScaledObjectErr = (err != nil) + + if err != nil { + return nil, &exportedPromMetrics, fmt.Errorf("error when getting scalers %s", err) + } + + var scaledObject *kedav1alpha1.ScaledObject + if cache.ScaledObject != nil { + scaledObject = cache.ScaledObject + } else { + err := fmt.Errorf("scaledObject not found in the cache") + h.logger.Error(err, "scaledObject not found in the cache", "name", scaledObjectName, "namespace", scaledObjectNamespace) + return nil, &exportedPromMetrics, err + } + + // let's check metrics for all scalers in a ScaledObject + scalerError := false + scalers, scalerConfigs := cache.GetScalers() + + 
h.logger.V(1).WithValues("name", scaledObjectName, "namespace", scaledObjectNamespace, "metricName", metricName, "scalers", scalers).Info("Getting metric value") + + for scalerIndex := 0; scalerIndex < len(scalers); scalerIndex++ { + metricSpecs := scalers[scalerIndex].GetMetricSpecForScaling(ctx) + scalerName := strings.Replace(fmt.Sprintf("%T", scalers[scalerIndex]), "*scalers.", "", 1) + if scalerConfigs[scalerIndex].TriggerName != "" { + scalerName = scalerConfigs[scalerIndex].TriggerName + } + + for _, metricSpec := range metricSpecs { + // skip cpu/memory resource scaler + if metricSpec.External == nil { + continue + } + + // Filter only the desired metric + if strings.EqualFold(metricSpec.External.Metric.Name, metricName) { + metrics, err := cache.GetMetricsForScaler(ctx, scalerIndex, metricName) + metrics, err = fallback.GetMetricsWithFallback(ctx, h.client, h.logger, metrics, err, metricName, scaledObject, metricSpec) + if err != nil { + scalerError = true + h.logger.Error(err, "error getting metric for scaler", "scaledObject.Namespace", scaledObjectNamespace, "scaledObject.Name", scaledObjectName, "scaler", scalerName) + } else { + for _, metric := range metrics { + metricValue := metric.Value.AsApproximateFloat64() + prommetrics.RecordScalerMetric(scaledObjectNamespace, scaledObjectName, scalerName, scalerIndex, metric.MetricName, metricValue) + + // [DEPRECATED] handle exporting Prometheus metrics from Operator to Metrics Server + scalerMetricMsg := metricsserviceapi.ScalerMetricMsg{ + ScalerName: scalerName, + ScalerIndex: int32(scalerIndex), + MetricName: metricName, + MetricValue: float32(metricValue), + } + exportedPromMetrics.ScalerMetric = append(exportedPromMetrics.ScalerMetric, &scalerMetricMsg) + } + matchingMetrics = append(matchingMetrics, metrics...) 
+ } + prommetrics.RecordScalerError(scaledObjectNamespace, scaledObjectName, scalerName, scalerIndex, metricName, err) + + // [DEPRECATED] handle exporting Prometheus metrics from Operator to Metrics Server + scalerErrMsg := metricsserviceapi.ScalerErrorMsg{ + ScalerName: scalerName, + ScalerIndex: int32(scalerIndex), + MetricName: metricName, + Error: (err != nil), + } + exportedPromMetrics.ScalerError = append(exportedPromMetrics.ScalerError, &scalerErrMsg) + } + } + } + + // invalidate the cache for the ScaledObject, if we hit an error in any scaler + // in this case we try to build all scalers (and resolve all secrets/creds) again in the next call + if scalerError { + err := h.ClearScalersCache(ctx, scaledObject) + if err != nil { + h.logger.Error(err, "error clearing scalers cache") + } + h.logger.V(1).Info("scaler error encountered, clearing scaler cache") + } + + if len(matchingMetrics) == 0 { + return nil, &exportedPromMetrics, fmt.Errorf("no matching metrics found for " + metricName) + } + + return &external_metrics.ExternalMetricValueList{ + Items: matchingMetrics, + }, &exportedPromMetrics, nil +} + // buildScalers returns list of Scalers for the specified triggers func (h *scaleHandler) buildScalers(ctx context.Context, withTriggers *kedav1alpha1.WithTriggers, podTemplateSpec *corev1.PodTemplateSpec, containerName string) ([]cache.ScalerBuilder, error) { logger := h.logger.WithValues("type", withTriggers.Kind, "namespace", withTriggers.Namespace, "name", withTriggers.Name) diff --git a/pkg/util/env_resolver.go b/pkg/util/env_resolver.go index 0fe32ed1f24..21e6647ac2e 100644 --- a/pkg/util/env_resolver.go +++ b/pkg/util/env_resolver.go @@ -1,3 +1,19 @@ +/* +Copyright 2022 The KEDA Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + package util import ( @@ -6,33 +22,33 @@ import ( "time" ) -func ResolveOsEnvInt(envName string, defaultValue int) (int, error) { +func ResolveOsEnvBool(envName string, defaultValue bool) (bool, error) { valueStr, found := os.LookupEnv(envName) if found && valueStr != "" { - return strconv.Atoi(valueStr) + return strconv.ParseBool(valueStr) } return defaultValue, nil } -func ResolveOsEnvDuration(envName string) (*time.Duration, error) { +func ResolveOsEnvInt(envName string, defaultValue int) (int, error) { valueStr, found := os.LookupEnv(envName) if found && valueStr != "" { - value, err := time.ParseDuration(valueStr) - return &value, err + return strconv.Atoi(valueStr) } - return nil, nil + return defaultValue, nil } -func ResolveOsEnvBool(envName string, defaultValue bool) (bool, error) { +func ResolveOsEnvDuration(envName string) (*time.Duration, error) { valueStr, found := os.LookupEnv(envName) if found && valueStr != "" { - return strconv.ParseBool(valueStr) + value, err := time.ParseDuration(valueStr) + return &value, err } - return defaultValue, nil + return nil, nil } diff --git a/tests/internals/prometheus_metrics/prometheus_metrics_test.go b/tests/internals/prometheus_metrics/prometheus_metrics_test.go index bdb5d7ed354..24e142e7658 100644 --- a/tests/internals/prometheus_metrics/prometheus_metrics_test.go +++ b/tests/internals/prometheus_metrics/prometheus_metrics_test.go @@ -24,13 +24,13 @@ const ( ) var ( - testNamespace = fmt.Sprintf("%s-ns", testName) - deploymentName = fmt.Sprintf("%s-deployment", testName) - monitoredDeploymentName = 
fmt.Sprintf("%s-monitored", testName) - scaledObjectName = fmt.Sprintf("%s-so", testName) - cronScaledJobName = fmt.Sprintf("%s-cron-sj", testName) - clientName = fmt.Sprintf("%s-client", testName) - serviceName = fmt.Sprintf("%s-service", testName) + testNamespace = fmt.Sprintf("%s-ns", testName) + deploymentName = fmt.Sprintf("%s-deployment", testName) + monitoredDeploymentName = fmt.Sprintf("%s-monitored", testName) + scaledObjectName = fmt.Sprintf("%s-so", testName) + cronScaledJobName = fmt.Sprintf("%s-cron-sj", testName) + clientName = fmt.Sprintf("%s-client", testName) + kedaOperatorPrometheusURL = "http://keda-operator.keda.svc.cluster.local:8080/metrics" ) type templateData struct { @@ -41,7 +41,6 @@ type templateData struct { CronScaledJobName string MonitoredDeploymentName string ClientName string - ServiceName string } const ( @@ -165,21 +164,6 @@ spec: - -c - "exec tail -f /dev/null"` - serviceTemplate = ` -apiVersion: v1 -kind: Service -metadata: - name: {{.ServiceName}} - namespace: keda -spec: - ports: - - name: metrics - port: 8080 - targetPort: 8080 - selector: - app: keda-operator -` - authenticationTemplate = ` apiVersion: v1 kind: Secret @@ -240,7 +224,8 @@ func TestScaler(t *testing.T) { assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 2, 60, 2), "replica count should be 2 after 2 minute") - testHPAScalerMetricValue(t) + testScalerMetricValue(t) + testMetricsServerScalerMetricValue(t) testOperatorMetrics(t, kc, data) // cleanup @@ -255,14 +240,12 @@ func getTemplateData() (templateData, []Template) { ScaledObjectName: scaledObjectName, MonitoredDeploymentName: monitoredDeploymentName, ClientName: clientName, - ServiceName: serviceName, CronScaledJobName: cronScaledJobName, }, []Template{ {Name: "deploymentTemplate", Config: deploymentTemplate}, {Name: "monitoredDeploymentTemplate", Config: monitoredDeploymentTemplate}, {Name: "scaledObjectTemplate", Config: scaledObjectTemplate}, {Name: "clientTemplate", 
Config: clientTemplate}, - {Name: "serviceTemplate", Config: serviceTemplate}, {Name: "authenticatioNTemplate", Config: authenticationTemplate}, } } @@ -280,8 +263,32 @@ func fetchAndParsePrometheusMetrics(t *testing.T, cmd string) map[string]*promMo return families } -func testHPAScalerMetricValue(t *testing.T) { - t.Log("--- testing hpa scaler metric value ---") +func testScalerMetricValue(t *testing.T) { + t.Log("--- testing scaler metric value ---") + + family := fetchAndParsePrometheusMetrics(t, fmt.Sprintf("curl --insecure %s", kedaOperatorPrometheusURL)) + + if val, ok := family["keda_scaler_metrics_value"]; ok { + var found bool + metrics := val.GetMetric() + for _, metric := range metrics { + labels := metric.GetLabel() + for _, label := range labels { + if *label.Name == "scaledObject" && *label.Value == scaledObjectName { + assert.Equal(t, float64(4), *metric.Gauge.Value) + found = true + } + } + } + assert.Equal(t, true, found) + } else { + t.Errorf("metric not available") + } +} + +// [DEPRECATED] handle exporting Prometheus metrics from Operator to Metrics Server +func testMetricsServerScalerMetricValue(t *testing.T) { + t.Log("--- testing scaler metric value in metrics server ---") family := fetchAndParsePrometheusMetrics(t, "curl --insecure http://keda-metrics-apiserver.keda:9022/metrics") @@ -375,7 +382,7 @@ func getOperatorMetricsManually(t *testing.T, kc *kubernetes.Clientset) (map[str } func testOperatorMetricValues(t *testing.T, kc *kubernetes.Clientset) { - families := fetchAndParsePrometheusMetrics(t, fmt.Sprintf("curl --insecure http://%s.keda:8080/metrics", serviceName)) + families := fetchAndParsePrometheusMetrics(t, fmt.Sprintf("curl --insecure %s", kedaOperatorPrometheusURL)) expectedTriggerTotals, expectedCrTotals := getOperatorMetricsManually(t, kc) checkTriggerTotalValues(t, families, expectedTriggerTotals) @@ -385,7 +392,7 @@ func testOperatorMetricValues(t *testing.T, kc *kubernetes.Clientset) { func checkTriggerTotalValues(t 
*testing.T, families map[string]*promModel.MetricFamily, expected map[string]int) { t.Log("--- testing trigger total metrics ---") - family, ok := families["keda_operator_trigger_totals"] + family, ok := families["keda_trigger_totals"] if !ok { t.Errorf("metric not available") return @@ -414,7 +421,7 @@ func checkTriggerTotalValues(t *testing.T, families map[string]*promModel.Metric func checkCRTotalValues(t *testing.T, families map[string]*promModel.MetricFamily, expected map[string]map[string]int) { t.Log("--- testing resource total metrics ---") - family, ok := families["keda_operator_resource_totals"] + family, ok := families["keda_resource_totals"] if !ok { t.Errorf("metric not available") return