From 0c52a4083c495d392695b6eb6d9216c70a35df39 Mon Sep 17 00:00:00 2001
From: Andrew Daniels
Date: Tue, 16 Jan 2024 17:20:42 -0500
Subject: [PATCH] [TRB-54994]: Fixed a few issues that could cause resize
 actions to fail in namespaces with a LimitRange. (#91)

* [TRB-54994]: Fixed a few issues that could cause resize actions to fail in
  namespaces with a LimitRange.

* Added comments indicating that we have updated code taken from upstream
  Kubernetes.
---
 pkg/action/executor/limitrange_util.go        | 51 +++++++++++--------
 .../executor/workload_controller_resizer.go   |  8 +--
 2 files changed, 33 insertions(+), 26 deletions(-)

diff --git a/pkg/action/executor/limitrange_util.go b/pkg/action/executor/limitrange_util.go
index 02dd78d5d..3633d1003 100644
--- a/pkg/action/executor/limitrange_util.go
+++ b/pkg/action/executor/limitrange_util.go
@@ -155,16 +155,18 @@ func PodValidateLimitFunc(limitRange *corev1.LimitRange, pod *corev1.Pod) error
 	return utilerrors.NewAggregate(errs)
 }
 
+// ===========================================================================
+// NOTE: This method has deviated from the upstream kubernetes code.
+// If we ever pull the latest changes from upstream, we may need to manually
+// update this method.
+// ===========================================================================
 // minConstraint enforces the min constraint over the specified resource
 func minConstraint(limitType string, resourceName string, enforced resource.Quantity, request corev1.ResourceList, limit corev1.ResourceList) error {
 	req, reqExists := request[corev1.ResourceName(resourceName)]
 	lim, limExists := limit[corev1.ResourceName(resourceName)]
 	observedReqValue, observedLimValue, enforcedValue := requestLimitEnforcedValues(req, lim, enforced)
 
-	if !reqExists {
-		return fmt.Errorf("minimum %s usage per %s is %s. No request is specified", resourceName, limitType, enforced.String())
-	}
-	if observedReqValue < enforcedValue {
+	if reqExists && observedReqValue < enforcedValue {
 		return fmt.Errorf("minimum %s usage per %s is %s, but request is %s", resourceName, limitType, enforced.String(), req.String())
 	}
 	if limExists && (observedLimValue < enforcedValue) {
@@ -173,16 +175,18 @@ func minConstraint(limitType string, resourceName string, enforced resource.Quan
 	return nil
 }
 
+// ===========================================================================
+// NOTE: This method has deviated from the upstream kubernetes code.
+// If we ever pull the latest changes from upstream, we may need to manually
+// update this method.
+// ===========================================================================
 // maxConstraint enforces the max constraint over the specified resource
 func maxConstraint(limitType string, resourceName string, enforced resource.Quantity, request corev1.ResourceList, limit corev1.ResourceList) error {
 	req, reqExists := request[corev1.ResourceName(resourceName)]
 	lim, limExists := limit[corev1.ResourceName(resourceName)]
 	observedReqValue, observedLimValue, enforcedValue := requestLimitEnforcedValues(req, lim, enforced)
 
-	if !limExists {
-		return fmt.Errorf("maximum %s usage per %s is %s. No limit is specified", resourceName, limitType, enforced.String())
-	}
-	if observedLimValue > enforcedValue {
+	if limExists && observedLimValue > enforcedValue {
 		return fmt.Errorf("maximum %s usage per %s is %s, but limit is %s", resourceName, limitType, enforced.String(), lim.String())
 	}
 	if reqExists && (observedReqValue > enforcedValue) {
@@ -315,24 +319,27 @@ func mergePodResourceRequirements(pod *corev1.Pod, defaultRequirements *corev1.R
 	}
 }
 
+// ===========================================================================
+// NOTE: This method has deviated from the upstream kubernetes code.
+// If we ever pull the latest changes from upstream, we may need to manually
+// update this method.
+// ===========================================================================
 // mergeContainerResources handles defaulting all of the resources on a container.
 func mergeContainerResources(container *corev1.Container, defaultRequirements *corev1.ResourceRequirements) {
-	if container.Resources.Limits == nil {
-		container.Resources.Limits = corev1.ResourceList{}
-	}
-	if container.Resources.Requests == nil {
-		container.Resources.Requests = corev1.ResourceList{}
-	}
-	for k, v := range defaultRequirements.Limits {
-		_, found := container.Resources.Limits[k]
-		if !found {
-			container.Resources.Limits[k] = v.DeepCopy()
+	if container.Resources.Limits != nil && len(container.Resources.Limits) > 0 {
+		for k, v := range defaultRequirements.Limits {
+			_, found := container.Resources.Limits[k]
+			if !found {
+				container.Resources.Limits[k] = v.DeepCopy()
+			}
 		}
 	}
-	for k, v := range defaultRequirements.Requests {
-		_, found := container.Resources.Requests[k]
-		if !found {
-			container.Resources.Requests[k] = v.DeepCopy()
+	if container.Resources.Requests != nil && len(container.Resources.Requests) > 0 {
+		for k, v := range defaultRequirements.Requests {
+			_, found := container.Resources.Requests[k]
+			if !found {
+				container.Resources.Requests[k] = v.DeepCopy()
+			}
 		}
 	}
 }
diff --git a/pkg/action/executor/workload_controller_resizer.go b/pkg/action/executor/workload_controller_resizer.go
index f48e8a676..77acfb076 100644
--- a/pkg/action/executor/workload_controller_resizer.go
+++ b/pkg/action/executor/workload_controller_resizer.go
@@ -310,16 +310,16 @@ func buildDesiredPod4QuotaEvaluation(namespace string, resizeSpecs []*containerR
 		if len(spec.NewCapacity) > 0 && len(podSpec.Containers[spec.Index].Resources.Limits) == 0 {
 			podSpec.Containers[spec.Index].Resources.Limits = make(k8sapi.ResourceList)
 		}
-		if newVal, found := spec.NewRequest[k8sapi.ResourceCPU]; found {
+		if newVal, found := spec.NewRequest[k8sapi.ResourceCPU]; found && !newVal.IsZero() {
 			podSpec.Containers[spec.Index].Resources.Requests[k8sapi.ResourceCPU] = newVal
 		}
-		if newVal, found := spec.NewRequest[k8sapi.ResourceMemory]; found {
+		if newVal, found := spec.NewRequest[k8sapi.ResourceMemory]; found && !newVal.IsZero() {
 			podSpec.Containers[spec.Index].Resources.Requests[k8sapi.ResourceMemory] = newVal
 		}
-		if newVal, found := spec.NewCapacity[k8sapi.ResourceCPU]; found {
+		if newVal, found := spec.NewCapacity[k8sapi.ResourceCPU]; found && !newVal.IsZero() {
 			podSpec.Containers[spec.Index].Resources.Limits[k8sapi.ResourceCPU] = newVal
 		}
-		if newVal, found := spec.NewCapacity[k8sapi.ResourceMemory]; found {
+		if newVal, found := spec.NewCapacity[k8sapi.ResourceMemory]; found && !newVal.IsZero() {
 			podSpec.Containers[spec.Index].Resources.Limits[k8sapi.ResourceMemory] = newVal
 		}
 	}
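
For reference, below is a minimal standalone sketch (not part of the patch) of the relaxed validation semantics introduced above: minConstraint and maxConstraint now only reject a resource value that is explicitly specified and violates the LimitRange, rather than failing whenever a request or limit is absent. The helper minConstraintSketch and its simplified signature are illustrative assumptions, not code from this repository, and it compares quantities with Quantity.Cmp instead of the scaled integer values used by the real implementation.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// minConstraintSketch is a hypothetical, simplified version of the patched
// minConstraint: a missing request is no longer an error; only a request
// that is present and below the enforced minimum is rejected.
func minConstraintSketch(resourceName string, enforced resource.Quantity, request corev1.ResourceList) error {
	req, reqExists := request[corev1.ResourceName(resourceName)]
	if reqExists && req.Cmp(enforced) < 0 {
		return fmt.Errorf("minimum %s usage is %s, but request is %s", resourceName, enforced.String(), req.String())
	}
	// No request specified: accept, and let LimitRange defaulting apply.
	return nil
}

func main() {
	minCPU := resource.MustParse("100m")

	// Container with no CPU request: previously rejected, now accepted.
	fmt.Println(minConstraintSketch("cpu", minCPU, corev1.ResourceList{})) // <nil>

	// CPU request below the LimitRange minimum: still rejected.
	low := corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("50m")}
	fmt.Println(minConstraintSketch("cpu", minCPU, low)) // error
}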