Skip to content

Commit

Permalink
Pass through Pod Affinity Rules
Browse files Browse the repository at this point in the history
Allow Pod Affinity Rules to be defined in the Limitador CR, which will be passed to the Limitador Deployment and placed in `spec.template.spec.affinity`.
  • Loading branch information
Boomatang committed Aug 16, 2023
1 parent e110bb7 commit 36d82db
Show file tree
Hide file tree
Showing 9 changed files with 1,767 additions and 31 deletions.
3 changes: 3 additions & 0 deletions api/v1alpha1/limitador_types.go
Original file line number Diff line number Diff line change
Expand Up @@ -44,6 +44,9 @@ type LimitadorSpec struct {
// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
// Important: Run "make" to regenerate code after modifying this file

// +optional
Affinity *corev1.Affinity `json:"affinity,omitempty"`

// +optional
Replicas *int `json:"replicas,omitempty"`

Expand Down
5 changes: 5 additions & 0 deletions api/v1alpha1/zz_generated.deepcopy.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

828 changes: 828 additions & 0 deletions bundle/manifests/limitador.kuadrant.io_limitadors.yaml

Large diffs are not rendered by default.

828 changes: 828 additions & 0 deletions config/crd/bases/limitador.kuadrant.io_limitadors.yaml

Large diffs are not rendered by default.

60 changes: 30 additions & 30 deletions config/manager/manager.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -25,35 +25,35 @@ spec:
securityContext:
runAsNonRoot: true
containers:
- command:
- /manager
args:
- --leader-elect
env:
- name: RELATED_IMAGE_LIMITADOR
value: "quay.io/kuadrant/limitador:latest"
image: controller:latest
name: manager
securityContext:
allowPrivilegeEscalation: false
livenessProbe:
httpGet:
path: /healthz
port: 8081
initialDelaySeconds: 15
periodSeconds: 20
readinessProbe:
httpGet:
path: /readyz
port: 8081
initialDelaySeconds: 5
periodSeconds: 10
resources:
limits:
cpu: 200m
memory: 300Mi
requests:
cpu: 200m
memory: 200Mi
- command:
- /manager
args:
- --leader-elect
env:
- name: RELATED_IMAGE_LIMITADOR
value: "quay.io/kuadrant/limitador:latest"
image: controller:latest
name: manager
securityContext:
allowPrivilegeEscalation: false
livenessProbe:
httpGet:
path: /healthz
port: 8081
initialDelaySeconds: 15
periodSeconds: 20
readinessProbe:
httpGet:
path: /readyz
port: 8081
initialDelaySeconds: 5
periodSeconds: 10
resources:
limits:
cpu: 200m
memory: 300Mi
requests:
cpu: 200m
memory: 200Mi
serviceAccountName: controller-manager
terminationGracePeriodSeconds: 10
1 change: 1 addition & 0 deletions controllers/limitador_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -145,6 +145,7 @@ func (r *LimitadorReconciler) reconcileDeployment(ctx context.Context, limitador
deploymentMutators = append(deploymentMutators,
reconcilers.DeploymentImageMutator,
reconcilers.DeploymentCommandMutator,
reconcilers.DeploymentAffinityMutator,
)

deployment := limitador.Deployment(limitadorObj, storageConfigSecret)
Expand Down
63 changes: 62 additions & 1 deletion controllers/limitador_controller_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -40,6 +40,23 @@ var _ = Describe("Limitador controller", func() {
version := LimitadorVersion
httpPort := &limitadorv1alpha1.TransportProtocol{Port: &httpPortNumber}
grpcPort := &limitadorv1alpha1.TransportProtocol{Port: &grpcPortNumber}
affinity := &v1.Affinity{
PodAntiAffinity: &v1.PodAntiAffinity{
PreferredDuringSchedulingIgnoredDuringExecution: []v1.WeightedPodAffinityTerm{
{
Weight: 100,
PodAffinityTerm: v1.PodAffinityTerm{
LabelSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"app.kubernetes.io/name": "limitador",
},
},
TopologyKey: "kubernetes.io/hostname",
},
},
},
},
}

limits := []limitadorv1alpha1.RateLimit{
{
Expand Down Expand Up @@ -74,6 +91,7 @@ var _ = Describe("Limitador controller", func() {
Spec: limitadorv1alpha1.LimitadorSpec{
Replicas: &replicas,
Version: &version,
Affinity: affinity,
Listener: &limitadorv1alpha1.Listener{
HTTP: httpPort,
GRPC: grpcPort,
Expand Down Expand Up @@ -169,6 +187,9 @@ var _ = Describe("Limitador controller", func() {
},
),
)
Expect(createdLimitadorDeployment.Spec.Template.Spec.Affinity).Should(
Equal(affinity),
)
})

It("Should create a Limitador service", func() {
Expand Down Expand Up @@ -259,6 +280,8 @@ var _ = Describe("Limitador controller", func() {
updatedLimitador.Spec.Replicas = &replicas
version = "latest"
updatedLimitador.Spec.Version = &version
affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution[0].Weight = 99
updatedLimitador.Spec.Affinity = affinity

Expect(k8sClient.Update(context.TODO(), &updatedLimitador)).Should(Succeed())
updatedLimitadorDeployment := appsv1.Deployment{}
Expand All @@ -277,8 +300,46 @@ var _ = Describe("Limitador controller", func() {

correctReplicas := *updatedLimitadorDeployment.Spec.Replicas == LimitadorReplicas+1
correctImage := updatedLimitadorDeployment.Spec.Template.Spec.Containers[0].Image == LimitadorImage+":latest"
correctAffinity := updatedLimitadorDeployment.Spec.Template.Spec.Affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution[0].Weight == 99

return correctReplicas && correctImage && correctAffinity
}, timeout, interval).Should(BeTrue())
})

It("Should modify limitador deployments if nil object set", func() {
	// Fetch the latest Limitador CR so the update below is not rejected
	// for a stale resourceVersion.
	updatedLimitador := limitadorv1alpha1.Limitador{}
	Eventually(func() bool {
		err := k8sClient.Get(
			context.TODO(),
			types.NamespacedName{
				Namespace: LimitadorNamespace,
				Name:      limitadorObj.Name,
			},
			&updatedLimitador)

		return err == nil
	}, timeout, interval).Should(BeTrue())

	// Clearing the affinity on the CR must propagate to the deployment's
	// pod template: the affinity mutator should remove the stale value,
	// not leave it in place.
	updatedLimitador.Spec.Affinity = nil

	Expect(k8sClient.Update(context.TODO(), &updatedLimitador)).Should(Succeed())
	updatedLimitadorDeployment := appsv1.Deployment{}
	Eventually(func() bool {
		err := k8sClient.Get(
			context.TODO(),
			types.NamespacedName{
				Namespace: LimitadorNamespace,
				Name:      limitadorObj.Name,
			},
			&updatedLimitadorDeployment)

		if err != nil {
			return false
		}

		// Only the affinity matters for this test. (A previous revision had
		// a stray `return correctReplicas && correctImage` copied from the
		// sibling test, which referenced variables not in scope here and
		// made this return unreachable.)
		return updatedLimitadorDeployment.Spec.Template.Spec.Affinity == nil
	}, timeout, interval).Should(BeTrue())
})

Expand Down
1 change: 1 addition & 0 deletions pkg/limitador/k8s_objects.go
Original file line number Diff line number Diff line change
Expand Up @@ -85,6 +85,7 @@ func Deployment(limitador *limitadorv1alpha1.Limitador, storageConfigSecret *v1.
Labels: labels(),
},
Spec: v1.PodSpec{
Affinity: limitador.Spec.Affinity,
Containers: []v1.Container{
{
Name: "limitador",
Expand Down
9 changes: 9 additions & 0 deletions pkg/reconcilers/deployment.go
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,15 @@ func DeploymentMutator(opts ...DeploymentMutateFn) MutateFn {
}
}

// DeploymentAffinityMutator reconciles the pod affinity of the existing
// Deployment with the desired one. It returns true when the existing object
// was modified and therefore needs to be updated on the cluster.
func DeploymentAffinityMutator(desired, existing *appsv1.Deployment) bool {
	if reflect.DeepEqual(existing.Spec.Template.Spec.Affinity, desired.Spec.Template.Spec.Affinity) {
		return false
	}
	existing.Spec.Template.Spec.Affinity = desired.Spec.Template.Spec.Affinity
	return true
}

func DeploymentReplicasMutator(desired, existing *appsv1.Deployment) bool {
update := false

Expand Down

0 comments on commit 36d82db

Please sign in to comment.