diff --git a/helm/cluster-aws/templates/aws-nth-helmrelease.yaml b/helm/cluster-aws/templates/aws-nth-helmrelease.yaml
index 7533723a..236d6cc6 100644
--- a/helm/cluster-aws/templates/aws-nth-helmrelease.yaml
+++ b/helm/cluster-aws/templates/aws-nth-helmrelease.yaml
@@ -1,6 +1,31 @@
 {{/* Default Helm values for the app */}}
 {{/* See schema for the appropriate app version here https://github.com/giantswarm/aws-nth-bundle/blob/main/helm/aws-nth-bundle/values.schema.json */}}
 {{- define "defaultAwsNodeTerminationHandlerHelmValues" }}
+awsNodeTerminationHandler:
+  values:
+    image:
+      registry: {{ include "awsContainerImageRegistry" $ }}
+
+    # Allow running on control plane nodes. On deletion, CAPI will first delete the worker nodes
+    # and we still want aws-node-termination-handler to take care of the last workers' EC2 lifecycle
+    # hooks since they otherwise won't be completed, resulting in unnecessary waiting time before
+    # AWS can terminate the instances
+    # (see `AWSMachinePool.spec.lifecycleHooks["aws-node-termination-handler"].heartbeatTimeout`).
+    # This runs on workers by default but allows moving pods to control plane nodes. Requires
+    # queue processing mode i.e. running as `Deployment`, not `DaemonSet`.
+    affinity:
+      nodeAffinity:
+        preferredDuringSchedulingIgnoredDuringExecution:
+          - preference:
+              matchExpressions:
+                - key: node-role.kubernetes.io/control-plane
+                  operator: DoesNotExist
+            weight: 10
+    tolerations:
+      - effect: NoSchedule
+        operator: Exists
+        key: node-role.kubernetes.io/control-plane
+
 clusterID: {{ include "resource.default.name" $ }}
 {{- if (.Values.global.connectivity.proxy).enabled }}
 proxy:
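
For reference, a minimal sketch of what the added defaults render to. The cluster name (`demo-cluster`) and the registry value are placeholders standing in for the `resource.default.name` and `awsContainerImageRegistry` template outputs, not values taken from this change, and the proxy block (only rendered when `global.connectivity.proxy.enabled` is set) is omitted:

```yaml
# Hypothetical rendered output of "defaultAwsNodeTerminationHandlerHelmValues"
# for a cluster named "demo-cluster"; registry and cluster name are placeholders.
awsNodeTerminationHandler:
  values:
    image:
      registry: 123456789012.dkr.ecr.eu-west-1.amazonaws.com  # placeholder registry
    affinity:
      nodeAffinity:
        preferredDuringSchedulingIgnoredDuringExecution:
          - preference:
              matchExpressions:
                - key: node-role.kubernetes.io/control-plane
                  operator: DoesNotExist
            weight: 10
    tolerations:
      - effect: NoSchedule
        operator: Exists
        key: node-role.kubernetes.io/control-plane
clusterID: demo-cluster  # placeholder cluster name
```

With a weight of 10 on the "not control plane" preference, the scheduler still favors worker nodes, while the toleration only permits (rather than forces) the pod to land on tainted control plane nodes once the workers are gone.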