-
Notifications
You must be signed in to change notification settings - Fork 24
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Add ability to configure a pod topology spread constraint #37
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -71,6 +71,8 @@ func deployment(es *egressv1.ExternalService, configHash string) *appsv1.Deploym | |
img = i | ||
} | ||
|
||
labelSelector := metav1.SetAsLabelSelector(labelsToSelect(es)) | ||
|
||
var tolerations []corev1.Toleration | ||
tk, kok := os.LookupEnv("TAINT_TOLERATION_KEY") | ||
tv, vok := os.LookupEnv("TAINT_TOLERATION_VALUE") | ||
|
@@ -91,6 +93,47 @@ func deployment(es *egressv1.ExternalService, configHash string) *appsv1.Deploym | |
} | ||
} | ||
|
||
var podTopologySpread []corev1.TopologySpreadConstraint | ||
topologyEnable, _ := os.LookupEnv("ENABLE_POD_TOPOLOGY_SPREAD") | ||
if topologyEnable == "true" { | ||
zoneSkew, zoneEnabled := os.LookupEnv("POD_TOPOLOGY_ZONE_MAX_SKEW") | ||
zoneKey, zoneKeyFound := os.LookupEnv("POD_TOPOLOGY_ZONE_MAX_SKEW_KEY") | ||
if zoneEnabled { | ||
maxSkew, err := strconv.Atoi(zoneSkew) | ||
if err != nil { | ||
maxSkew = 1 | ||
} | ||
// Default zone key to the Kubernetes topology one if not specified | ||
if !zoneKeyFound { | ||
zoneKey = "topology.kubernetes.io/zone" | ||
} | ||
podTopologySpread = append(podTopologySpread, corev1.TopologySpreadConstraint{ | ||
TopologyKey: zoneKey, | ||
WhenUnsatisfiable: corev1.ScheduleAnyway, | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Is this field so that we schedule a pod even if none of the rules match? There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Yes, this stops us not scheduling pods for when an AZ goes down or there's a small amount of nodes (usually in a nonprod environment) |
||
MaxSkew: int32(maxSkew), | ||
LabelSelector: labelSelector, | ||
}) | ||
} | ||
hostnameSkew, hostnameEnabled := os.LookupEnv("POD_TOPOLOGY_HOSTNAME_MAX_SKEW") | ||
hostnameKey, hostnameKeyFound := os.LookupEnv("POD_TOPOLOGY_HOSTNAME_MAX_SKEW_KEY") | ||
if hostnameEnabled { | ||
maxSkew, err := strconv.Atoi(hostnameSkew) | ||
if err != nil { | ||
maxSkew = 1 | ||
} | ||
// Default hostname key to the Kubernetes one if not specified | ||
if !hostnameKeyFound { | ||
hostnameKey = "kubernetes.io/hostname" | ||
} | ||
podTopologySpread = append(podTopologySpread, corev1.TopologySpreadConstraint{ | ||
TopologyKey: hostnameKey, | ||
WhenUnsatisfiable: corev1.ScheduleAnyway, | ||
MaxSkew: int32(maxSkew), | ||
LabelSelector: labelSelector, | ||
}) | ||
} | ||
} | ||
|
||
var resources corev1.ResourceRequirements | ||
if es.Spec.Resources != nil { | ||
resources = *es.Spec.Resources | ||
|
@@ -124,15 +167,16 @@ func deployment(es *egressv1.ExternalService, configHash string) *appsv1.Deploym | |
MaxSurge: intstr.ValueOrDefault(nil, intstr.FromString("25%")), | ||
}, | ||
}, | ||
Selector: metav1.SetAsLabelSelector(labelsToSelect(es)), | ||
Selector: labelSelector, | ||
Template: corev1.PodTemplateSpec{ | ||
ObjectMeta: metav1.ObjectMeta{ | ||
Labels: labels(es), | ||
Annotations: a, | ||
}, | ||
Spec: corev1.PodSpec{ | ||
Tolerations: tolerations, | ||
NodeSelector: nodeSelector, | ||
Tolerations: tolerations, | ||
NodeSelector: nodeSelector, | ||
TopologySpreadConstraints: podTopologySpread, | ||
Containers: []corev1.Container{ | ||
{ | ||
Name: "gateway", | ||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
When would we want to specify a different zone key?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
There is an older zone label key that Kubernetes previously used (failure-domain.beta.kubernetes.io/zone), or the user's cluster might use a different zone topology key.