Skip to content

Commit

Permalink
Change format for defining node pools to allow dynamically creating new node pools
Browse files Browse the repository at this point in the history
  • Loading branch information
hypesystem committed May 9, 2024
1 parent 674bd98 commit a22d573
Show file tree
Hide file tree
Showing 3 changed files with 18 additions and 38 deletions.
6 changes: 4 additions & 2 deletions infrastructure/environments/dplplat01/infrastructure/main.tf
Original file line number Diff line number Diff line change
Expand Up @@ -7,9 +7,11 @@ module "environment" {
# <environment_name>.dpl.reload.dk
lagoon_domain_base = "dplplat01.dpl.reload.dk"
random_seed = "LahYegheePhohGeew9Fa"
node_pools = {
"appdefault": { min: 8, max: 40, vm: "Standard_B4ms" },
"admin": { min: 3, max: 6, vm: "Standard_B4ms" }
}
node_pool_system_count = 2
node_pool_app_default_count_min = 8
node_pool_app_default_count_max = 40
# We've increased this quite a bit to test performance. The ideal starting-
# point seems to be in the range 102400 - 204800 to get enough IOPS to
# maintain performance during a Drupal site-install.
Expand Down
44 changes: 8 additions & 36 deletions infrastructure/terraform/modules/dpl-platform-environment/aks.tf
Original file line number Diff line number Diff line change
Expand Up @@ -60,38 +60,10 @@ resource "azurerm_kubernetes_cluster" "cluster" {
}
}

# Add a nodepool for administrative workloads
resource "azurerm_kubernetes_cluster_node_pool" "admin" {
name = "admin"
kubernetes_cluster_id = azurerm_kubernetes_cluster.cluster.id
vnet_subnet_id = azurerm_subnet.aks.id
node_labels = {
"noderole.dplplatform" : "admin"
}
zones = [
"1",
]

vm_size = var.node_pool_admin_vm_sku

# Enable autoscaling.
enable_auto_scaling = true
min_count = var.node_pool_admin_count_min
max_count = var.node_pool_admin_count_max
node_count = var.node_pool_admin_count_min

lifecycle {
ignore_changes = [
# Changed by the autoscaler, so we need to ignore it.
node_count
]
}
}


# Add a application default nodepool.
resource "azurerm_kubernetes_cluster_node_pool" "app_default" {
name = "appdefault"
resource "azurerm_kubernetes_cluster_node_pool" "pool" {
for_each = var.node_pools
name = each.key
kubernetes_cluster_id = azurerm_kubernetes_cluster.cluster.id
vnet_subnet_id = azurerm_subnet.aks.id
node_labels = {
Expand All @@ -106,15 +78,15 @@ resource "azurerm_kubernetes_cluster_node_pool" "app_default" {
# low resource requests, we're keeping the number of pods on a node low to
# avoid running the nodes too hot.
# Be aware that changing this value will destroy and recreate the nodepool.
max_pods = 30
max_pods = try(each.value.max_pods, 30)

vm_size = var.node_pool_app_default_vm_sku
vm_size = each.value.vm

# Enable autoscaling.
enable_auto_scaling = true
min_count = var.node_pool_app_default_count_min
max_count = var.node_pool_app_default_count_max
node_count = var.node_pool_app_default_count_min
min_count = each.value.min
max_count = each.value.max
#node_count = each.value.min

lifecycle {
ignore_changes = [
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,12 @@ variable "location" {
default = "West Europe"
}

# Node pools (in addition to the AKS system pool) to create for the cluster.
# Keyed by pool name; each value is an object with:
#   min      - minimum node count for the cluster autoscaler
#   max      - maximum node count for the cluster autoscaler
#   vm       - Azure VM SKU for the pool's nodes
#   max_pods - (optional) maximum pods per node; consumers fall back to a
#              default via try() when it is omitted
variable "node_pools" {
  description = "The node pools (other than the system one) used for the cluster"
  default     = {}
  # Bare `map` is deprecated legacy syntax (shorthand for map(any)), and
  # map(any) would force every pool object to share an identical attribute
  # set — breaking pools that set the optional max_pods while others omit it.
  # `any` accepts heterogeneous per-pool objects and is backward compatible.
  type = any
}

variable "node_pool_admin_count_max" {
description = "The maximum number of pods to autoscale the administration nodepool to"
default = 6
Expand Down

0 comments on commit a22d573

Please sign in to comment.