diff --git a/modules/aws-eks/main.tf b/modules/aws-eks/main.tf
index 43b7e64..4c0a69c 100644
--- a/modules/aws-eks/main.tf
+++ b/modules/aws-eks/main.tf
@@ -1,29 +1,29 @@
 data "aws_partition" "current" {}
 data "aws_caller_identity" "current" {}
 
+data "aws_iam_session_context" "current" {
+  # This data source provides information on the IAM source role of an STS assumed role
+  # For non-role ARNs, this data source simply passes the ARN through as the issuer ARN
+  # Ref https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2327#issuecomment-1355581682
+  # Ref https://github.com/hashicorp/terraform-provider-aws/issues/28381
+  arn = data.aws_caller_identity.current.arn
+}
+
 locals {
   create = var.create
 
+  partition = data.aws_partition.current.partition
+
+  cluster_role = try(aws_iam_role.this[0].arn, var.iam_role_arn)
+
+  create_outposts_local_cluster    = length(var.outpost_config) > 0
+  enable_cluster_encryption_config = length(var.cluster_encryption_config) > 0 && !local.create_outposts_local_cluster
 }
 
 ################################################################################
 # Cluster
 ################################################################################
 
-data "aws_subnets" "private_subnets_with_eks_tag" {
-  filter {
-    name   = "vpc-id"
-    values = [var.vpc_id]
-  }
-  filter {
-    name   = "tag:Name"
-    values = ["${var.subnet_id_names}"]
-  }
-
-}
-
-
 resource "aws_eks_cluster" "this" {
   count = local.create ? 1 : 0
 
@@ -32,21 +32,48 @@ resource "aws_eks_cluster" "this" {
   version                   = var.cluster_version
   enabled_cluster_log_types = var.cluster_enabled_log_types
 
+  access_config {
+    authentication_mode = var.authentication_mode
+
+    # See access entries below - this is a one-time operation from the EKS API.
+    # Instead, we are hardcoding this to false and if users wish to achieve this
+    # same functionality, we will do that through an access entry which can be
+    # enabled or disabled at any time of their choosing using the variable
+    # var.enable_cluster_creator_admin_permissions
+    bootstrap_cluster_creator_admin_permissions = false
+  }
+
   vpc_config {
     security_group_ids      = compact(distinct(concat(var.cluster_additional_security_group_ids, [local.cluster_security_group_id])))
-    subnet_ids              = try(data.aws_subnets.private_subnets_with_eks_tag.ids, null)
+    subnet_ids              = coalescelist(var.control_plane_subnet_ids, var.subnet_ids)
    endpoint_private_access = var.cluster_endpoint_private_access
    endpoint_public_access  = var.cluster_endpoint_public_access
    public_access_cidrs     = var.cluster_endpoint_public_access_cidrs
  }
 
-  kubernetes_network_config {
-    ip_family         = var.cluster_ip_family
-    service_ipv4_cidr = var.cluster_service_ipv4_cidr
+  dynamic "kubernetes_network_config" {
+    # Not valid on Outposts
+    for_each = local.create_outposts_local_cluster ? [] : [1]
+
+    content {
+      ip_family         = var.cluster_ip_family
+      service_ipv4_cidr = var.cluster_service_ipv4_cidr
+      service_ipv6_cidr = var.cluster_service_ipv6_cidr
+    }
+  }
+
+  dynamic "outpost_config" {
+    for_each = local.create_outposts_local_cluster ? [var.outpost_config] : []
+
+    content {
+      control_plane_instance_type = outpost_config.value.control_plane_instance_type
+      outpost_arns                = outpost_config.value.outpost_arns
+    }
   }
 
   dynamic "encryption_config" {
-    for_each = toset(var.cluster_encryption_config)
+    # Not available on Outposts
+    for_each = local.enable_cluster_encryption_config ? [var.cluster_encryption_config] : []
 
     content {
       provider {
@@ -57,26 +84,39 @@ resource "aws_eks_cluster" "this" {
     }
   }
 
   tags = merge(
+    { terraform-aws-modules = "eks" },
     var.tags,
     var.cluster_tags,
   )
 
   timeouts {
-    create = lookup(var.cluster_timeouts, "create", null)
-    update = lookup(var.cluster_timeouts, "update", null)
-    delete = lookup(var.cluster_timeouts, "delete", null)
+    create = try(var.cluster_timeouts.create, null)
+    update = try(var.cluster_timeouts.update, null)
+    delete = try(var.cluster_timeouts.delete, null)
   }
 
   depends_on = [
     aws_iam_role_policy_attachment.this,
     aws_security_group_rule.cluster,
     aws_security_group_rule.node,
-    aws_cloudwatch_log_group.this
+    aws_cloudwatch_log_group.this,
+    aws_iam_policy.cni_ipv6_policy,
   ]
+
+  lifecycle {
+    ignore_changes = [
+      access_config[0].bootstrap_cluster_creator_admin_permissions
+    ]
+  }
 }
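 
+# Illustrative only: `var.cluster_encryption_config` is now a single object
+# rather than a list of objects. A hypothetical input consumed by the
+# `encryption_config` block above (the key ARN is a placeholder):
+#
+#   cluster_encryption_config = {
+#     provider_key_arn = "arn:aws:kms:us-east-1:111122223333:key/<key-id>"
+#     resources        = ["secrets"]
+#   }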
 
 resource "aws_ec2_tag" "cluster_primary_security_group" {
-  for_each = { for k, v in merge(var.tags, var.cluster_tags) : k => v if local.create && k != "Name" && var.create_cluster_primary_security_group_tags }
+  # This should not affect the name of the cluster primary security group
+  # Ref: https://github.com/terraform-aws-modules/terraform-aws-eks/pull/2006
+  # Ref: https://github.com/terraform-aws-modules/terraform-aws-eks/pull/2008
+  for_each = { for k, v in merge(var.tags, var.cluster_tags) :
+    k => v if local.create && k != "Name" && var.create_cluster_primary_security_group_tags && v != null
+  }
 
   resource_id = aws_eks_cluster.this[0].vpc_config[0].cluster_security_group_id
   key         = each.key
@@ -89,8 +129,93 @@ resource "aws_cloudwatch_log_group" "this" {
   name              = "/aws/eks/${var.cluster_name}/cluster"
   retention_in_days = var.cloudwatch_log_group_retention_in_days
   kms_key_id        = var.cloudwatch_log_group_kms_key_id
+  log_group_class   = var.cloudwatch_log_group_class
 
-  tags = var.tags
+  tags = merge(
+    var.tags,
+    var.cloudwatch_log_group_tags,
+    { Name = "/aws/eks/${var.cluster_name}/cluster" }
+  )
+}
+
+################################################################################
+# Access Entry
+################################################################################
+
+locals {
+  # This replaces the one-time logic from the EKS API with something that can be
+  # better controlled by users through Terraform
+  bootstrap_cluster_creator_admin_permissions = {
+    cluster_creator = {
+      principal_arn = data.aws_iam_session_context.current.issuer_arn
+      type          = "STANDARD"
+
+      policy_associations = {
+        admin = {
+          policy_arn = "arn:${local.partition}:eks::aws:cluster-access-policy/AmazonEKSClusterAdminPolicy"
+          access_scope = {
+            type = "cluster"
+          }
+        }
+      }
+    }
+  }
+
+  # Merge the bootstrap behavior with the entries that users provide
+  merged_access_entries = merge(
+    { for k, v in local.bootstrap_cluster_creator_admin_permissions : k => v if var.enable_cluster_creator_admin_permissions },
+    var.access_entries,
+  )
+
+  # Flatten out entries and policy associations so users can specify the policy
+  # associations within a single entry
+  flattened_access_entries = flatten([
+    for entry_key, entry_val in local.merged_access_entries : [
+      for pol_key, pol_val in lookup(entry_val, "policy_associations", {}) :
+      merge(
+        {
+          principal_arn = entry_val.principal_arn
+          entry_key     = entry_key
+          pol_key       = pol_key
+        },
+        { for k, v in {
+          association_policy_arn              = pol_val.policy_arn
+          association_access_scope_type       = pol_val.access_scope.type
+          association_access_scope_namespaces = lookup(pol_val.access_scope, "namespaces", [])
+        } : k => v if !contains(["EC2_LINUX", "EC2_WINDOWS", "FARGATE_LINUX"], lookup(entry_val, "type", "STANDARD")) },
+      )
+    ]
+  ])
+}
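+
+# Illustrative (hypothetical) `var.access_entries` input flattened above - the
+# entry key, role ARN, and namespace are placeholders, not module defaults:
+#
+#   access_entries = {
+#     ops = {
+#       principal_arn = "arn:aws:iam::111122223333:role/ops-team"
+#       policy_associations = {
+#         view = {
+#           policy_arn   = "arn:aws:eks::aws:cluster-access-policy/AmazonEKSViewPolicy"
+#           access_scope = { type = "namespace", namespaces = ["default"] }
+#         }
+#       }
+#     }
+#   }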
"EC2_WINDOWS", "FARGATE_LINUX"], lookup(entry_val, "type", "STANDARD")) }, + ) + ] + ]) +} + +resource "aws_eks_access_entry" "this" { + for_each = { for k, v in local.merged_access_entries : k => v if local.create } + + cluster_name = aws_eks_cluster.this[0].name + kubernetes_groups = try(each.value.kubernetes_groups, null) + principal_arn = each.value.principal_arn + type = try(each.value.type, "STANDARD") + user_name = try(each.value.user_name, null) + + tags = merge(var.tags, try(each.value.tags, {})) +} + +resource "aws_eks_access_policy_association" "this" { + for_each = { for k, v in local.flattened_access_entries : "${v.entry_key}_${v.pol_key}" => v if local.create } + + access_scope { + namespaces = try(each.value.association_access_scope_namespaces, []) + type = each.value.association_access_scope_type + } + + cluster_name = aws_eks_cluster.this[0].name + + policy_arn = each.value.association_policy_arn + principal_arn = each.value.principal_arn + + depends_on = [ + aws_eks_access_entry.this, + ] } ################################################################################ @@ -99,9 +224,9 @@ resource "aws_cloudwatch_log_group" "this" { module "kms" { source = "terraform-aws-modules/kms/aws" - version = "1.0.2" # Note - be mindful of Terraform/provider version compatibility between modules + version = "2.1.0" # Note - be mindful of Terraform/provider version compatibility between modules - create = local.create && var.create_kms_key + create = local.create && var.create_kms_key && local.enable_cluster_encryption_config # not valid on Outposts description = coalesce(var.kms_key_description, "${var.cluster_name} cluster encryption key") key_usage = "ENCRYPT_DECRYPT" @@ -111,16 +236,23 @@ module "kms" { # Policy enable_default_policy = var.kms_key_enable_default_policy key_owners = var.kms_key_owners - key_administrators = coalescelist(var.kms_key_administrators, [data.aws_caller_identity.current.arn]) + key_administrators = coalescelist(var.kms_key_administrators, [data.aws_iam_session_context.current.issuer_arn]) key_users = concat([local.cluster_role], var.kms_key_users) key_service_users = var.kms_key_service_users source_policy_documents = var.kms_key_source_policy_documents override_policy_documents = var.kms_key_override_policy_documents # Aliases - aliases = concat(["eks/${var.cluster_name}"], var.kms_key_aliases) + aliases = var.kms_key_aliases + computed_aliases = { + # Computed since users can pass in computed values for cluster name such as random provider resources + cluster = { name = "eks/${var.cluster_name}" } + } - tags = var.tags + tags = merge( + { terraform-aws-modules = "eks" }, + var.tags, + ) } ################################################################################ @@ -134,7 +266,8 @@ locals { cluster_security_group_id = local.create_cluster_sg ? 
 
-  cluster_security_group_rules = {
+  # Do not add rules that reference the node security group if the module is not creating it
+  cluster_security_group_rules = { for k, v in {
     ingress_nodes_443 = {
       description                = "Node groups to cluster API"
       protocol                   = "tcp"
       from_port                  = 443
       to_port                    = 443
       type                       = "ingress"
       source_node_security_group = true
     }
-    egress_nodes_443 = {
-      description                = "Cluster API to node groups"
-      protocol                   = "tcp"
-      from_port                  = 443
-      to_port                    = 443
-      type                       = "egress"
-      source_node_security_group = true
-    }
-    egress_nodes_kubelet = {
-      description                = "Cluster API to node kubelets"
-      protocol                   = "tcp"
-      from_port                  = 10250
-      to_port                    = 10250
-      type                       = "egress"
-      source_node_security_group = true
-    }
-  }
+  } : k => v if local.create_node_sg }
 }
 
 resource "aws_security_group" "cluster" {
@@ -182,7 +299,10 @@ resource "aws_security_group" "cluster" {
 }
 
 resource "aws_security_group_rule" "cluster" {
-  for_each = { for k, v in merge(local.cluster_security_group_rules, var.cluster_security_group_additional_rules) : k => v if local.create_cluster_sg }
+  for_each = { for k, v in merge(
+    local.cluster_security_group_rules,
+    var.cluster_security_group_additional_rules
+  ) : k => v if local.create_cluster_sg }
 
   # Required
   security_group_id = aws_security_group.cluster[0].id
@@ -192,15 +312,12 @@ resource "aws_security_group_rule" "cluster" {
   type              = each.value.type
 
   # Optional
-  description              = try(each.value.description, null)
-  cidr_blocks              = try(each.value.cidr_blocks, null)
-  ipv6_cidr_blocks         = try(each.value.ipv6_cidr_blocks, null)
-  prefix_list_ids          = try(each.value.prefix_list_ids, [])
-  self                     = try(each.value.self, null)
-  source_security_group_id = try(
-    each.value.source_security_group_id,
-    try(each.value.source_node_security_group, false) ? local.node_security_group_id : null
-  )
+  description              = lookup(each.value, "description", null)
+  cidr_blocks              = lookup(each.value, "cidr_blocks", null)
+  ipv6_cidr_blocks         = lookup(each.value, "ipv6_cidr_blocks", null)
+  prefix_list_ids          = lookup(each.value, "prefix_list_ids", null)
+  self                     = lookup(each.value, "self", null)
+  source_security_group_id = try(each.value.source_node_security_group, false) ? local.node_security_group_id : lookup(each.value, "source_security_group_id", null)
 }
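 
+# Illustrative (hypothetical) extra rule merged in above via
+# `var.cluster_security_group_additional_rules` - key and CIDR are placeholders:
+#
+#   cluster_security_group_additional_rules = {
+#     ingress_vpn_443 = {
+#       description = "Office VPN to cluster API"
+#       protocol    = "tcp"
+#       from_port   = 443
+#       to_port     = 443
+#       type        = "ingress"
+#       cidr_blocks = ["10.99.0.0/16"]
+#     }
+#   }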
 
 ################################################################################
@@ -208,17 +325,26 @@ resource "aws_security_group_rule" "cluster" {
 # Note - this is different from EKS identity provider
 ################################################################################
 
+locals {
+  # Not available on outposts
+  create_oidc_provider = local.create && var.enable_irsa && !local.create_outposts_local_cluster
+
+  oidc_root_ca_thumbprint = local.create_oidc_provider && var.include_oidc_root_ca_thumbprint ? [data.tls_certificate.this[0].certificates[0].sha1_fingerprint] : []
+}
+
 data "tls_certificate" "this" {
-  count = local.create && var.enable_irsa ? 1 : 0
+  # Not available on outposts
+  count = local.create_oidc_provider && var.include_oidc_root_ca_thumbprint ? 1 : 0
 
   url = aws_eks_cluster.this[0].identity[0].oidc[0].issuer
 }
 
 resource "aws_iam_openid_connect_provider" "oidc_provider" {
-  count = local.create && var.enable_irsa ? 1 : 0
+  # Not available on outposts
+  count = local.create_oidc_provider ? 1 : 0
 
-  client_id_list  = distinct(compact(concat(["sts.${local.dns_suffix}"], var.openid_connect_audiences)))
-  thumbprint_list = concat([data.tls_certificate.this[0].certificates[0].sha1_fingerprint], var.custom_oidc_thumbprints)
+  client_id_list  = distinct(compact(concat(["sts.amazonaws.com"], var.openid_connect_audiences)))
+  thumbprint_list = concat(local.oidc_root_ca_thumbprint, var.custom_oidc_thumbprints)
   url             = aws_eks_cluster.this[0].identity[0].oidc[0].issuer
 
   tags = merge(
@@ -232,14 +358,11 @@ resource "aws_iam_openid_connect_provider" "oidc_provider" {
 ################################################################################
 
 locals {
-  create_iam_role   = local.create && var.create_iam_role
-  iam_role_name     = coalesce(var.iam_role_name, "${var.cluster_name}-cluster")
-  policy_arn_prefix = "arn:${data.aws_partition.current.partition}:iam::aws:policy"
+  create_iam_role        = local.create && var.create_iam_role
+  iam_role_name          = coalesce(var.iam_role_name, "${var.cluster_name}-cluster")
+  iam_role_policy_prefix = "arn:${local.partition}:iam::aws:policy"
 
   cluster_encryption_policy_name = coalesce(var.cluster_encryption_policy_name, "${local.iam_role_name}-ClusterEncryption")
-
-
-  dns_suffix = coalesce(var.cluster_iam_role_dns_suffix, data.aws_partition.current.dns_suffix)
 }
 
 data "aws_iam_policy_document" "assume_role_policy" {
 
     principals {
       type        = "Service"
-      identifiers = ["eks.${local.dns_suffix}"]
+      identifiers = ["eks.amazonaws.com"]
+    }
+
+    dynamic "principals" {
+      for_each = local.create_outposts_local_cluster ? [1] : []
+
+      content {
+        type = "Service"
+        identifiers = [
+          "ec2.amazonaws.com",
+        ]
+      }
     }
   }
 }
@@ -268,7 +402,8 @@ resource "aws_iam_role" "this" {
   permissions_boundary  = var.iam_role_permissions_boundary
   force_detach_policies = true
 
-  # Resources running on the cluster are still generaring logs when destroying the module resources
+  # https://github.com/terraform-aws-modules/terraform-aws-eks/issues/920
+  # Resources running on the cluster are still generating logs when destroying the module resources
   # which results in the log group being re-created even after Terraform destroys it. Removing the
   # ability for the cluster role to create the log group prevents this log group from being re-created
   # outside of Terraform due to services still generating logs during destroy process
@@ -283,7 +418,7 @@ resource "aws_iam_role" "this" {
       {
         Action   = ["logs:CreateLogGroup"]
         Effect   = "Deny"
-        Resource = aws_cloudwatch_log_group.this[0].arn
+        Resource = "*"
       },
     ]
   })
@@ -295,10 +430,17 @@ resource "aws_iam_role" "this" {
 # Policies attached ref https://docs.aws.amazon.com/eks/latest/userguide/service_IAM_role.html
 resource "aws_iam_role_policy_attachment" "this" {
-  for_each = local.create_iam_role ? toset(compact(distinct(concat([
-    "${local.policy_arn_prefix}/AmazonEKSClusterPolicy",
-    "${local.policy_arn_prefix}/AmazonEKSVPCResourceController",
-  ], var.iam_role_additional_policies)))) : toset([])
+  for_each = { for k, v in {
+    AmazonEKSClusterPolicy = local.create_outposts_local_cluster ? "${local.iam_role_policy_prefix}/AmazonEKSLocalOutpostClusterPolicy" : "${local.iam_role_policy_prefix}/AmazonEKSClusterPolicy",
"${local.iam_role_policy_prefix}/AmazonEKSLocalOutpostClusterPolicy" : "${local.iam_role_policy_prefix}/AmazonEKSClusterPolicy", + AmazonEKSVPCResourceController = "${local.iam_role_policy_prefix}/AmazonEKSVPCResourceController", + } : k => v if local.create_iam_role } + + policy_arn = each.value + role = aws_iam_role.this[0].name +} + +resource "aws_iam_role_policy_attachment" "additional" { + for_each = { for k, v in var.iam_role_additional_policies : k => v if local.create_iam_role } policy_arn = each.value role = aws_iam_role.this[0].name @@ -306,14 +448,16 @@ resource "aws_iam_role_policy_attachment" "this" { # Using separate attachment due to `The "for_each" value depends on resource attributes that cannot be determined until apply` resource "aws_iam_role_policy_attachment" "cluster_encryption" { - count = local.create_iam_role && var.attach_cluster_encryption_policy && length(var.cluster_encryption_config) > 0 ? 1 : 0 + # Encryption config not available on Outposts + count = local.create_iam_role && var.attach_cluster_encryption_policy && local.enable_cluster_encryption_config ? 1 : 0 policy_arn = aws_iam_policy.cluster_encryption[0].arn role = aws_iam_role.this[0].name } resource "aws_iam_policy" "cluster_encryption" { - count = local.create_iam_role && var.attach_cluster_encryption_policy && length(var.cluster_encryption_config) > 0 ? 1 : 0 + # Encryption config not available on Outposts + count = local.create_iam_role && var.attach_cluster_encryption_policy && local.enable_cluster_encryption_config ? 1 : 0 name = var.cluster_encryption_policy_use_name_prefix ? null : local.cluster_encryption_policy_name name_prefix = var.cluster_encryption_policy_use_name_prefix ? local.cluster_encryption_policy_name : null @@ -331,7 +475,7 @@ resource "aws_iam_policy" "cluster_encryption" { "kms:DescribeKey", ] Effect = "Allow" - Resource = var.create_kms_key ? [module.kms.key_arn] : [for config in var.cluster_encryption_config : config.provider_key_arn] + Resource = var.create_kms_key ? 
+
+resource "aws_iam_role_policy_attachment" "additional" {
+  for_each = { for k, v in var.iam_role_additional_policies : k => v if local.create_iam_role }
 
   policy_arn = each.value
   role       = aws_iam_role.this[0].name
@@ -306,14 +448,16 @@ resource "aws_iam_role_policy_attachment" "this" {
 
 # Using separate attachment due to `The "for_each" value depends on resource attributes that cannot be determined until apply`
 resource "aws_iam_role_policy_attachment" "cluster_encryption" {
-  count = local.create_iam_role && var.attach_cluster_encryption_policy && length(var.cluster_encryption_config) > 0 ? 1 : 0
+  # Encryption config not available on Outposts
+  count = local.create_iam_role && var.attach_cluster_encryption_policy && local.enable_cluster_encryption_config ? 1 : 0
 
   policy_arn = aws_iam_policy.cluster_encryption[0].arn
   role       = aws_iam_role.this[0].name
 }
 
 resource "aws_iam_policy" "cluster_encryption" {
-  count = local.create_iam_role && var.attach_cluster_encryption_policy && length(var.cluster_encryption_config) > 0 ? 1 : 0
+  # Encryption config not available on Outposts
+  count = local.create_iam_role && var.attach_cluster_encryption_policy && local.enable_cluster_encryption_config ? 1 : 0
 
   name        = var.cluster_encryption_policy_use_name_prefix ? null : local.cluster_encryption_policy_name
   name_prefix = var.cluster_encryption_policy_use_name_prefix ? local.cluster_encryption_policy_name : null
@@ -331,7 +475,7 @@ resource "aws_iam_policy" "cluster_encryption" {
          "kms:DescribeKey",
        ]
        Effect   = "Allow"
-        Resource = var.create_kms_key ? [module.kms.key_arn] : [for config in var.cluster_encryption_config : config.provider_key_arn]
+        Resource = var.create_kms_key ? module.kms.key_arn : var.cluster_encryption_config.provider_key_arn
      },
    ]
  })
@@ -343,16 +487,33 @@ resource "aws_iam_policy" "cluster_encryption" {
 # EKS Addons
 ################################################################################
 
+data "aws_eks_addon_version" "this" {
+  for_each = { for k, v in var.cluster_addons : k => v if local.create && !local.create_outposts_local_cluster }
+
+  addon_name         = try(each.value.name, each.key)
+  kubernetes_version = coalesce(var.cluster_version, aws_eks_cluster.this[0].version)
+  most_recent        = try(each.value.most_recent, null)
+}
+
 resource "aws_eks_addon" "this" {
-  for_each = { for k, v in var.cluster_addons : k => v if local.create }
+  # Not supported on outposts
+  for_each = { for k, v in var.cluster_addons : k => v if !try(v.before_compute, false) && local.create && !local.create_outposts_local_cluster }
 
   cluster_name = aws_eks_cluster.this[0].name
   addon_name   = try(each.value.name, each.key)
 
-  addon_version            = lookup(each.value, "addon_version", null)
-  resolve_conflicts        = lookup(each.value, "resolve_conflicts", null)
-  service_account_role_arn = lookup(each.value, "service_account_role_arn", null)
-  configuration_values     = lookup(each.value, "configuration_values", null)
+  addon_version               = coalesce(try(each.value.addon_version, null), data.aws_eks_addon_version.this[each.key].version)
+  configuration_values        = try(each.value.configuration_values, null)
+  preserve                    = try(each.value.preserve, true)
+  resolve_conflicts_on_create = try(each.value.resolve_conflicts_on_create, "OVERWRITE")
+  resolve_conflicts_on_update = try(each.value.resolve_conflicts_on_update, "OVERWRITE")
+  service_account_role_arn    = try(each.value.service_account_role_arn, null)
+
+  timeouts {
+    create = try(each.value.timeouts.create, var.cluster_addons_timeouts.create, null)
+    update = try(each.value.timeouts.update, var.cluster_addons_timeouts.update, null)
+    delete = try(each.value.timeouts.delete, var.cluster_addons_timeouts.delete, null)
  }
 
   depends_on = [
     module.fargate_profile,
@@ -360,137 +521,61 @@ resource "aws_eks_addon" "this" {
     module.self_managed_node_group,
   ]
 
-  tags = var.tags
+  tags = merge(var.tags, try(each.value.tags, {}))
 }
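+
+# Illustrative (hypothetical) `var.cluster_addons` input - names and values are
+# placeholders; `before_compute = true` routes an addon to the `before_compute`
+# resource below so it is created before node groups:
+#
+#   cluster_addons = {
+#     coredns = { most_recent = true }
+#     vpc-cni = {
+#       before_compute       = true
+#       configuration_values = jsonencode({ env = { ENABLE_PREFIX_DELEGATION = "true" } })
+#     }
+#   }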
 
-################################################################################
-# EKS Identity Provider
-# Note - this is different from IRSA
-################################################################################
-
-resource "aws_eks_identity_provider_config" "this" {
-  for_each = { for k, v in var.cluster_identity_providers : k => v if local.create }
+resource "aws_eks_addon" "before_compute" {
+  # Not supported on outposts
+  for_each = { for k, v in var.cluster_addons : k => v if try(v.before_compute, false) && local.create && !local.create_outposts_local_cluster }
 
   cluster_name = aws_eks_cluster.this[0].name
+  addon_name   = try(each.value.name, each.key)
 
-  oidc {
-    client_id                     = each.value.client_id
-    groups_claim                  = lookup(each.value, "groups_claim", null)
-    groups_prefix                 = lookup(each.value, "groups_prefix", null)
-    identity_provider_config_name = try(each.value.identity_provider_config_name, each.key)
-    issuer_url                    = try(each.value.issuer_url, aws_eks_cluster.this[0].identity[0].oidc[0].issuer)
-    required_claims               = lookup(each.value, "required_claims", null)
-    username_claim                = lookup(each.value, "username_claim", null)
-    username_prefix               = lookup(each.value, "username_prefix", null)
+  addon_version               = coalesce(try(each.value.addon_version, null), data.aws_eks_addon_version.this[each.key].version)
+  configuration_values        = try(each.value.configuration_values, null)
+  preserve                    = try(each.value.preserve, true)
+  resolve_conflicts_on_create = try(each.value.resolve_conflicts_on_create, "OVERWRITE")
+  resolve_conflicts_on_update = try(each.value.resolve_conflicts_on_update, "OVERWRITE")
+  service_account_role_arn    = try(each.value.service_account_role_arn, null)
+
+  timeouts {
+    create = try(each.value.timeouts.create, var.cluster_addons_timeouts.create, null)
+    update = try(each.value.timeouts.update, var.cluster_addons_timeouts.update, null)
+    delete = try(each.value.timeouts.delete, var.cluster_addons_timeouts.delete, null)
   }
 
-  tags = var.tags
+  tags = merge(var.tags, try(each.value.tags, {}))
 }
 
 ################################################################################
-# aws-auth configmap
+# EKS Identity Provider
+# Note - this is different from IRSA
 ################################################################################
 
 locals {
-  node_iam_role_arns_non_windows = distinct(
-    compact(
-      concat(
-        [for group in module.eks_managed_node_group : group.iam_role_arn],
-        [for group in module.self_managed_node_group : group.iam_role_arn if group.platform != "windows"],
-        var.aws_auth_node_iam_role_arns_non_windows,
-      )
-    )
-  )
-
-  node_iam_role_arns_windows = distinct(
-    compact(
-      concat(
-        [for group in module.self_managed_node_group : group.iam_role_arn if group.platform == "windows"],
-        var.aws_auth_node_iam_role_arns_windows,
-      )
-    )
-  )
-
-  fargate_profile_pod_execution_role_arns = distinct(
-    compact(
-      concat(
-        [for group in module.fargate_profile : group.fargate_profile_pod_execution_role_arn],
-        var.aws_auth_fargate_profile_pod_execution_role_arns,
-      )
-    )
-  )
-
-  aws_auth_configmap_data = {
-    mapRoles = yamlencode(concat(
-      [for role_arn in local.node_iam_role_arns_non_windows : {
-        rolearn  = role_arn
-        username = "system:node:{{EC2PrivateDNSName}}"
-        groups = [
-          "system:bootstrappers",
-          "system:nodes",
-        ]
-        }
-      ],
-      [for role_arn in local.node_iam_role_arns_windows : {
-        rolearn  = role_arn
-        username = "system:node:{{EC2PrivateDNSName}}"
-        groups = [
-          "eks:kube-proxy-windows",
-          "system:bootstrappers",
-          "system:nodes",
-        ]
-        }
-      ],
-      # Fargate profile
-      [for role_arn in local.fargate_profile_pod_execution_role_arns : {
-        rolearn  = role_arn
-        username = "system:node:{{SessionName}}"
-        groups = [
-          "system:bootstrappers",
-          "system:nodes",
-          "system:node-proxier",
-        ]
-        }
-      ],
-      var.aws_auth_roles
-    ))
-    mapUsers    = yamlencode(var.aws_auth_users)
-    mapAccounts = yamlencode(var.aws_auth_accounts)
-  }
-}
-
-resource "kubernetes_config_map" "aws_auth" {
-  count = var.create && var.create_aws_auth_configmap ? 1 : 0
-
-  metadata {
-    name      = "aws-auth"
-    namespace = "kube-system"
-  }
-
-  data = local.aws_auth_configmap_data
-
-  lifecycle {
-    # We are ignoring the data here since we will manage it with the resource below
-    # This is only intended to be used in scenarios where the configmap does not exist
-    ignore_changes = [data]
-  }
+  # Maintain current behavior for <= 1.29, remove default for >= 1.30
+  # `null` will return the latest Kubernetes version from the EKS API, which at time of writing is 1.30
+  # https://github.com/kubernetes/kubernetes/pull/123561
+  idpc_backwards_compat_version = contains(["1.21", "1.22", "1.23", "1.24", "1.25", "1.26", "1.27", "1.28", "1.29"], coalesce(var.cluster_version, "1.30"))
+  idpc_issuer_url               = local.idpc_backwards_compat_version ? try(aws_eks_cluster.this[0].identity[0].oidc[0].issuer, null) : null
 }
 
-resource "kubernetes_config_map_v1_data" "aws_auth" {
-  count = var.create && var.manage_aws_auth_configmap ? 1 : 0
+resource "aws_eks_identity_provider_config" "this" {
+  for_each = { for k, v in var.cluster_identity_providers : k => v if local.create && !local.create_outposts_local_cluster }
 
-  force = true
+  cluster_name = aws_eks_cluster.this[0].name
 
-  metadata {
-    name      = "aws-auth"
-    namespace = "kube-system"
+  oidc {
+    client_id                     = each.value.client_id
+    groups_claim                  = lookup(each.value, "groups_claim", null)
+    groups_prefix                 = lookup(each.value, "groups_prefix", null)
+    identity_provider_config_name = try(each.value.identity_provider_config_name, each.key)
+    # TODO - make argument explicitly required on next breaking change
+    issuer_url      = try(each.value.issuer_url, local.idpc_issuer_url)
+    required_claims = lookup(each.value, "required_claims", null)
+    username_claim  = lookup(each.value, "username_claim", null)
+    username_prefix = lookup(each.value, "username_prefix", null)
   }
 
-  data = local.aws_auth_configmap_data
-
-  depends_on = [
-    # Required for instances where the configmap does not exist yet to avoid race condition
-    kubernetes_config_map.aws_auth,
-  ]
+  tags = merge(var.tags, try(each.value.tags, {}))
 }
-
diff --git a/modules/aws-eks/node_groups.tf b/modules/aws-eks/node_groups.tf
index 73480e8..7228931 100644
--- a/modules/aws-eks/node_groups.tf
+++ b/modules/aws-eks/node_groups.tf
@@ -4,11 +4,45 @@ locals {
     http_tokens                 = "required"
     http_put_response_hop_limit = 2
   }
+
+  # EKS managed node group
+  default_update_config = {
+    max_unavailable_percentage = 33
+  }
+
+  # Self-managed node group
+  default_instance_refresh = {
+    strategy = "Rolling"
+    preferences = {
+      min_healthy_percentage = 66
+    }
+  }
+
+  kubernetes_network_config = try(aws_eks_cluster.this[0].kubernetes_network_config[0], {})
+}
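+
+# An individual node group can still override these defaults - an illustrative
+# (hypothetical) override of the 33% update default set above:
+#
+#   eks_managed_node_groups = {
+#     default = {
+#       update_config = { max_unavailable = 1 }
+#     }
+#   }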
+
+# This sleep resource is used to provide a timed gap between the cluster creation and the downstream dependencies
+# that consume the outputs from here. Any of the values that are used as triggers can be used in dependencies
+# to ensure that the downstream resources are created after both the cluster is ready and the sleep time has passed.
+# This was primarily added to give addons that need to be configured BEFORE data plane compute resources
+# enough time to create and configure themselves before the data plane compute resources are created.
+resource "time_sleep" "this" {
+  count = var.create ? 1 : 0
+
+  create_duration = var.dataplane_wait_duration
+
+  triggers = {
+    cluster_name         = aws_eks_cluster.this[0].name
+    cluster_endpoint     = aws_eks_cluster.this[0].endpoint
+    cluster_version      = aws_eks_cluster.this[0].version
+    cluster_service_cidr = var.cluster_ip_family == "ipv6" ? try(local.kubernetes_network_config.service_ipv6_cidr, "") : try(local.kubernetes_network_config.service_ipv4_cidr, "")
+
+    cluster_certificate_authority_data = aws_eks_cluster.this[0].certificate_authority[0].data
+  }
+}
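+
+# Downstream resources read cluster attributes through these triggers (e.g.
+# `time_sleep.this[0].triggers["cluster_name"]`) rather than from the cluster
+# resource directly, so they inherit both the dependency and the wait - see the
+# fargate_profile / node group modules below.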
"egress" - cidr_blocks = ["0.0.0.0/0"] - ipv6_cidr_blocks = var.cluster_ip_family == "ipv6" ? ["::/0"] : null + # metrics-server + ingress_cluster_4443_webhook = { + description = "Cluster API to node 4443/tcp webhook" + protocol = "tcp" + from_port = 4443 + to_port = 4443 + type = "ingress" + source_cluster_security_group = true } - egress_ntp_tcp = { - description = "Egress NTP/TCP to internet" - protocol = "tcp" - from_port = 123 - to_port = 123 - type = "egress" - cidr_blocks = var.node_security_group_ntp_ipv4_cidr_block - ipv6_cidr_blocks = var.cluster_ip_family == "ipv6" ? var.node_security_group_ntp_ipv6_cidr_block : null + # prometheus-adapter + ingress_cluster_6443_webhook = { + description = "Cluster API to node 6443/tcp webhook" + protocol = "tcp" + from_port = 6443 + to_port = 6443 + type = "ingress" + source_cluster_security_group = true } - egress_ntp_udp = { - description = "Egress NTP/UDP to internet" - protocol = "udp" - from_port = 123 - to_port = 123 + # Karpenter + ingress_cluster_8443_webhook = { + description = "Cluster API to node 8443/tcp webhook" + protocol = "tcp" + from_port = 8443 + to_port = 8443 + type = "ingress" + source_cluster_security_group = true + } + # ALB controller, NGINX + ingress_cluster_9443_webhook = { + description = "Cluster API to node 9443/tcp webhook" + protocol = "tcp" + from_port = 9443 + to_port = 9443 + type = "ingress" + source_cluster_security_group = true + } + egress_all = { + description = "Allow all egress" + protocol = "-1" + from_port = 0 + to_port = 0 type = "egress" - cidr_blocks = var.node_security_group_ntp_ipv4_cidr_block - ipv6_cidr_blocks = var.cluster_ip_family == "ipv6" ? var.node_security_group_ntp_ipv6_cidr_block : null + cidr_blocks = ["0.0.0.0/0"] + ipv6_cidr_blocks = var.cluster_ip_family == "ipv6" ? ["::/0"] : null } + } : k => v if var.node_security_group_enable_recommended_rules } + + efa_security_group_rules = { for k, v in + { + ingress_all_self_efa = { + description = "Node to node EFA" + protocol = "-1" + from_port = 0 + to_port = 0 + type = "ingress" + self = true + } + egress_all_self_efa = { + description = "Node to node EFA" + protocol = "-1" + from_port = 0 + to_port = 0 + type = "egress" + self = true + } + } : k => v if var.enable_efa_support } } @@ -192,7 +228,12 @@ resource "aws_security_group" "node" { } resource "aws_security_group_rule" "node" { - for_each = { for k, v in merge(local.node_security_group_rules, var.node_security_group_additional_rules) : k => v if local.create_node_sg } + for_each = { for k, v in merge( + local.efa_security_group_rules, + local.node_security_group_rules, + local.node_security_group_recommended_rules, + var.node_security_group_additional_rules, + ) : k => v if local.create_node_sg } # Required security_group_id = aws_security_group.node[0].id @@ -202,15 +243,12 @@ resource "aws_security_group_rule" "node" { type = each.value.type # Optional - description = try(each.value.description, null) - cidr_blocks = try(each.value.cidr_blocks, null) - ipv6_cidr_blocks = try(each.value.ipv6_cidr_blocks, null) - prefix_list_ids = try(each.value.prefix_list_ids, []) - self = try(each.value.self, null) - source_security_group_id = try( - each.value.source_security_group_id, - try(each.value.source_cluster_security_group, false) ? 
 
 ################################################################################
 # Fargate Profile
 ################################################################################
 
 module "fargate_profile" {
   source = "./modules/fargate-profile"
 
-  for_each = { for k, v in var.fargate_profiles : k => v if var.create }
+  for_each = { for k, v in var.fargate_profiles : k => v if var.create && !local.create_outposts_local_cluster }
 
   create = try(each.value.create, true)
 
   # Fargate Profile
-  cluster_name      = aws_eks_cluster.this[0].name
+  cluster_name      = time_sleep.this[0].triggers["cluster_name"]
   cluster_ip_family = var.cluster_ip_family
   name              = try(each.value.name, each.key)
   subnet_ids        = try(each.value.subnet_ids, var.fargate_profile_defaults.subnet_ids, var.subnet_ids)
@@ -242,7 +280,9 @@ module "fargate_profile" {
   iam_role_permissions_boundary = try(each.value.iam_role_permissions_boundary, var.fargate_profile_defaults.iam_role_permissions_boundary, null)
   iam_role_tags                 = try(each.value.iam_role_tags, var.fargate_profile_defaults.iam_role_tags, {})
   iam_role_attach_cni_policy    = try(each.value.iam_role_attach_cni_policy, var.fargate_profile_defaults.iam_role_attach_cni_policy, true)
-  iam_role_additional_policies  = try(each.value.iam_role_additional_policies, var.fargate_profile_defaults.iam_role_additional_policies, [])
+  # To better understand why this `lookup()` logic is required, see:
+  # https://github.com/hashicorp/terraform/issues/31646#issuecomment-1217279031
+  iam_role_additional_policies = lookup(each.value, "iam_role_additional_policies", lookup(var.fargate_profile_defaults, "iam_role_additional_policies", {}))
 
   tags = merge(var.tags, try(each.value.tags, var.fargate_profile_defaults.tags, {}))
 }
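 
+# Illustrative (hypothetical) `var.fargate_profiles` input - per-profile values
+# fall back to `var.fargate_profile_defaults`, then to the hardcoded defaults:
+#
+#   fargate_profiles = {
+#     kube-system = {
+#       selectors = [{ namespace = "kube-system" }]
+#     }
+#   }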
 
 ################################################################################
 # EKS Managed Node Group
 ################################################################################
 
 module "eks_managed_node_group" {
   source = "./modules/eks-managed-node-group"
 
-  for_each = { for k, v in var.eks_managed_node_groups : k => v if var.create }
+  for_each = { for k, v in var.eks_managed_node_groups : k => v if var.create && !local.create_outposts_local_cluster }
 
   create = try(each.value.create, true)
 
-  cluster_name              = aws_eks_cluster.this[0].name
-  cluster_version           = try(each.value.cluster_version, var.eks_managed_node_group_defaults.cluster_version, aws_eks_cluster.this[0].version)
-  cluster_security_group_id = local.cluster_security_group_id
-  cluster_ip_family         = var.cluster_ip_family
+  cluster_name    = time_sleep.this[0].triggers["cluster_name"]
+  cluster_version = try(each.value.cluster_version, var.eks_managed_node_group_defaults.cluster_version, time_sleep.this[0].triggers["cluster_version"])
 
   # EKS Managed Node Group
   name = try(each.value.name, each.key)
@@ -273,9 +311,10 @@ module "eks_managed_node_group" {
   max_size     = try(each.value.max_size, var.eks_managed_node_group_defaults.max_size, 3)
   desired_size = try(each.value.desired_size, var.eks_managed_node_group_defaults.desired_size, 1)
 
-  ami_id              = try(each.value.ami_id, var.eks_managed_node_group_defaults.ami_id, "")
-  ami_type            = try(each.value.ami_type, var.eks_managed_node_group_defaults.ami_type, null)
-  ami_release_version = try(each.value.ami_release_version, var.eks_managed_node_group_defaults.ami_release_version, null)
+  ami_id                         = try(each.value.ami_id, var.eks_managed_node_group_defaults.ami_id, "")
+  ami_type                       = try(each.value.ami_type, var.eks_managed_node_group_defaults.ami_type, null)
+  ami_release_version            = try(each.value.ami_release_version, var.eks_managed_node_group_defaults.ami_release_version, null)
+  use_latest_ami_release_version = try(each.value.use_latest_ami_release_version, var.eks_managed_node_group_defaults.use_latest_ami_release_version, false)
 
   capacity_type = try(each.value.capacity_type, var.eks_managed_node_group_defaults.capacity_type, null)
   disk_size     = try(each.value.disk_size, var.eks_managed_node_group_defaults.disk_size, null)
@@ -285,35 +324,42 @@ module "eks_managed_node_group" {
   remote_access = try(each.value.remote_access, var.eks_managed_node_group_defaults.remote_access, {})
   taints        = try(each.value.taints, var.eks_managed_node_group_defaults.taints, {})
-  update_config = try(each.value.update_config, var.eks_managed_node_group_defaults.update_config, {})
+  update_config = try(each.value.update_config, var.eks_managed_node_group_defaults.update_config, local.default_update_config)
   timeouts      = try(each.value.timeouts, var.eks_managed_node_group_defaults.timeouts, {})
 
   # User data
   platform                   = try(each.value.platform, var.eks_managed_node_group_defaults.platform, "linux")
-  cluster_endpoint           = try(aws_eks_cluster.this[0].endpoint, "")
-  cluster_auth_base64        = try(aws_eks_cluster.this[0].certificate_authority[0].data, "")
+  cluster_endpoint           = try(time_sleep.this[0].triggers["cluster_endpoint"], "")
+  cluster_auth_base64        = try(time_sleep.this[0].triggers["cluster_certificate_authority_data"], "")
   cluster_service_ipv4_cidr  = var.cluster_service_ipv4_cidr
+  cluster_ip_family          = var.cluster_ip_family
+  cluster_service_cidr       = try(time_sleep.this[0].triggers["cluster_service_cidr"], "")
   enable_bootstrap_user_data = try(each.value.enable_bootstrap_user_data, var.eks_managed_node_group_defaults.enable_bootstrap_user_data, false)
   pre_bootstrap_user_data    = try(each.value.pre_bootstrap_user_data, var.eks_managed_node_group_defaults.pre_bootstrap_user_data, "")
   post_bootstrap_user_data   = try(each.value.post_bootstrap_user_data, var.eks_managed_node_group_defaults.post_bootstrap_user_data, "")
   bootstrap_extra_args       = try(each.value.bootstrap_extra_args, var.eks_managed_node_group_defaults.bootstrap_extra_args, "")
   user_data_template_path    = try(each.value.user_data_template_path, var.eks_managed_node_group_defaults.user_data_template_path, "")
+  cloudinit_pre_nodeadm      = try(each.value.cloudinit_pre_nodeadm, var.eks_managed_node_group_defaults.cloudinit_pre_nodeadm, [])
+  cloudinit_post_nodeadm     = try(each.value.cloudinit_post_nodeadm, var.eks_managed_node_group_defaults.cloudinit_post_nodeadm, [])
 
   # Launch Template
-  create_launch_template          = try(each.value.create_launch_template, var.eks_managed_node_group_defaults.create_launch_template, true)
-  launch_template_name            = try(each.value.launch_template_name, var.eks_managed_node_group_defaults.launch_template_name, each.key)
-  launch_template_use_name_prefix = try(each.value.launch_template_use_name_prefix, var.eks_managed_node_group_defaults.launch_template_use_name_prefix, true)
-  launch_template_version         = try(each.value.launch_template_version, var.eks_managed_node_group_defaults.launch_template_version, null)
-  launch_template_description     = try(each.value.launch_template_description, var.eks_managed_node_group_defaults.launch_template_description, "Custom launch template for ${try(each.value.name, each.key)} EKS managed node group")
"Custom launch template for ${try(each.value.name, each.key)} EKS managed node group") - launch_template_tags = try(each.value.launch_template_tags, var.eks_managed_node_group_defaults.launch_template_tags, {}) - - ebs_optimized = try(each.value.ebs_optimized, var.eks_managed_node_group_defaults.ebs_optimized, null) - key_name = try(each.value.key_name, var.eks_managed_node_group_defaults.key_name, null) + create_launch_template = try(each.value.create_launch_template, var.eks_managed_node_group_defaults.create_launch_template, true) + use_custom_launch_template = try(each.value.use_custom_launch_template, var.eks_managed_node_group_defaults.use_custom_launch_template, true) + launch_template_id = try(each.value.launch_template_id, var.eks_managed_node_group_defaults.launch_template_id, "") + launch_template_name = try(each.value.launch_template_name, var.eks_managed_node_group_defaults.launch_template_name, each.key) + launch_template_use_name_prefix = try(each.value.launch_template_use_name_prefix, var.eks_managed_node_group_defaults.launch_template_use_name_prefix, true) + launch_template_version = try(each.value.launch_template_version, var.eks_managed_node_group_defaults.launch_template_version, null) launch_template_default_version = try(each.value.launch_template_default_version, var.eks_managed_node_group_defaults.launch_template_default_version, null) update_launch_template_default_version = try(each.value.update_launch_template_default_version, var.eks_managed_node_group_defaults.update_launch_template_default_version, true) - disable_api_termination = try(each.value.disable_api_termination, var.eks_managed_node_group_defaults.disable_api_termination, null) - kernel_id = try(each.value.kernel_id, var.eks_managed_node_group_defaults.kernel_id, null) - ram_disk_id = try(each.value.ram_disk_id, var.eks_managed_node_group_defaults.ram_disk_id, null) + launch_template_description = try(each.value.launch_template_description, var.eks_managed_node_group_defaults.launch_template_description, "Custom launch template for ${try(each.value.name, each.key)} EKS managed node group") + launch_template_tags = try(each.value.launch_template_tags, var.eks_managed_node_group_defaults.launch_template_tags, {}) + tag_specifications = try(each.value.tag_specifications, var.eks_managed_node_group_defaults.tag_specifications, ["instance", "volume", "network-interface"]) + + ebs_optimized = try(each.value.ebs_optimized, var.eks_managed_node_group_defaults.ebs_optimized, null) + key_name = try(each.value.key_name, var.eks_managed_node_group_defaults.key_name, null) + disable_api_termination = try(each.value.disable_api_termination, var.eks_managed_node_group_defaults.disable_api_termination, null) + kernel_id = try(each.value.kernel_id, var.eks_managed_node_group_defaults.kernel_id, null) + ram_disk_id = try(each.value.ram_disk_id, var.eks_managed_node_group_defaults.ram_disk_id, null) block_device_mappings = try(each.value.block_device_mappings, var.eks_managed_node_group_defaults.block_device_mappings, {}) capacity_reservation_specification = try(each.value.capacity_reservation_specification, var.eks_managed_node_group_defaults.capacity_reservation_specification, {}) @@ -326,8 +372,13 @@ module "eks_managed_node_group" { license_specifications = try(each.value.license_specifications, var.eks_managed_node_group_defaults.license_specifications, {}) metadata_options = try(each.value.metadata_options, var.eks_managed_node_group_defaults.metadata_options, local.metadata_options) enable_monitoring = 
+  enable_efa_support       = try(each.value.enable_efa_support, var.eks_managed_node_group_defaults.enable_efa_support, false)
+  create_placement_group   = try(each.value.create_placement_group, var.eks_managed_node_group_defaults.create_placement_group, false)
+  placement_group_strategy = try(each.value.placement_group_strategy, var.eks_managed_node_group_defaults.placement_group_strategy, "cluster")
   network_interfaces       = try(each.value.network_interfaces, var.eks_managed_node_group_defaults.network_interfaces, [])
   placement                = try(each.value.placement, var.eks_managed_node_group_defaults.placement, {})
+  maintenance_options      = try(each.value.maintenance_options, var.eks_managed_node_group_defaults.maintenance_options, {})
+  private_dns_name_options = try(each.value.private_dns_name_options, var.eks_managed_node_group_defaults.private_dns_name_options, {})
 
   # IAM role
   create_iam_role = try(each.value.create_iam_role, var.eks_managed_node_group_defaults.create_iam_role, true)
@@ -339,18 +390,17 @@ module "eks_managed_node_group" {
   iam_role_permissions_boundary = try(each.value.iam_role_permissions_boundary, var.eks_managed_node_group_defaults.iam_role_permissions_boundary, null)
   iam_role_tags                 = try(each.value.iam_role_tags, var.eks_managed_node_group_defaults.iam_role_tags, {})
   iam_role_attach_cni_policy    = try(each.value.iam_role_attach_cni_policy, var.eks_managed_node_group_defaults.iam_role_attach_cni_policy, true)
-  iam_role_additional_policies  = try(each.value.iam_role_additional_policies, var.eks_managed_node_group_defaults.iam_role_additional_policies, [])
+  # To better understand why this `lookup()` logic is required, see:
+  # https://github.com/hashicorp/terraform/issues/31646#issuecomment-1217279031
+  iam_role_additional_policies = lookup(each.value, "iam_role_additional_policies", lookup(var.eks_managed_node_group_defaults, "iam_role_additional_policies", {}))
+
+  # Autoscaling group schedule
+  create_schedule = try(each.value.create_schedule, var.eks_managed_node_group_defaults.create_schedule, true)
+  schedules       = try(each.value.schedules, var.eks_managed_node_group_defaults.schedules, {})
 
   # Security group
   vpc_security_group_ids            = compact(concat([local.node_security_group_id], try(each.value.vpc_security_group_ids, var.eks_managed_node_group_defaults.vpc_security_group_ids, [])))
   cluster_primary_security_group_id = try(each.value.attach_cluster_primary_security_group, var.eks_managed_node_group_defaults.attach_cluster_primary_security_group, false) ? aws_eks_cluster.this[0].vpc_config[0].cluster_security_group_id : null
-  create_security_group          = try(each.value.create_security_group, var.eks_managed_node_group_defaults.create_security_group, true)
-  security_group_name            = try(each.value.security_group_name, var.eks_managed_node_group_defaults.security_group_name, null)
-  security_group_use_name_prefix = try(each.value.security_group_use_name_prefix, var.eks_managed_node_group_defaults.security_group_use_name_prefix, true)
-  security_group_description     = try(each.value.security_group_description, var.eks_managed_node_group_defaults.security_group_description, "EKS managed node group security group")
-  vpc_id                         = try(each.value.vpc_id, var.eks_managed_node_group_defaults.vpc_id, var.vpc_id)
-  security_group_rules           = try(each.value.security_group_rules, var.eks_managed_node_group_defaults.security_group_rules, {})
-  security_group_tags            = try(each.value.security_group_tags, var.eks_managed_node_group_defaults.security_group_tags, {})
 
   tags = merge(var.tags, try(each.value.tags, var.eks_managed_node_group_defaults.tags, {}))
 }
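 
+# Illustrative (hypothetical) `var.eks_managed_node_groups` input - each value
+# resolves per-group first, then from `var.eks_managed_node_group_defaults`,
+# then from the hardcoded fallback in the module block above:
+#
+#   eks_managed_node_group_defaults = {
+#     ami_type = "AL2023_x86_64_STANDARD"
+#   }
+#
+#   eks_managed_node_groups = {
+#     default = {
+#       min_size     = 1
+#       max_size     = 3
+#       desired_size = 1
+#     }
+#   }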
@@ -366,8 +416,7 @@ module "self_managed_node_group" {
 
   create = try(each.value.create, true)
 
-  cluster_name      = aws_eks_cluster.this[0].name
-  cluster_ip_family = var.cluster_ip_family
+  cluster_name = time_sleep.this[0].triggers["cluster_name"]
 
   # Autoscaling Group
   create_autoscaling_group = try(each.value.create_autoscaling_group, var.self_managed_node_group_defaults.create_autoscaling_group, true)
@@ -386,64 +435,72 @@ module "self_managed_node_group" {
   wait_for_elb_capacity     = try(each.value.wait_for_elb_capacity, var.self_managed_node_group_defaults.wait_for_elb_capacity, null)
   wait_for_capacity_timeout = try(each.value.wait_for_capacity_timeout, var.self_managed_node_group_defaults.wait_for_capacity_timeout, null)
   default_cooldown          = try(each.value.default_cooldown, var.self_managed_node_group_defaults.default_cooldown, null)
+  default_instance_warmup   = try(each.value.default_instance_warmup, var.self_managed_node_group_defaults.default_instance_warmup, null)
   protect_from_scale_in     = try(each.value.protect_from_scale_in, var.self_managed_node_group_defaults.protect_from_scale_in, null)
+  context                   = try(each.value.context, var.self_managed_node_group_defaults.context, null)
 
   target_group_arns         = try(each.value.target_group_arns, var.self_managed_node_group_defaults.target_group_arns, [])
   placement_group           = try(each.value.placement_group, var.self_managed_node_group_defaults.placement_group, null)
   health_check_type         = try(each.value.health_check_type, var.self_managed_node_group_defaults.health_check_type, null)
   health_check_grace_period = try(each.value.health_check_grace_period, var.self_managed_node_group_defaults.health_check_grace_period, null)
 
-  force_delete          = try(each.value.force_delete, var.self_managed_node_group_defaults.force_delete, null)
-  termination_policies  = try(each.value.termination_policies, var.self_managed_node_group_defaults.termination_policies, [])
-  suspended_processes   = try(each.value.suspended_processes, var.self_managed_node_group_defaults.suspended_processes, [])
-  max_instance_lifetime = try(each.value.max_instance_lifetime, var.self_managed_node_group_defaults.max_instance_lifetime, null)
+  force_delete           = try(each.value.force_delete, var.self_managed_node_group_defaults.force_delete, null)
+  force_delete_warm_pool = try(each.value.force_delete_warm_pool, var.self_managed_node_group_defaults.force_delete_warm_pool, null)
+  termination_policies   = try(each.value.termination_policies, var.self_managed_node_group_defaults.termination_policies, [])
+  suspended_processes    = try(each.value.suspended_processes, var.self_managed_node_group_defaults.suspended_processes, [])
+  max_instance_lifetime  = try(each.value.max_instance_lifetime, var.self_managed_node_group_defaults.max_instance_lifetime, null)
 
   enabled_metrics         = try(each.value.enabled_metrics, var.self_managed_node_group_defaults.enabled_metrics, [])
   metrics_granularity     = try(each.value.metrics_granularity, var.self_managed_node_group_defaults.metrics_granularity, null)
   service_linked_role_arn = try(each.value.service_linked_role_arn, var.self_managed_node_group_defaults.service_linked_role_arn, null)
 
-  initial_lifecycle_hooks    = try(each.value.initial_lifecycle_hooks, var.self_managed_node_group_defaults.initial_lifecycle_hooks, [])
-  instance_refresh           = try(each.value.instance_refresh, var.self_managed_node_group_defaults.instance_refresh, {})
-  use_mixed_instances_policy = try(each.value.use_mixed_instances_policy, var.self_managed_node_group_defaults.use_mixed_instances_policy, false)
-  mixed_instances_policy     = try(each.value.mixed_instances_policy, var.self_managed_node_group_defaults.mixed_instances_policy, null)
-  warm_pool                  = try(each.value.warm_pool, var.self_managed_node_group_defaults.warm_pool, {})
-
-  create_schedule = try(each.value.create_schedule, var.self_managed_node_group_defaults.create_schedule, false)
-  schedules       = try(each.value.schedules, var.self_managed_node_group_defaults.schedules, {})
+  initial_lifecycle_hooks     = try(each.value.initial_lifecycle_hooks, var.self_managed_node_group_defaults.initial_lifecycle_hooks, [])
+  instance_maintenance_policy = try(each.value.instance_maintenance_policy, var.self_managed_node_group_defaults.instance_maintenance_policy, {})
+  instance_refresh            = try(each.value.instance_refresh, var.self_managed_node_group_defaults.instance_refresh, local.default_instance_refresh)
+  use_mixed_instances_policy  = try(each.value.use_mixed_instances_policy, var.self_managed_node_group_defaults.use_mixed_instances_policy, false)
+  mixed_instances_policy      = try(each.value.mixed_instances_policy, var.self_managed_node_group_defaults.mixed_instances_policy, null)
+  warm_pool                   = try(each.value.warm_pool, var.self_managed_node_group_defaults.warm_pool, {})
 
   delete_timeout         = try(each.value.delete_timeout, var.self_managed_node_group_defaults.delete_timeout, null)
-  use_default_tags       = try(each.value.use_default_tags, var.self_managed_node_group_defaults.use_default_tags, false)
   autoscaling_group_tags = try(each.value.autoscaling_group_tags, var.self_managed_node_group_defaults.autoscaling_group_tags, {})
 
   # User data
-  platform            = try(each.value.platform, var.self_managed_node_group_defaults.platform, "linux")
-  cluster_endpoint    = try(aws_eks_cluster.this[0].endpoint, "")
-  cluster_auth_base64 = try(aws_eks_cluster.this[0].certificate_authority[0].data, "")
+  platform = try(each.value.platform, var.self_managed_node_group_defaults.platform, null)
+  # TODO - update this when `var.platform` is removed in v21.0
+  ami_type             = try(each.value.ami_type, var.self_managed_node_group_defaults.ami_type, "AL2_x86_64")
+  cluster_endpoint     = try(time_sleep.this[0].triggers["cluster_endpoint"], "")
+  cluster_auth_base64  = try(time_sleep.this[0].triggers["cluster_certificate_authority_data"], "")
+  cluster_service_cidr = try(time_sleep.this[0].triggers["cluster_service_cidr"], "")
+  cluster_ip_family    = var.cluster_ip_family
   pre_bootstrap_user_data = try(each.value.pre_bootstrap_user_data, var.self_managed_node_group_defaults.pre_bootstrap_user_data, "")
   post_bootstrap_user_data = try(each.value.post_bootstrap_user_data, var.self_managed_node_group_defaults.post_bootstrap_user_data, "")
   bootstrap_extra_args     = try(each.value.bootstrap_extra_args, var.self_managed_node_group_defaults.bootstrap_extra_args, "")
   user_data_template_path  = try(each.value.user_data_template_path, var.self_managed_node_group_defaults.user_data_template_path, "")
+  cloudinit_pre_nodeadm    = try(each.value.cloudinit_pre_nodeadm, var.self_managed_node_group_defaults.cloudinit_pre_nodeadm, [])
+  cloudinit_post_nodeadm   = try(each.value.cloudinit_post_nodeadm, var.self_managed_node_group_defaults.cloudinit_post_nodeadm, [])
 
   # Launch Template
-  create_launch_template          = try(each.value.create_launch_template, var.self_managed_node_group_defaults.create_launch_template, true)
-  launch_template_name            = try(each.value.launch_template_name, var.self_managed_node_group_defaults.launch_template_name, each.key)
-  launch_template_use_name_prefix = try(each.value.launch_template_use_name_prefix, var.self_managed_node_group_defaults.launch_template_use_name_prefix, true)
-  launch_template_version         = try(each.value.launch_template_version, var.self_managed_node_group_defaults.launch_template_version, null)
-  launch_template_description     = try(each.value.launch_template_description, var.self_managed_node_group_defaults.launch_template_description, "Custom launch template for ${try(each.value.name, each.key)} self managed node group")
-  launch_template_tags            = try(each.value.launch_template_tags, var.self_managed_node_group_defaults.launch_template_tags, {})
+  create_launch_template                  = try(each.value.create_launch_template, var.self_managed_node_group_defaults.create_launch_template, true)
+  launch_template_id                      = try(each.value.launch_template_id, var.self_managed_node_group_defaults.launch_template_id, "")
+  launch_template_name                    = try(each.value.launch_template_name, var.self_managed_node_group_defaults.launch_template_name, each.key)
+  launch_template_use_name_prefix         = try(each.value.launch_template_use_name_prefix, var.self_managed_node_group_defaults.launch_template_use_name_prefix, true)
+  launch_template_version                 = try(each.value.launch_template_version, var.self_managed_node_group_defaults.launch_template_version, null)
+  launch_template_default_version         = try(each.value.launch_template_default_version, var.self_managed_node_group_defaults.launch_template_default_version, null)
+  update_launch_template_default_version  = try(each.value.update_launch_template_default_version, var.self_managed_node_group_defaults.update_launch_template_default_version, true)
+  launch_template_description             = try(each.value.launch_template_description, var.self_managed_node_group_defaults.launch_template_description, "Custom launch template for ${try(each.value.name, each.key)} self managed node group")
+  launch_template_tags                    = try(each.value.launch_template_tags, var.self_managed_node_group_defaults.launch_template_tags, {})
+  tag_specifications                      = try(each.value.tag_specifications, var.self_managed_node_group_defaults.tag_specifications, ["instance", "volume", "network-interface"])
 
   ebs_optimized   = try(each.value.ebs_optimized, var.self_managed_node_group_defaults.ebs_optimized, null)
   ami_id          = try(each.value.ami_id, var.self_managed_node_group_defaults.ami_id, "")
-  cluster_version = try(each.value.cluster_version, var.self_managed_node_group_defaults.cluster_version, aws_eks_cluster.this[0].version)
+  cluster_version = try(each.value.cluster_version, var.self_managed_node_group_defaults.cluster_version, time_sleep.this[0].triggers["cluster_version"])
try(each.value.cluster_version, var.self_managed_node_group_defaults.cluster_version, time_sleep.this[0].triggers["cluster_version"]) instance_type = try(each.value.instance_type, var.self_managed_node_group_defaults.instance_type, "m6i.large") key_name = try(each.value.key_name, var.self_managed_node_group_defaults.key_name, null) - launch_template_default_version = try(each.value.launch_template_default_version, var.self_managed_node_group_defaults.launch_template_default_version, null) - update_launch_template_default_version = try(each.value.update_launch_template_default_version, var.self_managed_node_group_defaults.update_launch_template_default_version, true) - disable_api_termination = try(each.value.disable_api_termination, var.self_managed_node_group_defaults.disable_api_termination, null) - instance_initiated_shutdown_behavior = try(each.value.instance_initiated_shutdown_behavior, var.self_managed_node_group_defaults.instance_initiated_shutdown_behavior, null) - kernel_id = try(each.value.kernel_id, var.self_managed_node_group_defaults.kernel_id, null) - ram_disk_id = try(each.value.ram_disk_id, var.self_managed_node_group_defaults.ram_disk_id, null) + disable_api_termination = try(each.value.disable_api_termination, var.self_managed_node_group_defaults.disable_api_termination, null) + instance_initiated_shutdown_behavior = try(each.value.instance_initiated_shutdown_behavior, var.self_managed_node_group_defaults.instance_initiated_shutdown_behavior, null) + kernel_id = try(each.value.kernel_id, var.self_managed_node_group_defaults.kernel_id, null) + ram_disk_id = try(each.value.ram_disk_id, var.self_managed_node_group_defaults.ram_disk_id, null) block_device_mappings = try(each.value.block_device_mappings, var.self_managed_node_group_defaults.block_device_mappings, {}) capacity_reservation_specification = try(each.value.capacity_reservation_specification, var.self_managed_node_group_defaults.capacity_reservation_specification, {}) @@ -453,12 +510,16 @@ module "self_managed_node_group" { elastic_inference_accelerator = try(each.value.elastic_inference_accelerator, var.self_managed_node_group_defaults.elastic_inference_accelerator, {}) enclave_options = try(each.value.enclave_options, var.self_managed_node_group_defaults.enclave_options, {}) hibernation_options = try(each.value.hibernation_options, var.self_managed_node_group_defaults.hibernation_options, {}) + instance_requirements = try(each.value.instance_requirements, var.self_managed_node_group_defaults.instance_requirements, {}) instance_market_options = try(each.value.instance_market_options, var.self_managed_node_group_defaults.instance_market_options, {}) license_specifications = try(each.value.license_specifications, var.self_managed_node_group_defaults.license_specifications, {}) metadata_options = try(each.value.metadata_options, var.self_managed_node_group_defaults.metadata_options, local.metadata_options) enable_monitoring = try(each.value.enable_monitoring, var.self_managed_node_group_defaults.enable_monitoring, true) + enable_efa_support = try(each.value.enable_efa_support, var.self_managed_node_group_defaults.enable_efa_support, false) network_interfaces = try(each.value.network_interfaces, var.self_managed_node_group_defaults.network_interfaces, []) placement = try(each.value.placement, var.self_managed_node_group_defaults.placement, {}) + maintenance_options = try(each.value.maintenance_options, var.self_managed_node_group_defaults.maintenance_options, {}) + private_dns_name_options = 
try(each.value.private_dns_name_options, var.self_managed_node_group_defaults.private_dns_name_options, {}) # IAM role create_iam_instance_profile = try(each.value.create_iam_instance_profile, var.self_managed_node_group_defaults.create_iam_instance_profile, true) @@ -470,19 +531,21 @@ module "self_managed_node_group" { iam_role_permissions_boundary = try(each.value.iam_role_permissions_boundary, var.self_managed_node_group_defaults.iam_role_permissions_boundary, null) iam_role_tags = try(each.value.iam_role_tags, var.self_managed_node_group_defaults.iam_role_tags, {}) iam_role_attach_cni_policy = try(each.value.iam_role_attach_cni_policy, var.self_managed_node_group_defaults.iam_role_attach_cni_policy, true) - iam_role_additional_policies = try(each.value.iam_role_additional_policies, var.self_managed_node_group_defaults.iam_role_additional_policies, []) + # To better understand why this `lookup()` logic is required, see: + # https://github.com/hashicorp/terraform/issues/31646#issuecomment-1217279031 + iam_role_additional_policies = lookup(each.value, "iam_role_additional_policies", lookup(var.self_managed_node_group_defaults, "iam_role_additional_policies", {})) + + # Access entry + create_access_entry = try(each.value.create_access_entry, var.self_managed_node_group_defaults.create_access_entry, true) + iam_role_arn = try(each.value.iam_role_arn, var.self_managed_node_group_defaults.iam_role_arn, null) + + # Autoscaling group schedule + create_schedule = try(each.value.create_schedule, var.self_managed_node_group_defaults.create_schedule, true) + schedules = try(each.value.schedules, var.self_managed_node_group_defaults.schedules, {}) # Security group vpc_security_group_ids = compact(concat([local.node_security_group_id], try(each.value.vpc_security_group_ids, var.self_managed_node_group_defaults.vpc_security_group_ids, []))) - cluster_security_group_id = local.cluster_security_group_id cluster_primary_security_group_id = try(each.value.attach_cluster_primary_security_group, var.self_managed_node_group_defaults.attach_cluster_primary_security_group, false) ? 
aws_eks_cluster.this[0].vpc_config[0].cluster_security_group_id : null - create_security_group = try(each.value.create_security_group, var.self_managed_node_group_defaults.create_security_group, true) - security_group_name = try(each.value.security_group_name, var.self_managed_node_group_defaults.security_group_name, null) - security_group_use_name_prefix = try(each.value.security_group_use_name_prefix, var.self_managed_node_group_defaults.security_group_use_name_prefix, true) - security_group_description = try(each.value.security_group_description, var.self_managed_node_group_defaults.security_group_description, "Self managed node group security group") - vpc_id = try(each.value.vpc_id, var.self_managed_node_group_defaults.vpc_id, var.vpc_id) - security_group_rules = try(each.value.security_group_rules, var.self_managed_node_group_defaults.security_group_rules, {}) - security_group_tags = try(each.value.security_group_tags, var.self_managed_node_group_defaults.security_group_tags, {}) tags = merge(var.tags, try(each.value.tags, var.self_managed_node_group_defaults.tags, {})) } diff --git a/modules/aws-eks/outputs.tf b/modules/aws-eks/outputs.tf index 8ee139c..45b68a4 100644 --- a/modules/aws-eks/outputs.tf +++ b/modules/aws-eks/outputs.tf @@ -4,47 +4,96 @@ output "cluster_arn" { description = "The Amazon Resource Name (ARN) of the cluster" - value = try(aws_eks_cluster.this[0].arn, "") + value = try(aws_eks_cluster.this[0].arn, null) + + depends_on = [ + aws_eks_access_entry.this, + aws_eks_access_policy_association.this, + ] } output "cluster_certificate_authority_data" { description = "Base64 encoded certificate data required to communicate with the cluster" - value = try(aws_eks_cluster.this[0].certificate_authority[0].data, "") + value = try(aws_eks_cluster.this[0].certificate_authority[0].data, null) + + depends_on = [ + aws_eks_access_entry.this, + aws_eks_access_policy_association.this, + ] } output "cluster_endpoint" { description = "Endpoint for your Kubernetes API server" - value = try(aws_eks_cluster.this[0].endpoint, "") + value = try(aws_eks_cluster.this[0].endpoint, null) + + depends_on = [ + aws_eks_access_entry.this, + aws_eks_access_policy_association.this, + ] } output "cluster_id" { - description = "The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready" - value = try(aws_eks_cluster.this[0].id, "") + description = "The ID of the EKS cluster. Note: currently a value is returned only for local EKS clusters created on Outposts" + value = try(aws_eks_cluster.this[0].cluster_id, "") +} + +output "cluster_name" { + description = "The name of the EKS cluster" + value = try(aws_eks_cluster.this[0].name, "") + + depends_on = [ + aws_eks_access_entry.this, + aws_eks_access_policy_association.this, + ] } output "cluster_oidc_issuer_url" { description = "The URL on the EKS cluster for the OpenID Connect identity provider" - value = try(aws_eks_cluster.this[0].identity[0].oidc[0].issuer, "") + value = try(aws_eks_cluster.this[0].identity[0].oidc[0].issuer, null) } output "cluster_version" { description = "The Kubernetes version for the cluster" - value = try(aws_eks_cluster.this[0].version, "") + value = try(aws_eks_cluster.this[0].version, null) } output "cluster_platform_version" { description = "Platform version for the cluster" - value = try(aws_eks_cluster.this[0].platform_version, "") + value = try(aws_eks_cluster.this[0].platform_version, null) } output "cluster_status" { description = "Status of the EKS cluster. 
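To make the try()/lookup() fallback chain above concrete, here is a minimal, illustrative root-module sketch (not part of this diff). It assumes the module iterates over a `self_managed_node_groups` map whose entries feed `each.value`, with `var.self_managed_node_group_defaults` supplying group-wide defaults; the group name, sizes, network IDs, and maintenance-policy percentages are hypothetical.

module "eks" {
  source = "./modules/aws-eks"

  cluster_name    = "demo"                    # hypothetical
  cluster_version = "1.29"                    # hypothetical
  vpc_id          = "vpc-0123456789abcdef0"   # hypothetical
  subnet_ids      = ["subnet-aaa", "subnet-bbb"] # hypothetical

  # Defaults apply to every group unless a group overrides the key
  self_managed_node_group_defaults = {
    instance_type = "m6i.large"
  }

  self_managed_node_groups = {
    workers = {
      min_size = 1
      max_size = 3

      # Resolved by the fallback chain shown above; only the keys you set
      # need to exist
      instance_maintenance_policy = {
        min_healthy_percentage = 90
        max_healthy_percentage = 110
      }
    }
  }
}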
diff --git a/modules/aws-eks/outputs.tf b/modules/aws-eks/outputs.tf
index 8ee139c..45b68a4 100644
--- a/modules/aws-eks/outputs.tf
+++ b/modules/aws-eks/outputs.tf
@@ -4,47 +4,96 @@
 output "cluster_arn" {
   description = "The Amazon Resource Name (ARN) of the cluster"
-  value       = try(aws_eks_cluster.this[0].arn, "")
+  value       = try(aws_eks_cluster.this[0].arn, null)
+
+  depends_on = [
+    aws_eks_access_entry.this,
+    aws_eks_access_policy_association.this,
+  ]
 }

 output "cluster_certificate_authority_data" {
   description = "Base64 encoded certificate data required to communicate with the cluster"
-  value       = try(aws_eks_cluster.this[0].certificate_authority[0].data, "")
+  value       = try(aws_eks_cluster.this[0].certificate_authority[0].data, null)
+
+  depends_on = [
+    aws_eks_access_entry.this,
+    aws_eks_access_policy_association.this,
+  ]
 }

 output "cluster_endpoint" {
   description = "Endpoint for your Kubernetes API server"
-  value       = try(aws_eks_cluster.this[0].endpoint, "")
+  value       = try(aws_eks_cluster.this[0].endpoint, null)
+
+  depends_on = [
+    aws_eks_access_entry.this,
+    aws_eks_access_policy_association.this,
+  ]
 }

 output "cluster_id" {
-  description = "The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready"
-  value       = try(aws_eks_cluster.this[0].id, "")
+  description = "The ID of the EKS cluster. Note: currently a value is returned only for local EKS clusters created on Outposts"
+  value       = try(aws_eks_cluster.this[0].cluster_id, "")
+}
+
+output "cluster_name" {
+  description = "The name of the EKS cluster"
+  value       = try(aws_eks_cluster.this[0].name, "")
+
+  depends_on = [
+    aws_eks_access_entry.this,
+    aws_eks_access_policy_association.this,
+  ]
 }

 output "cluster_oidc_issuer_url" {
   description = "The URL on the EKS cluster for the OpenID Connect identity provider"
-  value       = try(aws_eks_cluster.this[0].identity[0].oidc[0].issuer, "")
+  value       = try(aws_eks_cluster.this[0].identity[0].oidc[0].issuer, null)
 }

 output "cluster_version" {
   description = "The Kubernetes version for the cluster"
-  value       = try(aws_eks_cluster.this[0].version, "")
+  value       = try(aws_eks_cluster.this[0].version, null)
 }

 output "cluster_platform_version" {
   description = "Platform version for the cluster"
-  value       = try(aws_eks_cluster.this[0].platform_version, "")
+  value       = try(aws_eks_cluster.this[0].platform_version, null)
 }

 output "cluster_status" {
   description = "Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED`"
-  value       = try(aws_eks_cluster.this[0].status, "")
+  value       = try(aws_eks_cluster.this[0].status, null)
 }

 output "cluster_primary_security_group_id" {
   description = "Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console"
-  value       = try(aws_eks_cluster.this[0].vpc_config[0].cluster_security_group_id, "")
+  value       = try(aws_eks_cluster.this[0].vpc_config[0].cluster_security_group_id, null)
+}
+
+output "cluster_service_cidr" {
+  description = "The CIDR block where Kubernetes pod and service IP addresses are assigned from"
+  value       = var.cluster_ip_family == "ipv6" ? try(aws_eks_cluster.this[0].kubernetes_network_config[0].service_ipv6_cidr, null) : try(aws_eks_cluster.this[0].kubernetes_network_config[0].service_ipv4_cidr, null)
+}
+
+output "cluster_ip_family" {
+  description = "The IP family used by the cluster (e.g. `ipv4` or `ipv6`)"
+  value       = try(aws_eks_cluster.this[0].kubernetes_network_config[0].ip_family, null)
+}
+
+################################################################################
+# Access Entry
+################################################################################
+
+output "access_entries" {
+  description = "Map of access entries created and their attributes"
+  value       = aws_eks_access_entry.this
+}
+
+output "access_policy_associations" {
+  description = "Map of eks cluster access policy associations created and their attributes"
+  value       = aws_eks_access_policy_association.this
 }

 ################################################################################
@@ -72,12 +121,12 @@ output "kms_key_policy" {

 output "cluster_security_group_arn" {
   description = "Amazon Resource Name (ARN) of the cluster security group"
-  value       = try(aws_security_group.cluster[0].arn, "")
+  value       = try(aws_security_group.cluster[0].arn, null)
 }

 output "cluster_security_group_id" {
   description = "ID of the cluster security group"
-  value       = try(aws_security_group.cluster[0].id, "")
+  value       = try(aws_security_group.cluster[0].id, null)
 }

 ################################################################################
@@ -86,12 +135,12 @@ output "cluster_security_group_id" {

 output "node_security_group_arn" {
   description = "Amazon Resource Name (ARN) of the node shared security group"
-  value       = try(aws_security_group.node[0].arn, "")
+  value       = try(aws_security_group.node[0].arn, null)
 }

 output "node_security_group_id" {
   description = "ID of the node shared security group"
-  value       = try(aws_security_group.node[0].id, "")
+  value       = try(aws_security_group.node[0].id, null)
 }

 ################################################################################
@@ -100,12 +149,17 @@ output "node_security_group_id" {

 output "oidc_provider" {
   description = "The OpenID Connect identity provider (issuer URL without leading `https://`)"
-  value       = try(replace(aws_eks_cluster.this[0].identity[0].oidc[0].issuer, "https://", ""), "")
+  value       = try(replace(aws_eks_cluster.this[0].identity[0].oidc[0].issuer, "https://", ""), null)
 }

 output "oidc_provider_arn" {
   description = "The ARN of the OIDC Provider if `enable_irsa = true`"
-  value       = try(aws_iam_openid_connect_provider.oidc_provider[0].arn, "")
+  value       = try(aws_iam_openid_connect_provider.oidc_provider[0].arn, null)
+}
+
+output "cluster_tls_certificate_sha1_fingerprint" {
+  description = "The SHA1 fingerprint of the public key of the cluster's certificate"
+  value       = try(data.tls_certificate.this[0].certificates[0].sha1_fingerprint, null)
 }

 ################################################################################
@@ -114,17 +168,17 @@ output "oidc_provider_arn" {

 output "cluster_iam_role_name" {
   description = "IAM role name of the EKS cluster"
-  value       = try(aws_iam_role.this[0].name, "")
+  value       = try(aws_iam_role.this[0].name, null)
 }

 output "cluster_iam_role_arn" {
   description = "IAM role ARN of the EKS cluster"
-  value       = try(aws_iam_role.this[0].arn, "")
+  value       = try(aws_iam_role.this[0].arn, null)
 }

 output "cluster_iam_role_unique_id" {
   description = "Stable and unique string identifying the IAM role"
-  value       = try(aws_iam_role.this[0].unique_id, "")
+  value       = try(aws_iam_role.this[0].unique_id, null)
 }

 ################################################################################
@@ -133,7 +187,7 @@ output "cluster_iam_role_unique_id" {

 output "cluster_addons" {
   description = "Map of attribute maps for all EKS cluster addons enabled"
-  value       = aws_eks_addon.this
+  value       = merge(aws_eks_addon.this, aws_eks_addon.before_compute)
 }

 ################################################################################
@@ -151,12 +205,12 @@ output "cluster_identity_providers" {

 output "cloudwatch_log_group_name" {
   description = "Name of cloudwatch log group created"
-  value       = try(aws_cloudwatch_log_group.this[0].name, "")
+  value       = try(aws_cloudwatch_log_group.this[0].name, null)
 }

 output "cloudwatch_log_group_arn" {
   description = "Arn of cloudwatch log group created"
-  value       = try(aws_cloudwatch_log_group.this[0].arn, "")
+  value       = try(aws_cloudwatch_log_group.this[0].arn, null)
 }

 ################################################################################
@@ -177,11 +231,6 @@ output "eks_managed_node_groups" {
   description = "Map of attribute maps for all EKS managed node groups created"
   value       = module.eks_managed_node_group
 }

-output "eks_managed_node_groups_iam_role_arn" {
-  description = "Map of attribute maps for all EKS managed node groups created"
-  value       = module.eks_managed_node_group["linux"].iam_role_arn
-}
-
 output "eks_managed_node_groups_autoscaling_group_names" {
   description = "List of the autoscaling group names created by EKS managed node groups"
   value       = compact(flatten([for group in module.eks_managed_node_group : group.node_group_autoscaling_group_names]))
@@ -200,24 +249,3 @@ output "self_managed_node_groups_autoscaling_group_names" {
   description = "List of the autoscaling group names created by self-managed node groups"
   value       = compact([for group in module.self_managed_node_group : group.autoscaling_group_name])
 }
-
-################################################################################
-# Additional
-################################################################################
-
-output "aws_auth_configmap_yaml" {
-  description = "[DEPRECATED - use `var.manage_aws_auth_configmap`] Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles"
-  value = templatefile("${path.module}/templates/aws_auth_cm.tpl",
-    {
-      eks_managed_role_arns                   = distinct(compact([for group in module.eks_managed_node_group : group.iam_role_arn]))
-      self_managed_role_arns                  = distinct(compact([for group in module.self_managed_node_group : group.iam_role_arn if group.platform != "windows"]))
-      win32_self_managed_role_arns            = distinct(compact([for group in module.self_managed_node_group : group.iam_role_arn if group.platform == "windows"]))
-      fargate_profile_pod_execution_role_arns = distinct(compact([for group in module.fargate_profile : group.fargate_profile_pod_execution_role_arn]))
-    }
-  )
-}
-
-output "cluster_name" {
-  description = "Name of the EKS cluster"
-  value       = var.cluster_name
-}
\ No newline at end of file
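Because these outputs now fall back to `null`, and `cluster_name` now reads from the cluster resource rather than echoing `var.cluster_name`, the usual consumption pattern still works; the sketch below is illustrative, assuming the module is instantiated as `module "eks"` and the AWS CLI is available for token retrieval.

provider "kubernetes" {
  host                   = module.eks.cluster_endpoint
  cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)

  exec {
    api_version = "client.authentication.k8s.io/v1beta1"
    command     = "aws"
    # Token is fetched at apply time, so no static credentials are stored
    args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name]
  }
}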
diff --git a/modules/aws-eks/variables.tf b/modules/aws-eks/variables.tf
index 5b05540..eaacc82 100644
--- a/modules/aws-eks/variables.tf
+++ b/modules/aws-eks/variables.tf
@@ -1,5 +1,5 @@
 variable "create" {
-  description = "Controls if EKS resources should be created (affects nearly all resources)"
+  description = "Controls if resources should be created (affects nearly all resources)"
   type        = bool
   default     = true
 }
@@ -27,7 +27,7 @@ variable "cluster_name" {
 }

 variable "cluster_version" {
-  description = "Kubernetes `<major>.<minor>` version to use for the EKS cluster (i.e.: `1.22`)"
+  description = "Kubernetes `<major>.<minor>` version to use for the EKS cluster (i.e.: `1.27`)"
   type        = string
   default     = null
 }
@@ -38,34 +38,40 @@ variable "cluster_enabled_log_types" {
   default     = ["audit", "api", "authenticator"]
 }

+variable "authentication_mode" {
+  description = "The authentication mode for the cluster. Valid values are `CONFIG_MAP`, `API` or `API_AND_CONFIG_MAP`"
+  type        = string
+  default     = "API_AND_CONFIG_MAP"
+}
+
 variable "cluster_additional_security_group_ids" {
   description = "List of additional, externally created security group IDs to attach to the cluster control plane"
   type        = list(string)
   default     = []
 }

-variable "subnet_ids" {
-  description = "A list of subnet IDs where the nodes/node groups will be provisioned."
+variable "control_plane_subnet_ids" {
+  description = "A list of subnet IDs where the EKS cluster control plane (ENIs) will be provisioned. Used for expanding the pool of subnets used by nodes/node groups without replacing the EKS control plane"
   type        = list(string)
   default     = []
 }

-variable "subnet_id_names" {
-  description = "name of subnet ID's"
-  type        = string
-  default     = "*"
+variable "subnet_ids" {
+  description = "A list of subnet IDs where the nodes/node groups will be provisioned. If `control_plane_subnet_ids` is not provided, the EKS cluster control plane (ENIs) will be provisioned in these subnets"
+  type        = list(string)
+  default     = []
 }

 variable "cluster_endpoint_private_access" {
   description = "Indicates whether or not the Amazon EKS private API server endpoint is enabled"
   type        = bool
-  default     = false
+  default     = true
 }

 variable "cluster_endpoint_public_access" {
   description = "Indicates whether or not the Amazon EKS public API server endpoint is enabled"
   type        = bool
-  default     = true
+  default     = false
 }

 variable "cluster_endpoint_public_access_cidrs" {
@@ -77,7 +83,7 @@ variable "cluster_ip_family" {
   description = "The IP family used to assign Kubernetes pod and service addresses. Valid values are `ipv4` (default) and `ipv6`. You can only specify an IP family when you create a cluster, changing this value will force a new cluster to be created"
   type        = string
-  default     = null
+  default     = "ipv4"
 }

 variable "cluster_service_ipv4_cidr" {
@@ -86,10 +92,24 @@
   default     = null
 }

+variable "cluster_service_ipv6_cidr" {
+  description = "The CIDR block to assign Kubernetes pod and service IP addresses from if `ipv6` was specified when the cluster was created. Kubernetes assigns service addresses from the unique local address range (fc00::/7) because you can't specify a custom IPv6 CIDR block when you create the cluster"
+  type        = string
+  default     = null
+}
+
+variable "outpost_config" {
+  description = "Configuration for the AWS Outpost to provision the cluster on"
+  type        = any
+  default     = {}
+}
+
 variable "cluster_encryption_config" {
-  description = "Configuration block with encryption configuration for the cluster"
-  type        = list(any)
-  default     = []
+  description = "Configuration block with encryption configuration for the cluster. To disable secret encryption, set this value to `{}`"
+  type        = any
+  default = {
+    resources = ["secrets"]
+  }
 }

 variable "attach_cluster_encryption_policy" {
@@ -116,6 +136,22 @@ variable "cluster_timeouts" {
   default     = {}
 }

+################################################################################
+# Access Entry
+################################################################################
+
+variable "access_entries" {
+  description = "Map of access entries to add to the cluster"
+  type        = any
+  default     = {}
+}
+
+variable "enable_cluster_creator_admin_permissions" {
+  description = "Indicates whether or not to add the cluster creator (the identity used by Terraform) as an administrator via access entry"
+  type        = bool
+  default     = false
+}
+
 ################################################################################
 # KMS Key
 ################################################################################
@@ -123,7 +159,7 @@
 variable "create_kms_key" {
   description = "Controls if a KMS key for cluster encryption should be created"
   type        = bool
-  default     = false
+  default     = true
 }

 variable "kms_key_description" {
@@ -139,15 +175,15 @@ variable "kms_key_deletion_window_in_days" {
 }

 variable "enable_kms_key_rotation" {
-  description = "Specifies whether key rotation is enabled. Defaults to `true`"
+  description = "Specifies whether key rotation is enabled"
   type        = bool
   default     = true
 }

 variable "kms_key_enable_default_policy" {
-  description = "Specifies whether to enable the default key policy. Defaults to `false`"
+  description = "Specifies whether to enable the default key policy"
   type        = bool
-  default     = false
+  default     = true
 }

 variable "kms_key_owners" {
@@ -214,24 +250,36 @@ variable "cloudwatch_log_group_kms_key_id" {
   default     = null
 }

+variable "cloudwatch_log_group_class" {
+  description = "Specifies the log class of the log group. Possible values are: `STANDARD` or `INFREQUENT_ACCESS`"
+  type        = string
+  default     = null
+}
+
+variable "cloudwatch_log_group_tags" {
+  description = "A map of additional tags to add to the cloudwatch log group created"
+  type        = map(string)
+  default     = {}
+}
+
 ################################################################################
 # Cluster Security Group
 ################################################################################

 variable "create_cluster_security_group" {
-  description = "Determines if a security group is created for the cluster or use the existing `cluster_security_group_id`"
+  description = "Determines if a security group is created for the cluster. Note: the EKS service creates a primary security group for the cluster by default"
   type        = bool
   default     = true
 }

 variable "cluster_security_group_id" {
-  description = "Existing security group ID to be attached to the cluster. Required if `create_cluster_security_group` = `false`"
+  description = "Existing security group ID to be attached to the cluster"
   type        = string
   default     = ""
 }

 variable "vpc_id" {
-  description = "ID of the VPC where the cluster and its nodes will be provisioned"
+  description = "ID of the VPC where the cluster security group will be provisioned"
   type        = string
   default     = null
 }

@@ -316,24 +364,22 @@ variable "node_security_group_additional_rules" {
   default     = {}
 }

+variable "node_security_group_enable_recommended_rules" {
+  description = "Determines whether to enable recommended security group rules for the node security group created. This includes node-to-node TCP ingress on ephemeral ports and allows all egress traffic"
+  type        = bool
+  default     = true
+}
+
 variable "node_security_group_tags" {
   description = "A map of additional tags to add to the node security group created"
   type        = map(string)
   default     = {}
 }

-# TODO - at next breaking change, make 169.254.169.123/32 the default
-variable "node_security_group_ntp_ipv4_cidr_block" {
-  description = "IPv4 CIDR block to allow NTP egress. Default is public IP space, but [Amazon Time Sync Service](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/set-time.html) can be used as well with `[\"169.254.169.123/32\"]`"
-  type        = list(string)
-  default     = ["0.0.0.0/0"]
-}
-
-# TODO - at next breaking change, make fd00:ec2::123/128 the default
-variable "node_security_group_ntp_ipv6_cidr_block" {
-  description = "IPv4 CIDR block to allow NTP egress. Default is public IP space, but [Amazon Time Sync Service](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/set-time.html) can be used as well with `[\"fd00:ec2::123/128\"]`"
-  type        = list(string)
-  default     = ["::/0"]
+variable "enable_efa_support" {
+  description = "Determines whether to enable Elastic Fabric Adapter (EFA) support"
+  type        = bool
+  default     = false
 }

 ################################################################################
@@ -352,6 +398,12 @@ variable "openid_connect_audiences" {
   default     = []
 }

+variable "include_oidc_root_ca_thumbprint" {
+  description = "Determines whether to include the root CA thumbprint in the OpenID Connect (OIDC) identity provider's server certificate(s)"
+  type        = bool
+  default     = true
+}
+
 variable "custom_oidc_thumbprints" {
   description = "Additional list of server certificate thumbprints for the OpenID Connect (OIDC) identity provider's server certificate(s)"
   type        = list(string)
@@ -406,14 +458,8 @@ variable "iam_role_permissions_boundary" {

 variable "iam_role_additional_policies" {
   description = "Additional policies to be added to the IAM role"
-  type        = list(string)
-  default     = []
-}
-
-variable "cluster_iam_role_dns_suffix" {
-  description = "Base DNS domain name for the current partition (e.g., amazonaws.com in AWS Commercial, amazonaws.com.cn in AWS China)"
-  type        = string
-  default     = null
+  type        = map(string)
+  default     = {}
 }

 variable "iam_role_tags" {
@@ -452,6 +498,12 @@ variable "cluster_encryption_policy_tags" {
   default     = {}
 }

+variable "dataplane_wait_duration" {
+  description = "Duration to wait after the EKS cluster has become active before creating the dataplane components (EKS managed node group(s), self-managed node group(s), Fargate profile(s))"
+  type        = string
+  default     = "30s"
+}
+
 ################################################################################
 # EKS Addons
 ################################################################################
@@ -462,6 +514,12 @@ variable "cluster_addons" {
   default     = {}
 }

"cluster_addons_timeouts" { + description = "Create, update, and delete timeout configurations for the cluster addons" + type = map(string) + default = {} +} + ################################################################################ # EKS Identity Provider ################################################################################ @@ -519,56 +577,3 @@ variable "eks_managed_node_group_defaults" { type = any default = {} } - - -################################################################################ -# aws-auth configmap -################################################################################ - -variable "manage_aws_auth_configmap" { - description = "Determines whether to manage the aws-auth configmap" - type = bool - default = false -} - -variable "create_aws_auth_configmap" { - description = "Determines whether to create the aws-auth configmap. NOTE - this is only intended for scenarios where the configmap does not exist (i.e. - when using only self-managed node groups). Most users should use `manage_aws_auth_configmap`" - type = bool - default = false -} - -variable "aws_auth_node_iam_role_arns_non_windows" { - description = "List of non-Windows based node IAM role ARNs to add to the aws-auth configmap" - type = list(string) - default = [] -} - -variable "aws_auth_node_iam_role_arns_windows" { - description = "List of Windows based node IAM role ARNs to add to the aws-auth configmap" - type = list(string) - default = [] -} - -variable "aws_auth_fargate_profile_pod_execution_role_arns" { - description = "List of Fargate profile pod execution role ARNs to add to the aws-auth configmap" - type = list(string) - default = [] -} - -variable "aws_auth_roles" { - description = "List of role maps to add to the aws-auth configmap" - type = list(any) - default = [] -} - -variable "aws_auth_users" { - description = "List of user maps to add to the aws-auth configmap" - type = list(any) - default = [] -} - -variable "aws_auth_accounts" { - description = "List of account maps to add to the aws-auth configmap" - type = list(any) - default = [] -} diff --git a/modules/aws-eks/versions.tf b/modules/aws-eks/versions.tf index fde7af0..d0f347a 100644 --- a/modules/aws-eks/versions.tf +++ b/modules/aws-eks/versions.tf @@ -1,18 +1,18 @@ terraform { - required_version = ">= 0.13.1" + required_version = ">= 1.3.2" required_providers { aws = { source = "hashicorp/aws" - version = ">= 3.72" + version = ">= 5.40" } tls = { source = "hashicorp/tls" version = ">= 3.0" } - kubernetes = { - source = "hashicorp/kubernetes" - version = ">= 2.10" + time = { + source = "hashicorp/time" + version = ">= 0.9" } } }