diff --git a/.github/workflows/e2e-parallel-full.yml b/.github/workflows/e2e-parallel-full.yml index 207527918f..9e80a2e819 100644 --- a/.github/workflows/e2e-parallel-full.yml +++ b/.github/workflows/e2e-parallel-full.yml @@ -61,7 +61,7 @@ jobs: - name: Ensure log groups are removed run: | pip3 install boto3 - python3 .github/workflows/delete-log-groups.py + python3 .github/scripts/delete-log-groups.py - name: Iamlive Setup & Run run: | diff --git a/.github/workflows/iam-policy-generator.py b/.github/workflows/iam-policy-generator.py deleted file mode 100644 index 6b9022d3a7..0000000000 --- a/.github/workflows/iam-policy-generator.py +++ /dev/null @@ -1,35 +0,0 @@ -import json -import boto3 -import os - -iam_actions = [] -s3 = boto3.resource('s3') -bucket_name = os.getenv('BUCKET_NAME') -bucket = s3.Bucket(bucket_name) -bucket_files = [x.key for x in bucket.objects.all()] - -# Read all the files from the bucket -for file in bucket_files: - obj = s3.Object(bucket_name, file) - f = obj.get()['Body'].read() - data = json.loads(f) - # Merge all policies actions, keep them unique with 'set' - for statement in data['Statement']: - iam_actions = list(set(iam_actions + statement['Action'])) - -# Skeleton IAM policy template , wild card all resources for now. -template = { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - ], - "Resource": "*" - } - ] -} - -# Apply merged actions to the skeleton IAM policy -template['Statement'][0]['Action'] = sorted(iam_actions) -print(json.dumps(template, indent=4)) diff --git a/.github/workflows/plan-examples.py b/.github/workflows/plan-examples.py deleted file mode 100644 index 4c24a6bf95..0000000000 --- a/.github/workflows/plan-examples.py +++ /dev/null @@ -1,30 +0,0 @@ -import json -import glob -import re - - -def get_examples(): - """ - Get all Terraform example root directories using their respective `versions.tf`; - returning a string formatted json array of the example directories minus those that are excluded - """ - exclude = { - 'examples/appmesh-mtls', # excluded until Rout53 is setup - 'examples/eks-cluster-with-external-dns', # excluded until Rout53 is setup - 'examples/fully-private-eks-cluster/vpc', # skipping until issue #711 is addressed - 'examples/fully-private-eks-cluster/eks', - 'examples/fully-private-eks-cluster/add-ons', - 'examples/ai-ml/ray', # excluded until #887 is fixed - } - - projects = { - x.replace('/versions.tf', '') - for x in glob.glob('examples/**/versions.tf', recursive=True) - if not re.match(r'^.+/_', x) - } - - print(json.dumps(list(projects.difference(exclude)))) - - -if __name__ == '__main__': - get_examples() diff --git a/README.md b/README.md index c88be7c5f0..0af9b94da8 100644 --- a/README.md +++ b/README.md @@ -37,161 +37,9 @@ AWS customers have asked for examples that demonstrate how to integrate the land ## Support & Feedback EKS Blueprints for Terraform is maintained by AWS Solution Architects. It is not part of an AWS service and support is provided best-effort by the EKS Blueprints community. - To post feedback, submit feature ideas, or report bugs, please use the [Issues section](https://github.com/aws-ia/terraform-aws-eks-blueprints/issues) of this GitHub repo. - If you are interested in contributing to EKS Blueprints, see the [Contribution guide](https://github.com/aws-ia/terraform-aws-eks-blueprints/blob/main/CONTRIBUTING.md). 
- -## Requirements - -| Name | Version | -|------|---------| -| [terraform](#requirement\_terraform) | >= 1.0.0 | -| [aws](#requirement\_aws) | >= 3.72 | -| [helm](#requirement\_helm) | >= 2.4.1 | -| [http](#requirement\_http) | 2.4.1 | -| [kubectl](#requirement\_kubectl) | >= 1.14 | -| [kubernetes](#requirement\_kubernetes) | >= 2.10 | -| [local](#requirement\_local) | >= 2.1 | -| [null](#requirement\_null) | >= 3.1 | - -## Providers - -| Name | Version | -|------|---------| -| [aws](#provider\_aws) | >= 3.72 | -| [http](#provider\_http) | 2.4.1 | -| [kubernetes](#provider\_kubernetes) | >= 2.10 | - -## Modules - -| Name | Source | Version | -|------|--------|---------| -| [aws\_eks](#module\_aws\_eks) | terraform-aws-modules/eks/aws | v18.26.6 | -| [aws\_eks\_fargate\_profiles](#module\_aws\_eks\_fargate\_profiles) | ./modules/aws-eks-fargate-profiles | n/a | -| [aws\_eks\_managed\_node\_groups](#module\_aws\_eks\_managed\_node\_groups) | ./modules/aws-eks-managed-node-groups | n/a | -| [aws\_eks\_self\_managed\_node\_groups](#module\_aws\_eks\_self\_managed\_node\_groups) | ./modules/aws-eks-self-managed-node-groups | n/a | -| [aws\_eks\_teams](#module\_aws\_eks\_teams) | ./modules/aws-eks-teams | n/a | -| [emr\_on\_eks](#module\_emr\_on\_eks) | ./modules/emr-on-eks | n/a | -| [kms](#module\_kms) | ./modules/aws-kms | n/a | - -## Resources - -| Name | Type | -|------|------| -| [kubernetes_config_map.amazon_vpc_cni](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/config_map) | resource | -| [kubernetes_config_map.aws_auth](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/config_map) | resource | -| [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source | -| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source | -| [aws_iam_policy_document.eks_key](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | -| [aws_iam_session_context.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_session_context) | data source | -| [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source | -| [aws_region.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/region) | data source | -| [http_http.eks_cluster_readiness](https://registry.terraform.io/providers/terraform-aws-modules/http/2.4.1/docs/data-sources/http) | data source | - -## Inputs - -| Name | Description | Type | Default | Required | -|------|-------------|------|---------|:--------:| -| [application\_teams](#input\_application\_teams) | Map of maps of Application Teams to create | `any` | `{}` | no | -| [aws\_auth\_additional\_labels](#input\_aws\_auth\_additional\_labels) | Additional kubernetes labels applied on aws-auth ConfigMap | `map(string)` | `{}` | no | -| [cloudwatch\_log\_group\_kms\_key\_id](#input\_cloudwatch\_log\_group\_kms\_key\_id) | If a KMS Key ARN is set, this key will be used to encrypt the corresponding log group. 
Please be sure that the KMS Key has an appropriate key policy (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/encrypt-log-data-kms.html) | `string` | `null` | no | -| [cloudwatch\_log\_group\_retention\_in\_days](#input\_cloudwatch\_log\_group\_retention\_in\_days) | Number of days to retain log events. Default retention - 90 days | `number` | `90` | no | -| [cluster\_additional\_security\_group\_ids](#input\_cluster\_additional\_security\_group\_ids) | List of additional, externally created security group IDs to attach to the cluster control plane | `list(string)` | `[]` | no | -| [cluster\_enabled\_log\_types](#input\_cluster\_enabled\_log\_types) | A list of the desired control plane logging to enable | `list(string)` |
<pre>[<br>  "api",<br>  "audit",<br>  "authenticator",<br>  "controllerManager",<br>  "scheduler"<br>]</pre>
| no | -| [cluster\_encryption\_config](#input\_cluster\_encryption\_config) | Configuration block with encryption configuration for the cluster |
<pre>list(object({<br>  provider_key_arn = string<br>  resources = list(string)<br>}))</pre>
| `[]` | no | -| [cluster\_endpoint\_private\_access](#input\_cluster\_endpoint\_private\_access) | Indicates whether or not the EKS private API server endpoint is enabled. Default to EKS resource and it is false | `bool` | `false` | no | -| [cluster\_endpoint\_public\_access](#input\_cluster\_endpoint\_public\_access) | Indicates whether or not the EKS public API server endpoint is enabled. Default to EKS resource and it is true | `bool` | `true` | no | -| [cluster\_endpoint\_public\_access\_cidrs](#input\_cluster\_endpoint\_public\_access\_cidrs) | List of CIDR blocks which can access the Amazon EKS public API server endpoint | `list(string)` |
<pre>[<br>  "0.0.0.0/0"<br>]</pre>
| no | -| [cluster\_identity\_providers](#input\_cluster\_identity\_providers) | Map of cluster identity provider configurations to enable for the cluster. Note - this is different/separate from IRSA | `any` | `{}` | no | -| [cluster\_ip\_family](#input\_cluster\_ip\_family) | The IP family used to assign Kubernetes pod and service addresses. Valid values are `ipv4` (default) and `ipv6`. You can only specify an IP family when you create a cluster, changing this value will force a new cluster to be created | `string` | `"ipv4"` | no | -| [cluster\_kms\_key\_additional\_admin\_arns](#input\_cluster\_kms\_key\_additional\_admin\_arns) | A list of additional IAM ARNs that should have FULL access (kms:*) in the KMS key policy | `list(string)` | `[]` | no | -| [cluster\_kms\_key\_arn](#input\_cluster\_kms\_key\_arn) | A valid EKS Cluster KMS Key ARN to encrypt Kubernetes secrets | `string` | `null` | no | -| [cluster\_kms\_key\_deletion\_window\_in\_days](#input\_cluster\_kms\_key\_deletion\_window\_in\_days) | The waiting period, specified in number of days (7 - 30). After the waiting period ends, AWS KMS deletes the KMS key | `number` | `30` | no | -| [cluster\_name](#input\_cluster\_name) | EKS Cluster Name | `string` | `""` | no | -| [cluster\_security\_group\_additional\_rules](#input\_cluster\_security\_group\_additional\_rules) | List of additional security group rules to add to the cluster security group created. Set `source_node_security_group = true` inside rules to set the `node_security_group` as source | `any` | `{}` | no | -| [cluster\_security\_group\_id](#input\_cluster\_security\_group\_id) | Security group to be used if creation of cluster security group is turned off | `string` | `""` | no | -| [cluster\_security\_group\_tags](#input\_cluster\_security\_group\_tags) | A map of additional tags to add to the cluster security group created | `map(string)` | `{}` | no | -| [cluster\_service\_ipv4\_cidr](#input\_cluster\_service\_ipv4\_cidr) | The CIDR block to assign Kubernetes service IP addresses from. If you don't specify a block, Kubernetes assigns addresses from either the 10.100.0.0/16 or 172.20.0.0/16 CIDR blocks | `string` | `null` | no | -| [cluster\_service\_ipv6\_cidr](#input\_cluster\_service\_ipv6\_cidr) | The IPV6 Service CIDR block to assign Kubernetes service IP addresses | `string` | `null` | no | -| [cluster\_timeouts](#input\_cluster\_timeouts) | Create, update, and delete timeout configurations for the cluster | `map(string)` | `{}` | no | -| [cluster\_version](#input\_cluster\_version) | Kubernetes `.` version to use for the EKS cluster (i.e.: `1.23`) | `string` | `"1.23"` | no | -| [control\_plane\_subnet\_ids](#input\_control\_plane\_subnet\_ids) | A list of subnet IDs where the EKS cluster control plane (ENIs) will be provisioned. Used for expanding the pool of subnets used by nodes/node groups without replacing the EKS control plane | `list(string)` | `[]` | no | -| [create\_cloudwatch\_log\_group](#input\_create\_cloudwatch\_log\_group) | Determines whether a log group is created by this module for the cluster logs. 
If not, AWS will automatically create one if logging is enabled | `bool` | `false` | no | -| [create\_cluster\_security\_group](#input\_create\_cluster\_security\_group) | Toggle to create or assign cluster security group | `bool` | `true` | no | -| [create\_eks](#input\_create\_eks) | Create EKS cluster | `bool` | `true` | no | -| [create\_iam\_role](#input\_create\_iam\_role) | Determines whether a an IAM role is created or to use an existing IAM role | `bool` | `true` | no | -| [create\_node\_security\_group](#input\_create\_node\_security\_group) | Determines whether to create a security group for the node groups or use the existing `node_security_group_id` | `bool` | `true` | no | -| [custom\_oidc\_thumbprints](#input\_custom\_oidc\_thumbprints) | Additional list of server certificate thumbprints for the OpenID Connect (OIDC) identity provider's server certificate(s) | `list(string)` | `[]` | no | -| [eks\_readiness\_timeout](#input\_eks\_readiness\_timeout) | The maximum time (in seconds) to wait for EKS API server endpoint to become healthy | `number` | `"600"` | no | -| [emr\_on\_eks\_teams](#input\_emr\_on\_eks\_teams) | EMR on EKS Teams config | `any` | `{}` | no | -| [enable\_emr\_on\_eks](#input\_enable\_emr\_on\_eks) | Enable EMR on EKS | `bool` | `false` | no | -| [enable\_irsa](#input\_enable\_irsa) | Determines whether to create an OpenID Connect Provider for EKS to enable IRSA | `bool` | `true` | no | -| [enable\_windows\_support](#input\_enable\_windows\_support) | Enable Windows support | `bool` | `false` | no | -| [fargate\_profiles](#input\_fargate\_profiles) | Fargate profile configuration | `any` | `{}` | no | -| [iam\_role\_additional\_policies](#input\_iam\_role\_additional\_policies) | Additional policies to be added to the IAM role | `list(string)` | `[]` | no | -| [iam\_role\_arn](#input\_iam\_role\_arn) | Existing IAM role ARN for the cluster. Required if `create_iam_role` is set to `false` | `string` | `null` | no | -| [iam\_role\_name](#input\_iam\_role\_name) | Name to use on IAM role created | `string` | `null` | no | -| [iam\_role\_path](#input\_iam\_role\_path) | Cluster IAM role path | `string` | `null` | no | -| [iam\_role\_permissions\_boundary](#input\_iam\_role\_permissions\_boundary) | ARN of the policy that is used to set the permissions boundary for the IAM role | `string` | `null` | no | -| [managed\_node\_groups](#input\_managed\_node\_groups) | Managed node groups configuration | `any` | `{}` | no | -| [map\_accounts](#input\_map\_accounts) | Additional AWS account numbers to add to the aws-auth ConfigMap | `list(string)` | `[]` | no | -| [map\_roles](#input\_map\_roles) | Additional IAM roles to add to the aws-auth ConfigMap |
<pre>list(object({<br>  rolearn = string<br>  username = string<br>  groups = list(string)<br>}))</pre>
| `[]` | no | -| [map\_users](#input\_map\_users) | Additional IAM users to add to the aws-auth ConfigMap |
<pre>list(object({<br>  userarn = string<br>  username = string<br>  groups = list(string)<br>}))</pre>
| `[]` | no | -| [node\_security\_group\_additional\_rules](#input\_node\_security\_group\_additional\_rules) | List of additional security group rules to add to the node security group created. Set `source_cluster_security_group = true` inside rules to set the `cluster_security_group` as source | `any` | `{}` | no | -| [node\_security\_group\_tags](#input\_node\_security\_group\_tags) | A map of additional tags to add to the node security group created | `map(string)` | `{}` | no | -| [openid\_connect\_audiences](#input\_openid\_connect\_audiences) | List of OpenID Connect audience client IDs to add to the IRSA provider | `list(string)` | `[]` | no | -| [platform\_teams](#input\_platform\_teams) | Map of maps of platform teams to create | `any` | `{}` | no | -| [private\_subnet\_ids](#input\_private\_subnet\_ids) | List of private subnets Ids for the cluster and worker nodes | `list(string)` | `[]` | no | -| [public\_subnet\_ids](#input\_public\_subnet\_ids) | List of public subnets Ids for the worker nodes | `list(string)` | `[]` | no | -| [self\_managed\_node\_groups](#input\_self\_managed\_node\_groups) | Self-managed node groups configuration | `any` | `{}` | no | -| [tags](#input\_tags) | Additional tags (e.g. `map('BusinessUnit`,`XYZ`) | `map(string)` | `{}` | no | -| [vpc\_id](#input\_vpc\_id) | VPC Id | `string` | n/a | yes | -| [worker\_additional\_security\_group\_ids](#input\_worker\_additional\_security\_group\_ids) | A list of additional security group ids to attach to worker instances | `list(string)` | `[]` | no | - -## Outputs - -| Name | Description | -|------|-------------| -| [cluster\_primary\_security\_group\_id](#output\_cluster\_primary\_security\_group\_id) | Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console | -| [cluster\_security\_group\_arn](#output\_cluster\_security\_group\_arn) | Amazon Resource Name (ARN) of the cluster security group | -| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | EKS Control Plane Security Group ID | -| [configure\_kubectl](#output\_configure\_kubectl) | Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig | -| [eks\_cluster\_arn](#output\_eks\_cluster\_arn) | Amazon EKS Cluster Name | -| [eks\_cluster\_certificate\_authority\_data](#output\_eks\_cluster\_certificate\_authority\_data) | Base64 encoded certificate data required to communicate with the cluster | -| [eks\_cluster\_endpoint](#output\_eks\_cluster\_endpoint) | Endpoint for your Kubernetes API server | -| [eks\_cluster\_id](#output\_eks\_cluster\_id) | Amazon EKS Cluster Name | -| [eks\_cluster\_status](#output\_eks\_cluster\_status) | Amazon EKS Cluster Status | -| [eks\_cluster\_version](#output\_eks\_cluster\_version) | The Kubernetes version for the cluster | -| [eks\_oidc\_issuer\_url](#output\_eks\_oidc\_issuer\_url) | The URL on the EKS cluster OIDC Issuer | -| [eks\_oidc\_provider\_arn](#output\_eks\_oidc\_provider\_arn) | The ARN of the OIDC Provider if `enable_irsa = true`. 
| -| [emr\_on\_eks\_role\_arn](#output\_emr\_on\_eks\_role\_arn) | IAM execution role ARN for EMR on EKS | -| [emr\_on\_eks\_role\_id](#output\_emr\_on\_eks\_role\_id) | IAM execution role ID for EMR on EKS | -| [fargate\_profiles](#output\_fargate\_profiles) | Outputs from EKS Fargate profiles groups | -| [fargate\_profiles\_aws\_auth\_config\_map](#output\_fargate\_profiles\_aws\_auth\_config\_map) | Fargate profiles AWS auth map | -| [fargate\_profiles\_iam\_role\_arns](#output\_fargate\_profiles\_iam\_role\_arns) | IAM role arn's for Fargate Profiles | -| [managed\_node\_group\_arn](#output\_managed\_node\_group\_arn) | Managed node group arn | -| [managed\_node\_group\_aws\_auth\_config\_map](#output\_managed\_node\_group\_aws\_auth\_config\_map) | Managed node groups AWS auth map | -| [managed\_node\_group\_iam\_instance\_profile\_arns](#output\_managed\_node\_group\_iam\_instance\_profile\_arns) | IAM instance profile arn's of managed node groups | -| [managed\_node\_group\_iam\_instance\_profile\_id](#output\_managed\_node\_group\_iam\_instance\_profile\_id) | IAM instance profile id of managed node groups | -| [managed\_node\_group\_iam\_role\_arns](#output\_managed\_node\_group\_iam\_role\_arns) | IAM role arn's of managed node groups | -| [managed\_node\_group\_iam\_role\_names](#output\_managed\_node\_group\_iam\_role\_names) | IAM role names of managed node groups | -| [managed\_node\_groups](#output\_managed\_node\_groups) | Outputs from EKS Managed node groups | -| [managed\_node\_groups\_id](#output\_managed\_node\_groups\_id) | EKS Managed node groups id | -| [managed\_node\_groups\_status](#output\_managed\_node\_groups\_status) | EKS Managed node groups status | -| [oidc\_provider](#output\_oidc\_provider) | The OpenID Connect identity provider (issuer URL without leading `https://`) | -| [self\_managed\_node\_group\_autoscaling\_groups](#output\_self\_managed\_node\_group\_autoscaling\_groups) | Autoscaling group names of self managed node groups | -| [self\_managed\_node\_group\_aws\_auth\_config\_map](#output\_self\_managed\_node\_group\_aws\_auth\_config\_map) | Self managed node groups AWS auth map | -| [self\_managed\_node\_group\_iam\_instance\_profile\_id](#output\_self\_managed\_node\_group\_iam\_instance\_profile\_id) | IAM instance profile id of managed node groups | -| [self\_managed\_node\_group\_iam\_role\_arns](#output\_self\_managed\_node\_group\_iam\_role\_arns) | IAM role arn's of self managed node groups | -| [self\_managed\_node\_groups](#output\_self\_managed\_node\_groups) | Outputs from EKS Self-managed node groups | -| [teams](#output\_teams) | Outputs from EKS Fargate profiles groups | -| [windows\_node\_group\_aws\_auth\_config\_map](#output\_windows\_node\_group\_aws\_auth\_config\_map) | Windows node groups AWS auth map | -| [worker\_node\_security\_group\_arn](#output\_worker\_node\_security\_group\_arn) | Amazon Resource Name (ARN) of the worker node shared security group | -| [worker\_node\_security\_group\_id](#output\_worker\_node\_security\_group\_id) | ID of the worker node shared security group | - - ## Security See [CONTRIBUTING](CONTRIBUTING.md#security-issue-notifications) for more information. 
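The example diffs that follow all apply the same migration: the in-repo `eks_blueprints` module is replaced by the upstream `terraform-aws-modules/eks/aws` module (`~> 18.30`), and downstream references move from the old `eks_cluster_*` outputs to the upstream `cluster_*` outputs. The sketch below condenses that shared pattern for reference only; it reuses `local.name` and `module.vpc` from the examples and is illustrative, not a complete standalone configuration.

```hcl
# Condensed, illustrative sketch of the migration pattern used in the examples below.
module "eks" {
  source  = "terraform-aws-modules/eks/aws"
  version = "~> 18.30"

  cluster_name    = local.name # e.g. basename(path.cwd) in the examples
  cluster_version = "1.23"

  vpc_id     = module.vpc.vpc_id
  subnet_ids = module.vpc.private_subnets

  eks_managed_node_groups = {
    default = {
      instance_types = ["m5.large"]
      min_size       = 1
      max_size       = 3
      desired_size   = 1
    }
  }
}

# Providers and the kubernetes-addons module now consume the upstream module's outputs.
data "aws_eks_cluster_auth" "this" {
  name = module.eks.cluster_id
}

provider "kubernetes" {
  host                   = module.eks.cluster_endpoint
  cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
  token                  = data.aws_eks_cluster_auth.this.token
}
```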
diff --git a/examples/agones-game-controller/main.tf b/examples/agones-game-controller/main.tf index 69510a0688..91db419835 100644 --- a/examples/agones-game-controller/main.tf +++ b/examples/agones-game-controller/main.tf @@ -3,21 +3,21 @@ provider "aws" { } provider "kubernetes" { - host = module.eks_blueprints.eks_cluster_endpoint - cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data) + host = module.eks.cluster_endpoint + cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) token = data.aws_eks_cluster_auth.this.token } provider "helm" { kubernetes { - host = module.eks_blueprints.eks_cluster_endpoint - cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data) + host = module.eks.cluster_endpoint + cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) token = data.aws_eks_cluster_auth.this.token } } data "aws_eks_cluster_auth" "this" { - name = module.eks_blueprints.eks_cluster_id + name = module.eks.cluster_id } data "aws_availability_zones" "available" {} @@ -39,48 +39,45 @@ locals { # EKS Blueprints #--------------------------------------------------------------- -module "eks_blueprints" { - source = "../.." +module "eks" { + source = "terraform-aws-modules/eks/aws" + version = "~> 18.30" cluster_name = local.name cluster_version = "1.23" - vpc_id = module.vpc.vpc_id - private_subnet_ids = module.vpc.private_subnets - - managed_node_groups = { - mg_5 = { - node_group_name = "managed-ondemand" - create_launch_template = true - launch_template_os = "amazonlinux2eks" - public_ip = true - pre_userdata = <<-EOT - yum install -y amazon-ssm-agent - systemctl enable amazon-ssm-agent && systemctl start amazon-ssm-agent" - EOT - - desired_size = 3 - max_size = 12 - min_size = 3 - max_unavailable = 1 - - ami_type = "AL2_x86_64" - capacity_type = "ON_DEMAND" + cluster_enabled_log_types = ["api", "audit", "authenticator", "controllerManager", "scheduler"] + cluster_endpoint_private_access = true + + vpc_id = module.vpc.vpc_id + subnet_ids = module.vpc.private_subnets + + node_security_group_additional_rules = { + ingress_nodes_ephemeral = { + description = "Node-to-node on ephemeral ports" + protocol = "tcp" + from_port = 1025 + to_port = 65535 + type = "ingress" + self = true + } + egress_all = { + description = "Allow all egress" + protocol = "-1" + from_port = 0 + to_port = 0 + type = "egress" + cidr_blocks = ["0.0.0.0/0"] + } + } + + eks_managed_node_groups = { + default = { instance_types = ["m5.large"] - disk_size = 50 - - subnet_ids = module.vpc.public_subnets - - k8s_labels = { - Environment = "preprod" - Zone = "dev" - WorkerType = "ON_DEMAND" - } - additional_tags = { - ExtraTag = "m5x-on-demand" - Name = "m5x-on-demand" - subnet_type = "public" - } + + min_size = 1 + max_size = 3 + desired_size = 1 } } @@ -90,11 +87,13 @@ module "eks_blueprints" { module "eks_blueprints_kubernetes_addons" { source = "../../modules/kubernetes-addons" - eks_cluster_id = module.eks_blueprints.eks_cluster_id - eks_cluster_endpoint = module.eks_blueprints.eks_cluster_endpoint - eks_oidc_provider = module.eks_blueprints.oidc_provider - eks_cluster_version = module.eks_blueprints.eks_cluster_version - eks_worker_security_group_id = module.eks_blueprints.worker_node_security_group_id + eks_cluster_id = module.eks.cluster_id + eks_cluster_endpoint = module.eks.cluster_endpoint + eks_oidc_provider = module.eks.oidc_provider + eks_cluster_version = 
module.eks.cluster_version + + # Wait on the `kube-system` profile before provisioning addons + data_plane_wait_arn = module.eks.eks_managed_node_groups["default"].node_group_arn # Add-ons enable_metrics_server = true @@ -139,7 +138,6 @@ module "vpc" { single_nat_gateway = true enable_dns_hostnames = true - # Manage so we can name manage_default_network_acl = true default_network_acl_tags = { Name = "${local.name}-default" } manage_default_route_table = true @@ -148,13 +146,11 @@ module "vpc" { default_security_group_tags = { Name = "${local.name}-default" } public_subnet_tags = { - "kubernetes.io/cluster/${local.name}" = "shared" - "kubernetes.io/role/elb" = 1 + "kubernetes.io/role/elb" = 1 } private_subnet_tags = { - "kubernetes.io/cluster/${local.name}" = "shared" - "kubernetes.io/role/internal-elb" = 1 + "kubernetes.io/role/internal-elb" = 1 } tags = local.tags diff --git a/examples/agones-game-controller/outputs.tf b/examples/agones-game-controller/outputs.tf index 55552d3138..b7decade8e 100644 --- a/examples/agones-game-controller/outputs.tf +++ b/examples/agones-game-controller/outputs.tf @@ -1,4 +1,4 @@ output "configure_kubectl" { description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig" - value = module.eks_blueprints.configure_kubectl + value = "aws eks --region ${local.region} update-kubeconfig --name ${module.eks.cluster_id}" } diff --git a/examples/ai-ml/ray/main.tf b/examples/ai-ml/ray/main.tf index 8b07bedc25..b569724988 100644 --- a/examples/ai-ml/ray/main.tf +++ b/examples/ai-ml/ray/main.tf @@ -3,30 +3,16 @@ provider "aws" { } provider "kubernetes" { - host = module.eks_blueprints.eks_cluster_endpoint - cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data) + host = module.eks.cluster_endpoint + cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) token = data.aws_eks_cluster_auth.this.token - - exec { - api_version = "client.authentication.k8s.io/v1beta1" - command = "aws" - # This requires the awscli to be installed locally where Terraform is executed - args = ["eks", "get-token", "--cluster-name", module.eks_blueprints.eks_cluster_id] - } } provider "helm" { kubernetes { - host = module.eks_blueprints.eks_cluster_endpoint - cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data) + host = module.eks.cluster_endpoint + cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) token = data.aws_eks_cluster_auth.this.token - - exec { - api_version = "client.authentication.k8s.io/v1beta1" - command = "aws" - # This requires the awscli to be installed locally where Terraform is executed - args = ["eks", "get-token", "--cluster-name", module.eks_blueprints.eks_cluster_id] - } } } @@ -35,6 +21,10 @@ provider "grafana" { auth = "admin:${aws_secretsmanager_secret_version.grafana.secret_string}" } +data "aws_eks_cluster_auth" "this" { + name = module.eks.cluster_id +} + data "aws_availability_zones" "available" {} data "aws_caller_identity" "current" {} @@ -45,10 +35,6 @@ data "aws_acm_certificate" "issued" { statuses = ["ISSUED"] } -data "aws_eks_cluster_auth" "this" { - name = module.eks_blueprints.eks_cluster_id -} - locals { name = basename(path.cwd) namespace = "ray-cluster" @@ -66,60 +52,46 @@ locals { #--------------------------------------------------------------- # EKS Blueprints 
#--------------------------------------------------------------- -module "eks_blueprints" { - source = "../../.." + +module "eks" { + source = "terraform-aws-modules/eks/aws" + version = "~> 18.30" cluster_name = local.name cluster_version = "1.23" - vpc_id = module.vpc.vpc_id - private_subnet_ids = module.vpc.private_subnets + cluster_enabled_log_types = ["api", "audit", "authenticator", "controllerManager", "scheduler"] + cluster_endpoint_private_access = true + + vpc_id = module.vpc.vpc_id + subnet_ids = module.vpc.private_subnets - #----------------------------------------------------------------------------------------------------------# - # Security groups used in this module created by the upstream modules terraform-aws-eks (https://github.com/terraform-aws-modules/terraform-aws-eks). - # Upstream module implemented Security groups based on the best practices doc https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html. - # So, by default the security groups are restrictive. Users needs to enable rules for specific ports required for App requirement or Add-ons - # See the notes below for each rule used in these examples - #----------------------------------------------------------------------------------------------------------# node_security_group_additional_rules = { - # Extend node-to-node security group rules. Recommended and required for the Add-ons - ingress_self_all = { - description = "Node to node all ports/protocols" - protocol = "-1" - from_port = 0 - to_port = 0 + ingress_nodes_ephemeral = { + description = "Node-to-node on ephemeral ports" + protocol = "tcp" + from_port = 1025 + to_port = 65535 type = "ingress" self = true } - # Recommended outbound traffic for Node groups egress_all = { - description = "Node all egress" - protocol = "-1" - from_port = 0 - to_port = 0 - type = "egress" - cidr_blocks = ["0.0.0.0/0"] - ipv6_cidr_blocks = ["::/0"] - } - # Allows Control Plane Nodes to talk to Worker nodes on all ports. Added this to simplify the example and further avoid issues with Add-ons communication with Control plane. - # This can be restricted further to specific port based on the requirement for each Add-on e.g., metrics-server 4443, spark-operator 8080, karpenter 8443 etc. 
- # Change this according to your security requirements if needed - ingress_cluster_to_node_all_traffic = { - description = "Cluster API to Nodegroup all traffic" - protocol = "-1" - from_port = 0 - to_port = 0 - type = "ingress" - source_cluster_security_group = true + description = "Allow all egress" + protocol = "-1" + from_port = 0 + to_port = 0 + type = "egress" + cidr_blocks = ["0.0.0.0/0"] } } - managed_node_groups = { - mg_5 = { - node_group_name = "managed-ondemand" - instance_types = ["m5.8xlarge"] - min_size = 3 - subnet_ids = module.vpc.private_subnets + eks_managed_node_groups = { + default = { + instance_types = ["m5.large"] + + min_size = 1 + max_size = 3 + desired_size = 1 } } @@ -132,10 +104,10 @@ module "eks_blueprints" { module "eks_blueprints_kubernetes_addons" { source = "../../../modules/kubernetes-addons" - eks_cluster_id = module.eks_blueprints.eks_cluster_id - eks_cluster_endpoint = module.eks_blueprints.eks_cluster_endpoint - eks_oidc_provider = module.eks_blueprints.oidc_provider - eks_cluster_version = module.eks_blueprints.eks_cluster_version + eks_cluster_id = module.eks.cluster_id + eks_cluster_endpoint = module.eks.cluster_endpoint + eks_oidc_provider = module.eks.oidc_provider + eks_cluster_version = module.eks.cluster_version eks_cluster_domain = var.eks_cluster_domain # Add-Ons @@ -241,7 +213,7 @@ data "aws_iam_policy_document" "irsa_policy" { resource "aws_iam_policy" "irsa_policy" { description = "IAM Policy for IRSA" - name_prefix = substr("${module.eks_blueprints.eks_cluster_id}-${local.namespace}-access", 0, 127) + name_prefix = substr("${module.eks.cluster_id}-${local.namespace}-access", 0, 127) policy = data.aws_iam_policy_document.irsa_policy.json } @@ -250,8 +222,8 @@ module "cluster_irsa" { kubernetes_namespace = local.namespace kubernetes_service_account = "${local.namespace}-sa" irsa_iam_policies = [aws_iam_policy.irsa_policy.arn] - eks_cluster_id = module.eks_blueprints.eks_cluster_id - eks_oidc_provider_arn = module.eks_blueprints.eks_oidc_provider_arn + eks_cluster_id = module.eks.cluster_id + eks_oidc_provider_arn = module.eks.oidc_provider_arn depends_on = [module.s3_bucket] } @@ -335,7 +307,6 @@ module "vpc" { single_nat_gateway = true enable_dns_hostnames = true - # Manage so we can name manage_default_network_acl = true default_network_acl_tags = { Name = "${local.name}-default" } manage_default_route_table = true @@ -344,13 +315,11 @@ module "vpc" { default_security_group_tags = { Name = "${local.name}-default" } public_subnet_tags = { - "kubernetes.io/cluster/${local.name}" = "shared" - "kubernetes.io/role/elb" = 1 + "kubernetes.io/role/elb" = 1 } private_subnet_tags = { - "kubernetes.io/cluster/${local.name}" = "shared" - "kubernetes.io/role/internal-elb" = 1 + "kubernetes.io/role/internal-elb" = 1 } tags = local.tags diff --git a/examples/ai-ml/ray/outputs.tf b/examples/ai-ml/ray/outputs.tf index f5e106a5c6..ebd7f83909 100644 --- a/examples/ai-ml/ray/outputs.tf +++ b/examples/ai-ml/ray/outputs.tf @@ -1,6 +1,6 @@ output "configure_kubectl" { description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig" - value = module.eks_blueprints.configure_kubectl + value = "aws eks --region ${local.region} update-kubeconfig --name ${module.eks.cluster_id}" } output "s3_bucket" { diff --git a/examples/appmesh-mtls/main.tf b/examples/appmesh-mtls/main.tf index 73eaf51025..95aa26a989 100644 --- a/examples/appmesh-mtls/main.tf +++ b/examples/appmesh-mtls/main.tf @@ 
-3,29 +3,29 @@ provider "aws" { } provider "kubernetes" { - host = module.eks_blueprints.eks_cluster_endpoint - cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data) + host = module.eks.cluster_endpoint + cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) token = data.aws_eks_cluster_auth.this.token } provider "helm" { kubernetes { - host = module.eks_blueprints.eks_cluster_endpoint - cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data) + host = module.eks.cluster_endpoint + cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) token = data.aws_eks_cluster_auth.this.token } } provider "kubectl" { apply_retry_count = 10 - host = module.eks_blueprints.eks_cluster_endpoint - cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data) + host = module.eks.cluster_endpoint + cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) load_config_file = false token = data.aws_eks_cluster_auth.this.token } data "aws_eks_cluster_auth" "this" { - name = module.eks_blueprints.eks_cluster_id + name = module.eks.cluster_id } data "aws_availability_zones" "available" {} @@ -48,28 +48,45 @@ locals { # EKS Blueprints #--------------------------------------------------------------- -module "eks_blueprints" { - source = "../.." +module "eks" { + source = "terraform-aws-modules/eks/aws" + version = "~> 18.30" cluster_name = local.name cluster_version = "1.23" - vpc_id = module.vpc.vpc_id - private_subnet_ids = module.vpc.private_subnets + cluster_enabled_log_types = ["api", "audit", "authenticator", "controllerManager", "scheduler"] + cluster_endpoint_private_access = true - managed_node_groups = { - this = { - node_group_name = local.name - instance_types = ["m5.large"] - subnet_ids = module.vpc.private_subnets + vpc_id = module.vpc.vpc_id + subnet_ids = module.vpc.private_subnets + + node_security_group_additional_rules = { + ingress_nodes_ephemeral = { + description = "Node-to-node on ephemeral ports" + protocol = "tcp" + from_port = 1025 + to_port = 65535 + type = "ingress" + self = true + } + egress_all = { + description = "Allow all egress" + protocol = "-1" + from_port = 0 + to_port = 0 + type = "egress" + cidr_blocks = ["0.0.0.0/0"] + } + } + + eks_managed_node_groups = { + default = { + instance_types = ["m5.large"] min_size = 1 - max_size = 2 + max_size = 3 desired_size = 1 - - update_config = [{ - max_unavailable_percentage = 30 - }] } } @@ -79,12 +96,15 @@ module "eks_blueprints" { module "eks_blueprints_kubernetes_addons" { source = "../../modules/kubernetes-addons" - eks_cluster_id = module.eks_blueprints.eks_cluster_id - eks_cluster_endpoint = module.eks_blueprints.eks_cluster_endpoint - eks_oidc_provider = module.eks_blueprints.oidc_provider - eks_cluster_version = module.eks_blueprints.eks_cluster_version + eks_cluster_id = module.eks.cluster_id + eks_cluster_endpoint = module.eks.cluster_endpoint + eks_oidc_provider = module.eks.oidc_provider + eks_cluster_version = module.eks.cluster_version eks_cluster_domain = var.eks_cluster_domain + # Wait on the `kube-system` profile before provisioning addons + data_plane_wait_arn = module.eks.eks_managed_node_groups["default"].node_group_arn + enable_amazon_eks_vpc_cni = true enable_amazon_eks_coredns = true enable_amazon_eks_kube_proxy = true @@ -141,7 +161,7 @@ resource "kubectl_manifest" "cluster_pca_issuer" { kind = 
"AWSPCAClusterIssuer" metadata = { - name = module.eks_blueprints.eks_cluster_id + name = module.eks.cluster_id } spec = { @@ -169,7 +189,7 @@ resource "kubectl_manifest" "example_pca_certificate" { issuerRef = { group = "awspca.cert-manager.io" kind = "AWSPCAClusterIssuer" - name : module.eks_blueprints.eks_cluster_id + name : module.eks.cluster_id } renewBefore = "360h0m0s" # This is the name with which the K8 Secret will be available @@ -218,13 +238,11 @@ module "vpc" { default_security_group_tags = { Name = "${local.name}-default" } public_subnet_tags = { - "kubernetes.io/cluster/${local.name}" = "shared" - "kubernetes.io/role/elb" = 1 + "kubernetes.io/role/elb" = 1 } private_subnet_tags = { - "kubernetes.io/cluster/${local.name}" = "shared" - "kubernetes.io/role/internal-elb" = 1 + "kubernetes.io/role/internal-elb" = 1 } tags = local.tags diff --git a/examples/appmesh-mtls/outputs.tf b/examples/appmesh-mtls/outputs.tf index 55552d3138..b7decade8e 100644 --- a/examples/appmesh-mtls/outputs.tf +++ b/examples/appmesh-mtls/outputs.tf @@ -1,4 +1,4 @@ output "configure_kubectl" { description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig" - value = module.eks_blueprints.configure_kubectl + value = "aws eks --region ${local.region} update-kubeconfig --name ${module.eks.cluster_id}" } diff --git a/examples/complete-kubernetes-addons/main.tf b/examples/complete-kubernetes-addons/main.tf index bd0c9c7fc9..ce68f7cf1b 100644 --- a/examples/complete-kubernetes-addons/main.tf +++ b/examples/complete-kubernetes-addons/main.tf @@ -3,21 +3,21 @@ provider "aws" { } provider "kubernetes" { - host = module.eks_blueprints.eks_cluster_endpoint - cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data) + host = module.eks.cluster_endpoint + cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) token = data.aws_eks_cluster_auth.this.token } provider "helm" { kubernetes { - host = module.eks_blueprints.eks_cluster_endpoint - cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data) + host = module.eks.cluster_endpoint + cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) token = data.aws_eks_cluster_auth.this.token } } data "aws_eks_cluster_auth" "this" { - name = module.eks_blueprints.eks_cluster_id + name = module.eks.cluster_id } data "aws_availability_zones" "available" {} @@ -39,89 +39,45 @@ locals { # EKS Blueprints #--------------------------------------------------------------- -module "eks_blueprints" { - source = "../.." +module "eks" { + source = "terraform-aws-modules/eks/aws" + version = "~> 18.30" cluster_name = local.name cluster_version = "1.23" - vpc_id = module.vpc.vpc_id - private_subnet_ids = module.vpc.private_subnets + cluster_enabled_log_types = ["api", "audit", "authenticator", "controllerManager", "scheduler"] + cluster_endpoint_private_access = true + + vpc_id = module.vpc.vpc_id + subnet_ids = module.vpc.private_subnets - #----------------------------------------------------------------------------------------------------------# - # Security groups used in this module created by the upstream modules terraform-aws-eks (https://github.com/terraform-aws-modules/terraform-aws-eks). - # Upstream module implemented Security groups based on the best practices doc https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html. 
- # So, by default the security groups are restrictive. Users needs to enable rules for specific ports required for App requirement or Add-ons - # See the notes below for each rule used in these examples - #----------------------------------------------------------------------------------------------------------# node_security_group_additional_rules = { - # Extend node-to-node security group rules. Recommended and required for the Add-ons - ingress_self_all = { - description = "Node to node all ports/protocols" - protocol = "-1" - from_port = 0 - to_port = 0 + ingress_nodes_ephemeral = { + description = "Node-to-node on ephemeral ports" + protocol = "tcp" + from_port = 1025 + to_port = 65535 type = "ingress" self = true } - # Recommended outbound traffic for Node groups egress_all = { - description = "Node all egress" - protocol = "-1" - from_port = 0 - to_port = 0 - type = "egress" - cidr_blocks = ["0.0.0.0/0"] - ipv6_cidr_blocks = ["::/0"] - } - # Allows Control Plane Nodes to talk to Worker nodes on all ports. Added this to simplify the example and further avoid issues with Add-ons communication with Control plane. - # This can be restricted further to specific port based on the requirement for each Add-on e.g., metrics-server 4443, spark-operator 8080, karpenter 8443 etc. - # Change this according to your security requirements if needed - ingress_cluster_to_node_all_traffic = { - description = "Cluster API to Nodegroup all traffic" - protocol = "-1" - from_port = 0 - to_port = 0 - type = "ingress" - source_cluster_security_group = true - } - } - - managed_node_groups = { - mg_5 = { - node_group_name = "managed-ondemand" - instance_types = ["m5.large"] - subnet_ids = module.vpc.private_subnets - force_update_version = true - } - } - - self_managed_node_groups = { - self_mg_5 = { - node_group_name = "self-managed-ondemand" - instance_type = "m5.large" - launch_template_os = "amazonlinux2eks" # amazonlinux2eks or bottlerocket or windows - custom_ami_id = data.aws_ami.eks.id # Bring your own custom AMI generated by Packer/ImageBuilder/Puppet etc. 
- subnet_ids = module.vpc.private_subnets + description = "Allow all egress" + protocol = "-1" + from_port = 0 + to_port = 0 + type = "egress" + cidr_blocks = ["0.0.0.0/0"] } } - fargate_profiles = { + eks_managed_node_groups = { default = { - fargate_profile_name = "default" - fargate_profile_namespaces = [ - { - namespace = "default" - k8s_labels = { - Environment = "preprod" - Zone = "dev" - env = "fargate" - } - }] - subnet_ids = module.vpc.private_subnets - additional_tags = { - ExtraTag = "Fargate" - } + instance_types = ["m5.large"] + + min_size = 1 + max_size = 3 + desired_size = 2 } } @@ -131,12 +87,14 @@ module "eks_blueprints" { module "eks_blueprints_kubernetes_addons" { source = "../../modules/kubernetes-addons" - eks_cluster_id = module.eks_blueprints.eks_cluster_id - eks_cluster_endpoint = module.eks_blueprints.eks_cluster_endpoint - eks_oidc_provider = module.eks_blueprints.oidc_provider - eks_cluster_version = module.eks_blueprints.eks_cluster_version - eks_worker_security_group_id = module.eks_blueprints.worker_node_security_group_id - auto_scaling_group_names = module.eks_blueprints.self_managed_node_group_autoscaling_groups + eks_cluster_id = module.eks.cluster_id + eks_cluster_endpoint = module.eks.cluster_endpoint + eks_oidc_provider = module.eks.oidc_provider + eks_cluster_version = module.eks.cluster_version + eks_worker_security_group_id = module.eks.node_security_group_id + + # Wait on the `kube-system` profile before provisioning addons + data_plane_wait_arn = module.eks.eks_managed_node_groups["default"].node_group_arn # EKS Addons enable_amazon_eks_vpc_cni = true @@ -164,11 +122,11 @@ module "eks_blueprints_kubernetes_addons" { repository = "https://aws.github.io/eks-charts" version = "0.1.18" namespace = "logging" - aws_for_fluent_bit_cw_log_group = "/${module.eks_blueprints.eks_cluster_id}/worker-fluentbit-logs" # Optional + aws_for_fluent_bit_cw_log_group = "/${module.eks.cluster_id}/worker-fluentbit-logs" # Optional create_namespace = true values = [templatefile("${path.module}/helm_values/aws-for-fluentbit-values.yaml", { region = local.region - aws_for_fluent_bit_cw_log_group = "/${module.eks_blueprints.eks_cluster_id}/worker-fluentbit-logs" + aws_for_fluent_bit_cw_log_group = "/${module.eks.cluster_id}/worker-fluentbit-logs" })] set = [ { @@ -178,40 +136,6 @@ module "eks_blueprints_kubernetes_addons" { ] } - enable_fargate_fluentbit = true - fargate_fluentbit_addon_config = { - output_conf = <<-EOF - [OUTPUT] - Name cloudwatch_logs - Match * - region ${local.region} - log_group_name /${module.eks_blueprints.eks_cluster_id}/fargate-fluentbit-logs - log_stream_prefix "fargate-logs-" - auto_create_group true - EOF - - filters_conf = <<-EOF - [FILTER] - Name parser - Match * - Key_Name log - Parser regex - Preserve_Key True - Reserve_Data True - EOF - - parsers_conf = <<-EOF - [PARSER] - Name regex - Format regex - Regex ^(?