diff --git a/.github/workflows/e2e-parallel-full.yml b/.github/workflows/e2e-parallel-full.yml
index 207527918f..9e80a2e819 100644
--- a/.github/workflows/e2e-parallel-full.yml
+++ b/.github/workflows/e2e-parallel-full.yml
@@ -61,7 +61,7 @@ jobs:
- name: Ensure log groups are removed
run: |
pip3 install boto3
- python3 .github/workflows/delete-log-groups.py
+ python3 .github/scripts/delete-log-groups.py
- name: Iamlive Setup & Run
run: |
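The workflow step above only invokes the relocated cleanup script at .github/scripts/delete-log-groups.py; the script body itself is not part of this diff. A minimal sketch of what such a cleanup script might look like, assuming its job is to delete CloudWatch log groups left behind by earlier e2e runs (the function name and prefix filter are assumptions, not taken from the repository):

import boto3

def delete_log_groups(prefix="/aws/eks/"):
    # Assumed prefix filter -- adjust to match whatever log groups the e2e runs create
    logs = boto3.client("logs")
    paginator = logs.get_paginator("describe_log_groups")
    for page in paginator.paginate(logGroupNamePrefix=prefix):
        for group in page["logGroups"]:
            name = group["logGroupName"]
            print(f"Deleting log group: {name}")
            logs.delete_log_group(logGroupName=name)

if __name__ == "__main__":
    delete_log_groups()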
diff --git a/.github/workflows/iam-policy-generator.py b/.github/workflows/iam-policy-generator.py
deleted file mode 100644
index 6b9022d3a7..0000000000
--- a/.github/workflows/iam-policy-generator.py
+++ /dev/null
@@ -1,35 +0,0 @@
-import json
-import boto3
-import os
-
-iam_actions = []
-s3 = boto3.resource('s3')
-bucket_name = os.getenv('BUCKET_NAME')
-bucket = s3.Bucket(bucket_name)
-bucket_files = [x.key for x in bucket.objects.all()]
-
-# Read all the files from the bucket
-for file in bucket_files:
- obj = s3.Object(bucket_name, file)
- f = obj.get()['Body'].read()
- data = json.loads(f)
- # Merge all policies actions, keep them unique with 'set'
- for statement in data['Statement']:
- iam_actions = list(set(iam_actions + statement['Action']))
-
-# Skeleton IAM policy template , wild card all resources for now.
-template = {
- "Version": "2012-10-17",
- "Statement": [
- {
- "Effect": "Allow",
- "Action": [
- ],
- "Resource": "*"
- }
- ]
-}
-
-# Apply merged actions to the skeleton IAM policy
-template['Statement'][0]['Action'] = sorted(iam_actions)
-print(json.dumps(template, indent=4))
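For reference, the removed generator simply unions every Action list found across the iamlive policy documents stored in S3 and drops the result into a wildcard-resource skeleton policy. A self-contained illustration of that merge, using two hypothetical in-memory policies in place of the S3 objects the script read:

import json

# Hypothetical sample documents standing in for the iamlive output files read from S3
policies = [
    {"Version": "2012-10-17", "Statement": [{"Effect": "Allow", "Action": ["eks:DescribeCluster", "ec2:DescribeSubnets"], "Resource": "*"}]},
    {"Version": "2012-10-17", "Statement": [{"Effect": "Allow", "Action": ["ec2:DescribeSubnets", "iam:CreateRole"], "Resource": "*"}]},
]

# Union all statement actions, as the deleted script did with repeated list(set(...)) calls
iam_actions = set()
for policy in policies:
    for statement in policy["Statement"]:
        iam_actions.update(statement["Action"])

merged = {
    "Version": "2012-10-17",
    "Statement": [{"Effect": "Allow", "Action": sorted(iam_actions), "Resource": "*"}],
}
print(json.dumps(merged, indent=4))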
diff --git a/.github/workflows/plan-examples.py b/.github/workflows/plan-examples.py
deleted file mode 100644
index 4c24a6bf95..0000000000
--- a/.github/workflows/plan-examples.py
+++ /dev/null
@@ -1,30 +0,0 @@
-import json
-import glob
-import re
-
-
-def get_examples():
- """
- Get all Terraform example root directories using their respective `versions.tf`;
- returning a string formatted json array of the example directories minus those that are excluded
- """
- exclude = {
- 'examples/appmesh-mtls', # excluded until Rout53 is setup
- 'examples/eks-cluster-with-external-dns', # excluded until Rout53 is setup
- 'examples/fully-private-eks-cluster/vpc', # skipping until issue #711 is addressed
- 'examples/fully-private-eks-cluster/eks',
- 'examples/fully-private-eks-cluster/add-ons',
- 'examples/ai-ml/ray', # excluded until #887 is fixed
- }
-
- projects = {
- x.replace('/versions.tf', '')
- for x in glob.glob('examples/**/versions.tf', recursive=True)
- if not re.match(r'^.+/_', x)
- }
-
- print(json.dumps(list(projects.difference(exclude))))
-
-
-if __name__ == '__main__':
- get_examples()
diff --git a/README.md b/README.md
index c88be7c5f0..0af9b94da8 100644
--- a/README.md
+++ b/README.md
@@ -37,161 +37,9 @@ AWS customers have asked for examples that demonstrate how to integrate the land
## Support & Feedback
EKS Blueprints for Terraform is maintained by AWS Solution Architects. It is not part of an AWS service and support is provided best-effort by the EKS Blueprints community.
-
To post feedback, submit feature ideas, or report bugs, please use the [Issues section](https://github.com/aws-ia/terraform-aws-eks-blueprints/issues) of this GitHub repo.
-
If you are interested in contributing to EKS Blueprints, see the [Contribution guide](https://github.com/aws-ia/terraform-aws-eks-blueprints/blob/main/CONTRIBUTING.md).
-
-## Requirements
-
-| Name | Version |
-|------|---------|
-| [terraform](#requirement\_terraform) | >= 1.0.0 |
-| [aws](#requirement\_aws) | >= 3.72 |
-| [helm](#requirement\_helm) | >= 2.4.1 |
-| [http](#requirement\_http) | 2.4.1 |
-| [kubectl](#requirement\_kubectl) | >= 1.14 |
-| [kubernetes](#requirement\_kubernetes) | >= 2.10 |
-| [local](#requirement\_local) | >= 2.1 |
-| [null](#requirement\_null) | >= 3.1 |
-
-## Providers
-
-| Name | Version |
-|------|---------|
-| [aws](#provider\_aws) | >= 3.72 |
-| [http](#provider\_http) | 2.4.1 |
-| [kubernetes](#provider\_kubernetes) | >= 2.10 |
-
-## Modules
-
-| Name | Source | Version |
-|------|--------|---------|
-| [aws\_eks](#module\_aws\_eks) | terraform-aws-modules/eks/aws | v18.26.6 |
-| [aws\_eks\_fargate\_profiles](#module\_aws\_eks\_fargate\_profiles) | ./modules/aws-eks-fargate-profiles | n/a |
-| [aws\_eks\_managed\_node\_groups](#module\_aws\_eks\_managed\_node\_groups) | ./modules/aws-eks-managed-node-groups | n/a |
-| [aws\_eks\_self\_managed\_node\_groups](#module\_aws\_eks\_self\_managed\_node\_groups) | ./modules/aws-eks-self-managed-node-groups | n/a |
-| [aws\_eks\_teams](#module\_aws\_eks\_teams) | ./modules/aws-eks-teams | n/a |
-| [emr\_on\_eks](#module\_emr\_on\_eks) | ./modules/emr-on-eks | n/a |
-| [kms](#module\_kms) | ./modules/aws-kms | n/a |
-
-## Resources
-
-| Name | Type |
-|------|------|
-| [kubernetes_config_map.amazon_vpc_cni](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/config_map) | resource |
-| [kubernetes_config_map.aws_auth](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/config_map) | resource |
-| [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source |
-| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source |
-| [aws_iam_policy_document.eks_key](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
-| [aws_iam_session_context.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_session_context) | data source |
-| [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source |
-| [aws_region.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/region) | data source |
-| [http_http.eks_cluster_readiness](https://registry.terraform.io/providers/terraform-aws-modules/http/2.4.1/docs/data-sources/http) | data source |
-
-## Inputs
-
-| Name | Description | Type | Default | Required |
-|------|-------------|------|---------|:--------:|
-| [application\_teams](#input\_application\_teams) | Map of maps of Application Teams to create | `any` | `{}` | no |
-| [aws\_auth\_additional\_labels](#input\_aws\_auth\_additional\_labels) | Additional kubernetes labels applied on aws-auth ConfigMap | `map(string)` | `{}` | no |
-| [cloudwatch\_log\_group\_kms\_key\_id](#input\_cloudwatch\_log\_group\_kms\_key\_id) | If a KMS Key ARN is set, this key will be used to encrypt the corresponding log group. Please be sure that the KMS Key has an appropriate key policy (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/encrypt-log-data-kms.html) | `string` | `null` | no |
-| [cloudwatch\_log\_group\_retention\_in\_days](#input\_cloudwatch\_log\_group\_retention\_in\_days) | Number of days to retain log events. Default retention - 90 days | `number` | `90` | no |
-| [cluster\_additional\_security\_group\_ids](#input\_cluster\_additional\_security\_group\_ids) | List of additional, externally created security group IDs to attach to the cluster control plane | `list(string)` | `[]` | no |
-| [cluster\_enabled\_log\_types](#input\_cluster\_enabled\_log\_types) | A list of the desired control plane logging to enable | `list(string)` | [ "api", "audit", "authenticator", "controllerManager", "scheduler" ] | no |
-| [cluster\_encryption\_config](#input\_cluster\_encryption\_config) | Configuration block with encryption configuration for the cluster | list(object({ provider_key_arn = string resources = list(string) })) | `[]` | no |
-| [cluster\_endpoint\_private\_access](#input\_cluster\_endpoint\_private\_access) | Indicates whether or not the EKS private API server endpoint is enabled. Default to EKS resource and it is false | `bool` | `false` | no |
-| [cluster\_endpoint\_public\_access](#input\_cluster\_endpoint\_public\_access) | Indicates whether or not the EKS public API server endpoint is enabled. Default to EKS resource and it is true | `bool` | `true` | no |
-| [cluster\_endpoint\_public\_access\_cidrs](#input\_cluster\_endpoint\_public\_access\_cidrs) | List of CIDR blocks which can access the Amazon EKS public API server endpoint | `list(string)` | [ "0.0.0.0/0" ] | no |
-| [cluster\_identity\_providers](#input\_cluster\_identity\_providers) | Map of cluster identity provider configurations to enable for the cluster. Note - this is different/separate from IRSA | `any` | `{}` | no |
-| [cluster\_ip\_family](#input\_cluster\_ip\_family) | The IP family used to assign Kubernetes pod and service addresses. Valid values are `ipv4` (default) and `ipv6`. You can only specify an IP family when you create a cluster, changing this value will force a new cluster to be created | `string` | `"ipv4"` | no |
-| [cluster\_kms\_key\_additional\_admin\_arns](#input\_cluster\_kms\_key\_additional\_admin\_arns) | A list of additional IAM ARNs that should have FULL access (kms:*) in the KMS key policy | `list(string)` | `[]` | no |
-| [cluster\_kms\_key\_arn](#input\_cluster\_kms\_key\_arn) | A valid EKS Cluster KMS Key ARN to encrypt Kubernetes secrets | `string` | `null` | no |
-| [cluster\_kms\_key\_deletion\_window\_in\_days](#input\_cluster\_kms\_key\_deletion\_window\_in\_days) | The waiting period, specified in number of days (7 - 30). After the waiting period ends, AWS KMS deletes the KMS key | `number` | `30` | no |
-| [cluster\_name](#input\_cluster\_name) | EKS Cluster Name | `string` | `""` | no |
-| [cluster\_security\_group\_additional\_rules](#input\_cluster\_security\_group\_additional\_rules) | List of additional security group rules to add to the cluster security group created. Set `source_node_security_group = true` inside rules to set the `node_security_group` as source | `any` | `{}` | no |
-| [cluster\_security\_group\_id](#input\_cluster\_security\_group\_id) | Security group to be used if creation of cluster security group is turned off | `string` | `""` | no |
-| [cluster\_security\_group\_tags](#input\_cluster\_security\_group\_tags) | A map of additional tags to add to the cluster security group created | `map(string)` | `{}` | no |
-| [cluster\_service\_ipv4\_cidr](#input\_cluster\_service\_ipv4\_cidr) | The CIDR block to assign Kubernetes service IP addresses from. If you don't specify a block, Kubernetes assigns addresses from either the 10.100.0.0/16 or 172.20.0.0/16 CIDR blocks | `string` | `null` | no |
-| [cluster\_service\_ipv6\_cidr](#input\_cluster\_service\_ipv6\_cidr) | The IPV6 Service CIDR block to assign Kubernetes service IP addresses | `string` | `null` | no |
-| [cluster\_timeouts](#input\_cluster\_timeouts) | Create, update, and delete timeout configurations for the cluster | `map(string)` | `{}` | no |
-| [cluster\_version](#input\_cluster\_version) | Kubernetes `.` version to use for the EKS cluster (i.e.: `1.23`) | `string` | `"1.23"` | no |
-| [control\_plane\_subnet\_ids](#input\_control\_plane\_subnet\_ids) | A list of subnet IDs where the EKS cluster control plane (ENIs) will be provisioned. Used for expanding the pool of subnets used by nodes/node groups without replacing the EKS control plane | `list(string)` | `[]` | no |
-| [create\_cloudwatch\_log\_group](#input\_create\_cloudwatch\_log\_group) | Determines whether a log group is created by this module for the cluster logs. If not, AWS will automatically create one if logging is enabled | `bool` | `false` | no |
-| [create\_cluster\_security\_group](#input\_create\_cluster\_security\_group) | Toggle to create or assign cluster security group | `bool` | `true` | no |
-| [create\_eks](#input\_create\_eks) | Create EKS cluster | `bool` | `true` | no |
-| [create\_iam\_role](#input\_create\_iam\_role) | Determines whether a an IAM role is created or to use an existing IAM role | `bool` | `true` | no |
-| [create\_node\_security\_group](#input\_create\_node\_security\_group) | Determines whether to create a security group for the node groups or use the existing `node_security_group_id` | `bool` | `true` | no |
-| [custom\_oidc\_thumbprints](#input\_custom\_oidc\_thumbprints) | Additional list of server certificate thumbprints for the OpenID Connect (OIDC) identity provider's server certificate(s) | `list(string)` | `[]` | no |
-| [eks\_readiness\_timeout](#input\_eks\_readiness\_timeout) | The maximum time (in seconds) to wait for EKS API server endpoint to become healthy | `number` | `"600"` | no |
-| [emr\_on\_eks\_teams](#input\_emr\_on\_eks\_teams) | EMR on EKS Teams config | `any` | `{}` | no |
-| [enable\_emr\_on\_eks](#input\_enable\_emr\_on\_eks) | Enable EMR on EKS | `bool` | `false` | no |
-| [enable\_irsa](#input\_enable\_irsa) | Determines whether to create an OpenID Connect Provider for EKS to enable IRSA | `bool` | `true` | no |
-| [enable\_windows\_support](#input\_enable\_windows\_support) | Enable Windows support | `bool` | `false` | no |
-| [fargate\_profiles](#input\_fargate\_profiles) | Fargate profile configuration | `any` | `{}` | no |
-| [iam\_role\_additional\_policies](#input\_iam\_role\_additional\_policies) | Additional policies to be added to the IAM role | `list(string)` | `[]` | no |
-| [iam\_role\_arn](#input\_iam\_role\_arn) | Existing IAM role ARN for the cluster. Required if `create_iam_role` is set to `false` | `string` | `null` | no |
-| [iam\_role\_name](#input\_iam\_role\_name) | Name to use on IAM role created | `string` | `null` | no |
-| [iam\_role\_path](#input\_iam\_role\_path) | Cluster IAM role path | `string` | `null` | no |
-| [iam\_role\_permissions\_boundary](#input\_iam\_role\_permissions\_boundary) | ARN of the policy that is used to set the permissions boundary for the IAM role | `string` | `null` | no |
-| [managed\_node\_groups](#input\_managed\_node\_groups) | Managed node groups configuration | `any` | `{}` | no |
-| [map\_accounts](#input\_map\_accounts) | Additional AWS account numbers to add to the aws-auth ConfigMap | `list(string)` | `[]` | no |
-| [map\_roles](#input\_map\_roles) | Additional IAM roles to add to the aws-auth ConfigMap | list(object({ rolearn = string username = string groups = list(string) })) | `[]` | no |
-| [map\_users](#input\_map\_users) | Additional IAM users to add to the aws-auth ConfigMap | list(object({ userarn = string username = string groups = list(string) })) | `[]` | no |
-| [node\_security\_group\_additional\_rules](#input\_node\_security\_group\_additional\_rules) | List of additional security group rules to add to the node security group created. Set `source_cluster_security_group = true` inside rules to set the `cluster_security_group` as source | `any` | `{}` | no |
-| [node\_security\_group\_tags](#input\_node\_security\_group\_tags) | A map of additional tags to add to the node security group created | `map(string)` | `{}` | no |
-| [openid\_connect\_audiences](#input\_openid\_connect\_audiences) | List of OpenID Connect audience client IDs to add to the IRSA provider | `list(string)` | `[]` | no |
-| [platform\_teams](#input\_platform\_teams) | Map of maps of platform teams to create | `any` | `{}` | no |
-| [private\_subnet\_ids](#input\_private\_subnet\_ids) | List of private subnets Ids for the cluster and worker nodes | `list(string)` | `[]` | no |
-| [public\_subnet\_ids](#input\_public\_subnet\_ids) | List of public subnets Ids for the worker nodes | `list(string)` | `[]` | no |
-| [self\_managed\_node\_groups](#input\_self\_managed\_node\_groups) | Self-managed node groups configuration | `any` | `{}` | no |
-| [tags](#input\_tags) | Additional tags (e.g. `map('BusinessUnit`,`XYZ`) | `map(string)` | `{}` | no |
-| [vpc\_id](#input\_vpc\_id) | VPC Id | `string` | n/a | yes |
-| [worker\_additional\_security\_group\_ids](#input\_worker\_additional\_security\_group\_ids) | A list of additional security group ids to attach to worker instances | `list(string)` | `[]` | no |
-
-## Outputs
-
-| Name | Description |
-|------|-------------|
-| [cluster\_primary\_security\_group\_id](#output\_cluster\_primary\_security\_group\_id) | Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console |
-| [cluster\_security\_group\_arn](#output\_cluster\_security\_group\_arn) | Amazon Resource Name (ARN) of the cluster security group |
-| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | EKS Control Plane Security Group ID |
-| [configure\_kubectl](#output\_configure\_kubectl) | Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig |
-| [eks\_cluster\_arn](#output\_eks\_cluster\_arn) | Amazon EKS Cluster Name |
-| [eks\_cluster\_certificate\_authority\_data](#output\_eks\_cluster\_certificate\_authority\_data) | Base64 encoded certificate data required to communicate with the cluster |
-| [eks\_cluster\_endpoint](#output\_eks\_cluster\_endpoint) | Endpoint for your Kubernetes API server |
-| [eks\_cluster\_id](#output\_eks\_cluster\_id) | Amazon EKS Cluster Name |
-| [eks\_cluster\_status](#output\_eks\_cluster\_status) | Amazon EKS Cluster Status |
-| [eks\_cluster\_version](#output\_eks\_cluster\_version) | The Kubernetes version for the cluster |
-| [eks\_oidc\_issuer\_url](#output\_eks\_oidc\_issuer\_url) | The URL on the EKS cluster OIDC Issuer |
-| [eks\_oidc\_provider\_arn](#output\_eks\_oidc\_provider\_arn) | The ARN of the OIDC Provider if `enable_irsa = true`. |
-| [emr\_on\_eks\_role\_arn](#output\_emr\_on\_eks\_role\_arn) | IAM execution role ARN for EMR on EKS |
-| [emr\_on\_eks\_role\_id](#output\_emr\_on\_eks\_role\_id) | IAM execution role ID for EMR on EKS |
-| [fargate\_profiles](#output\_fargate\_profiles) | Outputs from EKS Fargate profiles groups |
-| [fargate\_profiles\_aws\_auth\_config\_map](#output\_fargate\_profiles\_aws\_auth\_config\_map) | Fargate profiles AWS auth map |
-| [fargate\_profiles\_iam\_role\_arns](#output\_fargate\_profiles\_iam\_role\_arns) | IAM role arn's for Fargate Profiles |
-| [managed\_node\_group\_arn](#output\_managed\_node\_group\_arn) | Managed node group arn |
-| [managed\_node\_group\_aws\_auth\_config\_map](#output\_managed\_node\_group\_aws\_auth\_config\_map) | Managed node groups AWS auth map |
-| [managed\_node\_group\_iam\_instance\_profile\_arns](#output\_managed\_node\_group\_iam\_instance\_profile\_arns) | IAM instance profile arn's of managed node groups |
-| [managed\_node\_group\_iam\_instance\_profile\_id](#output\_managed\_node\_group\_iam\_instance\_profile\_id) | IAM instance profile id of managed node groups |
-| [managed\_node\_group\_iam\_role\_arns](#output\_managed\_node\_group\_iam\_role\_arns) | IAM role arn's of managed node groups |
-| [managed\_node\_group\_iam\_role\_names](#output\_managed\_node\_group\_iam\_role\_names) | IAM role names of managed node groups |
-| [managed\_node\_groups](#output\_managed\_node\_groups) | Outputs from EKS Managed node groups |
-| [managed\_node\_groups\_id](#output\_managed\_node\_groups\_id) | EKS Managed node groups id |
-| [managed\_node\_groups\_status](#output\_managed\_node\_groups\_status) | EKS Managed node groups status |
-| [oidc\_provider](#output\_oidc\_provider) | The OpenID Connect identity provider (issuer URL without leading `https://`) |
-| [self\_managed\_node\_group\_autoscaling\_groups](#output\_self\_managed\_node\_group\_autoscaling\_groups) | Autoscaling group names of self managed node groups |
-| [self\_managed\_node\_group\_aws\_auth\_config\_map](#output\_self\_managed\_node\_group\_aws\_auth\_config\_map) | Self managed node groups AWS auth map |
-| [self\_managed\_node\_group\_iam\_instance\_profile\_id](#output\_self\_managed\_node\_group\_iam\_instance\_profile\_id) | IAM instance profile id of managed node groups |
-| [self\_managed\_node\_group\_iam\_role\_arns](#output\_self\_managed\_node\_group\_iam\_role\_arns) | IAM role arn's of self managed node groups |
-| [self\_managed\_node\_groups](#output\_self\_managed\_node\_groups) | Outputs from EKS Self-managed node groups |
-| [teams](#output\_teams) | Outputs from EKS Fargate profiles groups |
-| [windows\_node\_group\_aws\_auth\_config\_map](#output\_windows\_node\_group\_aws\_auth\_config\_map) | Windows node groups AWS auth map |
-| [worker\_node\_security\_group\_arn](#output\_worker\_node\_security\_group\_arn) | Amazon Resource Name (ARN) of the worker node shared security group |
-| [worker\_node\_security\_group\_id](#output\_worker\_node\_security\_group\_id) | ID of the worker node shared security group |
-
-
## Security
See [CONTRIBUTING](CONTRIBUTING.md#security-issue-notifications) for more information.
diff --git a/examples/agones-game-controller/main.tf b/examples/agones-game-controller/main.tf
index 69510a0688..91db419835 100644
--- a/examples/agones-game-controller/main.tf
+++ b/examples/agones-game-controller/main.tf
@@ -3,21 +3,21 @@ provider "aws" {
}
provider "kubernetes" {
- host = module.eks_blueprints.eks_cluster_endpoint
- cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data)
+ host = module.eks.cluster_endpoint
+ cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
token = data.aws_eks_cluster_auth.this.token
}
provider "helm" {
kubernetes {
- host = module.eks_blueprints.eks_cluster_endpoint
- cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data)
+ host = module.eks.cluster_endpoint
+ cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
token = data.aws_eks_cluster_auth.this.token
}
}
data "aws_eks_cluster_auth" "this" {
- name = module.eks_blueprints.eks_cluster_id
+ name = module.eks.cluster_id
}
data "aws_availability_zones" "available" {}
@@ -39,48 +39,45 @@ locals {
# EKS Blueprints
#---------------------------------------------------------------
-module "eks_blueprints" {
- source = "../.."
+module "eks" {
+ source = "terraform-aws-modules/eks/aws"
+ version = "~> 18.30"
cluster_name = local.name
cluster_version = "1.23"
- vpc_id = module.vpc.vpc_id
- private_subnet_ids = module.vpc.private_subnets
-
- managed_node_groups = {
- mg_5 = {
- node_group_name = "managed-ondemand"
- create_launch_template = true
- launch_template_os = "amazonlinux2eks"
- public_ip = true
- pre_userdata = <<-EOT
- yum install -y amazon-ssm-agent
- systemctl enable amazon-ssm-agent && systemctl start amazon-ssm-agent"
- EOT
-
- desired_size = 3
- max_size = 12
- min_size = 3
- max_unavailable = 1
-
- ami_type = "AL2_x86_64"
- capacity_type = "ON_DEMAND"
+ cluster_enabled_log_types = ["api", "audit", "authenticator", "controllerManager", "scheduler"]
+ cluster_endpoint_private_access = true
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_subnets
+
+ node_security_group_additional_rules = {
+ ingress_nodes_ephemeral = {
+ description = "Node-to-node on ephemeral ports"
+ protocol = "tcp"
+ from_port = 1025
+ to_port = 65535
+ type = "ingress"
+ self = true
+ }
+ egress_all = {
+ description = "Allow all egress"
+ protocol = "-1"
+ from_port = 0
+ to_port = 0
+ type = "egress"
+ cidr_blocks = ["0.0.0.0/0"]
+ }
+ }
+
+ eks_managed_node_groups = {
+ default = {
instance_types = ["m5.large"]
- disk_size = 50
-
- subnet_ids = module.vpc.public_subnets
-
- k8s_labels = {
- Environment = "preprod"
- Zone = "dev"
- WorkerType = "ON_DEMAND"
- }
- additional_tags = {
- ExtraTag = "m5x-on-demand"
- Name = "m5x-on-demand"
- subnet_type = "public"
- }
+
+ min_size = 1
+ max_size = 3
+ desired_size = 1
}
}
@@ -90,11 +87,13 @@ module "eks_blueprints" {
module "eks_blueprints_kubernetes_addons" {
source = "../../modules/kubernetes-addons"
- eks_cluster_id = module.eks_blueprints.eks_cluster_id
- eks_cluster_endpoint = module.eks_blueprints.eks_cluster_endpoint
- eks_oidc_provider = module.eks_blueprints.oidc_provider
- eks_cluster_version = module.eks_blueprints.eks_cluster_version
- eks_worker_security_group_id = module.eks_blueprints.worker_node_security_group_id
+ eks_cluster_id = module.eks.cluster_id
+ eks_cluster_endpoint = module.eks.cluster_endpoint
+ eks_oidc_provider = module.eks.oidc_provider
+ eks_cluster_version = module.eks.cluster_version
+
+  # Wait on the node group before provisioning addons
+ data_plane_wait_arn = module.eks.eks_managed_node_groups["default"].node_group_arn
# Add-ons
enable_metrics_server = true
@@ -139,7 +138,6 @@ module "vpc" {
single_nat_gateway = true
enable_dns_hostnames = true
- # Manage so we can name
manage_default_network_acl = true
default_network_acl_tags = { Name = "${local.name}-default" }
manage_default_route_table = true
@@ -148,13 +146,11 @@ module "vpc" {
default_security_group_tags = { Name = "${local.name}-default" }
public_subnet_tags = {
- "kubernetes.io/cluster/${local.name}" = "shared"
- "kubernetes.io/role/elb" = 1
+ "kubernetes.io/role/elb" = 1
}
private_subnet_tags = {
- "kubernetes.io/cluster/${local.name}" = "shared"
- "kubernetes.io/role/internal-elb" = 1
+ "kubernetes.io/role/internal-elb" = 1
}
tags = local.tags
diff --git a/examples/agones-game-controller/outputs.tf b/examples/agones-game-controller/outputs.tf
index 55552d3138..b7decade8e 100644
--- a/examples/agones-game-controller/outputs.tf
+++ b/examples/agones-game-controller/outputs.tf
@@ -1,4 +1,4 @@
output "configure_kubectl" {
description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig"
- value = module.eks_blueprints.configure_kubectl
+ value = "aws eks --region ${local.region} update-kubeconfig --name ${module.eks.cluster_id}"
}
diff --git a/examples/ai-ml/ray/main.tf b/examples/ai-ml/ray/main.tf
index 8b07bedc25..b569724988 100644
--- a/examples/ai-ml/ray/main.tf
+++ b/examples/ai-ml/ray/main.tf
@@ -3,30 +3,16 @@ provider "aws" {
}
provider "kubernetes" {
- host = module.eks_blueprints.eks_cluster_endpoint
- cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data)
+ host = module.eks.cluster_endpoint
+ cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
token = data.aws_eks_cluster_auth.this.token
-
- exec {
- api_version = "client.authentication.k8s.io/v1beta1"
- command = "aws"
- # This requires the awscli to be installed locally where Terraform is executed
- args = ["eks", "get-token", "--cluster-name", module.eks_blueprints.eks_cluster_id]
- }
}
provider "helm" {
kubernetes {
- host = module.eks_blueprints.eks_cluster_endpoint
- cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data)
+ host = module.eks.cluster_endpoint
+ cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
token = data.aws_eks_cluster_auth.this.token
-
- exec {
- api_version = "client.authentication.k8s.io/v1beta1"
- command = "aws"
- # This requires the awscli to be installed locally where Terraform is executed
- args = ["eks", "get-token", "--cluster-name", module.eks_blueprints.eks_cluster_id]
- }
}
}
@@ -35,6 +21,10 @@ provider "grafana" {
auth = "admin:${aws_secretsmanager_secret_version.grafana.secret_string}"
}
+data "aws_eks_cluster_auth" "this" {
+ name = module.eks.cluster_id
+}
+
data "aws_availability_zones" "available" {}
data "aws_caller_identity" "current" {}
@@ -45,10 +35,6 @@ data "aws_acm_certificate" "issued" {
statuses = ["ISSUED"]
}
-data "aws_eks_cluster_auth" "this" {
- name = module.eks_blueprints.eks_cluster_id
-}
-
locals {
name = basename(path.cwd)
namespace = "ray-cluster"
@@ -66,60 +52,46 @@ locals {
#---------------------------------------------------------------
# EKS Blueprints
#---------------------------------------------------------------
-module "eks_blueprints" {
- source = "../../.."
+
+module "eks" {
+ source = "terraform-aws-modules/eks/aws"
+ version = "~> 18.30"
cluster_name = local.name
cluster_version = "1.23"
- vpc_id = module.vpc.vpc_id
- private_subnet_ids = module.vpc.private_subnets
+ cluster_enabled_log_types = ["api", "audit", "authenticator", "controllerManager", "scheduler"]
+ cluster_endpoint_private_access = true
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_subnets
- #----------------------------------------------------------------------------------------------------------#
- # Security groups used in this module created by the upstream modules terraform-aws-eks (https://github.com/terraform-aws-modules/terraform-aws-eks).
- # Upstream module implemented Security groups based on the best practices doc https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html.
- # So, by default the security groups are restrictive. Users needs to enable rules for specific ports required for App requirement or Add-ons
- # See the notes below for each rule used in these examples
- #----------------------------------------------------------------------------------------------------------#
node_security_group_additional_rules = {
- # Extend node-to-node security group rules. Recommended and required for the Add-ons
- ingress_self_all = {
- description = "Node to node all ports/protocols"
- protocol = "-1"
- from_port = 0
- to_port = 0
+ ingress_nodes_ephemeral = {
+ description = "Node-to-node on ephemeral ports"
+ protocol = "tcp"
+ from_port = 1025
+ to_port = 65535
type = "ingress"
self = true
}
- # Recommended outbound traffic for Node groups
egress_all = {
- description = "Node all egress"
- protocol = "-1"
- from_port = 0
- to_port = 0
- type = "egress"
- cidr_blocks = ["0.0.0.0/0"]
- ipv6_cidr_blocks = ["::/0"]
- }
- # Allows Control Plane Nodes to talk to Worker nodes on all ports. Added this to simplify the example and further avoid issues with Add-ons communication with Control plane.
- # This can be restricted further to specific port based on the requirement for each Add-on e.g., metrics-server 4443, spark-operator 8080, karpenter 8443 etc.
- # Change this according to your security requirements if needed
- ingress_cluster_to_node_all_traffic = {
- description = "Cluster API to Nodegroup all traffic"
- protocol = "-1"
- from_port = 0
- to_port = 0
- type = "ingress"
- source_cluster_security_group = true
+ description = "Allow all egress"
+ protocol = "-1"
+ from_port = 0
+ to_port = 0
+ type = "egress"
+ cidr_blocks = ["0.0.0.0/0"]
}
}
- managed_node_groups = {
- mg_5 = {
- node_group_name = "managed-ondemand"
- instance_types = ["m5.8xlarge"]
- min_size = 3
- subnet_ids = module.vpc.private_subnets
+ eks_managed_node_groups = {
+ default = {
+ instance_types = ["m5.large"]
+
+ min_size = 1
+ max_size = 3
+ desired_size = 1
}
}
@@ -132,10 +104,10 @@ module "eks_blueprints" {
module "eks_blueprints_kubernetes_addons" {
source = "../../../modules/kubernetes-addons"
- eks_cluster_id = module.eks_blueprints.eks_cluster_id
- eks_cluster_endpoint = module.eks_blueprints.eks_cluster_endpoint
- eks_oidc_provider = module.eks_blueprints.oidc_provider
- eks_cluster_version = module.eks_blueprints.eks_cluster_version
+ eks_cluster_id = module.eks.cluster_id
+ eks_cluster_endpoint = module.eks.cluster_endpoint
+ eks_oidc_provider = module.eks.oidc_provider
+ eks_cluster_version = module.eks.cluster_version
eks_cluster_domain = var.eks_cluster_domain
# Add-Ons
@@ -241,7 +213,7 @@ data "aws_iam_policy_document" "irsa_policy" {
resource "aws_iam_policy" "irsa_policy" {
description = "IAM Policy for IRSA"
- name_prefix = substr("${module.eks_blueprints.eks_cluster_id}-${local.namespace}-access", 0, 127)
+ name_prefix = substr("${module.eks.cluster_id}-${local.namespace}-access", 0, 127)
policy = data.aws_iam_policy_document.irsa_policy.json
}
@@ -250,8 +222,8 @@ module "cluster_irsa" {
kubernetes_namespace = local.namespace
kubernetes_service_account = "${local.namespace}-sa"
irsa_iam_policies = [aws_iam_policy.irsa_policy.arn]
- eks_cluster_id = module.eks_blueprints.eks_cluster_id
- eks_oidc_provider_arn = module.eks_blueprints.eks_oidc_provider_arn
+ eks_cluster_id = module.eks.cluster_id
+ eks_oidc_provider_arn = module.eks.oidc_provider_arn
depends_on = [module.s3_bucket]
}
@@ -335,7 +307,6 @@ module "vpc" {
single_nat_gateway = true
enable_dns_hostnames = true
- # Manage so we can name
manage_default_network_acl = true
default_network_acl_tags = { Name = "${local.name}-default" }
manage_default_route_table = true
@@ -344,13 +315,11 @@ module "vpc" {
default_security_group_tags = { Name = "${local.name}-default" }
public_subnet_tags = {
- "kubernetes.io/cluster/${local.name}" = "shared"
- "kubernetes.io/role/elb" = 1
+ "kubernetes.io/role/elb" = 1
}
private_subnet_tags = {
- "kubernetes.io/cluster/${local.name}" = "shared"
- "kubernetes.io/role/internal-elb" = 1
+ "kubernetes.io/role/internal-elb" = 1
}
tags = local.tags
diff --git a/examples/ai-ml/ray/outputs.tf b/examples/ai-ml/ray/outputs.tf
index f5e106a5c6..ebd7f83909 100644
--- a/examples/ai-ml/ray/outputs.tf
+++ b/examples/ai-ml/ray/outputs.tf
@@ -1,6 +1,6 @@
output "configure_kubectl" {
description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig"
- value = module.eks_blueprints.configure_kubectl
+ value = "aws eks --region ${local.region} update-kubeconfig --name ${module.eks.cluster_id}"
}
output "s3_bucket" {
diff --git a/examples/appmesh-mtls/main.tf b/examples/appmesh-mtls/main.tf
index 73eaf51025..95aa26a989 100644
--- a/examples/appmesh-mtls/main.tf
+++ b/examples/appmesh-mtls/main.tf
@@ -3,29 +3,29 @@ provider "aws" {
}
provider "kubernetes" {
- host = module.eks_blueprints.eks_cluster_endpoint
- cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data)
+ host = module.eks.cluster_endpoint
+ cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
token = data.aws_eks_cluster_auth.this.token
}
provider "helm" {
kubernetes {
- host = module.eks_blueprints.eks_cluster_endpoint
- cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data)
+ host = module.eks.cluster_endpoint
+ cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
token = data.aws_eks_cluster_auth.this.token
}
}
provider "kubectl" {
apply_retry_count = 10
- host = module.eks_blueprints.eks_cluster_endpoint
- cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data)
+ host = module.eks.cluster_endpoint
+ cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
load_config_file = false
token = data.aws_eks_cluster_auth.this.token
}
data "aws_eks_cluster_auth" "this" {
- name = module.eks_blueprints.eks_cluster_id
+ name = module.eks.cluster_id
}
data "aws_availability_zones" "available" {}
@@ -48,28 +48,45 @@ locals {
# EKS Blueprints
#---------------------------------------------------------------
-module "eks_blueprints" {
- source = "../.."
+module "eks" {
+ source = "terraform-aws-modules/eks/aws"
+ version = "~> 18.30"
cluster_name = local.name
cluster_version = "1.23"
- vpc_id = module.vpc.vpc_id
- private_subnet_ids = module.vpc.private_subnets
+ cluster_enabled_log_types = ["api", "audit", "authenticator", "controllerManager", "scheduler"]
+ cluster_endpoint_private_access = true
- managed_node_groups = {
- this = {
- node_group_name = local.name
- instance_types = ["m5.large"]
- subnet_ids = module.vpc.private_subnets
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_subnets
+
+ node_security_group_additional_rules = {
+ ingress_nodes_ephemeral = {
+ description = "Node-to-node on ephemeral ports"
+ protocol = "tcp"
+ from_port = 1025
+ to_port = 65535
+ type = "ingress"
+ self = true
+ }
+ egress_all = {
+ description = "Allow all egress"
+ protocol = "-1"
+ from_port = 0
+ to_port = 0
+ type = "egress"
+ cidr_blocks = ["0.0.0.0/0"]
+ }
+ }
+
+ eks_managed_node_groups = {
+ default = {
+ instance_types = ["m5.large"]
min_size = 1
- max_size = 2
+ max_size = 3
desired_size = 1
-
- update_config = [{
- max_unavailable_percentage = 30
- }]
}
}
@@ -79,12 +96,15 @@ module "eks_blueprints" {
module "eks_blueprints_kubernetes_addons" {
source = "../../modules/kubernetes-addons"
- eks_cluster_id = module.eks_blueprints.eks_cluster_id
- eks_cluster_endpoint = module.eks_blueprints.eks_cluster_endpoint
- eks_oidc_provider = module.eks_blueprints.oidc_provider
- eks_cluster_version = module.eks_blueprints.eks_cluster_version
+ eks_cluster_id = module.eks.cluster_id
+ eks_cluster_endpoint = module.eks.cluster_endpoint
+ eks_oidc_provider = module.eks.oidc_provider
+ eks_cluster_version = module.eks.cluster_version
eks_cluster_domain = var.eks_cluster_domain
+  # Wait on the node group before provisioning addons
+ data_plane_wait_arn = module.eks.eks_managed_node_groups["default"].node_group_arn
+
enable_amazon_eks_vpc_cni = true
enable_amazon_eks_coredns = true
enable_amazon_eks_kube_proxy = true
@@ -141,7 +161,7 @@ resource "kubectl_manifest" "cluster_pca_issuer" {
kind = "AWSPCAClusterIssuer"
metadata = {
- name = module.eks_blueprints.eks_cluster_id
+ name = module.eks.cluster_id
}
spec = {
@@ -169,7 +189,7 @@ resource "kubectl_manifest" "example_pca_certificate" {
issuerRef = {
group = "awspca.cert-manager.io"
kind = "AWSPCAClusterIssuer"
- name : module.eks_blueprints.eks_cluster_id
+ name : module.eks.cluster_id
}
renewBefore = "360h0m0s"
# This is the name with which the K8 Secret will be available
@@ -218,13 +238,11 @@ module "vpc" {
default_security_group_tags = { Name = "${local.name}-default" }
public_subnet_tags = {
- "kubernetes.io/cluster/${local.name}" = "shared"
- "kubernetes.io/role/elb" = 1
+ "kubernetes.io/role/elb" = 1
}
private_subnet_tags = {
- "kubernetes.io/cluster/${local.name}" = "shared"
- "kubernetes.io/role/internal-elb" = 1
+ "kubernetes.io/role/internal-elb" = 1
}
tags = local.tags
diff --git a/examples/appmesh-mtls/outputs.tf b/examples/appmesh-mtls/outputs.tf
index 55552d3138..b7decade8e 100644
--- a/examples/appmesh-mtls/outputs.tf
+++ b/examples/appmesh-mtls/outputs.tf
@@ -1,4 +1,4 @@
output "configure_kubectl" {
description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig"
- value = module.eks_blueprints.configure_kubectl
+ value = "aws eks --region ${local.region} update-kubeconfig --name ${module.eks.cluster_id}"
}
diff --git a/examples/complete-kubernetes-addons/main.tf b/examples/complete-kubernetes-addons/main.tf
index bd0c9c7fc9..ce68f7cf1b 100644
--- a/examples/complete-kubernetes-addons/main.tf
+++ b/examples/complete-kubernetes-addons/main.tf
@@ -3,21 +3,21 @@ provider "aws" {
}
provider "kubernetes" {
- host = module.eks_blueprints.eks_cluster_endpoint
- cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data)
+ host = module.eks.cluster_endpoint
+ cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
token = data.aws_eks_cluster_auth.this.token
}
provider "helm" {
kubernetes {
- host = module.eks_blueprints.eks_cluster_endpoint
- cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data)
+ host = module.eks.cluster_endpoint
+ cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
token = data.aws_eks_cluster_auth.this.token
}
}
data "aws_eks_cluster_auth" "this" {
- name = module.eks_blueprints.eks_cluster_id
+ name = module.eks.cluster_id
}
data "aws_availability_zones" "available" {}
@@ -39,89 +39,45 @@ locals {
# EKS Blueprints
#---------------------------------------------------------------
-module "eks_blueprints" {
- source = "../.."
+module "eks" {
+ source = "terraform-aws-modules/eks/aws"
+ version = "~> 18.30"
cluster_name = local.name
cluster_version = "1.23"
- vpc_id = module.vpc.vpc_id
- private_subnet_ids = module.vpc.private_subnets
+ cluster_enabled_log_types = ["api", "audit", "authenticator", "controllerManager", "scheduler"]
+ cluster_endpoint_private_access = true
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_subnets
- #----------------------------------------------------------------------------------------------------------#
- # Security groups used in this module created by the upstream modules terraform-aws-eks (https://github.com/terraform-aws-modules/terraform-aws-eks).
- # Upstream module implemented Security groups based on the best practices doc https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html.
- # So, by default the security groups are restrictive. Users needs to enable rules for specific ports required for App requirement or Add-ons
- # See the notes below for each rule used in these examples
- #----------------------------------------------------------------------------------------------------------#
node_security_group_additional_rules = {
- # Extend node-to-node security group rules. Recommended and required for the Add-ons
- ingress_self_all = {
- description = "Node to node all ports/protocols"
- protocol = "-1"
- from_port = 0
- to_port = 0
+ ingress_nodes_ephemeral = {
+ description = "Node-to-node on ephemeral ports"
+ protocol = "tcp"
+ from_port = 1025
+ to_port = 65535
type = "ingress"
self = true
}
- # Recommended outbound traffic for Node groups
egress_all = {
- description = "Node all egress"
- protocol = "-1"
- from_port = 0
- to_port = 0
- type = "egress"
- cidr_blocks = ["0.0.0.0/0"]
- ipv6_cidr_blocks = ["::/0"]
- }
- # Allows Control Plane Nodes to talk to Worker nodes on all ports. Added this to simplify the example and further avoid issues with Add-ons communication with Control plane.
- # This can be restricted further to specific port based on the requirement for each Add-on e.g., metrics-server 4443, spark-operator 8080, karpenter 8443 etc.
- # Change this according to your security requirements if needed
- ingress_cluster_to_node_all_traffic = {
- description = "Cluster API to Nodegroup all traffic"
- protocol = "-1"
- from_port = 0
- to_port = 0
- type = "ingress"
- source_cluster_security_group = true
- }
- }
-
- managed_node_groups = {
- mg_5 = {
- node_group_name = "managed-ondemand"
- instance_types = ["m5.large"]
- subnet_ids = module.vpc.private_subnets
- force_update_version = true
- }
- }
-
- self_managed_node_groups = {
- self_mg_5 = {
- node_group_name = "self-managed-ondemand"
- instance_type = "m5.large"
- launch_template_os = "amazonlinux2eks" # amazonlinux2eks or bottlerocket or windows
- custom_ami_id = data.aws_ami.eks.id # Bring your own custom AMI generated by Packer/ImageBuilder/Puppet etc.
- subnet_ids = module.vpc.private_subnets
+ description = "Allow all egress"
+ protocol = "-1"
+ from_port = 0
+ to_port = 0
+ type = "egress"
+ cidr_blocks = ["0.0.0.0/0"]
}
}
- fargate_profiles = {
+ eks_managed_node_groups = {
default = {
- fargate_profile_name = "default"
- fargate_profile_namespaces = [
- {
- namespace = "default"
- k8s_labels = {
- Environment = "preprod"
- Zone = "dev"
- env = "fargate"
- }
- }]
- subnet_ids = module.vpc.private_subnets
- additional_tags = {
- ExtraTag = "Fargate"
- }
+ instance_types = ["m5.large"]
+
+ min_size = 1
+ max_size = 3
+ desired_size = 2
}
}
@@ -131,12 +87,14 @@ module "eks_blueprints" {
module "eks_blueprints_kubernetes_addons" {
source = "../../modules/kubernetes-addons"
- eks_cluster_id = module.eks_blueprints.eks_cluster_id
- eks_cluster_endpoint = module.eks_blueprints.eks_cluster_endpoint
- eks_oidc_provider = module.eks_blueprints.oidc_provider
- eks_cluster_version = module.eks_blueprints.eks_cluster_version
- eks_worker_security_group_id = module.eks_blueprints.worker_node_security_group_id
- auto_scaling_group_names = module.eks_blueprints.self_managed_node_group_autoscaling_groups
+ eks_cluster_id = module.eks.cluster_id
+ eks_cluster_endpoint = module.eks.cluster_endpoint
+ eks_oidc_provider = module.eks.oidc_provider
+ eks_cluster_version = module.eks.cluster_version
+ eks_worker_security_group_id = module.eks.node_security_group_id
+
+  # Wait on the node group before provisioning addons
+ data_plane_wait_arn = module.eks.eks_managed_node_groups["default"].node_group_arn
# EKS Addons
enable_amazon_eks_vpc_cni = true
@@ -164,11 +122,11 @@ module "eks_blueprints_kubernetes_addons" {
repository = "https://aws.github.io/eks-charts"
version = "0.1.18"
namespace = "logging"
- aws_for_fluent_bit_cw_log_group = "/${module.eks_blueprints.eks_cluster_id}/worker-fluentbit-logs" # Optional
+ aws_for_fluent_bit_cw_log_group = "/${module.eks.cluster_id}/worker-fluentbit-logs" # Optional
create_namespace = true
values = [templatefile("${path.module}/helm_values/aws-for-fluentbit-values.yaml", {
region = local.region
- aws_for_fluent_bit_cw_log_group = "/${module.eks_blueprints.eks_cluster_id}/worker-fluentbit-logs"
+ aws_for_fluent_bit_cw_log_group = "/${module.eks.cluster_id}/worker-fluentbit-logs"
})]
set = [
{
@@ -178,40 +136,6 @@ module "eks_blueprints_kubernetes_addons" {
]
}
- enable_fargate_fluentbit = true
- fargate_fluentbit_addon_config = {
- output_conf = <<-EOF
- [OUTPUT]
- Name cloudwatch_logs
- Match *
- region ${local.region}
- log_group_name /${module.eks_blueprints.eks_cluster_id}/fargate-fluentbit-logs
- log_stream_prefix "fargate-logs-"
- auto_create_group true
- EOF
-
- filters_conf = <<-EOF
- [FILTER]
- Name parser
- Match *
- Key_Name log
- Parser regex
- Preserve_Key True
- Reserve_Data True
- EOF
-
- parsers_conf = <<-EOF
- [PARSER]
- Name regex
- Format regex
- Regex ^(?<time>[^ ]+) (?<stream>[^ ]+) (?<logtag>[^ ]+) (?<message>.+)$
- Time_Key time
- Time_Format %Y-%m-%dT%H:%M:%S.%L%z
- Time_Keep On
- Decode_Field_As json message
- EOF
- }
-
enable_kyverno = true
enable_kyverno_policies = true
enable_kyverno_policy_reporter = true
@@ -247,7 +171,6 @@ module "vpc" {
single_nat_gateway = true
enable_dns_hostnames = true
- # Manage so we can name
manage_default_network_acl = true
default_network_acl_tags = { Name = "${local.name}-default" }
manage_default_route_table = true
@@ -256,23 +179,12 @@ module "vpc" {
default_security_group_tags = { Name = "${local.name}-default" }
public_subnet_tags = {
- "kubernetes.io/cluster/${local.name}" = "shared"
- "kubernetes.io/role/elb" = 1
+ "kubernetes.io/role/elb" = 1
}
private_subnet_tags = {
- "kubernetes.io/cluster/${local.name}" = "shared"
- "kubernetes.io/role/internal-elb" = 1
+ "kubernetes.io/role/internal-elb" = 1
}
tags = local.tags
}
-
-data "aws_ami" "eks" {
- most_recent = true
- filter {
- name = "name"
- values = ["amazon-eks-node-${module.eks_blueprints.eks_cluster_version}-*"]
- }
- owners = ["amazon"]
-}
diff --git a/examples/complete-kubernetes-addons/outputs.tf b/examples/complete-kubernetes-addons/outputs.tf
index 55552d3138..b7decade8e 100644
--- a/examples/complete-kubernetes-addons/outputs.tf
+++ b/examples/complete-kubernetes-addons/outputs.tf
@@ -1,4 +1,4 @@
output "configure_kubectl" {
description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig"
- value = module.eks_blueprints.configure_kubectl
+ value = "aws eks --region ${local.region} update-kubeconfig --name ${module.eks.cluster_id}"
}
diff --git a/examples/crossplane/main.tf b/examples/crossplane/main.tf
index 269964cd81..2322c6ad4e 100644
--- a/examples/crossplane/main.tf
+++ b/examples/crossplane/main.tf
@@ -3,29 +3,29 @@ provider "aws" {
}
provider "kubernetes" {
- host = module.eks_blueprints.eks_cluster_endpoint
- cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data)
+ host = module.eks.cluster_endpoint
+ cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
token = data.aws_eks_cluster_auth.this.token
}
provider "helm" {
kubernetes {
- host = module.eks_blueprints.eks_cluster_endpoint
- cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data)
+ host = module.eks.cluster_endpoint
+ cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
token = data.aws_eks_cluster_auth.this.token
}
}
provider "kubectl" {
apply_retry_count = 30
- host = module.eks_blueprints.eks_cluster_endpoint
- cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data)
+ host = module.eks.cluster_endpoint
+ cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
load_config_file = false
token = data.aws_eks_cluster_auth.this.token
}
data "aws_eks_cluster_auth" "this" {
- name = module.eks_blueprints.eks_cluster_id
+ name = module.eks.cluster_id
}
data "aws_availability_zones" "available" {}
@@ -47,21 +47,45 @@ locals {
# EKS Blueprints
#---------------------------------------------------------------
-module "eks_blueprints" {
- source = "../.."
+module "eks" {
+ source = "terraform-aws-modules/eks/aws"
+ version = "~> 18.30"
cluster_name = local.name
cluster_version = "1.23"
- vpc_id = module.vpc.vpc_id
- private_subnet_ids = module.vpc.private_subnets
+ cluster_enabled_log_types = ["api", "audit", "authenticator", "controllerManager", "scheduler"]
+ cluster_endpoint_private_access = true
- managed_node_groups = {
- mg_5 = {
- node_group_name = "managed-ondemand"
- instance_types = ["m5.large"]
- min_size = 2
- subnet_ids = module.vpc.private_subnets
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_subnets
+
+ node_security_group_additional_rules = {
+ ingress_nodes_ephemeral = {
+ description = "Node-to-node on ephemeral ports"
+ protocol = "tcp"
+ from_port = 1025
+ to_port = 65535
+ type = "ingress"
+ self = true
+ }
+ egress_all = {
+ description = "Allow all egress"
+ protocol = "-1"
+ from_port = 0
+ to_port = 0
+ type = "egress"
+ cidr_blocks = ["0.0.0.0/0"]
+ }
+ }
+
+ eks_managed_node_groups = {
+ default = {
+ instance_types = ["m5.large"]
+
+ min_size = 1
+ max_size = 3
+ desired_size = 1
}
}
@@ -71,10 +95,13 @@ module "eks_blueprints" {
module "eks_blueprints_kubernetes_addons" {
source = "../../modules/kubernetes-addons"
- eks_cluster_id = module.eks_blueprints.eks_cluster_id
- eks_cluster_endpoint = module.eks_blueprints.eks_cluster_endpoint
- eks_oidc_provider = module.eks_blueprints.oidc_provider
- eks_cluster_version = module.eks_blueprints.eks_cluster_version
+ eks_cluster_id = module.eks.cluster_id
+ eks_cluster_endpoint = module.eks.cluster_endpoint
+ eks_oidc_provider = module.eks.oidc_provider
+ eks_cluster_version = module.eks.cluster_version
+
+  # Wait on the node group before provisioning addons
+ data_plane_wait_arn = module.eks.eks_managed_node_groups["default"].node_group_arn
enable_crossplane = true
@@ -122,7 +149,6 @@ module "vpc" {
single_nat_gateway = true
enable_dns_hostnames = true
- # Manage so we can name
manage_default_network_acl = true
default_network_acl_tags = { Name = "${local.name}-default" }
manage_default_route_table = true
@@ -131,13 +157,11 @@ module "vpc" {
default_security_group_tags = { Name = "${local.name}-default" }
public_subnet_tags = {
- "kubernetes.io/cluster/${local.name}" = "shared"
- "kubernetes.io/role/elb" = 1
+ "kubernetes.io/role/elb" = 1
}
private_subnet_tags = {
- "kubernetes.io/cluster/${local.name}" = "shared"
- "kubernetes.io/role/internal-elb" = 1
+ "kubernetes.io/role/internal-elb" = 1
}
tags = local.tags
diff --git a/examples/crossplane/outputs.tf b/examples/crossplane/outputs.tf
index 55552d3138..b7decade8e 100644
--- a/examples/crossplane/outputs.tf
+++ b/examples/crossplane/outputs.tf
@@ -1,4 +1,4 @@
output "configure_kubectl" {
description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig"
- value = module.eks_blueprints.configure_kubectl
+ value = "aws eks --region ${local.region} update-kubeconfig --name ${module.eks.cluster_id}"
}
diff --git a/examples/eks-cluster-with-external-dns/main.tf b/examples/eks-cluster-with-external-dns/main.tf
index 8295fd6e88..947aa27f86 100644
--- a/examples/eks-cluster-with-external-dns/main.tf
+++ b/examples/eks-cluster-with-external-dns/main.tf
@@ -3,21 +3,21 @@ provider "aws" {
}
provider "kubernetes" {
- host = module.eks_blueprints.eks_cluster_endpoint
- cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data)
+ host = module.eks.cluster_endpoint
+ cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
token = data.aws_eks_cluster_auth.this.token
}
provider "helm" {
kubernetes {
- host = module.eks_blueprints.eks_cluster_endpoint
- cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data)
+ host = module.eks.cluster_endpoint
+ cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
token = data.aws_eks_cluster_auth.this.token
}
}
data "aws_eks_cluster_auth" "this" {
- name = module.eks_blueprints.eks_cluster_id
+ name = module.eks.cluster_id
}
data "aws_acm_certificate" "issued" {
@@ -44,21 +44,53 @@ locals {
# EKS Blueprints
#---------------------------------------------------------------
-module "eks_blueprints" {
- source = "../.."
+module "eks" {
+ source = "terraform-aws-modules/eks/aws"
+ version = "~> 18.30"
cluster_name = local.name
cluster_version = "1.23"
- vpc_id = module.vpc.vpc_id
- private_subnet_ids = module.vpc.private_subnets
+ cluster_enabled_log_types = ["api", "audit", "authenticator", "controllerManager", "scheduler"]
+ cluster_endpoint_private_access = true
- managed_node_groups = {
- mg_5 = {
- node_group_name = "managed-ondemand"
- instance_types = ["m5.large"]
- min_size = 2
- subnet_ids = module.vpc.private_subnets
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_subnets
+
+ node_security_group_additional_rules = {
+ ingress_nodes_alb_controller_port = {
+ description = "Cluster API to Node group for ALB controller webhook"
+ protocol = "tcp"
+ from_port = 9443
+ to_port = 9443
+ type = "ingress"
+ source_cluster_security_group = true
+ }
+ ingress_nodes_ephemeral = {
+ description = "Node-to-node on ephemeral ports"
+ protocol = "tcp"
+ from_port = 1025
+ to_port = 65535
+ type = "ingress"
+ self = true
+ }
+ egress_all = {
+ description = "Allow all egress"
+ protocol = "-1"
+ from_port = 0
+ to_port = 0
+ type = "egress"
+ cidr_blocks = ["0.0.0.0/0"]
+ }
+ }
+
+ eks_managed_node_groups = {
+ default = {
+ instance_types = ["m5.large"]
+
+ min_size = 1
+ max_size = 3
+ desired_size = 1
}
}
@@ -68,12 +100,15 @@ module "eks_blueprints" {
module "eks_blueprints_kubernetes_addons" {
source = "../../modules/kubernetes-addons"
- eks_cluster_id = module.eks_blueprints.eks_cluster_id
- eks_cluster_endpoint = module.eks_blueprints.eks_cluster_endpoint
- eks_oidc_provider = module.eks_blueprints.oidc_provider
- eks_cluster_version = module.eks_blueprints.eks_cluster_version
+ eks_cluster_id = module.eks.cluster_id
+ eks_cluster_endpoint = module.eks.cluster_endpoint
+ eks_oidc_provider = module.eks.oidc_provider
+ eks_cluster_version = module.eks.cluster_version
eks_cluster_domain = var.eks_cluster_domain
+  # Wait on the node group before provisioning addons
+ data_plane_wait_arn = module.eks.eks_managed_node_groups["default"].node_group_arn
+
enable_argocd = true
argocd_applications = {
workloads = {
@@ -122,7 +157,6 @@ module "vpc" {
single_nat_gateway = true
enable_dns_hostnames = true
- # Manage so we can name
manage_default_network_acl = true
default_network_acl_tags = { Name = "${local.name}-default" }
manage_default_route_table = true
@@ -131,13 +165,11 @@ module "vpc" {
default_security_group_tags = { Name = "${local.name}-default" }
public_subnet_tags = {
- "kubernetes.io/cluster/${local.name}" = "shared"
- "kubernetes.io/role/elb" = 1
+ "kubernetes.io/role/elb" = 1
}
private_subnet_tags = {
- "kubernetes.io/cluster/${local.name}" = "shared"
- "kubernetes.io/role/internal-elb" = 1
+ "kubernetes.io/role/internal-elb" = 1
}
tags = local.tags
diff --git a/examples/eks-cluster-with-external-dns/outputs.tf b/examples/eks-cluster-with-external-dns/outputs.tf
index 55552d3138..b7decade8e 100644
--- a/examples/eks-cluster-with-external-dns/outputs.tf
+++ b/examples/eks-cluster-with-external-dns/outputs.tf
@@ -1,4 +1,4 @@
output "configure_kubectl" {
description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig"
- value = module.eks_blueprints.configure_kubectl
+ value = "aws eks --region ${local.region} update-kubeconfig --name ${module.eks.cluster_id}"
}
diff --git a/examples/eks-cluster-with-new-vpc/main.tf b/examples/eks-cluster-with-new-vpc/main.tf
index 413a22e85f..f577a801ee 100644
--- a/examples/eks-cluster-with-new-vpc/main.tf
+++ b/examples/eks-cluster-with-new-vpc/main.tf
@@ -3,21 +3,21 @@ provider "aws" {
}
provider "kubernetes" {
- host = module.eks_blueprints.eks_cluster_endpoint
- cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data)
+ host = module.eks.cluster_endpoint
+ cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
token = data.aws_eks_cluster_auth.this.token
}
provider "helm" {
kubernetes {
- host = module.eks_blueprints.eks_cluster_endpoint
- cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data)
+ host = module.eks.cluster_endpoint
+ cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
token = data.aws_eks_cluster_auth.this.token
}
}
data "aws_eks_cluster_auth" "this" {
- name = module.eks_blueprints.eks_cluster_id
+ name = module.eks.cluster_id
}
data "aws_availability_zones" "available" {
@@ -46,23 +46,61 @@ locals {
# EKS Blueprints
#---------------------------------------------------------------
-module "eks_blueprints" {
- source = "../.."
+module "eks" {
+ source = "terraform-aws-modules/eks/aws"
+ version = "~> 18.30"
cluster_name = local.cluster_name
cluster_version = "1.23"
- vpc_id = module.vpc.vpc_id
- private_subnet_ids = module.vpc.private_subnets
-
- managed_node_groups = {
- mg_5 = {
- node_group_name = "managed-ondemand"
- instance_types = ["m5.large"]
- min_size = 3
- max_size = 3
- desired_size = 3
- subnet_ids = module.vpc.private_subnets
+ cluster_enabled_log_types = ["api", "audit", "authenticator", "controllerManager", "scheduler"]
+ cluster_endpoint_private_access = true
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_subnets
+
+ node_security_group_additional_rules = {
+ ingress_gatekeeper_webhook = {
+ description = "Gatekeeper webhook"
+ protocol = "tcp"
+ from_port = 8443
+ to_port = 8443
+ type = "ingress"
+ source_cluster_security_group = true
+ }
+ ingress_alb_controller_webhook = {
+ description = "ALB controller webhook"
+ protocol = "tcp"
+ from_port = 9443
+ to_port = 9443
+ type = "ingress"
+ source_cluster_security_group = true
+ }
+ ingress_nodes_ephemeral = {
+ description = "Node-to-node on ephemeral ports"
+ protocol = "tcp"
+ from_port = 1025
+ to_port = 65535
+ type = "ingress"
+ self = true
+ }
+ egress_all = {
+ description = "Allow all egress"
+ protocol = "-1"
+ from_port = 0
+ to_port = 0
+ type = "egress"
+ cidr_blocks = ["0.0.0.0/0"]
+ }
+ }
+
+ eks_managed_node_groups = {
+ default = {
+ instance_types = ["m5.large"]
+
+ min_size = 3
+ max_size = 3
+ desired_size = 3
}
}
@@ -72,10 +110,13 @@ module "eks_blueprints" {
module "eks_blueprints_kubernetes_addons" {
source = "../../modules/kubernetes-addons"
- eks_cluster_id = module.eks_blueprints.eks_cluster_id
- eks_cluster_endpoint = module.eks_blueprints.eks_cluster_endpoint
- eks_oidc_provider = module.eks_blueprints.oidc_provider
- eks_cluster_version = module.eks_blueprints.eks_cluster_version
+ eks_cluster_id = module.eks.cluster_id
+ eks_cluster_endpoint = module.eks.cluster_endpoint
+ eks_oidc_provider = module.eks.oidc_provider
+ eks_cluster_version = module.eks.cluster_version
+
+ # Wait on the node group before provisioning addons
+ data_plane_wait_arn = module.eks.eks_managed_node_groups["default"].node_group_arn
# EKS Managed Add-ons
enable_amazon_eks_vpc_cni = true
@@ -135,7 +176,6 @@ module "vpc" {
single_nat_gateway = true
enable_dns_hostnames = true
- # Manage so we can name
manage_default_network_acl = true
default_network_acl_tags = { Name = "${local.name}-default" }
manage_default_route_table = true
@@ -144,13 +184,11 @@ module "vpc" {
default_security_group_tags = { Name = "${local.name}-default" }
public_subnet_tags = {
- "kubernetes.io/cluster/${local.cluster_name}" = "shared"
- "kubernetes.io/role/elb" = 1
+ "kubernetes.io/role/elb" = 1
}
private_subnet_tags = {
- "kubernetes.io/cluster/${local.cluster_name}" = "shared"
- "kubernetes.io/role/internal-elb" = 1
+ "kubernetes.io/role/internal-elb" = 1
}
tags = local.tags
diff --git a/examples/eks-cluster-with-new-vpc/outputs.tf b/examples/eks-cluster-with-new-vpc/outputs.tf
index d21fcadacd..bdbb7c7b14 100644
--- a/examples/eks-cluster-with-new-vpc/outputs.tf
+++ b/examples/eks-cluster-with-new-vpc/outputs.tf
@@ -15,37 +15,37 @@ output "vpc_cidr" {
output "eks_cluster_id" {
description = "EKS cluster ID"
- value = module.eks_blueprints.eks_cluster_id
+ value = module.eks.cluster_id
}
output "eks_managed_nodegroups" {
description = "EKS managed node groups"
- value = module.eks_blueprints.managed_node_groups
+ value = module.eks.eks_managed_node_groups
}
output "eks_managed_nodegroup_ids" {
description = "EKS managed node group ids"
- value = module.eks_blueprints.managed_node_groups_id
+ value = [for grp in module.eks.eks_managed_node_groups : grp.node_group_id]
}
output "eks_managed_nodegroup_arns" {
description = "EKS managed node group arns"
- value = module.eks_blueprints.managed_node_group_arn
+ value = [for grp in module.eks.eks_managed_node_groups : grp.node_group_arn]
}
output "eks_managed_nodegroup_role_name" {
description = "EKS managed node group role name"
- value = module.eks_blueprints.managed_node_group_iam_role_names
+ value = [for grp in module.eks.eks_managed_node_groups : grp.iam_role_name]
}
output "eks_managed_nodegroup_status" {
description = "EKS managed node group status"
- value = module.eks_blueprints.managed_node_groups_status
+ value = [for grp in module.eks.eks_managed_node_groups : grp.node_group_status]
}
output "configure_kubectl" {
description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig"
- value = module.eks_blueprints.configure_kubectl
+ value = "aws eks --region ${local.region} update-kubeconfig --name ${module.eks.cluster_id}"
}
# Region used for Terratest
diff --git a/examples/external-secrets/main.tf b/examples/external-secrets/main.tf
index 615b9a70e2..79d89b40cb 100644
--- a/examples/external-secrets/main.tf
+++ b/examples/external-secrets/main.tf
@@ -3,37 +3,37 @@ provider "aws" {
}
provider "kubernetes" {
- host = module.eks_blueprints.eks_cluster_endpoint
- cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data)
+ host = module.eks.cluster_endpoint
+ cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
token = data.aws_eks_cluster_auth.this.token
}
provider "helm" {
kubernetes {
- host = module.eks_blueprints.eks_cluster_endpoint
- cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data)
+ host = module.eks.cluster_endpoint
+ cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
token = data.aws_eks_cluster_auth.this.token
}
}
provider "kubectl" {
- host = module.eks_blueprints.eks_cluster_endpoint
- cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data)
+ apply_retry_count = 10
+ host = module.eks.cluster_endpoint
+ cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
+ load_config_file = false
token = data.aws_eks_cluster_auth.this.token
}
data "aws_eks_cluster_auth" "this" {
- name = module.eks_blueprints.eks_cluster_id
+ name = module.eks.cluster_id
}
data "aws_availability_zones" "available" {}
data "aws_caller_identity" "current" {}
-data "aws_region" "current" {}
locals {
- name = basename(path.cwd)
- namespace = "external-secrets"
- region = "us-west-2"
+ name = basename(path.cwd)
+ region = "us-west-2"
vpc_cidr = "10.0.0.0/16"
azs = slice(data.aws_availability_zones.available.names, 0, 3)
@@ -53,60 +53,45 @@ locals {
# EKS Blueprints
#---------------------------------------------------------------
-module "eks_blueprints" {
- source = "../.."
+module "eks" {
+ source = "terraform-aws-modules/eks/aws"
+ version = "~> 18.30"
cluster_name = local.name
cluster_version = "1.23"
- vpc_id = module.vpc.vpc_id
- private_subnet_ids = module.vpc.private_subnets
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_subnets
+
+ cluster_enabled_log_types = ["api", "audit", "authenticator", "controllerManager", "scheduler"]
+ cluster_endpoint_private_access = true
- #----------------------------------------------------------------------------------------------------------#
- # Security groups used in this module created by the upstream modules terraform-aws-eks (https://github.com/terraform-aws-modules/terraform-aws-eks).
- # Upstream module implemented Security groups based on the best practices doc https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html.
- # So, by default the security groups are restrictive. Users needs to enable rules for specific ports required for App requirement or Add-ons
- # See the notes below for each rule used in these examples
- #----------------------------------------------------------------------------------------------------------#
node_security_group_additional_rules = {
- # Extend node-to-node security group rules. Recommended and required for the Add-ons
- ingress_self_all = {
- description = "Node to node all ports/protocols"
- protocol = "-1"
- from_port = 0
- to_port = 0
+ ingress_nodes_ephemeral = {
+ description = "Node-to-node on ephemeral ports"
+ protocol = "tcp"
+ from_port = 1025
+ to_port = 65535
type = "ingress"
self = true
}
- # Recommended outbound traffic for Node groups
egress_all = {
- description = "Node all egress"
- protocol = "-1"
- from_port = 0
- to_port = 0
- type = "egress"
- cidr_blocks = ["0.0.0.0/0"]
- ipv6_cidr_blocks = ["::/0"]
- }
- # Allows Control Plane Nodes to talk to Worker nodes on all ports. Added this to simplify the example and further avoid issues with Add-ons communication with Control plane.
- # This can be restricted further to specific port based on the requirement for each Add-on e.g., metrics-server 4443, spark-operator 8080, karpenter 8443 etc.
- # Change this according to your security requirements if needed
- ingress_cluster_to_node_all_traffic = {
- description = "Cluster API to Nodegroup all traffic"
- protocol = "-1"
- from_port = 0
- to_port = 0
- type = "ingress"
- source_cluster_security_group = true
+ description = "Allow all egress"
+ protocol = "-1"
+ from_port = 0
+ to_port = 0
+ type = "egress"
+ cidr_blocks = ["0.0.0.0/0"]
}
}
- managed_node_groups = {
- mg_5 = {
- node_group_name = "managed-ondemand"
- instance_types = ["m5.large"]
- subnet_ids = module.vpc.private_subnets
- force_update_version = true
+ eks_managed_node_groups = {
+ default = {
+ instance_types = ["m5.large"]
+
+ min_size = 1
+ max_size = 3
+ desired_size = 1
}
}
@@ -116,11 +101,13 @@ module "eks_blueprints" {
module "eks_blueprints_kubernetes_addons" {
source = "../../modules/kubernetes-addons"
- eks_cluster_id = module.eks_blueprints.eks_cluster_id
- eks_cluster_endpoint = module.eks_blueprints.eks_cluster_endpoint
- eks_oidc_provider = module.eks_blueprints.oidc_provider
- eks_cluster_version = module.eks_blueprints.eks_cluster_version
- eks_worker_security_group_id = module.eks_blueprints.worker_node_security_group_id
+ eks_cluster_id = module.eks.cluster_id
+ eks_cluster_endpoint = module.eks.cluster_endpoint
+ eks_oidc_provider = module.eks.oidc_provider
+ eks_cluster_version = module.eks.cluster_version
+
+ # Wait on the node group before provisioning addons
+ data_plane_wait_arn = module.eks.eks_managed_node_groups["default"].node_group_arn
enable_external_secrets = true
@@ -146,7 +133,6 @@ module "vpc" {
single_nat_gateway = true
enable_dns_hostnames = true
- # Manage so we can name
manage_default_network_acl = true
default_network_acl_tags = { Name = "${local.name}-default" }
manage_default_route_table = true
@@ -155,13 +141,11 @@ module "vpc" {
default_security_group_tags = { Name = "${local.name}-default" }
public_subnet_tags = {
- "kubernetes.io/cluster/${local.name}" = "shared"
- "kubernetes.io/role/elb" = 1
+ "kubernetes.io/role/elb" = 1
}
private_subnet_tags = {
- "kubernetes.io/cluster/${local.name}" = "shared"
- "kubernetes.io/role/internal-elb" = 1
+ "kubernetes.io/role/internal-elb" = 1
}
tags = local.tags
@@ -177,12 +161,12 @@ resource "aws_kms_key" "secrets" {
module "cluster_secretstore_role" {
source = "../../modules/irsa"
- kubernetes_namespace = local.namespace
+ kubernetes_namespace = local.name
create_kubernetes_namespace = false
kubernetes_service_account = local.cluster_secretstore_sa
irsa_iam_policies = [aws_iam_policy.cluster_secretstore.arn]
- eks_cluster_id = module.eks_blueprints.eks_cluster_id
- eks_oidc_provider_arn = module.eks_blueprints.eks_oidc_provider_arn
+ eks_cluster_id = module.eks.cluster_id
+ eks_oidc_provider_arn = module.eks.oidc_provider_arn
depends_on = [module.eks_blueprints_kubernetes_addons]
}
@@ -225,12 +209,12 @@ spec:
provider:
aws:
service: SecretsManager
- region: ${data.aws_region.current.name}
+ region: ${local.region}
auth:
jwt:
serviceAccountRef:
name: ${local.cluster_secretstore_sa}
- namespace: ${local.namespace}
+ namespace: ${local.name}
YAML
depends_on = [module.eks_blueprints_kubernetes_addons]
}
@@ -254,7 +238,7 @@ apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: ${local.name}-sm
- namespace: ${local.namespace}
+ namespace: ${local.name}
spec:
refreshInterval: 1h
secretStoreRef:
@@ -273,12 +257,12 @@ YAML
module "secretstore_role" {
source = "../../modules/irsa"
- kubernetes_namespace = local.namespace
+ kubernetes_namespace = local.name
create_kubernetes_namespace = false
kubernetes_service_account = local.secretstore_sa
irsa_iam_policies = [aws_iam_policy.secretstore.arn]
- eks_cluster_id = module.eks_blueprints.eks_cluster_id
- eks_oidc_provider_arn = module.eks_blueprints.eks_oidc_provider_arn
+ eks_cluster_id = module.eks.cluster_id
+ eks_oidc_provider_arn = module.eks.oidc_provider_arn
depends_on = [module.eks_blueprints_kubernetes_addons]
}
@@ -293,7 +277,7 @@ resource "aws_iam_policy" "secretstore" {
"Action": [
"ssm:GetParameter*"
],
- "Resource": "arn:aws:ssm:${data.aws_region.current.name}:${data.aws_caller_identity.current.account_id}:parameter/${local.name}/*"
+ "Resource": "arn:aws:ssm:${local.region}:${data.aws_caller_identity.current.account_id}:parameter/${local.name}/*"
},
{
"Effect": "Allow",
@@ -313,12 +297,12 @@ apiVersion: external-secrets.io/v1beta1
kind: SecretStore
metadata:
name: ${local.secretstore_name}
- namespace: ${local.namespace}
+ namespace: ${local.name}
spec:
provider:
aws:
service: ParameterStore
- region: ${data.aws_region.current.name}
+ region: ${local.region}
auth:
jwt:
serviceAccountRef:
@@ -344,7 +328,7 @@ apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: ${local.name}-ps
- namespace: ${local.namespace}
+ namespace: ${local.name}
spec:
refreshInterval: 1h
secretStoreRef:
diff --git a/examples/external-secrets/outputs.tf b/examples/external-secrets/outputs.tf
index 55552d3138..b7decade8e 100644
--- a/examples/external-secrets/outputs.tf
+++ b/examples/external-secrets/outputs.tf
@@ -1,4 +1,4 @@
output "configure_kubectl" {
description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig"
- value = module.eks_blueprints.configure_kubectl
+ value = "aws eks --region ${local.region} update-kubeconfig --name ${module.eks.cluster_id}"
}
diff --git a/examples/fargate-serverless/main.tf b/examples/fargate-serverless/main.tf
index 02086910ed..e204658c6f 100644
--- a/examples/fargate-serverless/main.tf
+++ b/examples/fargate-serverless/main.tf
@@ -3,24 +3,23 @@ provider "aws" {
}
provider "kubernetes" {
- host = module.eks_blueprints.eks_cluster_endpoint
- cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data)
+ host = module.eks.cluster_endpoint
+ cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
token = data.aws_eks_cluster_auth.this.token
}
provider "helm" {
kubernetes {
- host = module.eks_blueprints.eks_cluster_endpoint
- cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data)
+ host = module.eks.cluster_endpoint
+ cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
token = data.aws_eks_cluster_auth.this.token
}
}
data "aws_eks_cluster_auth" "this" {
- name = module.eks_blueprints.eks_cluster_id
+ name = module.eks.cluster_id
}
-data "aws_caller_identity" "current" {}
data "aws_availability_zones" "available" {}
locals {
@@ -40,62 +39,82 @@ locals {
# EKS Blueprints
#---------------------------------------------------------------
-module "eks_blueprints" {
- source = "../.."
+module "eks" {
+ source = "terraform-aws-modules/eks/aws"
+ version = "~> 18.30"
cluster_name = local.name
cluster_version = "1.23"
- vpc_id = module.vpc.vpc_id
- private_subnet_ids = module.vpc.private_subnets
+ cluster_enabled_log_types = ["api", "audit", "authenticator", "controllerManager", "scheduler"]
+ cluster_endpoint_private_access = true
- # https://github.com/aws-ia/terraform-aws-eks-blueprints/issues/485
- # https://github.com/aws-ia/terraform-aws-eks-blueprints/issues/494
- cluster_kms_key_additional_admin_arns = [data.aws_caller_identity.current.arn]
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_subnets
+
+ node_security_group_additional_rules = {
+ ingress_nodes_ephemeral = {
+ description = "Node-to-node on ephemeral ports"
+ protocol = "tcp"
+ from_port = 1025
+ to_port = 65535
+ type = "ingress"
+ self = true
+ }
+ egress_all = {
+ description = "Allow all egress"
+ protocol = "-1"
+ from_port = 0
+ to_port = 0
+ type = "egress"
+ cidr_blocks = ["0.0.0.0/0"]
+ }
+ }
fargate_profiles = {
# Providing compute for default namespace
default = {
- fargate_profile_name = "default"
- fargate_profile_namespaces = [
+ name = "default"
+ selectors = [
{
namespace = "default"
- }]
- subnet_ids = module.vpc.private_subnets
+ }
+ ]
}
# Providing compute for kube-system namespace where core addons reside
kube_system = {
- fargate_profile_name = "kube-system"
- fargate_profile_namespaces = [
+ name = "kube-system"
+ selectors = [
{
namespace = "kube-system"
- }]
- subnet_ids = module.vpc.private_subnets
+ }
+ ]
}
- # Sample application
+
app = {
- fargate_profile_name = "app-wildcard"
- fargate_profile_namespaces = [
+ name = "app-wildcard"
+ selectors = [
{
namespace = "app-*"
- }]
- subnet_ids = module.vpc.private_subnets
+ }
+ ]
}
}
+
tags = local.tags
}
module "eks_blueprints_kubernetes_addons" {
source = "../../modules/kubernetes-addons"
- eks_cluster_id = module.eks_blueprints.eks_cluster_id
- eks_cluster_endpoint = module.eks_blueprints.eks_cluster_endpoint
- eks_oidc_provider = module.eks_blueprints.oidc_provider
- eks_cluster_version = module.eks_blueprints.eks_cluster_version
+ eks_cluster_id = module.eks.cluster_id
+ eks_cluster_endpoint = module.eks.cluster_endpoint
+ eks_oidc_provider = module.eks.oidc_provider
+ eks_cluster_version = module.eks.cluster_version
# Wait on the `kube-system` profile before provisioning addons
- data_plane_wait_arn = module.eks_blueprints.fargate_profiles["kube_system"].eks_fargate_profile_arn
+ data_plane_wait_arn = module.eks.fargate_profiles["kube_system"].fargate_profile_arn
enable_amazon_eks_vpc_cni = true
amazon_eks_vpc_cni_config = {
@@ -113,7 +132,7 @@ module "eks_blueprints_kubernetes_addons" {
self_managed_coredns_helm_config = {
# Sets the correct annotations to ensure the Fargate provisioner is used and not the EC2 provisioner
compute_type = "fargate"
- kubernetes_version = module.eks_blueprints.eks_cluster_version
+ kubernetes_version = module.eks.cluster_version
}
# Sample application
@@ -121,6 +140,38 @@ module "eks_blueprints_kubernetes_addons" {
# Enable Fargate logging
enable_fargate_fluentbit = true
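+ # Ship Fargate pod logs to a dedicated CloudWatch log group, parsing each raw log line into structured fields first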
+ fargate_fluentbit_addon_config = {
+ output_conf = <<-EOF
+ [OUTPUT]
+ Name cloudwatch_logs
+ Match *
+ region ${local.region}
+ log_group_name /${module.eks.cluster_id}/fargate-fluentbit-logs
+ log_stream_prefix fargate-logs-
+ auto_create_group true
+ EOF
+
+ filters_conf = <<-EOF
+ [FILTER]
+ Name parser
+ Match *
+ Key_Name log
+ Parser regex
+ Preserve_Key True
+ Reserve_Data True
+ EOF
+
+ parsers_conf = <<-EOF
+ [PARSER]
+ Name regex
+ Format regex
+ Regex ^(?<time>[^ ]+) (?<stream>[^ ]+) (?<logtag>[^ ]+) (?<message>.+)$
+ Time_Key time
+ Time_Format %Y-%m-%dT%H:%M:%S.%L%z
+ Time_Keep On
+ Decode_Field_As json message
+ EOF
+ }
enable_aws_load_balancer_controller = true
aws_load_balancer_controller_helm_config = {
@@ -158,7 +209,6 @@ module "vpc" {
single_nat_gateway = true
enable_dns_hostnames = true
- # Manage so we can name
manage_default_network_acl = true
default_network_acl_tags = { Name = "${local.name}-default" }
manage_default_route_table = true
diff --git a/examples/fargate-serverless/outputs.tf b/examples/fargate-serverless/outputs.tf
index 55552d3138..b7decade8e 100644
--- a/examples/fargate-serverless/outputs.tf
+++ b/examples/fargate-serverless/outputs.tf
@@ -1,4 +1,4 @@
output "configure_kubectl" {
description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig"
- value = module.eks_blueprints.configure_kubectl
+ value = "aws eks --region ${local.region} update-kubeconfig --name ${module.eks.cluster_id}"
}
diff --git a/examples/fully-private-eks-cluster/eks/main.tf b/examples/fully-private-eks-cluster/eks/main.tf
index 62a0e49356..01623b6e0d 100644
--- a/examples/fully-private-eks-cluster/eks/main.tf
+++ b/examples/fully-private-eks-cluster/eks/main.tf
@@ -1,57 +1,69 @@
provider "aws" {
- region = var.region
- alias = "default"
+ region = local.region
}
provider "kubernetes" {
- host = module.eks_blueprints.eks_cluster_endpoint
- cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data)
-
- exec {
- api_version = "client.authentication.k8s.io/v1beta1"
- command = "aws"
- # This requires the awscli to be installed locally where Terraform is executed
- args = ["eks", "get-token", "--cluster-name", module.eks_blueprints.eks_cluster_id]
- }
+ host = module.eks.cluster_endpoint
+ cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
+ token = data.aws_eks_cluster_auth.this.token
+}
+
+data "aws_eks_cluster_auth" "this" {
+ name = module.eks.cluster_id
}
locals {
- name = basename(path.cwd)
- vpc_id = var.vpc_id
- private_subnet_ids = var.private_subnet_ids
+ name = basename(path.cwd)
+ region = "us-west-2"
+
tags = {
Blueprint = local.name
GithubRepo = "github.com/aws-ia/terraform-aws-eks-blueprints"
}
}
-module "eks_blueprints" {
- source = "../../.."
+module "eks" {
+ source = "terraform-aws-modules/eks/aws"
+ version = "~> 18.30"
- cluster_name = local.name
+ cluster_name = local.name
+ cluster_version = "1.23"
- # EKS Cluster VPC and Subnets
- vpc_id = local.vpc_id
- private_subnet_ids = local.private_subnet_ids
+ cluster_enabled_log_types = ["api", "audit", "authenticator", "controllerManager", "scheduler"]
+ cluster_endpoint_public_access = false
+ cluster_endpoint_private_access = true
- # Cluster Security Group
- cluster_security_group_additional_rules = var.cluster_security_group_additional_rules
+ vpc_id = var.vpc_id
+ subnet_ids = var.private_subnet_ids
- # EKS CONTROL PLANE VARIABLES
- cluster_version = var.cluster_version
+ node_security_group_additional_rules = {
+ ingress_nodes_ephemeral = {
+ description = "Node-to-node on ephemeral ports"
+ protocol = "tcp"
+ from_port = 1025
+ to_port = 65535
+ type = "ingress"
+ self = true
+ }
+ egress_all = {
+ description = "Allow all egress"
+ protocol = "-1"
+ from_port = 0
+ to_port = 0
+ type = "egress"
+ cidr_blocks = ["0.0.0.0/0"]
+ }
+ }
- cluster_endpoint_public_access = false
- cluster_endpoint_private_access = true
+ eks_managed_node_groups = {
+ default = {
+ instance_types = ["m5.large"]
- # EKS MANAGED NODE GROUPS
- managed_node_groups = {
- mg_5 = {
- node_group_name = "managed-ondemand"
- instance_types = ["m5.large"]
- subnet_ids = local.private_subnet_ids
+ min_size = 1
+ max_size = 3
+ desired_size = 1
}
}
- #Custom Tags.
tags = local.tags
}
diff --git a/examples/fully-private-eks-cluster/eks/outputs.tf b/examples/fully-private-eks-cluster/eks/outputs.tf
index 120cf1a8a7..6cc16a0b10 100644
--- a/examples/fully-private-eks-cluster/eks/outputs.tf
+++ b/examples/fully-private-eks-cluster/eks/outputs.tf
@@ -1,9 +1,9 @@
output "cluster_id" {
description = "The ID of the EKS Cluster"
- value = module.eks_blueprints.eks_cluster_id
+ value = module.eks.cluster_id
}
output "configure_kubectl" {
description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig"
- value = module.eks_blueprints.configure_kubectl
+ value = "aws eks --region ${local.region} update-kubeconfig --name ${module.eks.cluster_id}"
}
diff --git a/examples/fully-private-eks-cluster/eks/variables.tf b/examples/fully-private-eks-cluster/eks/variables.tf
index b87bedeabd..4c07255721 100644
--- a/examples/fully-private-eks-cluster/eks/variables.tf
+++ b/examples/fully-private-eks-cluster/eks/variables.tf
@@ -1,24 +1,3 @@
-variable "cluster_version" {
- description = "Kubernetes `.` version to use for the EKS cluster (i.e.: `1.23`)"
- type = string
- default = "1.23"
-}
-
-variable "region" {
- type = string
- description = "AWS region"
- default = "us-west-2"
-}
-
-#-------------------------------
-# EKS Cluster Security Groups
-#-------------------------------
-variable "cluster_security_group_additional_rules" {
- description = "List of additional security group rules to add to the cluster security group created. Set `source_node_security_group = true` inside rules to set the `node_security_group` as source"
- type = any
- default = {}
-}
-
variable "vpc_id" {
type = string
description = "VPC ID where the EKS cluster will be deployed to"
diff --git a/examples/fully-private-eks-cluster/vpc/main.tf b/examples/fully-private-eks-cluster/vpc/main.tf
index 841965cb58..0c2de3258d 100644
--- a/examples/fully-private-eks-cluster/vpc/main.tf
+++ b/examples/fully-private-eks-cluster/vpc/main.tf
@@ -1,6 +1,5 @@
provider "aws" {
region = var.region
- alias = "default"
}
data "aws_availability_zones" "available" {}
@@ -18,12 +17,12 @@ locals {
Blueprint = local.vpc_name
GithubRepo = "github.com/aws-ia/terraform-aws-eks-blueprints"
}
-
}
#---------------------------------------------------------------
# Supporting Resources
#---------------------------------------------------------------
+
module "cloud9_vpc" {
source = "terraform-aws-modules/vpc/aws"
version = "~> 3.0"
@@ -71,8 +70,7 @@ module "aws_vpc" {
default_security_group_tags = { Name = "${local.vpc_name}-default" }
private_subnet_tags = {
- "kubernetes.io/cluster/${local.vpc_name}" = "shared"
- "kubernetes.io/role/internal-elb" = 1
+ "kubernetes.io/role/internal-elb" = 1
}
tags = local.tags
@@ -97,7 +95,6 @@ module "aws_vpc" {
protocol = -1
cidr_blocks = "0.0.0.0/0"
}]
-
}
module "vpc_endpoints_sg" {
diff --git a/examples/gitops/argocd/main.tf b/examples/gitops/argocd/main.tf
index 1e1eab89b1..0484af7e11 100644
--- a/examples/gitops/argocd/main.tf
+++ b/examples/gitops/argocd/main.tf
@@ -3,21 +3,21 @@ provider "aws" {
}
provider "kubernetes" {
- host = module.eks_blueprints.eks_cluster_endpoint
- cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data)
+ host = module.eks.cluster_endpoint
+ cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
token = data.aws_eks_cluster_auth.this.token
}
provider "helm" {
kubernetes {
- host = module.eks_blueprints.eks_cluster_endpoint
- cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data)
+ host = module.eks.cluster_endpoint
+ cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
token = data.aws_eks_cluster_auth.this.token
}
}
data "aws_eks_cluster_auth" "this" {
- name = module.eks_blueprints.eks_cluster_id
+ name = module.eks.cluster_id
}
data "aws_availability_zones" "available" {}
@@ -39,24 +39,45 @@ locals {
# EKS Blueprints
#---------------------------------------------------------------
-module "eks_blueprints" {
- source = "../../.."
+module "eks" {
+ source = "terraform-aws-modules/eks/aws"
+ version = "~> 18.30"
cluster_name = local.name
cluster_version = "1.23"
- vpc_id = module.vpc.vpc_id
- private_subnet_ids = module.vpc.private_subnets
+ cluster_enabled_log_types = ["api", "audit", "authenticator", "controllerManager", "scheduler"]
+ cluster_endpoint_private_access = true
- managed_node_groups = {
- mg_5 = {
- node_group_name = "managed-ondemand"
- instance_types = ["m5.large"]
- subnet_ids = module.vpc.private_subnets
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_subnets
+
+ node_security_group_additional_rules = {
+ ingress_nodes_ephemeral = {
+ description = "Node-to-node on ephemeral ports"
+ protocol = "tcp"
+ from_port = 1025
+ to_port = 65535
+ type = "ingress"
+ self = true
+ }
+ egress_all = {
+ description = "Allow all egress"
+ protocol = "-1"
+ from_port = 0
+ to_port = 0
+ type = "egress"
+ cidr_blocks = ["0.0.0.0/0"]
+ }
+ }
+
+ eks_managed_node_groups = {
+ default = {
+ instance_types = ["m5.large"]
- desired_size = 5
- max_size = 10
min_size = 3
+ max_size = 10
+ desired_size = 5
}
}
@@ -66,10 +87,13 @@ module "eks_blueprints" {
module "eks_blueprints_kubernetes_addons" {
source = "../../../modules/kubernetes-addons"
- eks_cluster_id = module.eks_blueprints.eks_cluster_id
- eks_cluster_endpoint = module.eks_blueprints.eks_cluster_endpoint
- eks_oidc_provider = module.eks_blueprints.oidc_provider
- eks_cluster_version = module.eks_blueprints.eks_cluster_version
+ eks_cluster_id = module.eks.cluster_id
+ eks_cluster_endpoint = module.eks.cluster_endpoint
+ eks_oidc_provider = module.eks.oidc_provider
+ eks_cluster_version = module.eks.cluster_version
+
+ # Wait on the node group before provisioning addons
+ data_plane_wait_arn = module.eks.eks_managed_node_groups["default"].node_group_arn
enable_argocd = true
# This example shows how to set default ArgoCD Admin Password using SecretsManager with Helm Chart set_sensitive values.
@@ -100,9 +124,7 @@ module "eks_blueprints_kubernetes_addons" {
enable_amazon_eks_aws_ebs_csi_driver = true
enable_aws_for_fluentbit = true
enable_cert_manager = true
- enable_cluster_autoscaler = true
enable_karpenter = true
- enable_keda = true
enable_metrics_server = true
enable_prometheus = true
enable_traefik = true
@@ -159,7 +181,6 @@ module "vpc" {
single_nat_gateway = true
enable_dns_hostnames = true
- # Manage so we can name
manage_default_network_acl = true
default_network_acl_tags = { Name = "${local.name}-default" }
manage_default_route_table = true
@@ -168,13 +189,11 @@ module "vpc" {
default_security_group_tags = { Name = "${local.name}-default" }
public_subnet_tags = {
- "kubernetes.io/cluster/${local.name}" = "shared"
- "kubernetes.io/role/elb" = 1
+ "kubernetes.io/role/elb" = 1
}
private_subnet_tags = {
- "kubernetes.io/cluster/${local.name}" = "shared"
- "kubernetes.io/role/internal-elb" = 1
+ "kubernetes.io/role/internal-elb" = 1
}
tags = local.tags
diff --git a/examples/gitops/argocd/outputs.tf b/examples/gitops/argocd/outputs.tf
index 55552d3138..b7decade8e 100644
--- a/examples/gitops/argocd/outputs.tf
+++ b/examples/gitops/argocd/outputs.tf
@@ -1,4 +1,4 @@
output "configure_kubectl" {
description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig"
- value = module.eks_blueprints.configure_kubectl
+ value = "aws eks --region ${local.region} update-kubeconfig --name ${module.eks.cluster_id}"
}
diff --git a/examples/grafana-loki/main.tf b/examples/grafana-loki/main.tf
index f3c5ea8c48..b5583e88a0 100644
--- a/examples/grafana-loki/main.tf
+++ b/examples/grafana-loki/main.tf
@@ -3,21 +3,21 @@ provider "aws" {
}
provider "kubernetes" {
- host = module.eks_blueprints.eks_cluster_endpoint
- cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data)
+ host = module.eks.cluster_endpoint
+ cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
token = data.aws_eks_cluster_auth.this.token
}
provider "helm" {
kubernetes {
- host = module.eks_blueprints.eks_cluster_endpoint
- cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data)
+ host = module.eks.cluster_endpoint
+ cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
token = data.aws_eks_cluster_auth.this.token
}
}
data "aws_eks_cluster_auth" "this" {
- name = module.eks_blueprints.eks_cluster_id
+ name = module.eks.cluster_id
}
data "aws_availability_zones" "available" {}
@@ -39,20 +39,45 @@ locals {
# EKS Blueprints
#---------------------------------------------------------------
-module "eks_blueprints" {
- source = "../.."
+module "eks" {
+ source = "terraform-aws-modules/eks/aws"
+ version = "~> 18.30"
cluster_name = local.name
cluster_version = "1.23"
- vpc_id = module.vpc.vpc_id
- private_subnet_ids = module.vpc.private_subnets
+ cluster_enabled_log_types = ["api", "audit", "authenticator", "controllerManager", "scheduler"]
+ cluster_endpoint_private_access = true
- managed_node_groups = {
- velero = {
- node_group_name = "velero"
- launch_template_os = "amazonlinux2eks"
- subnet_ids = module.vpc.private_subnets
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_subnets
+
+ node_security_group_additional_rules = {
+ ingress_nodes_ephemeral = {
+ description = "Node-to-node on ephemeral ports"
+ protocol = "tcp"
+ from_port = 1025
+ to_port = 65535
+ type = "ingress"
+ self = true
+ }
+ egress_all = {
+ description = "Allow all egress"
+ protocol = "-1"
+ from_port = 0
+ to_port = 0
+ type = "egress"
+ cidr_blocks = ["0.0.0.0/0"]
+ }
+ }
+
+ eks_managed_node_groups = {
+ default = {
+ instance_types = ["m5.large"]
+
+ min_size = 1
+ max_size = 3
+ desired_size = 1
}
}
@@ -62,10 +87,13 @@ module "eks_blueprints" {
module "eks_blueprints_kubernetes_addons" {
source = "../../modules/kubernetes-addons"
- eks_cluster_id = module.eks_blueprints.eks_cluster_id
- eks_cluster_endpoint = module.eks_blueprints.eks_cluster_endpoint
- eks_oidc_provider = module.eks_blueprints.oidc_provider
- eks_cluster_version = module.eks_blueprints.eks_cluster_version
+ eks_cluster_id = module.eks.cluster_id
+ eks_cluster_endpoint = module.eks.cluster_endpoint
+ eks_oidc_provider = module.eks.oidc_provider
+ eks_cluster_version = module.eks.cluster_version
+
+ # Wait on the node group before provisioning addons
+ data_plane_wait_arn = module.eks.eks_managed_node_groups["default"].node_group_arn
enable_promtail = true
enable_calico = true
@@ -92,7 +120,6 @@ module "vpc" {
single_nat_gateway = true
enable_dns_hostnames = true
- # Manage so we can name
manage_default_network_acl = true
default_network_acl_tags = { Name = "${local.name}-default" }
manage_default_route_table = true
@@ -101,13 +128,11 @@ module "vpc" {
default_security_group_tags = { Name = "${local.name}-default" }
public_subnet_tags = {
- "kubernetes.io/cluster/${local.name}" = "shared"
- "kubernetes.io/role/elb" = 1
+ "kubernetes.io/role/elb" = 1
}
private_subnet_tags = {
- "kubernetes.io/cluster/${local.name}" = "shared"
- "kubernetes.io/role/internal-elb" = 1
+ "kubernetes.io/role/internal-elb" = 1
}
tags = local.tags
diff --git a/examples/grafana-loki/outputs.tf b/examples/grafana-loki/outputs.tf
index 55552d3138..b7decade8e 100644
--- a/examples/grafana-loki/outputs.tf
+++ b/examples/grafana-loki/outputs.tf
@@ -1,4 +1,4 @@
output "configure_kubectl" {
description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig"
- value = module.eks_blueprints.configure_kubectl
+ value = "aws eks --region ${local.region} update-kubeconfig --name ${module.eks.cluster_id}"
}
diff --git a/examples/ipv4-prefix-delegation/main.tf b/examples/ipv4-prefix-delegation/main.tf
index c670ce1840..3f010fca80 100644
--- a/examples/ipv4-prefix-delegation/main.tf
+++ b/examples/ipv4-prefix-delegation/main.tf
@@ -3,32 +3,29 @@ provider "aws" {
}
provider "kubernetes" {
- host = module.eks_blueprints.eks_cluster_endpoint
- cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data)
+ host = module.eks.cluster_endpoint
+ cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
token = data.aws_eks_cluster_auth.this.token
}
provider "helm" {
kubernetes {
- host = module.eks_blueprints.eks_cluster_endpoint
- cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data)
+ host = module.eks.cluster_endpoint
+ cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
token = data.aws_eks_cluster_auth.this.token
}
}
data "aws_eks_cluster_auth" "this" {
- name = module.eks_blueprints.eks_cluster_id
+ name = module.eks.cluster_id
}
-data "aws_caller_identity" "current" {}
data "aws_availability_zones" "available" {}
locals {
name = basename(path.cwd)
region = "us-west-2"
- cluster_version = "1.23"
-
vpc_cidr = "10.0.0.0/16"
azs = slice(data.aws_availability_zones.available.names, 0, 3)
@@ -42,43 +39,74 @@ locals {
# EKS Blueprints
#---------------------------------------------------------------
-module "eks_blueprints" {
- source = "../.."
+module "eks" {
+ source = "terraform-aws-modules/eks/aws"
+ version = "~> 18.30"
cluster_name = local.name
- cluster_version = local.cluster_version
+ cluster_version = "1.23"
- vpc_id = module.vpc.vpc_id
- private_subnet_ids = module.vpc.private_subnets
+ cluster_enabled_log_types = ["api", "audit", "authenticator", "controllerManager", "scheduler"]
+ cluster_endpoint_private_access = true
- # https://github.com/aws-ia/terraform-aws-eks-blueprints/issues/485
- # https://github.com/aws-ia/terraform-aws-eks-blueprints/issues/494
- cluster_kms_key_additional_admin_arns = [data.aws_caller_identity.current.arn]
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_subnets
- managed_node_groups = {
- custom_ami = {
- node_group_name = "custom-ami" # Max 40 characters for node group name
+ node_security_group_additional_rules = {
+ ingress_nodes_ephemeral = {
+ description = "Node-to-node on ephemeral ports"
+ protocol = "tcp"
+ from_port = 1025
+ to_port = 65535
+ type = "ingress"
+ self = true
+ }
+ egress_all = {
+ description = "Allow all egress"
+ protocol = "-1"
+ from_port = 0
+ to_port = 0
+ type = "egress"
+ cidr_blocks = ["0.0.0.0/0"]
+ }
+ }
+
+ eks_managed_node_groups = {
+ default = {
+ instance_types = ["m5.large"]
min_size = 1
- max_size = 1
+ max_size = 3
desired_size = 1
- custom_ami_id = data.aws_ssm_parameter.eks_optimized_ami.value
- instance_types = ["m5.xlarge"]
-
- create_launch_template = true
- launch_template_os = "amazonlinux2eks"
-
# https://docs.aws.amazon.com/eks/latest/userguide/choosing-instance-type.html#determine-max-pods
- pre_userdata = <<-EOT
- MAX_PODS=$(/etc/eks/max-pods-calculator.sh --instance-type-from-imds --cni-version ${trimprefix(data.aws_eks_addon_version.latest["vpc-cni"].version, "v")} --cni-prefix-delegation-enabled)
- EOT
-
# These settings opt out of the default behavior and use the maximum number of pods, with a cap of 110 due to
# Kubernetes guidance https://kubernetes.io/docs/setup/best-practices/cluster-large/
# See more info here https://docs.aws.amazon.com/eks/latest/userguide/cni-increase-ip-addresses.html
- kubelet_extra_args = "--max-pods=$${MAX_PODS}"
- bootstrap_extra_args = "--use-max-pods false"
+ # See issue https://github.com/awslabs/amazon-eks-ami/issues/844
+ pre_bootstrap_user_data = <<-EOT
+ #!/bin/bash
+ set -ex
+
+ MAX_PODS=$(/etc/eks/max-pods-calculator.sh \
+ --instance-type-from-imds \
+ --cni-version ${trimprefix(data.aws_eks_addon_version.latest["vpc-cni"].version, "v")} \
+ --cni-prefix-delegation-enabled \
+ )
+
+ cat <<-EOF > /etc/profile.d/bootstrap.sh
+ export CONTAINER_RUNTIME="containerd"
+ export USE_MAX_PODS=false
+ export KUBELET_EXTRA_ARGS="--max-pods=$${MAX_PODS}"
+ EOF
+
+ # Source extra environment variables in bootstrap script
+ sed -i '/^set -o errexit/a\\nsource /etc/profile.d/bootstrap.sh' /etc/eks/bootstrap.sh
+ EOT
+
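+ # Replace at most a third of the nodes at a time during node group updates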
+ update_config = {
+ max_unavailable_percentage = 33
+ }
}
}
@@ -88,10 +116,13 @@ module "eks_blueprints" {
module "eks_blueprints_kubernetes_addons" {
source = "../../modules/kubernetes-addons"
- eks_cluster_id = module.eks_blueprints.eks_cluster_id
- eks_cluster_endpoint = module.eks_blueprints.eks_cluster_endpoint
- eks_oidc_provider = module.eks_blueprints.oidc_provider
- eks_cluster_version = module.eks_blueprints.eks_cluster_version
+ eks_cluster_id = module.eks.cluster_id
+ eks_cluster_endpoint = module.eks.cluster_endpoint
+ eks_oidc_provider = module.eks.oidc_provider
+ eks_cluster_version = module.eks.cluster_version
+
+ # Wait on the node group before provisioning addons
+ data_plane_wait_arn = module.eks.eks_managed_node_groups["default"].node_group_arn
enable_amazon_eks_vpc_cni = true
amazon_eks_vpc_cni_config = {
@@ -113,7 +144,7 @@ data "aws_eks_addon_version" "latest" {
for_each = toset(["vpc-cni"])
addon_name = each.value
- kubernetes_version = module.eks_blueprints.eks_cluster_version
+ kubernetes_version = module.eks.cluster_version
most_recent = true
}
@@ -127,16 +158,16 @@ locals {
kind = "Config"
current-context = "terraform"
clusters = [{
- name = module.eks_blueprints.eks_cluster_id
+ name = module.eks.cluster_id
cluster = {
- certificate-authority-data = module.eks_blueprints.eks_cluster_certificate_authority_data
- server = module.eks_blueprints.eks_cluster_endpoint
+ certificate-authority-data = module.eks.cluster_certificate_authority_data
+ server = module.eks.cluster_endpoint
}
}]
contexts = [{
name = "terraform"
context = {
- cluster = module.eks_blueprints.eks_cluster_id
+ cluster = module.eks.cluster_id
user = "terraform"
}
}]
@@ -170,10 +201,6 @@ resource "null_resource" "kubectl_set_env" {
# Supporting Resources
#---------------------------------------------------------------
-data "aws_ssm_parameter" "eks_optimized_ami" {
- name = "/aws/service/eks/optimized-ami/${local.cluster_version}/amazon-linux-2/recommended/image_id"
-}
-
module "vpc" {
source = "terraform-aws-modules/vpc/aws"
version = "~> 3.0"
@@ -189,7 +216,6 @@ module "vpc" {
single_nat_gateway = true
enable_dns_hostnames = true
- # Manage so we can name
manage_default_network_acl = true
default_network_acl_tags = { Name = "${local.name}-default" }
manage_default_route_table = true
@@ -198,13 +224,11 @@ module "vpc" {
default_security_group_tags = { Name = "${local.name}-default" }
public_subnet_tags = {
- "kubernetes.io/cluster/${local.name}" = "shared"
- "kubernetes.io/role/elb" = 1
+ "kubernetes.io/role/elb" = 1
}
private_subnet_tags = {
- "kubernetes.io/cluster/${local.name}" = "shared"
- "kubernetes.io/role/internal-elb" = 1
+ "kubernetes.io/role/internal-elb" = 1
}
tags = local.tags
diff --git a/examples/ipv4-prefix-delegation/outputs.tf b/examples/ipv4-prefix-delegation/outputs.tf
index 55552d3138..b7decade8e 100644
--- a/examples/ipv4-prefix-delegation/outputs.tf
+++ b/examples/ipv4-prefix-delegation/outputs.tf
@@ -1,4 +1,4 @@
output "configure_kubectl" {
description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig"
- value = module.eks_blueprints.configure_kubectl
+ value = "aws eks --region ${local.region} update-kubeconfig --name ${module.eks.cluster_id}"
}
diff --git a/examples/ipv6-eks-cluster/main.tf b/examples/ipv6-eks-cluster/main.tf
index f5725556b8..aef65a473b 100644
--- a/examples/ipv6-eks-cluster/main.tf
+++ b/examples/ipv6-eks-cluster/main.tf
@@ -3,21 +3,21 @@ provider "aws" {
}
provider "kubernetes" {
- host = module.eks_blueprints.eks_cluster_endpoint
- cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data)
+ host = module.eks.cluster_endpoint
+ cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
token = data.aws_eks_cluster_auth.this.token
}
provider "helm" {
kubernetes {
- host = module.eks_blueprints.eks_cluster_endpoint
- cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data)
+ host = module.eks.cluster_endpoint
+ cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
token = data.aws_eks_cluster_auth.this.token
}
}
data "aws_eks_cluster_auth" "this" {
- name = module.eks_blueprints.eks_cluster_id
+ name = module.eks.cluster_id
}
data "aws_availability_zones" "available" {}
@@ -39,24 +39,49 @@ locals {
# EKS Blueprints
#---------------------------------------------------------------
-module "eks_blueprints" {
- source = "../.."
+module "eks" {
+ source = "terraform-aws-modules/eks/aws"
+ version = "~> 18.30"
- cluster_name = local.name
- cluster_version = "1.23"
- cluster_ip_family = "ipv6"
+ cluster_name = local.name
+ cluster_version = "1.23"
- vpc_id = module.vpc.vpc_id
- private_subnet_ids = module.vpc.private_subnets
+ cluster_enabled_log_types = ["api", "audit", "authenticator", "controllerManager", "scheduler"]
+ cluster_endpoint_private_access = true
- managed_node_groups = {
- mg_5 = {
- node_group_name = "mng-ondemand"
- instance_types = ["m5.large"]
- min_size = 2
- desired_size = 2
- max_size = 10
- subnet_ids = module.vpc.private_subnets
+ # IPV6
+ cluster_ip_family = "ipv6"
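+ # Create the AmazonEKS_CNI_IPv6_Policy the VPC CNI needs to manage IPv6 addresses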
+ create_cni_ipv6_iam_policy = true
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_subnets
+
+ node_security_group_additional_rules = {
+ ingress_nodes_ephemeral = {
+ description = "Node-to-node on ephemeral ports"
+ protocol = "tcp"
+ from_port = 1025
+ to_port = 65535
+ type = "ingress"
+ self = true
+ }
+ egress_all = {
+ description = "Allow all egress"
+ protocol = "-1"
+ from_port = 0
+ to_port = 0
+ type = "egress"
+ cidr_blocks = ["0.0.0.0/0"]
+ }
+ }
+
+ eks_managed_node_groups = {
+ default = {
+ instance_types = ["m5.large"]
+
+ min_size = 1
+ max_size = 3
+ desired_size = 1
}
}
@@ -66,12 +91,15 @@ module "eks_blueprints" {
module "eks_blueprints_kubernetes_addons" {
source = "../../modules/kubernetes-addons"
- eks_cluster_id = module.eks_blueprints.eks_cluster_id
- eks_cluster_endpoint = module.eks_blueprints.eks_cluster_endpoint
- eks_oidc_provider = module.eks_blueprints.oidc_provider
- eks_cluster_version = module.eks_blueprints.eks_cluster_version
+ eks_cluster_id = module.eks.cluster_id
+ eks_cluster_endpoint = module.eks.cluster_endpoint
+ eks_oidc_provider = module.eks.oidc_provider
+ eks_cluster_version = module.eks.cluster_version
+
+ # Wait on the node group before provisioning addons
+ data_plane_wait_arn = module.eks.eks_managed_node_groups["default"].node_group_arn
- enable_ipv6 = true # Enable Ipv6 network. Attaches new VPC CNI policy to the IRSA role
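+ # Attaches the IPv6 CNI policy to the VPC CNI IRSA role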
+ enable_ipv6 = true
# EKS Managed Add-ons
enable_amazon_eks_coredns = true
@@ -110,7 +138,6 @@ module "vpc" {
single_nat_gateway = true
enable_dns_hostnames = true
- # Manage so we can name
manage_default_network_acl = true
default_network_acl_tags = { Name = "${local.name}-default" }
manage_default_route_table = true
@@ -119,13 +146,11 @@ module "vpc" {
default_security_group_tags = { Name = "${local.name}-default" }
public_subnet_tags = {
- "kubernetes.io/cluster/${local.name}" = "shared"
- "kubernetes.io/role/elb" = 1
+ "kubernetes.io/role/elb" = 1
}
private_subnet_tags = {
- "kubernetes.io/cluster/${local.name}" = "shared"
- "kubernetes.io/role/internal-elb" = 1
+ "kubernetes.io/role/internal-elb" = 1
}
tags = local.tags
diff --git a/examples/ipv6-eks-cluster/outputs.tf b/examples/ipv6-eks-cluster/outputs.tf
index 55552d3138..b7decade8e 100644
--- a/examples/ipv6-eks-cluster/outputs.tf
+++ b/examples/ipv6-eks-cluster/outputs.tf
@@ -1,4 +1,4 @@
output "configure_kubectl" {
description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig"
- value = module.eks_blueprints.configure_kubectl
+ value = "aws eks --region ${local.region} update-kubeconfig --name ${module.eks.cluster_id}"
}
diff --git a/examples/karpenter/main.tf b/examples/karpenter/main.tf
index ac9a78b665..2aad1cfa4f 100644
--- a/examples/karpenter/main.tf
+++ b/examples/karpenter/main.tf
@@ -3,29 +3,29 @@ provider "aws" {
}
provider "kubernetes" {
- host = module.eks_blueprints.eks_cluster_endpoint
- cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data)
+ host = module.eks.cluster_endpoint
+ cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
token = data.aws_eks_cluster_auth.this.token
}
provider "helm" {
kubernetes {
- host = module.eks_blueprints.eks_cluster_endpoint
- cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data)
+ host = module.eks.cluster_endpoint
+ cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
token = data.aws_eks_cluster_auth.this.token
}
}
provider "kubectl" {
apply_retry_count = 10
- host = module.eks_blueprints.eks_cluster_endpoint
- cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data)
+ host = module.eks.cluster_endpoint
+ cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
load_config_file = false
token = data.aws_eks_cluster_auth.this.token
}
data "aws_eks_cluster_auth" "this" {
- name = module.eks_blueprints.eks_cluster_id
+ name = module.eks.cluster_id
}
data "aws_availability_zones" "available" {}
@@ -34,8 +34,6 @@ locals {
name = basename(path.cwd)
region = "us-west-2"
- node_group_name = "managed-ondemand"
-
vpc_cidr = "10.0.0.0/16"
azs = slice(data.aws_availability_zones.available.names, 0, 3)
@@ -49,52 +47,43 @@ locals {
# EKS Blueprints
#---------------------------------------------------------------
-module "eks_blueprints" {
- source = "../.."
+module "eks" {
+ source = "terraform-aws-modules/eks/aws"
+ version = "~> 18.30"
cluster_name = local.name
cluster_version = "1.23"
- vpc_id = module.vpc.vpc_id
- private_subnet_ids = module.vpc.private_subnets
+ cluster_enabled_log_types = ["api", "audit", "authenticator", "controllerManager", "scheduler"]
+ cluster_endpoint_private_access = true
- #----------------------------------------------------------------------------------------------------------#
- # Security groups used in this module created by the upstream modules terraform-aws-eks (https://github.com/terraform-aws-modules/terraform-aws-eks).
- # Upstream module implemented Security groups based on the best practices doc https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html.
- # So, by default the security groups are restrictive. Users needs to enable rules for specific ports required for App requirement or Add-ons
- # See the notes below for each rule used in these examples
- #----------------------------------------------------------------------------------------------------------#
- node_security_group_additional_rules = {
- # Extend node-to-node security group rules. Recommended and required for the Add-ons
- ingress_self_all = {
- description = "Node to node all ports/protocols"
- protocol = "-1"
- from_port = 0
- to_port = 0
- type = "ingress"
- self = true
- }
- # Recommended outbound traffic for Node groups
- egress_all = {
- description = "Node all egress"
- protocol = "-1"
- from_port = 0
- to_port = 0
- type = "egress"
- cidr_blocks = ["0.0.0.0/0"]
- ipv6_cidr_blocks = ["::/0"]
- }
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_subnets
- # Allows Control Plane Nodes to talk to Worker nodes on Karpenter ports.
- # This can be extended further to specific port based on the requirement for others Add-on e.g., metrics-server 4443, spark-operator 8080, etc.
- # Change this according to your security requirements if needed
+ node_security_group_additional_rules = {
ingress_nodes_karpenter_port = {
- description = "Cluster API to Nodegroup for Karpenter"
+ description = "Cluster API to Node group for Karpenter webhook"
protocol = "tcp"
from_port = 8443
to_port = 8443
type = "ingress"
source_cluster_security_group = true
}
+ ingress_nodes_ephemeral = {
+ description = "Node-to-node on ephemeral ports"
+ protocol = "tcp"
+ from_port = 1025
+ to_port = 65535
+ type = "ingress"
+ self = true
+ }
+ egress_all = {
+ description = "Allow all egress"
+ protocol = "-1"
+ from_port = 0
+ to_port = 0
+ type = "egress"
+ cidr_blocks = ["0.0.0.0/0"]
+ }
}
@@ -103,26 +92,13 @@ module "eks_blueprints" {
"karpenter.sh/discovery/${local.name}" = local.name
}
- # EKS MANAGED NODE GROUPS
- # We recommend to have a MNG to place your critical workloads and add-ons
- # Then rely on Karpenter to scale your workloads
- # You can also make uses on nodeSelector and Taints/tolerations to spread workloads on MNG or Karpenter provisioners
- managed_node_groups = {
- mg_5 = {
- node_group_name = "managed-ondemand"
- instance_types = ["m5.large"]
-
- subnet_ids = module.vpc.private_subnets
- max_size = 2
- desired_size = 1
- min_size = 1
- update_config = [{
- max_unavailable_percentage = 30
- }]
+ eks_managed_node_groups = {
+ default = {
+ instance_types = ["m5.large"]
- # Launch template configuration
- create_launch_template = true # false will use the default launch template
- launch_template_os = "amazonlinux2eks" # amazonlinux2eks or bottlerocket
+ min_size = 1
+ max_size = 3
+ desired_size = 1
}
}
@@ -132,71 +108,53 @@ module "eks_blueprints" {
module "eks_blueprints_kubernetes_addons" {
source = "../../modules/kubernetes-addons"
- eks_cluster_id = module.eks_blueprints.eks_cluster_id
- eks_cluster_endpoint = module.eks_blueprints.eks_cluster_endpoint
- eks_oidc_provider = module.eks_blueprints.oidc_provider
- eks_cluster_version = module.eks_blueprints.eks_cluster_version
+ eks_cluster_id = module.eks.cluster_id
+ eks_cluster_endpoint = module.eks.cluster_endpoint
+ eks_oidc_provider = module.eks.oidc_provider
+ eks_cluster_version = module.eks.cluster_version
+
+ # Wait on the node group before provisioning addons
+ data_plane_wait_arn = module.eks.eks_managed_node_groups["default"].node_group_arn
enable_amazon_eks_aws_ebs_csi_driver = true
- enable_karpenter = true
+ enable_karpenter = true
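+ # Point the Karpenter chart at this cluster and at the instance profile its nodes should launch with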
+ karpenter_helm_config = {
+ set = [
+ {
+ name = "clusterName"
+ value = module.eks.cluster_id
+ },
+ {
+ name = "clusterEndpoint"
+ value = module.eks.cluster_endpoint
+ },
+ {
+ name = "aws.defaultInstanceProfile"
+ value = aws_iam_instance_profile.karpenter.name
+ }
+ ]
+ }
enable_aws_node_termination_handler = true
enable_kubecost = true
-
- enable_datadog_operator = true
+ enable_datadog_operator = true
tags = local.tags
}
-# Creates Launch templates for Karpenter
-# Launch template outputs will be used in Karpenter Provisioners yaml files. Checkout this examples/karpenter/provisioners/default_provisioner_with_launch_templates.yaml
-module "karpenter_launch_templates" {
- source = "../../modules/launch-templates"
-
- eks_cluster_id = module.eks_blueprints.eks_cluster_id
-
- launch_template_config = {
- linux = {
- ami = data.aws_ami.eks.id
- launch_template_prefix = "karpenter"
- iam_instance_profile = module.eks_blueprints.managed_node_group_iam_instance_profile_id[0]
- vpc_security_group_ids = [module.eks_blueprints.worker_node_security_group_id]
- block_device_mappings = [
- {
- device_name = "/dev/xvda"
- volume_type = "gp3"
- volume_size = 200
- }
- ]
- }
+resource "aws_iam_instance_profile" "karpenter" {
+ name = "KarpenterNodeInstanceProfile-${local.name}"
+ role = module.eks.eks_managed_node_groups["default"].iam_role_name
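+ # Karpenter-provisioned nodes launch with this profile, reusing the default managed node group's IAM role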
- bottlerocket = {
- ami = data.aws_ami.bottlerocket.id
- launch_template_os = "bottlerocket"
- launch_template_prefix = "bottle"
- iam_instance_profile = module.eks_blueprints.managed_node_group_iam_instance_profile_id[0]
- vpc_security_group_ids = [module.eks_blueprints.worker_node_security_group_id]
- block_device_mappings = [
- {
- device_name = "/dev/xvda"
- volume_type = "gp3"
- volume_size = 200
- }
- ]
- }
- }
-
- tags = merge(local.tags, { Name = "karpenter" })
+ tags = local.tags
}
-# Deploying default provisioner and default-lt (using launch template) for Karpenter autoscaler
data "kubectl_path_documents" "karpenter_provisioners" {
- pattern = "${path.module}/provisioners/default_provisioner*.yaml" # without launch template
+ pattern = "${path.module}/provisioners/*_provisioner.yaml"
vars = {
- azs = join(",", local.azs)
- iam-instance-profile-id = "${local.name}-${local.node_group_name}"
- eks-cluster-id = local.name
- eks-vpc_name = local.name
+ azs = join(",", local.azs)
+ eks-cluster-id = local.name
+ eks-vpc_name = local.name
}
}
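The documents rendered by this data source are applied with the kubectl provider; a minimal sketch of the common consumption pattern (the resource name and depends_on below are illustrative assumptions, not part of this diff):

    resource "kubectl_manifest" "karpenter_provisioner" {
      # Apply every rendered provisioner document
      for_each  = toset(data.kubectl_path_documents.karpenter_provisioners.documents)
      yaml_body = each.value

      depends_on = [module.eks_blueprints_kubernetes_addons]
    }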
@@ -234,7 +192,7 @@ resource "kubectl_manifest" "datadog_agent" {
name: datadog
namespace: datadog-operator
spec:
- clusterName: ${module.eks_blueprints.eks_cluster_id}
+ clusterName: ${module.eks.cluster_id}
credentials:
apiSecret:
secretName: ${kubernetes_secret_v1.datadog_api_key.metadata[0].name}
@@ -264,7 +222,6 @@ module "vpc" {
single_nat_gateway = true
enable_dns_hostnames = true
- # Manage so we can name
manage_default_network_acl = true
default_network_acl_tags = { Name = "${local.name}-default" }
manage_default_route_table = true
@@ -273,34 +230,12 @@ module "vpc" {
default_security_group_tags = { Name = "${local.name}-default" }
public_subnet_tags = {
- "kubernetes.io/cluster/${local.name}" = "shared"
- "kubernetes.io/role/elb" = 1
+ "kubernetes.io/role/elb" = 1
}
private_subnet_tags = {
- "kubernetes.io/cluster/${local.name}" = "shared"
- "kubernetes.io/role/internal-elb" = 1
+ "kubernetes.io/role/internal-elb" = 1
}
tags = local.tags
}
-
-data "aws_ami" "eks" {
- owners = ["amazon"]
- most_recent = true
-
- filter {
- name = "name"
- values = ["amazon-eks-node-${module.eks_blueprints.eks_cluster_version}-*"]
- }
-}
-
-data "aws_ami" "bottlerocket" {
- owners = ["amazon"]
- most_recent = true
-
- filter {
- name = "name"
- values = ["bottlerocket-aws-k8s-${module.eks_blueprints.eks_cluster_version}-x86_64-*"]
- }
-}
diff --git a/examples/karpenter/outputs.tf b/examples/karpenter/outputs.tf
index 55552d3138..b7decade8e 100644
--- a/examples/karpenter/outputs.tf
+++ b/examples/karpenter/outputs.tf
@@ -1,4 +1,4 @@
output "configure_kubectl" {
description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig"
- value = module.eks_blueprints.configure_kubectl
+ value = "aws eks --region ${local.region} update-kubeconfig --name ${module.eks.cluster_id}"
}
diff --git a/examples/karpenter/provisioners/default_provisioner.yaml b/examples/karpenter/provisioners/default_provisioner.yaml
index 39dde77cf2..f9349b6804 100644
--- a/examples/karpenter/provisioners/default_provisioner.yaml
+++ b/examples/karpenter/provisioners/default_provisioner.yaml
@@ -10,11 +10,13 @@ spec:
- key: "karpenter.sh/capacity-type"
operator: In
values: ["spot", "on-demand"]
+ - key: "kubernetes.io/arch"
+ operator: In
+ values: ["arm64", "amd64"]
limits:
resources:
cpu: 1000
provider:
- instanceProfile: ${iam-instance-profile-id}
subnetSelector:
Name: "${eks-vpc_name}-private*"
securityGroupSelector:
diff --git a/examples/karpenter/provisioners/default_provisioner_with_launch_templates.yaml b/examples/karpenter/provisioners/default_provisioner_with_launch_templates.yaml
deleted file mode 100644
index eb0db22c11..0000000000
--- a/examples/karpenter/provisioners/default_provisioner_with_launch_templates.yaml
+++ /dev/null
@@ -1,33 +0,0 @@
-apiVersion: karpenter.sh/v1alpha5
-kind: Provisioner
-metadata:
- name: default-lt
-spec:
- requirements:
- - key: "topology.kubernetes.io/zone"
- operator: In
- values: [${azs}] #Update the correct region and zones
- - key: "karpenter.sh/capacity-type"
- operator: In
- values: ["spot", "on-demand"]
- - key: "node.kubernetes.io/instance-type" #If not included, all instance types are considered
- operator: In
- values: ["m5.2xlarge", "m5.4xlarge"]
- - key: "kubernetes.io/arch" #If not included, all architectures are considered
- operator: In
- values: ["arm64", "amd64"]
- limits:
- resources:
- cpu: 1000
- provider:
- launchTemplate: "karpenter-${eks-cluster-id}" # Used by Karpenter Nodes
- subnetSelector:
- Name: "${eks-vpc_name}-private*"
- labels:
- type: karpenter
- provisioner: default-lt
- taints:
- - key: default-lt
- value: 'true'
- effect: NoSchedule
- ttlSecondsAfterEmpty: 120
diff --git a/examples/karpenter/provisioners/gpu_provisioner.yaml b/examples/karpenter/provisioners/gpu_provisioner.yaml
index 82d437b923..0e3cd51dde 100644
--- a/examples/karpenter/provisioners/gpu_provisioner.yaml
+++ b/examples/karpenter/provisioners/gpu_provisioner.yaml
@@ -12,7 +12,10 @@ spec:
value: true
effect: "NoSchedule"
provider:
- instanceProfile: ${iam-instance-profile-id}
+ subnetSelector:
+ Name: "${eks-vpc_name}-private*"
+ securityGroupSelector:
+ karpenter.sh/discovery/${eks-cluster-id}: '${eks-cluster-id}'
labels:
type: karpenter
provisioner: gpu
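The `karpenter.sh/discovery/...` selector above only matches security groups carrying that tag; a minimal sketch of the input this assumes on the `module "eks"` block, using the upstream module's `node_security_group_tags` argument (the actual tagging for this example may sit in a hunk not shown here):

    node_security_group_tags = {
      # Tag matched by the Karpenter securityGroupSelector in the provisioner specs
      "karpenter.sh/discovery/${local.name}" = local.name
    }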
diff --git a/examples/karpenter/provisioners/sample_deployment_lt.yaml b/examples/karpenter/provisioners/sample_deployment_lt.yaml
deleted file mode 100644
index 1d80ccf9d6..0000000000
--- a/examples/karpenter/provisioners/sample_deployment_lt.yaml
+++ /dev/null
@@ -1,45 +0,0 @@
-#Same deployment app but target Karpenter createed with the launch-template provisioner
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- name: ecsdemo-nodejs-lt
- labels:
- app: ecsdemo-nodejs-lt
- namespace: default
-spec:
- replicas: 3
- selector:
- matchLabels:
- app: ecsdemo-nodejs-lt
- strategy:
- rollingUpdate:
- maxSurge: 25%
- maxUnavailable: 25%
- type: RollingUpdate
- template:
- metadata:
- labels:
- app: ecsdemo-nodejs-lt
- spec:
- containers:
- - image: brentley/ecsdemo-nodejs:latest
- imagePullPolicy: Always
- name: ecsdemo-nodejs-lt
- ports:
- - containerPort: 3000
- protocol: TCP
- resources:
- requests:
- memory: '512Mi'
- cpu: '1024m'
- limits:
- memory: '512Mi'
- cpu: '1024m'
- #Deploy this app on the Karpenter nodes created by the default-lt provisioner
- nodeSelector:
- type: 'karpenter'
- provisioner: 'default-lt'
- tolerations:
- - key: 'default-lt'
- operator: 'Exists'
- effect: 'NoSchedule'
diff --git a/examples/managed-workflow-apache-airflow/main.tf b/examples/managed-workflow-apache-airflow/main.tf
index 8d7fe33061..e1b9fb40ea 100644
--- a/examples/managed-workflow-apache-airflow/main.tf
+++ b/examples/managed-workflow-apache-airflow/main.tf
@@ -3,25 +3,24 @@ provider "aws" {
}
provider "kubernetes" {
- host = module.eks_blueprints.eks_cluster_endpoint
- cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data)
+ host = module.eks.cluster_endpoint
+ cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
token = data.aws_eks_cluster_auth.this.token
}
provider "helm" {
kubernetes {
- host = module.eks_blueprints.eks_cluster_endpoint
- cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data)
+ host = module.eks.cluster_endpoint
+ cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
token = data.aws_eks_cluster_auth.this.token
}
}
data "aws_eks_cluster_auth" "this" {
- name = module.eks_blueprints.eks_cluster_id
+ name = module.eks.cluster_id
}
data "aws_availability_zones" "available" {}
-data "aws_caller_identity" "current" {}
locals {
name = "mwaa"
@@ -42,21 +41,50 @@ locals {
# EKS Blueprints
#---------------------------------------------------------------
-module "eks_blueprints" {
- source = "../.."
+module "eks" {
+ source = "terraform-aws-modules/eks/aws"
+ version = "~> 18.30"
cluster_name = local.name
cluster_version = "1.23"
- vpc_id = module.vpc.vpc_id
- private_subnet_ids = module.vpc.private_subnets
+ cluster_enabled_log_types = ["api", "audit", "authenticator", "controllerManager", "scheduler"]
+ cluster_endpoint_private_access = true
- # https://github.com/aws-ia/terraform-aws-eks-blueprints/issues/485
- # https://github.com/aws-ia/terraform-aws-eks-blueprints/issues/494
- cluster_kms_key_additional_admin_arns = [data.aws_caller_identity.current.arn]
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_subnets
- # Add MWAA IAM Role to aws-auth configmap
- map_roles = [
+ node_security_group_additional_rules = {
+ ingress_nodes_ephemeral = {
+ description = "Node-to-node on ephemeral ports"
+ protocol = "tcp"
+ from_port = 1025
+ to_port = 65535
+ type = "ingress"
+ self = true
+ }
+ egress_all = {
+ description = "Allow all egress"
+ protocol = "-1"
+ from_port = 0
+ to_port = 0
+ type = "egress"
+ cidr_blocks = ["0.0.0.0/0"]
+ }
+ }
+
+ eks_managed_node_groups = {
+ default = {
+ instance_types = ["m5.large"]
+
+ min_size = 1
+ max_size = 3
+ desired_size = 1
+ }
+ }
+
+ manage_aws_auth_configmap = true
+ aws_auth_roles = [
{
rolearn = module.mwaa.mwaa_role_arn
username = "mwaa-role"
@@ -64,15 +92,6 @@ module "eks_blueprints" {
}
]
- managed_node_groups = {
- mg5 = {
- node_group_name = "mg5"
- instance_types = ["m5.large"]
- min_size = "2"
- disk_size = 100
- }
- }
-
tags = local.tags
}
@@ -83,10 +102,13 @@ module "eks_blueprints" {
module "eks_blueprints_kubernetes_addons" {
source = "../../modules/kubernetes-addons"
- eks_cluster_id = module.eks_blueprints.eks_cluster_id
- eks_cluster_endpoint = module.eks_blueprints.eks_cluster_endpoint
- eks_oidc_provider = module.eks_blueprints.oidc_provider
- eks_cluster_version = module.eks_blueprints.eks_cluster_version
+ eks_cluster_id = module.eks.cluster_id
+ eks_cluster_endpoint = module.eks.cluster_endpoint
+ eks_oidc_provider = module.eks.oidc_provider
+ eks_cluster_version = module.eks.cluster_version
+
+  # Wait on the node group(s) before provisioning addons
+ data_plane_wait_arn = module.eks.eks_managed_node_groups["default"].node_group_arn
enable_metrics_server = true
enable_cluster_autoscaler = true
@@ -264,16 +286,16 @@ locals {
kind = "Config"
current-context = "mwaa"
clusters = [{
- name = module.eks_blueprints.eks_cluster_arn
+ name = module.eks.cluster_id
cluster = {
- certificate-authority-data = module.eks_blueprints.eks_cluster_certificate_authority_data
- server = module.eks_blueprints.eks_cluster_endpoint
+ certificate-authority-data = module.eks.cluster_certificate_authority_data
+ server = module.eks.cluster_endpoint
}
}]
contexts = [{
name = "mwaa" # must match KubernetesPodOperator context
context = {
- cluster = module.eks_blueprints.eks_cluster_arn
+ cluster = module.eks.cluster_id
user = "mwaa"
}
}]
@@ -321,7 +343,6 @@ module "vpc" {
single_nat_gateway = true
enable_dns_hostnames = true
- # Manage so we can name
manage_default_network_acl = true
default_network_acl_tags = { Name = "${local.name}-default" }
manage_default_route_table = true
@@ -330,13 +351,11 @@ module "vpc" {
default_security_group_tags = { Name = "${local.name}-default" }
public_subnet_tags = {
- "kubernetes.io/cluster/${local.name}" = "shared"
- "kubernetes.io/role/elb" = 1
+ "kubernetes.io/role/elb" = 1
}
private_subnet_tags = {
- "kubernetes.io/cluster/${local.name}" = "shared"
- "kubernetes.io/role/internal-elb" = 1
+ "kubernetes.io/role/internal-elb" = 1
}
tags = local.tags
diff --git a/examples/managed-workflow-apache-airflow/outputs.tf b/examples/managed-workflow-apache-airflow/outputs.tf
index 55552d3138..b7decade8e 100644
--- a/examples/managed-workflow-apache-airflow/outputs.tf
+++ b/examples/managed-workflow-apache-airflow/outputs.tf
@@ -1,4 +1,4 @@
output "configure_kubectl" {
description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig"
- value = module.eks_blueprints.configure_kubectl
+ value = "aws eks --region ${local.region} update-kubeconfig --name ${module.eks.cluster_id}"
}
diff --git a/examples/multi-tenancy-with-teams/main.tf b/examples/multi-tenancy-with-teams/main.tf
index 65dc95723a..9abd0239ba 100644
--- a/examples/multi-tenancy-with-teams/main.tf
+++ b/examples/multi-tenancy-with-teams/main.tf
@@ -148,13 +148,11 @@ module "vpc" {
default_security_group_tags = { Name = "${local.name}-default" }
public_subnet_tags = {
- "kubernetes.io/cluster/${local.name}" = "shared"
- "kubernetes.io/role/elb" = 1
+ "kubernetes.io/role/elb" = 1
}
private_subnet_tags = {
- "kubernetes.io/cluster/${local.name}" = "shared"
- "kubernetes.io/role/internal-elb" = 1
+ "kubernetes.io/role/internal-elb" = 1
}
tags = local.tags
diff --git a/examples/observability/adot-amp-grafana-for-haproxy/main.tf b/examples/observability/adot-amp-grafana-for-haproxy/main.tf
index 9aaeb53403..2a6f27f5dd 100644
--- a/examples/observability/adot-amp-grafana-for-haproxy/main.tf
+++ b/examples/observability/adot-amp-grafana-for-haproxy/main.tf
@@ -3,15 +3,15 @@ provider "aws" {
}
provider "kubernetes" {
- host = module.eks_blueprints.eks_cluster_endpoint
- cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data)
+ host = module.eks.cluster_endpoint
+ cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
token = data.aws_eks_cluster_auth.this.token
}
provider "helm" {
kubernetes {
- host = module.eks_blueprints.eks_cluster_endpoint
- cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data)
+ host = module.eks.cluster_endpoint
+ cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
token = data.aws_eks_cluster_auth.this.token
}
}
@@ -22,13 +22,13 @@ provider "grafana" {
}
data "aws_eks_cluster_auth" "this" {
- name = module.eks_blueprints.eks_cluster_id
+ name = module.eks.cluster_id
}
data "aws_availability_zones" "available" {}
locals {
- name = basename(path.cwd)
+ name = "adot-haproxy"
region = var.aws_region
vpc_cidr = "10.0.0.0/16"
@@ -44,21 +44,26 @@ locals {
# EKS Blueprints
#---------------------------------------------------------------
-module "eks_blueprints" {
- source = "../../.."
+module "eks" {
+ source = "terraform-aws-modules/eks/aws"
+ version = "~> 18.30"
cluster_name = local.name
cluster_version = "1.23"
- vpc_id = module.vpc.vpc_id
- private_subnet_ids = module.vpc.private_subnets
+ cluster_enabled_log_types = ["api", "audit", "authenticator", "controllerManager", "scheduler"]
+ cluster_endpoint_private_access = true
- managed_node_groups = {
- t3_l = {
- node_group_name = "managed-ondemand"
- instance_types = ["t3.large"]
- min_size = 2
- subnet_ids = module.vpc.private_subnets
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_subnets
+
+ eks_managed_node_groups = {
+ default = {
+ instance_types = ["m5.large"]
+
+ min_size = 1
+ max_size = 3
+ desired_size = 1
}
}
@@ -68,10 +73,13 @@ module "eks_blueprints" {
module "eks_blueprints_kubernetes_addons" {
source = "../../../modules/kubernetes-addons"
- eks_cluster_id = module.eks_blueprints.eks_cluster_id
- eks_cluster_endpoint = module.eks_blueprints.eks_cluster_endpoint
- eks_oidc_provider = module.eks_blueprints.oidc_provider
- eks_cluster_version = module.eks_blueprints.eks_cluster_version
+ eks_cluster_id = module.eks.cluster_id
+ eks_cluster_endpoint = module.eks.cluster_endpoint
+ eks_oidc_provider = module.eks.oidc_provider
+ eks_cluster_version = module.eks.cluster_version
+
+  # Wait on the node group(s) before provisioning addons
+ data_plane_wait_arn = module.eks.eks_managed_node_groups["default"].node_group_arn
# enable AWS Managed EKS add-on for ADOT
enable_amazon_eks_adot = true
@@ -275,7 +283,6 @@ module "vpc" {
single_nat_gateway = true
enable_dns_hostnames = true
- # Manage so we can name
manage_default_network_acl = true
default_network_acl_tags = { Name = "${local.name}-default" }
manage_default_route_table = true
@@ -284,13 +291,11 @@ module "vpc" {
default_security_group_tags = { Name = "${local.name}-default" }
public_subnet_tags = {
- "kubernetes.io/cluster/${local.name}" = "shared"
- "kubernetes.io/role/elb" = 1
+ "kubernetes.io/role/elb" = 1
}
private_subnet_tags = {
- "kubernetes.io/cluster/${local.name}" = "shared"
- "kubernetes.io/role/internal-elb" = 1
+ "kubernetes.io/role/internal-elb" = 1
}
tags = local.tags
diff --git a/examples/observability/adot-amp-grafana-for-haproxy/outputs.tf b/examples/observability/adot-amp-grafana-for-haproxy/outputs.tf
index 55552d3138..b7decade8e 100644
--- a/examples/observability/adot-amp-grafana-for-haproxy/outputs.tf
+++ b/examples/observability/adot-amp-grafana-for-haproxy/outputs.tf
@@ -1,4 +1,4 @@
output "configure_kubectl" {
description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig"
- value = module.eks_blueprints.configure_kubectl
+ value = "aws eks --region ${local.region} update-kubeconfig --name ${module.eks.cluster_id}"
}
diff --git a/examples/observability/adot-amp-grafana-for-java/main.tf b/examples/observability/adot-amp-grafana-for-java/main.tf
index 856ea4f6a6..317f6cf31a 100644
--- a/examples/observability/adot-amp-grafana-for-java/main.tf
+++ b/examples/observability/adot-amp-grafana-for-java/main.tf
@@ -3,15 +3,15 @@ provider "aws" {
}
provider "kubernetes" {
- host = module.eks_blueprints.eks_cluster_endpoint
- cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data)
+ host = module.eks.cluster_endpoint
+ cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
token = data.aws_eks_cluster_auth.this.token
}
provider "helm" {
kubernetes {
- host = module.eks_blueprints.eks_cluster_endpoint
- cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data)
+ host = module.eks.cluster_endpoint
+ cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
token = data.aws_eks_cluster_auth.this.token
}
}
@@ -22,13 +22,13 @@ provider "grafana" {
}
data "aws_eks_cluster_auth" "this" {
- name = module.eks_blueprints.eks_cluster_id
+ name = module.eks.cluster_id
}
data "aws_availability_zones" "available" {}
locals {
- name = basename(path.cwd)
+ name = "adot-java"
region = var.aws_region
vpc_cidr = "10.0.0.0/16"
@@ -44,21 +44,26 @@ locals {
# EKS Blueprints
#---------------------------------------------------------------
-module "eks_blueprints" {
- source = "../../.."
+module "eks" {
+ source = "terraform-aws-modules/eks/aws"
+ version = "~> 18.30"
cluster_name = local.name
cluster_version = "1.23"
- vpc_id = module.vpc.vpc_id
- private_subnet_ids = module.vpc.private_subnets
+ cluster_enabled_log_types = ["api", "audit", "authenticator", "controllerManager", "scheduler"]
+ cluster_endpoint_private_access = true
- managed_node_groups = {
- t3_l = {
- node_group_name = "managed-ondemand"
- instance_types = ["t3.large"]
- min_size = 2
- subnet_ids = module.vpc.private_subnets
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_subnets
+
+ eks_managed_node_groups = {
+ default = {
+ instance_types = ["m5.large"]
+
+ min_size = 1
+ max_size = 3
+ desired_size = 1
}
}
@@ -68,10 +73,13 @@ module "eks_blueprints" {
module "eks_blueprints_kubernetes_addons" {
source = "../../../modules/kubernetes-addons"
- eks_cluster_id = module.eks_blueprints.eks_cluster_id
- eks_cluster_endpoint = module.eks_blueprints.eks_cluster_endpoint
- eks_oidc_provider = module.eks_blueprints.oidc_provider
- eks_cluster_version = module.eks_blueprints.eks_cluster_version
+ eks_cluster_id = module.eks.cluster_id
+ eks_cluster_endpoint = module.eks.cluster_endpoint
+ eks_oidc_provider = module.eks.oidc_provider
+ eks_cluster_version = module.eks.cluster_version
+
+  # Wait on the node group(s) before provisioning addons
+ data_plane_wait_arn = module.eks.eks_managed_node_groups["default"].node_group_arn
# enable AWS Managed EKS add-on for ADOT
enable_amazon_eks_adot = true
@@ -197,7 +205,6 @@ module "vpc" {
single_nat_gateway = true
enable_dns_hostnames = true
- # Manage so we can name
manage_default_network_acl = true
default_network_acl_tags = { Name = "${local.name}-default" }
manage_default_route_table = true
@@ -206,13 +213,11 @@ module "vpc" {
default_security_group_tags = { Name = "${local.name}-default" }
public_subnet_tags = {
- "kubernetes.io/cluster/${local.name}" = "shared"
- "kubernetes.io/role/elb" = 1
+ "kubernetes.io/role/elb" = 1
}
private_subnet_tags = {
- "kubernetes.io/cluster/${local.name}" = "shared"
- "kubernetes.io/role/internal-elb" = 1
+ "kubernetes.io/role/internal-elb" = 1
}
tags = local.tags
diff --git a/examples/observability/adot-amp-grafana-for-java/outputs.tf b/examples/observability/adot-amp-grafana-for-java/outputs.tf
index 55552d3138..b7decade8e 100644
--- a/examples/observability/adot-amp-grafana-for-java/outputs.tf
+++ b/examples/observability/adot-amp-grafana-for-java/outputs.tf
@@ -1,4 +1,4 @@
output "configure_kubectl" {
description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig"
- value = module.eks_blueprints.configure_kubectl
+ value = "aws eks --region ${local.region} update-kubeconfig --name ${module.eks.cluster_id}"
}
diff --git a/examples/observability/adot-amp-grafana-for-memcached/main.tf b/examples/observability/adot-amp-grafana-for-memcached/main.tf
index 4dc96b6fac..828f8d2d4d 100644
--- a/examples/observability/adot-amp-grafana-for-memcached/main.tf
+++ b/examples/observability/adot-amp-grafana-for-memcached/main.tf
@@ -3,15 +3,15 @@ provider "aws" {
}
provider "kubernetes" {
- host = module.eks_blueprints.eks_cluster_endpoint
- cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data)
+ host = module.eks.cluster_endpoint
+ cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
token = data.aws_eks_cluster_auth.this.token
}
provider "helm" {
kubernetes {
- host = module.eks_blueprints.eks_cluster_endpoint
- cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data)
+ host = module.eks.cluster_endpoint
+ cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
token = data.aws_eks_cluster_auth.this.token
}
}
@@ -22,13 +22,13 @@ provider "grafana" {
}
data "aws_eks_cluster_auth" "this" {
- name = module.eks_blueprints.eks_cluster_id
+ name = module.eks.cluster_id
}
data "aws_availability_zones" "available" {}
locals {
- name = basename(path.cwd)
+ name = "adot-memcached"
region = var.aws_region
vpc_cidr = "10.0.0.0/16"
@@ -44,21 +44,26 @@ locals {
# EKS Blueprints
#---------------------------------------------------------------
-module "eks_blueprints" {
- source = "../../.."
+module "eks" {
+ source = "terraform-aws-modules/eks/aws"
+ version = "~> 18.30"
cluster_name = local.name
cluster_version = "1.23"
- vpc_id = module.vpc.vpc_id
- private_subnet_ids = module.vpc.private_subnets
+ cluster_enabled_log_types = ["api", "audit", "authenticator", "controllerManager", "scheduler"]
+ cluster_endpoint_private_access = true
- managed_node_groups = {
- t3_l = {
- node_group_name = "managed-ondemand"
- instance_types = ["t3.large"]
- min_size = 2
- subnet_ids = module.vpc.private_subnets
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_subnets
+
+ eks_managed_node_groups = {
+ default = {
+ instance_types = ["m5.large"]
+
+ min_size = 1
+ max_size = 3
+ desired_size = 1
}
}
@@ -68,10 +73,13 @@ module "eks_blueprints" {
module "eks_blueprints_kubernetes_addons" {
source = "../../../modules/kubernetes-addons"
- eks_cluster_id = module.eks_blueprints.eks_cluster_id
- eks_cluster_endpoint = module.eks_blueprints.eks_cluster_endpoint
- eks_oidc_provider = module.eks_blueprints.oidc_provider
- eks_cluster_version = module.eks_blueprints.eks_cluster_version
+ eks_cluster_id = module.eks.cluster_id
+ eks_cluster_endpoint = module.eks.cluster_endpoint
+ eks_oidc_provider = module.eks.oidc_provider
+ eks_cluster_version = module.eks.cluster_version
+
+  # Wait on the node group(s) before provisioning addons
+ data_plane_wait_arn = module.eks.eks_managed_node_groups["default"].node_group_arn
# enable AWS Managed EKS add-on for ADOT
enable_amazon_eks_adot = true
@@ -194,7 +202,6 @@ module "vpc" {
single_nat_gateway = true
enable_dns_hostnames = true
- # Manage so we can name
manage_default_network_acl = true
default_network_acl_tags = { Name = "${local.name}-default" }
manage_default_route_table = true
@@ -203,13 +210,11 @@ module "vpc" {
default_security_group_tags = { Name = "${local.name}-default" }
public_subnet_tags = {
- "kubernetes.io/cluster/${local.name}" = "shared"
- "kubernetes.io/role/elb" = 1
+ "kubernetes.io/role/elb" = 1
}
private_subnet_tags = {
- "kubernetes.io/cluster/${local.name}" = "shared"
- "kubernetes.io/role/internal-elb" = 1
+ "kubernetes.io/role/internal-elb" = 1
}
tags = local.tags
diff --git a/examples/observability/adot-amp-grafana-for-memcached/outputs.tf b/examples/observability/adot-amp-grafana-for-memcached/outputs.tf
index 55552d3138..b7decade8e 100644
--- a/examples/observability/adot-amp-grafana-for-memcached/outputs.tf
+++ b/examples/observability/adot-amp-grafana-for-memcached/outputs.tf
@@ -1,4 +1,4 @@
output "configure_kubectl" {
description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig"
- value = module.eks_blueprints.configure_kubectl
+ value = "aws eks --region ${local.region} update-kubeconfig --name ${module.eks.cluster_id}"
}
diff --git a/examples/observability/adot-amp-grafana-for-nginx/main.tf b/examples/observability/adot-amp-grafana-for-nginx/main.tf
index 5fcc68496f..7f8802cadf 100644
--- a/examples/observability/adot-amp-grafana-for-nginx/main.tf
+++ b/examples/observability/adot-amp-grafana-for-nginx/main.tf
@@ -3,15 +3,15 @@ provider "aws" {
}
provider "kubernetes" {
- host = module.eks_blueprints.eks_cluster_endpoint
- cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data)
+ host = module.eks.cluster_endpoint
+ cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
token = data.aws_eks_cluster_auth.this.token
}
provider "helm" {
kubernetes {
- host = module.eks_blueprints.eks_cluster_endpoint
- cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data)
+ host = module.eks.cluster_endpoint
+ cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
token = data.aws_eks_cluster_auth.this.token
}
}
@@ -22,13 +22,13 @@ provider "grafana" {
}
data "aws_eks_cluster_auth" "this" {
- name = module.eks_blueprints.eks_cluster_id
+ name = module.eks.cluster_id
}
data "aws_availability_zones" "available" {}
locals {
- name = basename(path.cwd)
+ name = "adot-nginx"
region = var.aws_region
vpc_cidr = "10.0.0.0/16"
@@ -44,21 +44,26 @@ locals {
# EKS Blueprints
#---------------------------------------------------------------
-module "eks_blueprints" {
- source = "../../.."
+module "eks" {
+ source = "terraform-aws-modules/eks/aws"
+ version = "~> 18.30"
cluster_name = local.name
cluster_version = "1.23"
- vpc_id = module.vpc.vpc_id
- private_subnet_ids = module.vpc.private_subnets
+ cluster_enabled_log_types = ["api", "audit", "authenticator", "controllerManager", "scheduler"]
+ cluster_endpoint_private_access = true
- managed_node_groups = {
- t3_l = {
- node_group_name = "managed-ondemand"
- instance_types = ["t3.large"]
- min_size = 2
- subnet_ids = module.vpc.private_subnets
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_subnets
+
+ eks_managed_node_groups = {
+ default = {
+ instance_types = ["m5.large"]
+
+ min_size = 1
+ max_size = 3
+ desired_size = 1
}
}
@@ -68,10 +73,13 @@ module "eks_blueprints" {
module "eks_blueprints_kubernetes_addons" {
source = "../../../modules/kubernetes-addons"
- eks_cluster_id = module.eks_blueprints.eks_cluster_id
- eks_cluster_endpoint = module.eks_blueprints.eks_cluster_endpoint
- eks_oidc_provider = module.eks_blueprints.oidc_provider
- eks_cluster_version = module.eks_blueprints.eks_cluster_version
+ eks_cluster_id = module.eks.cluster_id
+ eks_cluster_endpoint = module.eks.cluster_endpoint
+ eks_oidc_provider = module.eks.oidc_provider
+ eks_cluster_version = module.eks.cluster_version
+
+  # Wait on the node group(s) before provisioning addons
+ data_plane_wait_arn = module.eks.eks_managed_node_groups["default"].node_group_arn
# enable AWS Managed EKS add-on for ADOT
enable_amazon_eks_adot = true
@@ -212,7 +220,6 @@ module "vpc" {
single_nat_gateway = true
enable_dns_hostnames = true
- # Manage so we can name
manage_default_network_acl = true
default_network_acl_tags = { Name = "${local.name}-default" }
manage_default_route_table = true
@@ -221,13 +228,11 @@ module "vpc" {
default_security_group_tags = { Name = "${local.name}-default" }
public_subnet_tags = {
- "kubernetes.io/cluster/${local.name}" = "shared"
- "kubernetes.io/role/elb" = 1
+ "kubernetes.io/role/elb" = 1
}
private_subnet_tags = {
- "kubernetes.io/cluster/${local.name}" = "shared"
- "kubernetes.io/role/internal-elb" = 1
+ "kubernetes.io/role/internal-elb" = 1
}
tags = local.tags
diff --git a/examples/observability/adot-amp-grafana-for-nginx/outputs.tf b/examples/observability/adot-amp-grafana-for-nginx/outputs.tf
index 55552d3138..b7decade8e 100644
--- a/examples/observability/adot-amp-grafana-for-nginx/outputs.tf
+++ b/examples/observability/adot-amp-grafana-for-nginx/outputs.tf
@@ -1,4 +1,4 @@
output "configure_kubectl" {
description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig"
- value = module.eks_blueprints.configure_kubectl
+ value = "aws eks --region ${local.region} update-kubeconfig --name ${module.eks.cluster_id}"
}
diff --git a/examples/observability/amp-amg-opensearch/data.tf b/examples/observability/amp-amg-opensearch/data.tf
index 4229b4023b..1788e235e7 100644
--- a/examples/observability/amp-amg-opensearch/data.tf
+++ b/examples/observability/amp-amg-opensearch/data.tf
@@ -1,11 +1,3 @@
-data "aws_eks_cluster_auth" "this" {
- name = module.eks_blueprints.eks_cluster_id
-}
-
-data "aws_availability_zones" "available" {}
-
-data "aws_caller_identity" "current" {}
-
data "aws_iam_policy_document" "fluentbit_opensearch_access" {
# Identity-based policy specifying the IAM permissions
# the principal has against the OpenSearch service API
diff --git a/examples/observability/amp-amg-opensearch/main.tf b/examples/observability/amp-amg-opensearch/main.tf
index 961131aef2..b9cc4442aa 100644
--- a/examples/observability/amp-amg-opensearch/main.tf
+++ b/examples/observability/amp-amg-opensearch/main.tf
@@ -3,15 +3,15 @@ provider "aws" {
}
provider "kubernetes" {
- host = module.eks_blueprints.eks_cluster_endpoint
- cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data)
+ host = module.eks.cluster_endpoint
+ cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
token = data.aws_eks_cluster_auth.this.token
}
provider "helm" {
kubernetes {
- host = module.eks_blueprints.eks_cluster_endpoint
- cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data)
+ host = module.eks.cluster_endpoint
+ cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
token = data.aws_eks_cluster_auth.this.token
}
}
@@ -21,6 +21,13 @@ provider "grafana" {
auth = var.grafana_api_key
}
+data "aws_eks_cluster_auth" "this" {
+ name = module.eks.cluster_id
+}
+
+data "aws_caller_identity" "current" {}
+data "aws_availability_zones" "available" {}
+
locals {
name = basename(path.cwd)
region = "us-west-2"
@@ -37,21 +44,27 @@ locals {
#---------------------------------------------------------------
# EKS Blueprints
#---------------------------------------------------------------
-module "eks_blueprints" {
- source = "../../.."
+
+module "eks" {
+ source = "terraform-aws-modules/eks/aws"
+ version = "~> 18.30"
cluster_name = local.name
cluster_version = "1.23"
- vpc_id = module.vpc.vpc_id
- private_subnet_ids = module.vpc.private_subnets
+ cluster_enabled_log_types = ["api", "audit", "authenticator", "controllerManager", "scheduler"]
+ cluster_endpoint_private_access = true
- managed_node_groups = {
- mg_5 = {
- node_group_name = "managed-ondemand"
- instance_types = ["m5.xlarge"]
- min_size = 3
- subnet_ids = module.vpc.private_subnets
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_subnets
+
+ eks_managed_node_groups = {
+ default = {
+ instance_types = ["m5.large"]
+
+ min_size = 1
+ max_size = 3
+ desired_size = 1
}
}
@@ -61,10 +74,13 @@ module "eks_blueprints" {
module "eks_blueprints_kubernetes_addons" {
source = "../../../modules/kubernetes-addons"
- eks_cluster_id = module.eks_blueprints.eks_cluster_id
- eks_cluster_endpoint = module.eks_blueprints.eks_cluster_endpoint
- eks_oidc_provider = module.eks_blueprints.oidc_provider
- eks_cluster_version = module.eks_blueprints.eks_cluster_version
+ eks_cluster_id = module.eks.cluster_id
+ eks_cluster_endpoint = module.eks.cluster_endpoint
+ eks_oidc_provider = module.eks.oidc_provider
+ eks_cluster_version = module.eks.cluster_version
+
+  # Wait on the node group(s) before provisioning addons
+ data_plane_wait_arn = module.eks.eks_managed_node_groups["default"].node_group_arn
# Add-ons
enable_metrics_server = true
@@ -246,7 +262,6 @@ module "vpc" {
single_nat_gateway = true
enable_dns_hostnames = true
- # Manage so we can name
manage_default_network_acl = true
default_network_acl_tags = { Name = "${local.name}-default" }
manage_default_route_table = true
@@ -255,13 +270,11 @@ module "vpc" {
default_security_group_tags = { Name = "${local.name}-default" }
public_subnet_tags = {
- "kubernetes.io/cluster/${local.name}" = "shared"
- "kubernetes.io/role/elb" = 1
+ "kubernetes.io/role/elb" = 1
}
private_subnet_tags = {
- "kubernetes.io/cluster/${local.name}" = "shared"
- "kubernetes.io/role/internal-elb" = 1
+ "kubernetes.io/role/internal-elb" = 1
}
tags = local.tags
diff --git a/examples/observability/amp-amg-opensearch/outputs.tf b/examples/observability/amp-amg-opensearch/outputs.tf
index 96029761af..26e7d110aa 100644
--- a/examples/observability/amp-amg-opensearch/outputs.tf
+++ b/examples/observability/amp-amg-opensearch/outputs.tf
@@ -1,3 +1,8 @@
+output "configure_kubectl" {
+ description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig"
+ value = "aws eks --region ${local.region} update-kubeconfig --name ${module.eks.cluster_id}"
+}
+
output "opensearch_pw" {
description = "Amazon OpenSearch Service Domain password"
value = var.opensearch_dashboard_pw
@@ -13,8 +18,3 @@ output "opensearch_vpc_endpoint" {
description = "Amazon OpenSearch Service Domain-specific endpoint"
value = aws_elasticsearch_domain.opensearch.endpoint
}
-
-output "configure_kubectl" {
- description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig"
- value = module.eks_blueprints.configure_kubectl
-}
diff --git a/examples/secrets-management/csi-secrets-driver/main.tf b/examples/secrets-management/csi-secrets-driver/main.tf
index 23b2cac47e..ef32a9916a 100644
--- a/examples/secrets-management/csi-secrets-driver/main.tf
+++ b/examples/secrets-management/csi-secrets-driver/main.tf
@@ -3,37 +3,36 @@ provider "aws" {
}
provider "kubernetes" {
- host = module.eks_blueprints.eks_cluster_endpoint
- cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data)
+ host = module.eks.cluster_endpoint
+ cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
token = data.aws_eks_cluster_auth.this.token
}
provider "helm" {
kubernetes {
- host = module.eks_blueprints.eks_cluster_endpoint
- cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data)
+ host = module.eks.cluster_endpoint
+ cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
token = data.aws_eks_cluster_auth.this.token
}
}
provider "kubectl" {
apply_retry_count = 10
- host = module.eks_blueprints.eks_cluster_endpoint
- cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data)
+ host = module.eks.cluster_endpoint
+ cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
load_config_file = false
token = data.aws_eks_cluster_auth.this.token
}
data "aws_eks_cluster_auth" "this" {
- name = module.eks_blueprints.eks_cluster_id
+ name = module.eks.cluster_id
}
data "aws_availability_zones" "available" {}
locals {
- name = basename(path.cwd)
- cluster_name = local.name
- region = "us-west-2"
+ name = basename(path.cwd)
+ region = "us-west-2"
vpc_cidr = "10.0.0.0/16"
azs = slice(data.aws_availability_zones.available.names, 0, 3)
@@ -50,21 +49,45 @@ locals {
# EKS Blueprints
#---------------------------------------------------------------
-module "eks_blueprints" {
- source = "../../../"
+module "eks" {
+ source = "terraform-aws-modules/eks/aws"
+ version = "~> 18.30"
- cluster_name = local.cluster_name
+ cluster_name = local.name
cluster_version = "1.23"
- vpc_id = module.vpc.vpc_id
- private_subnet_ids = module.vpc.private_subnets
+ cluster_enabled_log_types = ["api", "audit", "authenticator", "controllerManager", "scheduler"]
+ cluster_endpoint_private_access = true
- managed_node_groups = {
- mg_5 = {
- node_group_name = "managed-ondemand"
- instance_types = ["m5.large"]
- min_size = 2
- subnet_ids = module.vpc.private_subnets
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_subnets
+
+ node_security_group_additional_rules = {
+ ingress_nodes_ephemeral = {
+ description = "Node-to-node on ephemeral ports"
+ protocol = "tcp"
+ from_port = 1025
+ to_port = 65535
+ type = "ingress"
+ self = true
+ }
+ egress_all = {
+ description = "Allow all egress"
+ protocol = "-1"
+ from_port = 0
+ to_port = 0
+ type = "egress"
+ cidr_blocks = ["0.0.0.0/0"]
+ }
+ }
+
+ eks_managed_node_groups = {
+ default = {
+ instance_types = ["m5.large"]
+
+ min_size = 1
+ max_size = 3
+ desired_size = 1
}
}
@@ -74,10 +97,13 @@ module "eks_blueprints" {
module "eks_blueprints_kubernetes_addons" {
source = "../../../modules/kubernetes-addons"
- eks_cluster_id = module.eks_blueprints.eks_cluster_id
- eks_cluster_endpoint = module.eks_blueprints.eks_cluster_endpoint
- eks_oidc_provider = module.eks_blueprints.oidc_provider
- eks_cluster_version = module.eks_blueprints.eks_cluster_version
+ eks_cluster_id = module.eks.cluster_id
+ eks_cluster_endpoint = module.eks.cluster_endpoint
+ eks_oidc_provider = module.eks.oidc_provider
+ eks_cluster_version = module.eks.cluster_version
+
+  # Wait on the node group(s) before provisioning addons
+ data_plane_wait_arn = module.eks.eks_managed_node_groups["default"].node_group_arn
#K8s Add-ons
enable_secrets_store_csi_driver = true
@@ -115,10 +141,6 @@ resource "aws_secretsmanager_secret_version" "sversion" {
EOT
}
-#------------------------------------------------------------------------------------
-# This creates a IAM Policy content limiting access to the secret in Secrets Manager
-#------------------------------------------------------------------------------------
-
data "aws_iam_policy_document" "secrets_management_policy" {
statement {
sid = ""
@@ -133,27 +155,19 @@ data "aws_iam_policy_document" "secrets_management_policy" {
}
}
-#---------------------------------------------------------------
-# Creating IAM Policy to be attached to the IRSA Role
-#---------------------------------------------------------------
resource "aws_iam_policy" "this" {
description = "Sample application IAM Policy for IRSA"
- name = "${module.eks_blueprints.eks_cluster_id}-${local.application}-irsa"
+ name = "${module.eks.cluster_id}-${local.application}-irsa"
policy = data.aws_iam_policy_document.secrets_management_policy.json
}
-#---------------------------------------------------------------
-# Creating IAM Role for Service Account
-#---------------------------------------------------------------
module "iam_role_service_account" {
source = "../../../modules/irsa"
- eks_cluster_id = module.eks_blueprints.eks_cluster_id
- eks_oidc_provider_arn = module.eks_blueprints.eks_oidc_provider_arn
+ eks_cluster_id = module.eks.cluster_id
+ eks_oidc_provider_arn = module.eks.oidc_provider_arn
kubernetes_namespace = local.application
kubernetes_service_account = "${local.application}-sa"
irsa_iam_policies = [aws_iam_policy.this.arn]
-
- depends_on = [module.eks_blueprints]
}
#---------------------------------------------------------------
@@ -225,7 +239,11 @@ resource "kubectl_manifest" "sample_nginx" {
]
}
})
- depends_on = [kubectl_manifest.csi_secrets_store_crd, module.iam_role_service_account]
+
+ depends_on = [
+ kubectl_manifest.csi_secrets_store_crd,
+ module.iam_role_service_account,
+ ]
}
#---------------------------------------------------------------
@@ -246,7 +264,6 @@ module "vpc" {
single_nat_gateway = true
enable_dns_hostnames = true
- # Manage so we can name
manage_default_network_acl = true
default_network_acl_tags = { Name = "${local.name}-default" }
manage_default_route_table = true
@@ -255,13 +272,11 @@ module "vpc" {
default_security_group_tags = { Name = "${local.name}-default" }
public_subnet_tags = {
- "kubernetes.io/cluster/${local.name}" = "shared"
- "kubernetes.io/role/elb" = 1
+ "kubernetes.io/role/elb" = 1
}
private_subnet_tags = {
- "kubernetes.io/cluster/${local.name}" = "shared"
- "kubernetes.io/role/internal-elb" = 1
+ "kubernetes.io/role/internal-elb" = 1
}
tags = local.tags
diff --git a/examples/secrets-management/csi-secrets-driver/outputs.tf b/examples/secrets-management/csi-secrets-driver/outputs.tf
index 55552d3138..b7decade8e 100644
--- a/examples/secrets-management/csi-secrets-driver/outputs.tf
+++ b/examples/secrets-management/csi-secrets-driver/outputs.tf
@@ -1,4 +1,4 @@
output "configure_kubectl" {
description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig"
- value = module.eks_blueprints.configure_kubectl
+ value = "aws eks --region ${local.region} update-kubeconfig --name ${module.eks.cluster_id}"
}
diff --git a/examples/stateful/main.tf b/examples/stateful/main.tf
index 5700eb6f30..fc6a729c46 100644
--- a/examples/stateful/main.tf
+++ b/examples/stateful/main.tf
@@ -3,21 +3,21 @@ provider "aws" {
}
provider "kubernetes" {
- host = module.eks_blueprints.eks_cluster_endpoint
- cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data)
+ host = module.eks.cluster_endpoint
+ cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
token = data.aws_eks_cluster_auth.this.token
}
provider "helm" {
kubernetes {
- host = module.eks_blueprints.eks_cluster_endpoint
- cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data)
+ host = module.eks.cluster_endpoint
+ cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
token = data.aws_eks_cluster_auth.this.token
}
}
data "aws_eks_cluster_auth" "this" {
- name = module.eks_blueprints.eks_cluster_id
+ name = module.eks.cluster_id
}
data "aws_availability_zones" "available" {}
@@ -39,20 +39,45 @@ locals {
# EKS Blueprints
#---------------------------------------------------------------
-module "eks_blueprints" {
- source = "../.."
+module "eks" {
+ source = "terraform-aws-modules/eks/aws"
+ version = "~> 18.30"
cluster_name = local.name
cluster_version = "1.23"
- vpc_id = module.vpc.vpc_id
- private_subnet_ids = module.vpc.private_subnets
+ cluster_enabled_log_types = ["api", "audit", "authenticator", "controllerManager", "scheduler"]
+ cluster_endpoint_private_access = true
- managed_node_groups = {
- velero = {
- node_group_name = "velero"
- launch_template_os = "amazonlinux2eks"
- subnet_ids = module.vpc.private_subnets
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_subnets
+
+ node_security_group_additional_rules = {
+ ingress_nodes_ephemeral = {
+ description = "Node-to-node on ephemeral ports"
+ protocol = "tcp"
+ from_port = 1025
+ to_port = 65535
+ type = "ingress"
+ self = true
+ }
+ egress_all = {
+ description = "Allow all egress"
+ protocol = "-1"
+ from_port = 0
+ to_port = 0
+ type = "egress"
+ cidr_blocks = ["0.0.0.0/0"]
+ }
+ }
+
+ eks_managed_node_groups = {
+ default = {
+ instance_types = ["m5.large"]
+
+ min_size = 1
+ max_size = 3
+ desired_size = 1
}
}
@@ -62,10 +87,13 @@ module "eks_blueprints" {
module "eks_blueprints_kubernetes_addons" {
source = "../../modules/kubernetes-addons"
- eks_cluster_id = module.eks_blueprints.eks_cluster_id
- eks_cluster_endpoint = module.eks_blueprints.eks_cluster_endpoint
- eks_oidc_provider = module.eks_blueprints.oidc_provider
- eks_cluster_version = module.eks_blueprints.eks_cluster_version
+ eks_cluster_id = module.eks.cluster_id
+ eks_cluster_endpoint = module.eks.cluster_endpoint
+ eks_oidc_provider = module.eks.oidc_provider
+ eks_cluster_version = module.eks.cluster_version
+
+  # Wait on the node group(s) before provisioning addons
+ data_plane_wait_arn = module.eks.eks_managed_node_groups["default"].node_group_arn
enable_velero = true
velero_backup_s3_bucket = module.velero_backup_s3_bucket.s3_bucket_id
@@ -103,7 +131,6 @@ module "vpc" {
single_nat_gateway = true
enable_dns_hostnames = true
- # Manage so we can name
manage_default_network_acl = true
default_network_acl_tags = { Name = "${local.name}-default" }
manage_default_route_table = true
@@ -112,13 +139,11 @@ module "vpc" {
default_security_group_tags = { Name = "${local.name}-default" }
public_subnet_tags = {
- "kubernetes.io/cluster/${local.name}" = "shared"
- "kubernetes.io/role/elb" = 1
+ "kubernetes.io/role/elb" = 1
}
private_subnet_tags = {
- "kubernetes.io/cluster/${local.name}" = "shared"
- "kubernetes.io/role/internal-elb" = 1
+ "kubernetes.io/role/internal-elb" = 1
}
tags = local.tags
diff --git a/examples/stateful/outputs.tf b/examples/stateful/outputs.tf
index 55552d3138..b7decade8e 100644
--- a/examples/stateful/outputs.tf
+++ b/examples/stateful/outputs.tf
@@ -1,4 +1,4 @@
output "configure_kubectl" {
description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig"
- value = module.eks_blueprints.configure_kubectl
+ value = "aws eks --region ${local.region} update-kubeconfig --name ${module.eks.cluster_id}"
}
diff --git a/examples/tls-with-aws-pca-issuer/main.tf b/examples/tls-with-aws-pca-issuer/main.tf
index 810c11193b..110630750e 100644
--- a/examples/tls-with-aws-pca-issuer/main.tf
+++ b/examples/tls-with-aws-pca-issuer/main.tf
@@ -4,22 +4,22 @@ provider "aws" {
provider "kubectl" {
apply_retry_count = 10
- host = module.eks_blueprints.eks_cluster_endpoint
- cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data)
+ host = module.eks.cluster_endpoint
+ cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
load_config_file = false
token = data.aws_eks_cluster_auth.this.token
}
provider "helm" {
kubernetes {
- host = module.eks_blueprints.eks_cluster_endpoint
- cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data)
+ host = module.eks.cluster_endpoint
+ cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
token = data.aws_eks_cluster_auth.this.token
}
}
data "aws_eks_cluster_auth" "this" {
- name = module.eks_blueprints.eks_cluster_id
+ name = module.eks.cluster_id
}
data "aws_partition" "current" {}
@@ -42,21 +42,45 @@ locals {
# EKS Blueprints
#---------------------------------------------------------------
-module "eks_blueprints" {
- source = "../.."
+module "eks" {
+ source = "terraform-aws-modules/eks/aws"
+ version = "~> 18.30"
cluster_name = local.name
cluster_version = "1.23"
- vpc_id = module.vpc.vpc_id
- private_subnet_ids = module.vpc.private_subnets
+ cluster_enabled_log_types = ["api", "audit", "authenticator", "controllerManager", "scheduler"]
+ cluster_endpoint_private_access = true
- managed_node_groups = {
- mg_5 = {
- node_group_name = "managed-ondemand"
- instance_types = ["m5.large"]
- min_size = 2
- subnet_ids = module.vpc.private_subnets
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_subnets
+
+ node_security_group_additional_rules = {
+ ingress_nodes_ephemeral = {
+ description = "Node-to-node on ephemeral ports"
+ protocol = "tcp"
+ from_port = 1025
+ to_port = 65535
+ type = "ingress"
+ self = true
+ }
+ egress_all = {
+ description = "Allow all egress"
+ protocol = "-1"
+ from_port = 0
+ to_port = 0
+ type = "egress"
+ cidr_blocks = ["0.0.0.0/0"]
+ }
+ }
+
+ eks_managed_node_groups = {
+ default = {
+ instance_types = ["m5.large"]
+
+ min_size = 1
+ max_size = 3
+ desired_size = 1
}
}
@@ -66,10 +90,13 @@ module "eks_blueprints" {
module "eks_blueprints_kubernetes_addons" {
source = "../../modules/kubernetes-addons"
- eks_cluster_id = module.eks_blueprints.eks_cluster_id
- eks_cluster_endpoint = module.eks_blueprints.eks_cluster_endpoint
- eks_oidc_provider = module.eks_blueprints.oidc_provider
- eks_cluster_version = module.eks_blueprints.eks_cluster_version
+ eks_cluster_id = module.eks.cluster_id
+ eks_cluster_endpoint = module.eks.cluster_endpoint
+ eks_oidc_provider = module.eks.oidc_provider
+ eks_cluster_version = module.eks.cluster_version
+
+  # Wait on the node group(s) before provisioning addons
+ data_plane_wait_arn = module.eks.eks_managed_node_groups["default"].node_group_arn
# EKS Managed Add-ons
enable_amazon_eks_vpc_cni = true
@@ -137,7 +164,7 @@ resource "kubectl_manifest" "cluster_pca_issuer" {
kind = "AWSPCAClusterIssuer"
metadata = {
- name = module.eks_blueprints.eks_cluster_id
+ name = module.eks.cluster_id
}
spec = {
@@ -169,10 +196,10 @@ resource "kubectl_manifest" "example_pca_certificate" {
issuerRef = {
group = "awspca.cert-manager.io"
kind = "AWSPCAClusterIssuer"
- name : module.eks_blueprints.eks_cluster_id
+ name : module.eks.cluster_id
}
renewBefore = "360h0m0s"
- secretName = join("-", [var.certificate_name, "clusterissuer"]) # This is the name with which the K8 Secret will be available
+      secretName = "${var.certificate_name}-clusterissuer" # Name of the Kubernetes Secret that will hold the issued certificate
usages = [
"server auth",
"client auth"
@@ -209,7 +236,6 @@ module "vpc" {
single_nat_gateway = true
enable_dns_hostnames = true
- # Manage so we can name
manage_default_network_acl = true
default_network_acl_tags = { Name = "${local.name}-default" }
manage_default_route_table = true
@@ -218,13 +244,11 @@ module "vpc" {
default_security_group_tags = { Name = "${local.name}-default" }
public_subnet_tags = {
- "kubernetes.io/cluster/${local.name}" = "shared"
- "kubernetes.io/role/elb" = 1
+ "kubernetes.io/role/elb" = 1
}
private_subnet_tags = {
- "kubernetes.io/cluster/${local.name}" = "shared"
- "kubernetes.io/role/internal-elb" = 1
+ "kubernetes.io/role/internal-elb" = 1
}
tags = local.tags
diff --git a/examples/tls-with-aws-pca-issuer/outputs.tf b/examples/tls-with-aws-pca-issuer/outputs.tf
index 55552d3138..b7decade8e 100644
--- a/examples/tls-with-aws-pca-issuer/outputs.tf
+++ b/examples/tls-with-aws-pca-issuer/outputs.tf
@@ -1,4 +1,4 @@
output "configure_kubectl" {
description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig"
- value = module.eks_blueprints.configure_kubectl
+ value = "aws eks --region ${local.region} update-kubeconfig --name ${module.eks.cluster_id}"
}
diff --git a/examples/vpc-cni-custom-networking/main.tf b/examples/vpc-cni-custom-networking/main.tf
index 57e330955e..2a902f7082 100644
--- a/examples/vpc-cni-custom-networking/main.tf
+++ b/examples/vpc-cni-custom-networking/main.tf
@@ -3,40 +3,37 @@ provider "aws" {
}
provider "kubernetes" {
- host = module.eks_blueprints.eks_cluster_endpoint
- cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data)
+ host = module.eks.cluster_endpoint
+ cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
token = data.aws_eks_cluster_auth.this.token
}
provider "helm" {
kubernetes {
- host = module.eks_blueprints.eks_cluster_endpoint
- cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data)
+ host = module.eks.cluster_endpoint
+ cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
token = data.aws_eks_cluster_auth.this.token
}
}
provider "kubectl" {
apply_retry_count = 10
- host = module.eks_blueprints.eks_cluster_endpoint
- cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data)
+ host = module.eks.cluster_endpoint
+ cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
load_config_file = false
token = data.aws_eks_cluster_auth.this.token
}
data "aws_eks_cluster_auth" "this" {
- name = module.eks_blueprints.eks_cluster_id
+ name = module.eks.cluster_id
}
-data "aws_caller_identity" "current" {}
data "aws_availability_zones" "available" {}
locals {
name = basename(path.cwd)
region = "us-west-2"
- cluster_version = "1.23"
-
azs = slice(data.aws_availability_zones.available.names, 0, 3)
vpc_cidr = "10.0.0.0/16"
secondary_vpc_cidr = "10.99.0.0/16"
@@ -51,49 +48,75 @@ locals {
# EKS Blueprints
#---------------------------------------------------------------
-module "eks_blueprints" {
- source = "../.."
+module "eks" {
+ source = "terraform-aws-modules/eks/aws"
+ version = "~> 18.30"
cluster_name = local.name
- cluster_version = local.cluster_version
+ cluster_version = "1.23"
- vpc_id = module.vpc.vpc_id
- private_subnet_ids = slice(module.vpc.private_subnets, 0, 3)
- control_plane_subnet_ids = module.vpc.intra_subnets
+ cluster_enabled_log_types = ["api", "audit", "authenticator", "controllerManager", "scheduler"]
+ cluster_endpoint_private_access = true
- # https://github.com/aws-ia/terraform-aws-eks-blueprints/issues/485
- # https://github.com/aws-ia/terraform-aws-eks-blueprints/issues/494
- cluster_kms_key_additional_admin_arns = [data.aws_caller_identity.current.arn]
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_subnets
- managed_node_groups = {
- custom_networking = {
- node_group_name = "custom-net"
+ node_security_group_additional_rules = {
+ ingress_nodes_ephemeral = {
+ description = "Node-to-node on ephemeral ports"
+ protocol = "tcp"
+ from_port = 1025
+ to_port = 65535
+ type = "ingress"
+ self = true
+ }
+ egress_all = {
+ description = "Allow all egress"
+ protocol = "-1"
+ from_port = 0
+ to_port = 0
+ type = "egress"
+ cidr_blocks = ["0.0.0.0/0"]
+ }
+ }
+
+ eks_managed_node_groups = {
+ default = {
+ instance_types = ["m5.large"]
min_size = 1
max_size = 3
- desired_size = 2
-
- custom_ami_id = data.aws_ssm_parameter.eks_optimized_ami.value
- instance_types = ["m5.xlarge"]
-
- create_launch_template = true
- launch_template_os = "amazonlinux2eks"
+ desired_size = 1
# https://docs.aws.amazon.com/eks/latest/userguide/choosing-instance-type.html#determine-max-pods
- pre_userdata = <<-EOT
+ # These settings opt out of the default behavior and use the maximum number of pods, with a cap of 110 due to
+ # Kubernetes guidance https://kubernetes.io/docs/setup/best-practices/cluster-large/
+ # See more info here https://docs.aws.amazon.com/eks/latest/userguide/cni-increase-ip-addresses.html
+ # See issue https://github.com/awslabs/amazon-eks-ami/issues/844
+ pre_bootstrap_user_data = <<-EOT
+ #!/bin/bash
+ set -ex
+
MAX_PODS=$(/etc/eks/max-pods-calculator.sh \
--instance-type-from-imds \
--cni-version ${trimprefix(data.aws_eks_addon_version.latest["vpc-cni"].version, "v")} \
--cni-prefix-delegation-enabled \
--cni-custom-networking-enabled \
)
+
+ cat <<-EOF > /etc/profile.d/bootstrap.sh
+ export CONTAINER_RUNTIME="containerd"
+ export USE_MAX_PODS=false
+ export KUBELET_EXTRA_ARGS="--max-pods=$${MAX_PODS}"
+ EOF
+
+ # Source extra environment variables in bootstrap script
+ sed -i '/^set -o errexit/a\\nsource /etc/profile.d/bootstrap.sh' /etc/eks/bootstrap.sh
EOT
- # These settings opt out of the default behavior and use the maximum number of pods, with a cap of 110 due to
- # Kubernetes guidance https://kubernetes.io/docs/setup/best-practices/cluster-large/
- # See more info here https://docs.aws.amazon.com/eks/latest/userguide/cni-increase-ip-addresses.html
- kubelet_extra_args = "--max-pods=$${MAX_PODS}"
- bootstrap_extra_args = "--use-max-pods false"
+ update_config = {
+ max_unavailable_percentage = 33
+ }
}
}
@@ -103,10 +126,13 @@ module "eks_blueprints" {
module "eks_blueprints_kubernetes_addons" {
source = "../../modules/kubernetes-addons"
- eks_cluster_id = module.eks_blueprints.eks_cluster_id
- eks_cluster_endpoint = module.eks_blueprints.eks_cluster_endpoint
- eks_oidc_provider = module.eks_blueprints.oidc_provider
- eks_cluster_version = module.eks_blueprints.eks_cluster_version
+ eks_cluster_id = module.eks.cluster_id
+ eks_cluster_endpoint = module.eks.cluster_endpoint
+ eks_oidc_provider = module.eks.oidc_provider
+ eks_cluster_version = module.eks.cluster_version
+
+  # Wait on the managed node group before provisioning addons
+ data_plane_wait_arn = module.eks.eks_managed_node_groups["default"].node_group_arn
enable_amazon_eks_vpc_cni = true
amazon_eks_vpc_cni_config = {
@@ -129,7 +155,7 @@ data "aws_eks_addon_version" "latest" {
for_each = toset(["vpc-cni"])
addon_name = each.value
- kubernetes_version = module.eks_blueprints.eks_cluster_version
+ kubernetes_version = module.eks.cluster_version
most_recent = true
}
@@ -143,16 +169,16 @@ locals {
kind = "Config"
current-context = "terraform"
clusters = [{
- name = module.eks_blueprints.eks_cluster_id
+ name = module.eks.cluster_id
cluster = {
- certificate-authority-data = module.eks_blueprints.eks_cluster_certificate_authority_data
- server = module.eks_blueprints.eks_cluster_endpoint
+ certificate-authority-data = module.eks.cluster_certificate_authority_data
+ server = module.eks.cluster_endpoint
}
}]
contexts = [{
name = "terraform"
context = {
- cluster = module.eks_blueprints.eks_cluster_id
+ cluster = module.eks.cluster_id
user = "terraform"
}
}]
@@ -204,8 +230,8 @@ resource "kubectl_manifest" "eni_config" {
}
spec = {
securityGroups = [
- module.eks_blueprints.cluster_primary_security_group_id,
- module.eks_blueprints.worker_node_security_group_id,
+ module.eks.cluster_primary_security_group_id,
+ module.eks.node_security_group_id,
]
subnet = each.value
}
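
For context (not part of the diff): the hunk above only shows the security group references changing. The surrounding `kubectl_manifest` resource in this example creates one ENIConfig per availability zone, named after the zone, so that the VPC CNI can match each node to the secondary-CIDR subnet in its zone. A minimal sketch, assuming a `local.azs` list and that the secondary-CIDR subnets sit at positions 3 to 5 of `module.vpc.private_subnets` (both assumptions, not taken from the diff):

resource "kubectl_manifest" "eni_config" {
  # One ENIConfig per AZ; the object name must equal the zone name when the
  # CNI is configured with ENI_CONFIG_LABEL_DEF=topology.kubernetes.io/zone.
  for_each = zipmap(local.azs, slice(module.vpc.private_subnets, 3, 6))

  yaml_body = yamlencode({
    apiVersion = "crd.k8s.amazonaws.com/v1alpha1"
    kind       = "ENIConfig"
    metadata = {
      name = each.key
    }
    spec = {
      securityGroups = [
        module.eks.cluster_primary_security_group_id,
        module.eks.node_security_group_id,
      ]
      subnet = each.value
    }
  })
}
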
@@ -216,10 +242,6 @@ resource "kubectl_manifest" "eni_config" {
# Supporting Resources
#---------------------------------------------------------------
-data "aws_ssm_parameter" "eks_optimized_ami" {
- name = "/aws/service/eks/optimized-ami/${local.cluster_version}/amazon-linux-2/recommended/image_id"
-}
-
module "vpc" {
source = "terraform-aws-modules/vpc/aws"
version = "~> 3.0"
@@ -241,7 +263,6 @@ module "vpc" {
single_nat_gateway = true
enable_dns_hostnames = true
- # Manage so we can name
manage_default_network_acl = true
default_network_acl_tags = { Name = "${local.name}-default" }
manage_default_route_table = true
@@ -250,13 +271,11 @@ module "vpc" {
default_security_group_tags = { Name = "${local.name}-default" }
public_subnet_tags = {
- "kubernetes.io/cluster/${local.name}" = "shared"
- "kubernetes.io/role/elb" = 1
+ "kubernetes.io/role/elb" = 1
}
private_subnet_tags = {
- "kubernetes.io/cluster/${local.name}" = "shared"
- "kubernetes.io/role/internal-elb" = 1
+ "kubernetes.io/role/internal-elb" = 1
}
tags = local.tags
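
Also for orientation (not part of the diff): the VPC hunks above start below the CIDR and subnet definitions. A hedged sketch of how the `vpc_cidr` and `secondary_vpc_cidr` locals typically feed the VPC module in this example; the `local.azs` list and the `cidrsubnet` math are illustrative assumptions:

module "vpc" {
  source  = "terraform-aws-modules/vpc/aws"
  version = "~> 3.0"

  name = local.name
  cidr = local.vpc_cidr

  # The secondary CIDR is attached to the VPC; pods draw their IPs from it
  # (via the ENIConfig subnets) while nodes stay in the primary CIDR.
  secondary_cidr_blocks = [local.secondary_vpc_cidr]

  azs = local.azs
  private_subnets = concat(
    [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 4, k)],           # node subnets
    [for k, v in local.azs : cidrsubnet(local.secondary_vpc_cidr, 2, k)], # pod/ENIConfig subnets
  )
  public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 48)]
}
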
diff --git a/examples/vpc-cni-custom-networking/outputs.tf b/examples/vpc-cni-custom-networking/outputs.tf
index 55552d3138..b7decade8e 100644
--- a/examples/vpc-cni-custom-networking/outputs.tf
+++ b/examples/vpc-cni-custom-networking/outputs.tf
@@ -1,4 +1,4 @@
output "configure_kubectl" {
description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig"
- value = module.eks_blueprints.configure_kubectl
+ value = "aws eks --region ${local.region} update-kubeconfig --name ${module.eks.cluster_id}"
}
diff --git a/modules/aws-eks-fargate-profiles/README.md b/modules/aws-eks-fargate-profiles/README.md
index b749ffe439..32b6aba0fa 100644
--- a/modules/aws-eks-fargate-profiles/README.md
+++ b/modules/aws-eks-fargate-profiles/README.md
@@ -4,8 +4,6 @@
The Fargate profile allows you to declare which pods run on Fargate for an Amazon EKS cluster. This declaration is done through the profile’s selectors. Each profile can have up to five selectors, each containing a namespace and optional labels. You must define a namespace for every selector; the label field consists of multiple optional key-value pairs.
-Checkout the usage docs for Fargate Profiles [examples](https://aws-ia.github.io/terraform-aws-eks-blueprints/latest/node-groups/)
-
## Requirements
diff --git a/modules/aws-eks-managed-node-groups/README.md b/modules/aws-eks-managed-node-groups/README.md
index c075a2faee..f3e160c773 100644
--- a/modules/aws-eks-managed-node-groups/README.md
+++ b/modules/aws-eks-managed-node-groups/README.md
@@ -12,8 +12,6 @@ _NOTE_:
- You can create self-managed nodes in an AWS Region where you have AWS Outposts, AWS Wavelength, or AWS Local Zones enabled
- You should not set both `create_launch_template` and `remote_access` to true, or you'll end up with new managed node groups that won't be able to join the cluster.
-Checkout the usage docs for Managed Node groups [examples](https://aws-ia.github.io/terraform-aws-eks-blueprints/latest/node-groups/)
-
## Requirements
diff --git a/modules/aws-eks-self-managed-node-groups/README.md b/modules/aws-eks-self-managed-node-groups/README.md
index 78f732b9dc..cbf0a743bd 100644
--- a/modules/aws-eks-self-managed-node-groups/README.md
+++ b/modules/aws-eks-self-managed-node-groups/README.md
@@ -6,8 +6,6 @@ Amazon EKS Self Managed Node Groups lets you create, update, scale, and terminat
This module allows you to create on-demand or Spot self-managed Linux or Windows node groups. You can instantiate the module once with a map of node group values to create multiple self-managed node groups. By default, the module uses the latest available version of the Amazon-provided EKS-optimized AMIs for the Amazon Linux 2, Bottlerocket, or Windows 2019 Server Core operating systems. You can override the image via the `custom_ami_id` input variable.
-Checkout the usage docs for Self-managed Node groups [examples](https://aws-ia.github.io/terraform-aws-eks-blueprints/latest/node-groups/)
-
## Requirements
diff --git a/modules/kubernetes-addons/aws-privateca-issuer/data.tf b/modules/kubernetes-addons/aws-privateca-issuer/data.tf
deleted file mode 100644
index f6845d5867..0000000000
--- a/modules/kubernetes-addons/aws-privateca-issuer/data.tf
+++ /dev/null
@@ -1,11 +0,0 @@
-data "aws_iam_policy_document" "aws_privateca_issuer" {
- statement {
- effect = "Allow"
- resources = [var.aws_privateca_acmca_arn]
- actions = [
- "acm-pca:DescribeCertificateAuthority",
- "acm-pca:GetCertificate",
- "acm-pca:IssueCertificate",
- ]
- }
-}
diff --git a/modules/kubernetes-addons/aws-privateca-issuer/main.tf b/modules/kubernetes-addons/aws-privateca-issuer/main.tf
index f3ae562cea..db5e5c2f18 100644
--- a/modules/kubernetes-addons/aws-privateca-issuer/main.tf
+++ b/modules/kubernetes-addons/aws-privateca-issuer/main.tf
@@ -7,6 +7,18 @@ module "helm_addon" {
addon_context = var.addon_context
}
+data "aws_iam_policy_document" "aws_privateca_issuer" {
+ statement {
+ effect = "Allow"
+ resources = [var.aws_privateca_acmca_arn]
+ actions = [
+ "acm-pca:DescribeCertificateAuthority",
+ "acm-pca:GetCertificate",
+ "acm-pca:IssueCertificate",
+ ]
+ }
+}
+
resource "aws_iam_policy" "aws_privateca_issuer" {
description = "AWS PCA issuer IAM policy"
name = "${var.addon_context.eks_cluster_id}-${local.helm_config["name"]}-irsa"
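
A side note on the two hunks above (not part of the diff): moving the `aws_iam_policy_document` data source from `data.tf` into `main.tf` is purely organizational, since Terraform evaluates all `.tf` files in a module together and the rendered JSON is consumed exactly as before. A minimal, self-contained sketch of the pattern, with illustrative names:

# A policy document data source can live in the same file as the resource
# that consumes its rendered JSON; the file split carries no semantic meaning.
data "aws_iam_policy_document" "example" {
  statement {
    effect    = "Allow"
    actions   = ["acm-pca:GetCertificate"]
    resources = ["*"]
  }
}

resource "aws_iam_policy" "example" {
  name   = "example-irsa"
  policy = data.aws_iam_policy_document.example.json
}
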
diff --git a/test/src/eks_blueprints_e2e_test.go b/test/src/eks_blueprints_e2e_test.go
index 650e2dd724..44e6adeafe 100644
--- a/test/src/eks_blueprints_e2e_test.go
+++ b/test/src/eks_blueprints_e2e_test.go
@@ -42,7 +42,7 @@ var (
destroyModules = []string{
"module.eks_blueprints_kubernetes_addons",
- "module.eks_blueprints",
+ "module.eks",
"module.vpc",
"full_destroy",
}