From ffec553ae703e3ed853e63c29518b705ee9b7a22 Mon Sep 17 00:00:00 2001
From: Bryant Biggs
Date: Fri, 28 Oct 2022 19:12:09 -0400
Subject: [PATCH] fix: Update appmesh example that was recently added

---
 .github/workflows/e2e-parallel-destroy.yml |  1 -
 .github/workflows/e2e-parallel-full.yml    |  1 -
 docs/add-ons/nginx.md                      |  4 +-
 examples/appmesh-mtls/main.tf              | 60 ++++++++++------------
 examples/appmesh-mtls/outputs.tf           |  2 +-
 5 files changed, 30 insertions(+), 38 deletions(-)

diff --git a/.github/workflows/e2e-parallel-destroy.yml b/.github/workflows/e2e-parallel-destroy.yml
index 7c98a02f43..51c2304ac1 100644
--- a/.github/workflows/e2e-parallel-destroy.yml
+++ b/.github/workflows/e2e-parallel-destroy.yml
@@ -30,7 +30,6 @@ jobs:
           - example_path: examples/fargate-serverless
           # - example_path: examples/fully-private-eks-cluster # skipping until issue #711 is addressed
           - example_path: examples/gitops/argocd
-          # - example_path: examples/ingress-controllers/nginx # ignoring due to https://github.com/kubernetes-sigs/aws-load-balancer-controller/issues/1629
           - example_path: examples/ipv6-eks-cluster
           - example_path: examples/karpenter
           - example_path: examples/multi-tenancy-with-teams
diff --git a/.github/workflows/e2e-parallel-full.yml b/.github/workflows/e2e-parallel-full.yml
index 1a1bb134bc..46337edb21 100644
--- a/.github/workflows/e2e-parallel-full.yml
+++ b/.github/workflows/e2e-parallel-full.yml
@@ -34,7 +34,6 @@ jobs:
           - example_path: examples/fargate-serverless
           # - example_path: examples/fully-private-eks-cluster # skipping until issue #711
           - example_path: examples/gitops/argocd
-          # - example_path: examples/ingress-controllers/nginx # ignoring due to https://github.com/kubernetes-sigs/aws-load-balancer-controller/issues/1629
           - example_path: examples/ipv6-eks-cluster
           - example_path: examples/karpenter
           - example_path: examples/multi-tenancy-with-teams
diff --git a/docs/add-ons/nginx.md b/docs/add-ons/nginx.md
index 39ef23a76e..3e67f65337 100644
--- a/docs/add-ons/nginx.md
+++ b/docs/add-ons/nginx.md
@@ -6,7 +6,7 @@ Other than handling Kubernetes ingress objects, this ingress controller can faci
 ## Usage
 
-Nginx Ingress Controller can be deployed by enabling the add-on via the following. Check out the full [example](https://github.com/aws-ia/terraform-aws-eks-blueprints/blob/main/examples/ingress-controllers/nginx/main.tf) to deploy the EKS Cluster with Nginx Ingress Controller.
+Nginx Ingress Controller can be deployed by enabling the add-on via the following.
 
 ```hcl
 enable_ingress_nginx = true
 ```
@@ -41,7 +41,7 @@ You can optionally customize the Helm chart that deploys `nginx` via the followi
 
 The following properties are made available for use when managing the add-on via GitOps.
 
-Refer to [locals.tf](https://github.com/aws-ia/terraform-aws-eks-blueprints/blob/main/modules/kubernetes-addons/ingress-nginx/locals.tf) for latest config. GitOps with ArgoCD Add-on repo is located [here](https://github.com/aws-samples/eks-blueprints-add-ons/blob/main/chart/values.yaml)
+GitOps with ArgoCD Add-on repo is located [here](https://github.com/aws-samples/eks-blueprints-add-ons/blob/main/chart/values.yaml)
 
 ``` hcl
 argocd_gitops_config = {
diff --git a/examples/appmesh-mtls/main.tf b/examples/appmesh-mtls/main.tf
index 73eaf51025..a672c51f02 100644
--- a/examples/appmesh-mtls/main.tf
+++ b/examples/appmesh-mtls/main.tf
@@ -3,29 +3,29 @@ provider "aws" {
 }
 
 provider "kubernetes" {
-  host                   = module.eks_blueprints.eks_cluster_endpoint
-  cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data)
+  host                   = module.eks.cluster_endpoint
+  cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
   token                  = data.aws_eks_cluster_auth.this.token
 }
 
 provider "helm" {
   kubernetes {
-    host                   = module.eks_blueprints.eks_cluster_endpoint
-    cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data)
+    host                   = module.eks.cluster_endpoint
+    cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
     token                  = data.aws_eks_cluster_auth.this.token
   }
 }
 
 provider "kubectl" {
   apply_retry_count      = 10
-  host                   = module.eks_blueprints.eks_cluster_endpoint
-  cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data)
+  host                   = module.eks.cluster_endpoint
+  cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
   load_config_file       = false
   token                  = data.aws_eks_cluster_auth.this.token
 }
 
 data "aws_eks_cluster_auth" "this" {
-  name = module.eks_blueprints.eks_cluster_id
+  name = module.eks.cluster_id
 }
 
 data "aws_availability_zones" "available" {}
@@ -48,28 +48,24 @@ locals {
 # EKS Blueprints
 #---------------------------------------------------------------
 
-module "eks_blueprints" {
-  source = "../.."
+module "eks" {
+  source  = "terraform-aws-modules/eks/aws"
+  version = "~> 18.30"
 
-  cluster_name    = local.name
-  cluster_version = "1.23"
+  cluster_name              = local.name
+  cluster_version           = "1.23"
+  cluster_enabled_log_types = ["api", "audit", "authenticator", "controllerManager", "scheduler"]
 
-  vpc_id             = module.vpc.vpc_id
-  private_subnet_ids = module.vpc.private_subnets
+  vpc_id     = module.vpc.vpc_id
+  subnet_ids = module.vpc.private_subnets
 
-  managed_node_groups = {
-    this = {
-      node_group_name = local.name
-      instance_types  = ["m5.large"]
-      subnet_ids      = module.vpc.private_subnets
+  eks_managed_node_groups = {
+    default = {
+      instance_types = ["m5.large"]
 
       min_size     = 1
-      max_size     = 2
+      max_size     = 3
       desired_size = 1
-
-      update_config = [{
-        max_unavailable_percentage = 30
-      }]
     }
   }
 
@@ -79,10 +75,10 @@
 module "eks_blueprints_kubernetes_addons" {
   source = "../../modules/kubernetes-addons"
 
-  eks_cluster_id       = module.eks_blueprints.eks_cluster_id
-  eks_cluster_endpoint = module.eks_blueprints.eks_cluster_endpoint
-  eks_oidc_provider    = module.eks_blueprints.oidc_provider
-  eks_cluster_version  = module.eks_blueprints.eks_cluster_version
+  eks_cluster_id       = module.eks.cluster_id
+  eks_cluster_endpoint = module.eks.cluster_endpoint
+  eks_oidc_provider    = module.eks.oidc_provider
+  eks_cluster_version  = module.eks.cluster_version
   eks_cluster_domain   = var.eks_cluster_domain
 
   enable_amazon_eks_vpc_cni = true
@@ -141,7 +137,7 @@ resource "kubectl_manifest" "cluster_pca_issuer" {
     kind       = "AWSPCAClusterIssuer"
 
     metadata = {
-      name = module.eks_blueprints.eks_cluster_id
+      name = module.eks.cluster_id
     }
 
     spec = {
@@ -169,7 +165,7 @@ resource "kubectl_manifest" "example_pca_certificate" {
       issuerRef = {
         group = "awspca.cert-manager.io"
         kind  = "AWSPCAClusterIssuer"
-        name : module.eks_blueprints.eks_cluster_id
+        name : module.eks.cluster_id
       }
       renewBefore = "360h0m0s"
       # This is the name with which the K8 Secret will be available
@@ -218,13 +214,11 @@ module "vpc" {
   default_security_group_tags = { Name = "${local.name}-default" }
 
   public_subnet_tags = {
-    "kubernetes.io/cluster/${local.name}" = "shared"
-    "kubernetes.io/role/elb"              = 1
+    "kubernetes.io/role/elb" = 1
   }
 
   private_subnet_tags = {
-    "kubernetes.io/cluster/${local.name}" = "shared"
-    "kubernetes.io/role/internal-elb"     = 1
+    "kubernetes.io/role/internal-elb" = 1
   }
 
   tags = local.tags
diff --git a/examples/appmesh-mtls/outputs.tf b/examples/appmesh-mtls/outputs.tf
index 55552d3138..b7decade8e 100644
--- a/examples/appmesh-mtls/outputs.tf
+++ b/examples/appmesh-mtls/outputs.tf
@@ -1,4 +1,4 @@
 output "configure_kubectl" {
   description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig"
-  value       = module.eks_blueprints.configure_kubectl
+  value       = "aws eks --region ${local.region} update-kubeconfig --name ${module.eks.cluster_id}"
 }
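Because the net effect of the `main.tf` hunks is harder to read from the interleaved `-`/`+` lines, below is a sketch of the `module "eks"` block as it reads once the patch applies, assembled only from the added lines above; any arguments that fall outside the patch's hunks (such as the module's trailing `tags` and closing lines) are assumed to carry over from the example unchanged.

```hcl
# Net result of this patch: the example consumes the upstream
# terraform-aws-modules/eks/aws module directly instead of the repo-internal
# wrapper, so downstream references become module.eks.cluster_id,
# module.eks.cluster_endpoint, etc. rather than module.eks_blueprints.eks_cluster_*.
module "eks" {
  source  = "terraform-aws-modules/eks/aws"
  version = "~> 18.30"

  cluster_name              = local.name
  cluster_version           = "1.23"
  cluster_enabled_log_types = ["api", "audit", "authenticator", "controllerManager", "scheduler"]

  vpc_id     = module.vpc.vpc_id
  subnet_ids = module.vpc.private_subnets

  eks_managed_node_groups = {
    default = {
      instance_types = ["m5.large"]

      min_size     = 1
      max_size     = 3
      desired_size = 1
    }
  }

  # Remaining arguments (e.g. tags) sit outside this patch's hunks and are
  # assumed unchanged from the example.
}
```

A side effect worth noting: the `configure_kubectl` output now renders the kubeconfig command itself instead of passing through the wrapper module's output, so after `terraform apply`, running `terraform output configure_kubectl` prints an `aws eks --region <region> update-kubeconfig --name <cluster>` command built from `local.region` and `module.eks.cluster_id`.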