# main.tf (forked from cloudposse/terraform-aws-eks-node-group)

provider "aws" {
region = var.region
}
module "label" {
source = "cloudposse/label/null"
version = "0.25.0"
# This is the preferred way to add attributes. It will put "cluster" last
# after any attributes set in `var.attributes` or `context.attributes`.
# In this case, we do not care, because we are only using this instance
# of this module to create tags.
attributes = ["cluster"]
context = module.this.context
}
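# Illustrative only (the actual values come from the context): with namespace
# "eg", stage "test", and name "example", module.label.id would render as
# "eg-test-example-cluster", with the "cluster" attribute appended last.
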
data "aws_caller_identity" "current" {}
data "aws_iam_session_context" "current" {
arn = data.aws_caller_identity.current.arn
}
locals {
  # The specific kubernetes.io/cluster/* resource tags below are required
  # for EKS and Kubernetes to discover and manage networking resources.
  # https://aws.amazon.com/premiumsupport/knowledge-center/eks-vpc-subnet-discovery/
  # https://github.com/kubernetes-sigs/aws-load-balancer-controller/blob/main/docs/deploy/subnet_discovery.md
  # (They are not applied in this file; see the note after the subnets module below.)
  tags = { "kubernetes.io/cluster/${module.label.id}" = "shared" }

  allow_all_ingress_rule = {
    key              = "allow_all_ingress"
    type             = "ingress"
    from_port        = 0
    to_port          = 0 # from and to port are ignored when protocol is "-1"; a warning is given if not zero
    protocol         = "-1"
    description      = "Allow all ingress"
    cidr_blocks      = ["0.0.0.0/0"]
    ipv6_cidr_blocks = ["::/0"]
  }

  allow_http_ingress_rule = {
    key              = "http"
    type             = "ingress"
    from_port        = 80
    to_port          = 80
    protocol         = "tcp"
    description      = "Allow HTTP ingress"
    cidr_blocks      = ["0.0.0.0/0"]
    ipv6_cidr_blocks = ["::/0"]
  }

  extra_policy_arn = "arn:aws:iam::aws:policy/job-function/ViewOnlyAccess"

  # Enable the IAM user creating the cluster to administer it,
  # without using the bootstrap_cluster_creator_admin_permissions option,
  # as a way to test the access_entry_map feature.
  # In general, this is not recommended. Instead, you should
  # create the access_entry_map statically, with the ARNs you want to
  # have access to the cluster. We do it dynamically here just for testing purposes.
  # See the original PR for more information:
  # https://github.com/cloudposse/terraform-aws-eks-cluster/pull/206
  access_entry_map = {
    (data.aws_iam_session_context.current.issuer_arn) = {
      access_policy_associations = {
        ClusterAdmin = {}
      }
    }
  }
}

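# As noted above, a static access_entry_map is generally preferable. A minimal
# sketch, assuming a pre-existing admin role (the ARN below is hypothetical and
# not created by this example):
#
#   access_entry_map = {
#     "arn:aws:iam::111111111111:role/eks-admins" = {
#       access_policy_associations = {
#         ClusterAdmin = {}
#       }
#     }
#   }
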
module "vpc" {
source = "cloudposse/vpc/aws"
version = "2.2.0"
ipv4_primary_cidr_block = var.vpc_cidr_block
context = module.this.context
}
module "subnets" {
source = "cloudposse/dynamic-subnets/aws"
version = "2.4.2"
availability_zones = var.availability_zones
vpc_id = module.vpc.vpc_id
igw_id = [module.vpc.igw_id]
ipv4_cidr_block = [module.vpc.vpc_cidr_block]
nat_gateway_enabled = false
nat_instance_enabled = false
context = module.this.context
}
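# The kubernetes.io/cluster/* discovery tags defined in local.tags are not
# wired up anywhere in this file. A minimal sketch of applying them, assuming
# the standard Cloud Posse `tags` input on the subnets module:
#
#   tags = local.tags
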
module "ssh_source_access" {
source = "cloudposse/security-group/aws"
version = "2.2.0"
attributes = ["ssh", "source"]
security_group_description = "Test source security group ssh access only"
allow_all_egress = true
rules = [local.allow_all_ingress_rule]
# rules_map = { ssh_source = [local.allow_all_ingress_rule] }
vpc_id = module.vpc.vpc_id
context = module.label.context
}
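# The commented-out rules_map line above is the map-keyed variant of `rules`
# in the same security-group module; keying each rule list by a static name is
# meant to give Terraform a stable identity for the rules when the set changes.
# It is left disabled here in favor of the simpler `rules` list.
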
module "https_sg" {
source = "cloudposse/security-group/aws"
version = "2.2.0"
attributes = ["http"]
security_group_description = "Allow http access"
allow_all_egress = true
rules = [local.allow_http_ingress_rule]
vpc_id = module.vpc.vpc_id
context = module.label.context
}
module "eks_cluster" {
source = "cloudposse/eks-cluster/aws"
version = "4.2.0"
region = var.region
subnet_ids = module.subnets.public_subnet_ids
kubernetes_version = var.kubernetes_version
oidc_provider_enabled = var.oidc_provider_enabled
enabled_cluster_log_types = var.enabled_cluster_log_types
cluster_log_retention_period = var.cluster_log_retention_period
access_config = {
authentication_mode = "API"
bootstrap_cluster_creator_admin_permissions = false
}
access_entry_map = local.access_entry_map
context = module.this.context
}
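# A simpler alternative, not used here so that access_entry_map gets exercised:
# set bootstrap_cluster_creator_admin_permissions = true above and drop
# access_entry_map, letting EKS grant the creating principal admin access to
# the cluster automatically.
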
module "eks_node_group" {
source = "../../"
subnet_ids = module.this.enabled ? module.subnets.public_subnet_ids : ["filler_string_for_enabled_is_false"]
cluster_name = module.this.enabled ? module.eks_cluster.eks_cluster_id : "disabled"
instance_types = var.instance_types
desired_size = var.desired_size
min_size = var.min_size
max_size = var.max_size
kubernetes_version = [var.kubernetes_version]
kubernetes_labels = merge(var.kubernetes_labels, { attributes = coalesce(join(module.this.delimiter, module.this.attributes), "none") })
kubernetes_taints = var.kubernetes_taints
cluster_autoscaler_enabled = true
block_device_mappings = [{
device_name = "/dev/xvda"
volume_size = 20
volume_type = "gp2"
encrypted = true
delete_on_termination = true
}]
ec2_ssh_key_name = var.ec2_ssh_key_name
ssh_access_security_group_ids = [module.ssh_source_access.id]
associated_security_group_ids = [module.ssh_source_access.id, module.https_sg.id]
node_role_policy_arns = [local.extra_policy_arn]
update_config = var.update_config
ami_type = var.ami_type
ami_release_version = var.ami_release_version
before_cluster_joining_userdata = var.before_cluster_joining_userdata
kubelet_additional_options = var.kubelet_additional_options
after_cluster_joining_userdata = var.after_cluster_joining_userdata
create_before_destroy = true
force_update_version = var.force_update_version
replace_node_group_on_version_update = var.replace_node_group_on_version_update
node_group_terraform_timeouts = [{
create = "25m"
delete = "20m"
}]
context = module.this.context
}
module "eks_node_group_minimal" {
source = "../../"
# We need to do something to avoid a name clash with the Node Role.
# Easiest thing to do is reuse the node role created by the other node group.
node_role_arn = [module.eks_node_group.eks_node_group_role_arn]
subnet_ids = module.this.enabled ? module.subnets.public_subnet_ids : ["filler_string_for_enabled_is_false"]
cluster_name = module.this.enabled ? module.eks_cluster.eks_cluster_id : "disabled"
instance_types = var.instance_types
desired_size = var.desired_size
min_size = var.min_size
max_size = var.max_size
kubernetes_version = [var.kubernetes_version]
ami_type = var.ami_type
ami_release_version = var.ami_release_version
node_group_terraform_timeouts = [{
create = "15m"
delete = "20m"
}]
context = module.this.context
}
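# A sketch of outputs that could surface values already referenced in this file
# (these would normally live in an outputs.tf next to this example):
#
#   output "eks_cluster_id" {
#     value = module.eks_cluster.eks_cluster_id
#   }
#
#   output "eks_node_group_role_arn" {
#     value = module.eks_node_group.eks_node_group_role_arn
#   }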