From 0259865dec98e9572784d0efaeba4fbec78a3c78 Mon Sep 17 00:00:00 2001 From: Kyle Smith Date: Fri, 8 Jun 2018 11:26:47 -0400 Subject: [PATCH 1/3] Fixes for Smart Columbus OS deployment --- main.tf | 165 ++++++++++++++++++++++++++------------------------- outputs.tf | 25 ++++---- variables.tf | 65 +++++++++++--------- 3 files changed, 134 insertions(+), 121 deletions(-) diff --git a/main.tf b/main.tf index b219ff3..4b7d0d1 100644 --- a/main.tf +++ b/main.tf @@ -4,7 +4,8 @@ # Retrieve AWS credentials from env variables AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY provider "aws" { - region = "${var.aws_region}" + region = "${var.aws_region}" + profile = "${var.aws_profile}" } ##### @@ -61,7 +62,7 @@ resource "aws_iam_policy_attachment" "master-attach" { } resource "aws_iam_instance_profile" "master_profile" { - name = "${var.cluster_name}-master" + name = "${var.cluster_name}-master" role = "${aws_iam_role.master_role.name}" } @@ -107,7 +108,7 @@ resource "aws_iam_policy_attachment" "node-attach" { } resource "aws_iam_instance_profile" "node_profile" { - name = "${var.cluster_name}-node" + name = "${var.cluster_name}-node" role = "${aws_iam_role.node_role.name}" } @@ -122,51 +123,51 @@ data "aws_subnet" "cluster_subnet" { resource "aws_security_group" "kubernetes" { vpc_id = "${data.aws_subnet.cluster_subnet.vpc_id}" - name = "${var.cluster_name}" + name = "${var.cluster_name}" tags = "${merge(map("Name", var.cluster_name, format("kubernetes.io/cluster/%v", var.cluster_name), "owned"), var.tags)}" } # Allow outgoing connectivity resource "aws_security_group_rule" "allow_all_outbound_from_kubernetes" { - type = "egress" - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - security_group_id = "${aws_security_group.kubernetes.id}" + type = "egress" + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + security_group_id = "${aws_security_group.kubernetes.id}" } # Allow SSH connections only from specific CIDR (TODO) 
resource "aws_security_group_rule" "allow_ssh_from_cidr" { - count = "${length(var.ssh_access_cidr)}" - type = "ingress" - from_port = 22 - to_port = 22 - protocol = "tcp" - cidr_blocks = ["${var.ssh_access_cidr[count.index]}"] - security_group_id = "${aws_security_group.kubernetes.id}" + count = "${length(var.ssh_access_cidr)}" + type = "ingress" + from_port = 22 + to_port = 22 + protocol = "tcp" + cidr_blocks = ["${var.ssh_access_cidr[count.index]}"] + security_group_id = "${aws_security_group.kubernetes.id}" } # Allow the security group members to talk with each other without restrictions resource "aws_security_group_rule" "allow_cluster_crosstalk" { - type = "ingress" - from_port = 0 - to_port = 0 - protocol = "-1" - source_security_group_id = "${aws_security_group.kubernetes.id}" - security_group_id = "${aws_security_group.kubernetes.id}" + type = "ingress" + from_port = 0 + to_port = 0 + protocol = "-1" + source_security_group_id = "${aws_security_group.kubernetes.id}" + security_group_id = "${aws_security_group.kubernetes.id}" } # Allow API connections only from specific CIDR (TODO) resource "aws_security_group_rule" "allow_api_from_cidr" { - count = "${length(var.api_access_cidr)}" - type = "ingress" - from_port = 6443 - to_port = 6443 - protocol = "tcp" - cidr_blocks = ["${var.api_access_cidr[count.index]}"] - security_group_id = "${aws_security_group.kubernetes.id}" + count = "${length(var.api_access_cidr)}" + type = "ingress" + from_port = 6443 + to_port = 6443 + protocol = "tcp" + cidr_blocks = ["${var.api_access_cidr[count.index]}"] + security_group_id = "${aws_security_group.kubernetes.id}" } ########## @@ -187,7 +188,6 @@ data "template_file" "init_master" { asg_min_nodes = "${var.min_worker_count}" asg_max_nodes = "${var.max_worker_count}" aws_subnets = "${join(" ", concat(var.worker_subnet_ids, list(var.master_subnet_id)))}" - } } @@ -201,11 +201,11 @@ data "template_file" "init_node" { } data "template_file" "cloud_init_config" { - template = 
"${file("${path.module}/scripts/cloud-init-config.yaml")}" + template = "${file("${path.module}/scripts/cloud-init-config.yaml")}" - vars { - calico_yaml = "${base64gzip("${file("${path.module}/scripts/calico.yaml")}")}" - } + vars { + calico_yaml = "${base64gzip("${file("${path.module}/scripts/calico.yaml")}")}" + } } data "template_cloudinit_config" "master_cloud_init" { @@ -241,7 +241,7 @@ data "template_cloudinit_config" "node_cloud_init" { ########## resource "aws_key_pair" "keypair" { - key_name = "${var.cluster_name}" + key_name = "${var.cluster_name}" public_key = "${file(var.ssh_public_key)}" } @@ -251,7 +251,7 @@ resource "aws_key_pair" "keypair" { data "aws_ami" "centos7" { most_recent = true - owners = ["aws-marketplace"] + owners = ["aws-marketplace"] filter { name = "product-code" @@ -274,43 +274,43 @@ data "aws_ami" "centos7" { ##### resource "aws_eip" "master" { - vpc = true + vpc = true } resource "aws_instance" "master" { - instance_type = "${var.master_instance_type}" + instance_type = "${var.master_instance_type}" - ami = "${data.aws_ami.centos7.id}" + ami = "${data.aws_ami.centos7.id}" - key_name = "${aws_key_pair.keypair.key_name}" + key_name = "${aws_key_pair.keypair.key_name}" - subnet_id = "${var.master_subnet_id}" + subnet_id = "${var.master_subnet_id}" - associate_public_ip_address = false + associate_public_ip_address = false - vpc_security_group_ids = [ - "${aws_security_group.kubernetes.id}" - ] + vpc_security_group_ids = [ + "${aws_security_group.kubernetes.id}", + ] - iam_instance_profile = "${aws_iam_instance_profile.master_profile.name}" + iam_instance_profile = "${aws_iam_instance_profile.master_profile.name}" - user_data = "${data.template_cloudinit_config.master_cloud_init.rendered}" + user_data = "${data.template_cloudinit_config.master_cloud_init.rendered}" - tags = "${merge(map("Name", join("-", list(var.cluster_name, "master")), format("kubernetes.io/cluster/%v", var.cluster_name), "owned"), var.tags)}" + tags = 
"${merge(map("Name", join("-", list(var.cluster_name, "master")), format("kubernetes.io/cluster/%v", var.cluster_name), "owned"), var.tags)}" - root_block_device { - volume_type = "gp2" - volume_size = "50" - delete_on_termination = true - } + root_block_device { + volume_type = "gp2" + volume_size = "50" + delete_on_termination = true + } - lifecycle { - ignore_changes = [ - "ami", - "user_data", - "associate_public_ip_address" - ] - } + lifecycle { + ignore_changes = [ + "ami", + "user_data", + "associate_public_ip_address", + ] + } } resource "aws_eip_association" "master_assoc" { @@ -323,14 +323,14 @@ resource "aws_eip_association" "master_assoc" { ##### resource "aws_launch_configuration" "nodes" { - name = "${var.cluster_name}-nodes" - image_id = "${data.aws_ami.centos7.id}" - instance_type = "${var.worker_instance_type}" - key_name = "${aws_key_pair.keypair.key_name}" + name = "${var.cluster_name}-nodes" + image_id = "${data.aws_ami.centos7.id}" + instance_type = "${var.worker_instance_type}" + key_name = "${aws_key_pair.keypair.key_name}" iam_instance_profile = "${aws_iam_instance_profile.node_profile.name}" security_groups = [ - "${aws_security_group.kubernetes.id}" + "${aws_security_group.kubernetes.id}", ] associate_public_ip_address = true @@ -338,37 +338,38 @@ resource "aws_launch_configuration" "nodes" { user_data = "${data.template_cloudinit_config.node_cloud_init.rendered}" root_block_device { - volume_type = "gp2" - volume_size = "50" - delete_on_termination = true + volume_type = "gp2" + volume_size = "50" + delete_on_termination = true } lifecycle { create_before_destroy = true + ignore_changes = [ - "user_data" + "user_data", ] } } resource "aws_autoscaling_group" "nodes" { vpc_zone_identifier = ["${var.worker_subnet_ids}"] - - name = "${var.cluster_name}-nodes" - max_size = "${var.max_worker_count}" - min_size = "${var.min_worker_count}" - desired_capacity = "${var.min_worker_count}" - launch_configuration = 
"${aws_launch_configuration.nodes.name}" + + name = "${var.cluster_name}-nodes" + max_size = "${var.max_worker_count}" + min_size = "${var.min_worker_count}" + desired_capacity = "${var.min_worker_count}" + launch_configuration = "${aws_launch_configuration.nodes.name}" tags = [{ - key = "Name" - value = "${var.cluster_name}-node" + key = "Name" + value = "${var.cluster_name}-node" propagate_at_launch = true }] tags = [{ - key = "kubernetes.io/cluster/${var.cluster_name}" - value = "owned" + key = "kubernetes.io/cluster/${var.cluster_name}" + value = "owned" propagate_at_launch = true }] @@ -376,7 +377,7 @@ resource "aws_autoscaling_group" "nodes" { lifecycle { ignore_changes = ["desired_capacity"] - } + } } ##### @@ -392,6 +393,6 @@ resource "aws_route53_record" "master" { zone_id = "${data.aws_route53_zone.dns_zone.zone_id}" name = "${var.cluster_name}.${var.hosted_zone}" type = "A" - records = ["${aws_eip.master.public_ip}"] + records = ["${aws_instance.master.private_ip}"] ttl = 300 } diff --git a/outputs.tf b/outputs.tf index c31cfb1..e672010 100644 --- a/outputs.tf +++ b/outputs.tf @@ -3,26 +3,31 @@ ##### output "ssh_user" { - description = "SSH user to download kubeconfig file" - value = "centos" + description = "SSH user to download kubeconfig file" + value = "centos" } output "public_ip" { - description = "Cluster IP address" - value = "${aws_eip.master.public_ip}" + description = "Cluster IP address" + value = "${aws_eip.master.public_ip}" +} + +output "private_ip" { + description = "Cluster private IP address" + value = "${aws_instance.master.private_ip}" } output "dns" { - description = "Cluster DNS address" - value = "${aws_route53_record.master.fqdn}" + description = "Cluster DNS address" + value = "${aws_route53_record.master.fqdn}" } output "kubeconfig_dns" { - description = "Path to the the kubeconfig file using DNS address" - value = "/home/centos/kubeconfig" + description = "Path to the the kubeconfig file using DNS address" + value = 
"/home/centos/kubeconfig" } output "kubeconfig_ip" { - description = "Path to the kubeconfig file using IP address" - value = "/home/centos/kubeconfig_ip" + description = "Path to the kubeconfig file using IP address" + value = "/home/centos/kubeconfig_ip" } diff --git a/variables.tf b/variables.tf index 6f3b0ac..c76163b 100644 --- a/variables.tf +++ b/variables.tf @@ -1,80 +1,87 @@ variable "aws_region" { - description = "Region where Cloud Formation is created" - default = "eu-central-1" + description = "Region where Cloud Formation is created" + default = "eu-central-1" +} + +variable "aws_profile" { + description = "AWS credentials profile to use" + default = "default" } variable "cluster_name" { - description = "Name of the AWS Kubernetes cluster - will be used to name all created resources" + description = "Name of the AWS Kubernetes cluster - will be used to name all created resources" } variable "tags" { - description = "Tags used for the AWS resources created by this template" - type = "map" + description = "Tags used for the AWS resources created by this template" + type = "map" } variable "tags2" { - description = "Tags in format used for the AWS Autoscaling Group" - type = "list" + description = "Tags in format used for the AWS Autoscaling Group" + type = "list" } variable "addons" { - description = "list of YAML files with Kubernetes addons which should be installed" - type = "list" + description = "list of YAML files with Kubernetes addons which should be installed" + type = "list" } variable "master_instance_type" { - description = "Type of instance for master" - default = "t2.medium" + description = "Type of instance for master" + default = "t2.medium" } variable "worker_instance_type" { - description = "Type of instance for workers" - default = "t2.medium" + description = "Type of instance for workers" + default = "t2.medium" } variable "master_subnet_id" { - description = "The subnet-id to be used for the master instance. 
Master can be only in single subnet. All subnets have to belong to the same VPC." + description = "The subnet-id to be used for the master instance. Master can be only in single subnet. All subnets have to belong to the same VPC." } -variable "worker_subnet_ids" { - description = "The subnet-ids to be used for the worker instances. Workers can be in multiple subnets. Worker subnets can contain also the master subnet. If you want to run workers in different subnet(s) than master you have to tag the subnets with kubernetes.io/cluster/{cluster_name}=shared. All subnets have to belong to the same VPC." - type = "list" +variable "worker_subnet_ids" { + description = "The subnet-ids to be used for the worker instances. Workers can be in multiple subnets. Worker subnets can contain also the master subnet. If you want to run workers in different subnet(s) than master you have to tag the subnets with kubernetes.io/cluster/{cluster_name}=shared. All subnets have to belong to the same VPC." + type = "list" } variable "min_worker_count" { - description = "Minimal number of worker nodes" + description = "Minimal number of worker nodes" } variable "max_worker_count" { - description = "Maximal number of worker nodes" + description = "Maximal number of worker nodes" } variable "ssh_public_key" { - description = "Path to the pulic part of SSH key which should be used for the instance" - default = "~/.ssh/id_rsa.pub" + description = "Path to the pulic part of SSH key which should be used for the instance" + default = "~/.ssh/id_rsa.pub" } variable "hosted_zone" { - description = "Hosted zone to be used for the alias" + description = "Hosted zone to be used for the alias" } variable "hosted_zone_private" { - description = "Is the hosted zone public or private" - default = false + description = "Is the hosted zone public or private" + default = false } variable ssh_access_cidr { description = "List of CIDRs from which SSH access is allowed" - type = "list" + type = "list" + default = 
[ - "0.0.0.0/0" + "0.0.0.0/0", ] } variable api_access_cidr { description = "List of CIDRs from which API access is allowed" - type = "list" + type = "list" + default = [ - "0.0.0.0/0" + "0.0.0.0/0", ] -} \ No newline at end of file +} From 9d6cd87f9ee28d70c149ff8c26210ec50720d421 Mon Sep 17 00:00:00 2001 From: Kyle Smith Date: Fri, 8 Jun 2018 14:27:42 -0400 Subject: [PATCH 2/3] remove whitespace changes --- main.tf | 162 +++++++++++++++++++++++++-------------------------- outputs.tf | 24 ++++---- variables.tf | 62 ++++++++++---------- 3 files changed, 123 insertions(+), 125 deletions(-) diff --git a/main.tf b/main.tf index 4b7d0d1..6a2d69d 100644 --- a/main.tf +++ b/main.tf @@ -4,7 +4,7 @@ # Retrieve AWS credentials from env variables AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY provider "aws" { - region = "${var.aws_region}" + region = "${var.aws_region}" profile = "${var.aws_profile}" } @@ -62,7 +62,7 @@ resource "aws_iam_policy_attachment" "master-attach" { } resource "aws_iam_instance_profile" "master_profile" { - name = "${var.cluster_name}-master" + name = "${var.cluster_name}-master" role = "${aws_iam_role.master_role.name}" } @@ -108,7 +108,7 @@ resource "aws_iam_policy_attachment" "node-attach" { } resource "aws_iam_instance_profile" "node_profile" { - name = "${var.cluster_name}-node" + name = "${var.cluster_name}-node" role = "${aws_iam_role.node_role.name}" } @@ -123,51 +123,51 @@ data "aws_subnet" "cluster_subnet" { resource "aws_security_group" "kubernetes" { vpc_id = "${data.aws_subnet.cluster_subnet.vpc_id}" - name = "${var.cluster_name}" + name = "${var.cluster_name}" tags = "${merge(map("Name", var.cluster_name, format("kubernetes.io/cluster/%v", var.cluster_name), "owned"), var.tags)}" } # Allow outgoing connectivity resource "aws_security_group_rule" "allow_all_outbound_from_kubernetes" { - type = "egress" - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - security_group_id = "${aws_security_group.kubernetes.id}" + 
type = "egress" + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + security_group_id = "${aws_security_group.kubernetes.id}" } # Allow SSH connections only from specific CIDR (TODO) resource "aws_security_group_rule" "allow_ssh_from_cidr" { - count = "${length(var.ssh_access_cidr)}" - type = "ingress" - from_port = 22 - to_port = 22 - protocol = "tcp" - cidr_blocks = ["${var.ssh_access_cidr[count.index]}"] - security_group_id = "${aws_security_group.kubernetes.id}" + count = "${length(var.ssh_access_cidr)}" + type = "ingress" + from_port = 22 + to_port = 22 + protocol = "tcp" + cidr_blocks = ["${var.ssh_access_cidr[count.index]}"] + security_group_id = "${aws_security_group.kubernetes.id}" } # Allow the security group members to talk with each other without restrictions resource "aws_security_group_rule" "allow_cluster_crosstalk" { - type = "ingress" - from_port = 0 - to_port = 0 - protocol = "-1" - source_security_group_id = "${aws_security_group.kubernetes.id}" - security_group_id = "${aws_security_group.kubernetes.id}" + type = "ingress" + from_port = 0 + to_port = 0 + protocol = "-1" + source_security_group_id = "${aws_security_group.kubernetes.id}" + security_group_id = "${aws_security_group.kubernetes.id}" } # Allow API connections only from specific CIDR (TODO) resource "aws_security_group_rule" "allow_api_from_cidr" { - count = "${length(var.api_access_cidr)}" - type = "ingress" - from_port = 6443 - to_port = 6443 - protocol = "tcp" - cidr_blocks = ["${var.api_access_cidr[count.index]}"] - security_group_id = "${aws_security_group.kubernetes.id}" + count = "${length(var.api_access_cidr)}" + type = "ingress" + from_port = 6443 + to_port = 6443 + protocol = "tcp" + cidr_blocks = ["${var.api_access_cidr[count.index]}"] + security_group_id = "${aws_security_group.kubernetes.id}" } ########## @@ -188,6 +188,7 @@ data "template_file" "init_master" { asg_min_nodes = "${var.min_worker_count}" asg_max_nodes = "${var.max_worker_count}" 
aws_subnets = "${join(" ", concat(var.worker_subnet_ids, list(var.master_subnet_id)))}" + } } @@ -201,11 +202,11 @@ data "template_file" "init_node" { } data "template_file" "cloud_init_config" { - template = "${file("${path.module}/scripts/cloud-init-config.yaml")}" + template = "${file("${path.module}/scripts/cloud-init-config.yaml")}" - vars { - calico_yaml = "${base64gzip("${file("${path.module}/scripts/calico.yaml")}")}" - } + vars { + calico_yaml = "${base64gzip("${file("${path.module}/scripts/calico.yaml")}")}" + } } data "template_cloudinit_config" "master_cloud_init" { @@ -241,7 +242,7 @@ data "template_cloudinit_config" "node_cloud_init" { ########## resource "aws_key_pair" "keypair" { - key_name = "${var.cluster_name}" + key_name = "${var.cluster_name}" public_key = "${file(var.ssh_public_key)}" } @@ -251,7 +252,7 @@ resource "aws_key_pair" "keypair" { data "aws_ami" "centos7" { most_recent = true - owners = ["aws-marketplace"] + owners = ["aws-marketplace"] filter { name = "product-code" @@ -274,43 +275,43 @@ data "aws_ami" "centos7" { ##### resource "aws_eip" "master" { - vpc = true + vpc = true } resource "aws_instance" "master" { - instance_type = "${var.master_instance_type}" + instance_type = "${var.master_instance_type}" - ami = "${data.aws_ami.centos7.id}" + ami = "${data.aws_ami.centos7.id}" - key_name = "${aws_key_pair.keypair.key_name}" + key_name = "${aws_key_pair.keypair.key_name}" - subnet_id = "${var.master_subnet_id}" + subnet_id = "${var.master_subnet_id}" - associate_public_ip_address = false + associate_public_ip_address = false - vpc_security_group_ids = [ - "${aws_security_group.kubernetes.id}", - ] + vpc_security_group_ids = [ + "${aws_security_group.kubernetes.id}" + ] - iam_instance_profile = "${aws_iam_instance_profile.master_profile.name}" + iam_instance_profile = "${aws_iam_instance_profile.master_profile.name}" - user_data = "${data.template_cloudinit_config.master_cloud_init.rendered}" + user_data = 
"${data.template_cloudinit_config.master_cloud_init.rendered}" - tags = "${merge(map("Name", join("-", list(var.cluster_name, "master")), format("kubernetes.io/cluster/%v", var.cluster_name), "owned"), var.tags)}" + tags = "${merge(map("Name", join("-", list(var.cluster_name, "master")), format("kubernetes.io/cluster/%v", var.cluster_name), "owned"), var.tags)}" - root_block_device { - volume_type = "gp2" - volume_size = "50" - delete_on_termination = true - } + root_block_device { + volume_type = "gp2" + volume_size = "50" + delete_on_termination = true + } - lifecycle { - ignore_changes = [ - "ami", - "user_data", - "associate_public_ip_address", - ] - } + lifecycle { + ignore_changes = [ + "ami", + "user_data", + "associate_public_ip_address" + ] + } } resource "aws_eip_association" "master_assoc" { @@ -323,14 +324,14 @@ resource "aws_eip_association" "master_assoc" { ##### resource "aws_launch_configuration" "nodes" { - name = "${var.cluster_name}-nodes" - image_id = "${data.aws_ami.centos7.id}" - instance_type = "${var.worker_instance_type}" - key_name = "${aws_key_pair.keypair.key_name}" + name = "${var.cluster_name}-nodes" + image_id = "${data.aws_ami.centos7.id}" + instance_type = "${var.worker_instance_type}" + key_name = "${aws_key_pair.keypair.key_name}" iam_instance_profile = "${aws_iam_instance_profile.node_profile.name}" security_groups = [ - "${aws_security_group.kubernetes.id}", + "${aws_security_group.kubernetes.id}" ] associate_public_ip_address = true @@ -338,38 +339,37 @@ resource "aws_launch_configuration" "nodes" { user_data = "${data.template_cloudinit_config.node_cloud_init.rendered}" root_block_device { - volume_type = "gp2" - volume_size = "50" - delete_on_termination = true + volume_type = "gp2" + volume_size = "50" + delete_on_termination = true } lifecycle { create_before_destroy = true - ignore_changes = [ - "user_data", + "user_data" ] } } resource "aws_autoscaling_group" "nodes" { vpc_zone_identifier = ["${var.worker_subnet_ids}"] - 
- name = "${var.cluster_name}-nodes" - max_size = "${var.max_worker_count}" - min_size = "${var.min_worker_count}" - desired_capacity = "${var.min_worker_count}" - launch_configuration = "${aws_launch_configuration.nodes.name}" + + name = "${var.cluster_name}-nodes" + max_size = "${var.max_worker_count}" + min_size = "${var.min_worker_count}" + desired_capacity = "${var.min_worker_count}" + launch_configuration = "${aws_launch_configuration.nodes.name}" tags = [{ - key = "Name" - value = "${var.cluster_name}-node" + key = "Name" + value = "${var.cluster_name}-node" propagate_at_launch = true }] tags = [{ - key = "kubernetes.io/cluster/${var.cluster_name}" - value = "owned" + key = "kubernetes.io/cluster/${var.cluster_name}" + value = "owned" propagate_at_launch = true }] @@ -377,7 +377,7 @@ resource "aws_autoscaling_group" "nodes" { lifecycle { ignore_changes = ["desired_capacity"] - } + } } ##### diff --git a/outputs.tf b/outputs.tf index e672010..f127f2b 100644 --- a/outputs.tf +++ b/outputs.tf @@ -3,31 +3,31 @@ ##### output "ssh_user" { - description = "SSH user to download kubeconfig file" - value = "centos" + description = "SSH user to download kubeconfig file" + value = "centos" } output "public_ip" { - description = "Cluster IP address" - value = "${aws_eip.master.public_ip}" + description = "Cluster IP address" + value = "${aws_eip.master.public_ip}" } output "private_ip" { - description = "Cluster private IP address" - value = "${aws_instance.master.private_ip}" + description = "Cluster private IP address" + value = "${aws_instance.master.private_ip}" } output "dns" { - description = "Cluster DNS address" - value = "${aws_route53_record.master.fqdn}" + description = "Cluster DNS address" + value = "${aws_route53_record.master.fqdn}" } output "kubeconfig_dns" { - description = "Path to the the kubeconfig file using DNS address" - value = "/home/centos/kubeconfig" + description = "Path to the the kubeconfig file using DNS address" + value = 
"/home/centos/kubeconfig" } output "kubeconfig_ip" { - description = "Path to the kubeconfig file using IP address" - value = "/home/centos/kubeconfig_ip" + description = "Path to the kubeconfig file using IP address" + value = "/home/centos/kubeconfig_ip" } diff --git a/variables.tf b/variables.tf index c76163b..5539ea8 100644 --- a/variables.tf +++ b/variables.tf @@ -1,87 +1,85 @@ variable "aws_region" { - description = "Region where Cloud Formation is created" - default = "eu-central-1" + description = "Region where Cloud Formation is created" + default = "eu-central-1" } variable "aws_profile" { - description = "AWS credentials profile to use" - default = "default" + description = "AWS credentials profile to use" + default = "default" } variable "cluster_name" { - description = "Name of the AWS Kubernetes cluster - will be used to name all created resources" + description = "Name of the AWS Kubernetes cluster - will be used to name all created resources" } variable "tags" { - description = "Tags used for the AWS resources created by this template" - type = "map" + description = "Tags used for the AWS resources created by this template" + type = "map" } variable "tags2" { - description = "Tags in format used for the AWS Autoscaling Group" - type = "list" + description = "Tags in format used for the AWS Autoscaling Group" + type = "list" } variable "addons" { - description = "list of YAML files with Kubernetes addons which should be installed" - type = "list" + description = "list of YAML files with Kubernetes addons which should be installed" + type = "list" } variable "master_instance_type" { - description = "Type of instance for master" - default = "t2.medium" + description = "Type of instance for master" + default = "t2.medium" } variable "worker_instance_type" { - description = "Type of instance for workers" - default = "t2.medium" + description = "Type of instance for workers" + default = "t2.medium" } variable "master_subnet_id" { - description = "The 
subnet-id to be used for the master instance. Master can be only in single subnet. All subnets have to belong to the same VPC." + description = "The subnet-id to be used for the master instance. Master can be only in single subnet. All subnets have to belong to the same VPC." } -variable "worker_subnet_ids" { - description = "The subnet-ids to be used for the worker instances. Workers can be in multiple subnets. Worker subnets can contain also the master subnet. If you want to run workers in different subnet(s) than master you have to tag the subnets with kubernetes.io/cluster/{cluster_name}=shared. All subnets have to belong to the same VPC." - type = "list" +variable "worker_subnet_ids" { + description = "The subnet-ids to be used for the worker instances. Workers can be in multiple subnets. Worker subnets can contain also the master subnet. If you want to run workers in different subnet(s) than master you have to tag the subnets with kubernetes.io/cluster/{cluster_name}=shared. All subnets have to belong to the same VPC." 
+ type = "list" } variable "min_worker_count" { - description = "Minimal number of worker nodes" + description = "Minimal number of worker nodes" } variable "max_worker_count" { - description = "Maximal number of worker nodes" + description = "Maximal number of worker nodes" } variable "ssh_public_key" { - description = "Path to the pulic part of SSH key which should be used for the instance" - default = "~/.ssh/id_rsa.pub" + description = "Path to the pulic part of SSH key which should be used for the instance" + default = "~/.ssh/id_rsa.pub" } variable "hosted_zone" { - description = "Hosted zone to be used for the alias" + description = "Hosted zone to be used for the alias" } variable "hosted_zone_private" { - description = "Is the hosted zone public or private" - default = false + description = "Is the hosted zone public or private" + default = false } variable ssh_access_cidr { description = "List of CIDRs from which SSH access is allowed" - type = "list" - + type = "list" default = [ - "0.0.0.0/0", + "0.0.0.0/0" ] } variable api_access_cidr { description = "List of CIDRs from which API access is allowed" - type = "list" - + type = "list" default = [ - "0.0.0.0/0", + "0.0.0.0/0" ] } From 053f17294e4342113a0a9acdcd407146ac0197b2 Mon Sep 17 00:00:00 2001 From: Kyle Smith Date: Tue, 12 Jun 2018 13:39:50 -0400 Subject: [PATCH 3/3] add private IPs to master for all slave subnets, add route53 record for internal master addresses --- main.tf | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/main.tf b/main.tf index 6a2d69d..446c652 100644 --- a/main.tf +++ b/main.tf @@ -179,7 +179,7 @@ data "template_file" "init_master" { vars { kubeadm_token = "${module.kubeadm-token.token}" - dns_name = "${var.cluster_name}.${var.hosted_zone}" + dns_name = "internal.${var.cluster_name}.${var.hosted_zone}" ip_address = "${aws_eip.master.public_ip}" cluster_name = "${var.cluster_name}" addons = "${join(" ", var.addons)}" @@ -188,7 +188,6 @@ data 
"template_file" "init_master" { asg_min_nodes = "${var.min_worker_count}" asg_max_nodes = "${var.max_worker_count}" aws_subnets = "${join(" ", concat(var.worker_subnet_ids, list(var.master_subnet_id)))}" - } } @@ -197,7 +196,7 @@ data "template_file" "init_node" { vars { kubeadm_token = "${module.kubeadm-token.token}" - dns_name = "${var.cluster_name}.${var.hosted_zone}" + dns_name = "internal.${var.cluster_name}.${var.hosted_zone}" } } @@ -314,6 +313,12 @@ resource "aws_instance" "master" { } } +resource "aws_network_interface" "master_private_interfaces" { + count = "${length(var.worker_subnet_ids)}" + security_groups = ["${aws_security_group.kubernetes.id}"] + subnet_id = "${var.worker_subnet_ids[count.index]}" + } + resource "aws_eip_association" "master_assoc" { instance_id = "${aws_instance.master.id}" allocation_id = "${aws_eip.master.id}" @@ -393,6 +398,14 @@ resource "aws_route53_record" "master" { zone_id = "${data.aws_route53_zone.dns_zone.zone_id}" name = "${var.cluster_name}.${var.hosted_zone}" type = "A" - records = ["${aws_instance.master.private_ip}"] + records = ["${aws_eip.master.public_ip}"] + ttl = 300 +} + +resource "aws_route53_record" "master-internal" { + zone_id = "${data.aws_route53_zone.dns_zone.zone_id}" + name = "internal.${var.cluster_name}.${var.hosted_zone}" + type = "A" + records = ["${flatten(aws_network_interface.master_private_interfaces.*.private_ips)}"] + ttl = 300 +}