
Commit

WIP: still going / much broken
ntwkninja committed Oct 2, 2024
1 parent fd080f4 commit 8397843
Showing 5 changed files with 363 additions and 8 deletions.
56 changes: 56 additions & 0 deletions locals.tf
@@ -246,3 +246,59 @@ locals {
}
}
}

# Common Environments Access Entries
locals {

  iam_role_policy_prefix = "arn:${data.aws_partition.current.partition}:iam::aws:policy"

  admin_user_access_entries = {
    for user in var.aws_admin_usernames :
    user => {
      principal_arn = "arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:user/${user}"
      type          = "STANDARD"
      policy_associations = {
        admin = {
          policy_arn = "arn:${data.aws_partition.current.partition}:eks::aws:cluster-access-policy/AmazonEKSClusterAdminPolicy"
          access_scope = {
            type = "cluster"
          }
        }
      }
    }
  }

  # Single static entry for the bastion role, kept as its own map so it can be merged below.
  bastion_access_entry = {
    bastion = {
      principal_arn = var.bastion_role_arn
      type          = "STANDARD"
      policy_associations = {
        admin = {
          policy_arn = "arn:${data.aws_partition.current.partition}:eks::aws:cluster-access-policy/AmazonEKSClusterAdminPolicy"
          access_scope = {
            type = "cluster"
          }
        }
      }
    }
  }

  additional_access_entries = {
    for index in var.additional_access_entries :
    index => {
      principal_arn = "arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:role/${index}"
      type          = "STANDARD"
      policy_associations = {
        admin = {
          policy_arn = "arn:${data.aws_partition.current.partition}:eks::aws:cluster-access-policy/AmazonEKSClusterAdminPolicy"
          access_scope = {
            type = "cluster"
          }
        }
      }
    }
  }

  access_entries = merge(
    local.admin_user_access_entries,
    local.bastion_access_entry,
    local.additional_access_entries
  )
} # END: Common Environments Access Entries
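
For context, a minimal sketch of the inputs these locals expect; the usernames, role names, and ARN below are placeholders, not values from this repository:

# Hypothetical tfvars values for the variables referenced above.
aws_admin_usernames       = ["alice", "bob"]
additional_access_entries = ["ci-deployer", "break-glass"]
bastion_role_arn          = "arn:aws:iam::111111111111:role/example-bastion"

With inputs like these, local.access_entries merges into a single map keyed by user name, "bastion", and role name, each associated with AmazonEKSClusterAdminPolicy at cluster scope.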
50 changes: 42 additions & 8 deletions main.tf
@@ -18,22 +18,19 @@ module "aws_eks" {
iam_role_permissions_boundary = var.iam_role_permissions_boundary
attach_cluster_encryption_policy = var.attach_cluster_encryption_policy

cluster_endpoint_public_access = var.cluster_endpoint_public_access
cluster_endpoint_public_access_cidrs = var.cluster_endpoint_public_access_cidrs
cluster_endpoint_private_access = var.cluster_endpoint_private_access

cluster_endpoint_public_access = false
cluster_endpoint_private_access = true

self_managed_node_group_defaults = local.self_managed_node_group_defaults
self_managed_node_groups = local.self_managed_node_groups

dataplane_wait_duration = "30s"
cluster_timeouts = var.cluster_timeouts

cluster_addons = local.cluster_addons

access_entries = var.access_entries
authentication_mode = var.authentication_mode
enable_cluster_creator_admin_permissions = var.enable_cluster_creator_admin_permissions
access_entries = local.access_entries
authentication_mode = "API"
enable_cluster_creator_admin_permissions = true

#----------------------------------------------------------------------------------------------------------#
# Security groups used in this module created by the upstream modules terraform-aws-eks (https://github.com/terraform-aws-modules/terraform-aws-eks).
@@ -149,6 +146,28 @@ module "self_managed_node_group_secret_key_secrets_manager_secret" {
tags = var.tags
}

#---------------------------------------------------------------
# Self Managed Node Group NLB Security Group Dependencies
#---------------------------------------------------------------

# Conditionally create the NLB security group (egress rules only)

resource "aws_security_group" "nlb_sg" {
# checkov:skip=CKV2_AWS_5: This security group gets used when creating NLBs with uds-core.
count = var.nlb_security_groups_required ? 1 : 0

name = "${var.tags.Project}-backend-nlb-sg"
description = "Security group for NLB to Nodes"
vpc_id = local.vpc_id

egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
}

######################################################
# vpc-cni irsa role
######################################################
@@ -202,3 +221,18 @@ resource "aws_iam_policy" "vpc_cni_logging" {

tags = var.tags
}

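
As a sketch of how the conditional NLB security group could be consumed by a caller of this module, assuming the module is instantiated as module "eks" and exposes the nlb_sg_id output added in outputs.tf below; the NLB resource itself is illustrative and not part of this commit:

# Hypothetical caller-side NLB that attaches the egress-only security group created above.
resource "aws_lb" "backend" {
  name               = "backend-nlb"            # placeholder name
  load_balancer_type = "network"
  internal           = true
  subnets            = var.private_subnet_ids   # assumed to exist in the caller
  security_groups    = module.eks.nlb_sg_id == null ? null : [module.eks.nlb_sg_id]
}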
250 changes: 250 additions & 0 deletions nlb-sg.tf
@@ -0,0 +1,250 @@
# local block intended for static values
locals {

mde_egress_cidrs = [
# MDE public IPs
]

idp_public_ips = [
# Keycloak IdP public IPs
]

}

resource "aws_security_group" "keycloak_sg" {
  # checkov:skip=CKV2_AWS_5: "false positive" -- this resource is only created for staging and prod based on the vpc_configs

  # Use a combination of vpc_name and sg_index to uniquely identify each security group in the for_each loop.
  # NOTE: each.value is referenced below, so a for_each is required; local.keycloak_sg_allow_lists is assumed
  # to be defined alongside tenant_sg_allow_lists and admin_sg_allow_lists, which are also not in this diff.
  for_each = {
    for sg in local.keycloak_sg_allow_lists : "${sg.vpc_name}-${sg.sg_index}" => sg
  }

  # Naming convention for the security group.
  name        = "${local.cluster_name}-${var.kc_sg_name}-${each.value.sg_index + 1}"
  description = "Security group for Keycloak with ingress rules"
  # Retrieve the VPC ID from the vpc module using the vpc_name.
  vpc_id = var.vpc_id

# Dynamically create ingress rules based on the allow_list for each security group.
dynamic "ingress" {
for_each = each.value.allow_list
content {
from_port = 443
to_port = 443
protocol = "tcp"
cidr_blocks = [ingress.value]
description = "HTTPS ingress for Keycloak"
}
}

ingress {
from_port = 80
to_port = 80
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
    description = "HTTP ingress (all sources)"
}

ingress {
from_port = 443
to_port = 443
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
    description = "HTTPS ingress (all sources)"
}

ingress {
from_port = 15021
to_port = 15021
protocol = "tcp"
cidr_blocks = [var.vpc_cidr_block]
    description = "Istio ingress from VPC"
}

egress {
from_port = 0
to_port = 0
    protocol    = "-1"
cidr_blocks = ["0.0.0.0/0"]
description = "Egress All from VPC"
}
}

resource "aws_security_group" "tenant_sg" {
# checkov:skip=CKV2_AWS_5: "false positive" -- this resource is only created for staging and prod based on the vpc_configs

# Use a combination of vpc_name and sg_index to uniquely identify each security group in the for_each loop.
for_each = {
for sg in local.tenant_sg_allow_lists : "${sg.vpc_name}-${sg.sg_index}" => sg
}

# Naming convention for the security group.
name = "${local.cluster_name}-${var.tenant_sg_name}-${each.value.sg_index + 1}"
description = "Security group for tenant ingress-gateway"
# Retrieve the VPC ID from the vpc module using the vpc_name.
vpc_id = var.vpc_id

# Dynamically create ingress rules based on the allow_list for each security group.
dynamic "ingress" {
for_each = each.value.allow_list
content {
from_port = 443
to_port = 443
protocol = "tcp"
cidr_blocks = [ingress.value]
description = "HTTPS ingress for Tenant ingress-gateway"
}
}

ingress {
from_port = 80
to_port = 80
protocol = "tcp"
cidr_blocks = [module.vpc[local.vpc_name_to_index[each.value.vpc_name]].vpc_cidr_block]
description = "HTTP ingress from VPC"
}

ingress {
from_port = 443
to_port = 443
protocol = "tcp"
cidr_blocks = [module.vpc[local.vpc_name_to_index[each.value.vpc_name]].vpc_cidr_block]
description = "HTTPS ingress from VPC"
}

ingress {
from_port = 15021
to_port = 15021
protocol = "tcp"
cidr_blocks = [module.vpc[local.vpc_name_to_index[each.value.vpc_name]].vpc_cidr_block]
description = "Istio ingress from VPC"
}

egress {
from_port = 0
to_port = 0
    protocol    = "-1"
cidr_blocks = ["0.0.0.0/0"]
description = "Egress All from VPC"
}

tags = merge(var.tags, {
"Name" = "${local.cluster_name}-${var.tenant_sg_name}-${each.value.sg_index + 1}"
})
}

resource "aws_security_group" "admin_sg" {
# checkov:skip=CKV2_AWS_5: "false positive" -- this resource is only created for staging and prod based on the vpc_configs

# Use a combination of vpc_name and sg_index to uniquely identify each security group in the for_each loop.
for_each = {
for sg in local.admin_sg_allow_lists : "${sg.vpc_name}-${sg.sg_index}" => sg
}

# Naming convention for the security group.
name = "${local.cluster_name}-${each.value.vpc_name}-${var.admin_sg_name}-${each.value.sg_index + 1}"
  description = "Security group for admin ingress-gateway"
# Retrieve the VPC ID from the vpc module using the vpc_name.
vpc_id = var.vpc_id

# Dynamically create ingress rules based on the allow_list for each security group.
dynamic "ingress" {
for_each = each.value.allow_list
content {
from_port = 443
to_port = 443
protocol = "tcp"
cidr_blocks = [ingress.value]
description = "HTTPS ingress for Keycloak"
}
}

ingress {
from_port = 80
to_port = 80
protocol = "tcp"
cidr_blocks = [module.vpc[local.vpc_name_to_index[each.value.vpc_name]].vpc_cidr_block]
description = "HTTP ingress from VPC"
}

ingress {
from_port = 443
to_port = 443
protocol = "tcp"
cidr_blocks = [module.vpc[local.vpc_name_to_index[each.value.vpc_name]].vpc_cidr_block]
description = "HTTPS ingress from VPC"
}

ingress {
from_port = 15021
to_port = 15021
protocol = "tcp"
cidr_blocks = [module.vpc[local.vpc_name_to_index[each.value.vpc_name]].vpc_cidr_block]
    description = "Istio ingress from VPC"
}

egress {
from_port = 0
to_port = 0
    protocol    = "-1"
cidr_blocks = ["0.0.0.0/0"]
description = "Egress All from VPC"
}

  tags = merge(var.tags, {
    "Name" = "${local.cluster_name}-${each.value.vpc_name}-${var.admin_sg_name}-${each.value.sg_index + 1}"
  })
}

resource "aws_security_group" "appstream_users_sgs" {
# checkov:skip=CKV2_AWS_5: This resource is used in operations repo
for_each = {
for vpc in var.vpc_configs : vpc.vpc_name => vpc
if vpc.default_vdi_vpc == true # Only create the security group for the default VDI VPC (which could be either 'VDI-RDTE-VPC' or 'VDI-Production-VPC' in the current configuration)
}

name = "${var.tags.Environment}-${var.tags.Project}-${each.value.vpc_name}-appstream-users-sg"
description = "Security group for regular appstream users"

# Retrieve the VPC ID from the vpc module using the vpc_name.
vpc_id = var.vpc_id

ingress {
description = "TLS from VPC"
from_port = 443
to_port = 443
protocol = "tcp"
cidr_blocks = [each.value.vpc_cidr]
}

egress {
description = "EKS private subnets egress to AppStream fleet"
from_port = 443
to_port = 443
protocol = "tcp"
cidr_blocks = var.target_eks_private_subnets_cidrs
}

egress {
description = "Outbound to MDE endpoints per onboarding docs"
from_port = 443
to_port = 443
protocol = "tcp"
cidr_blocks = local.mde_egress_cidrs
}

egress {
description = "Keycloak endpoint"
from_port = 443
to_port = 443
protocol = "tcp"
cidr_blocks = local.idp_public_ips
}
}

output "keycloak_security_group_ids" {
value = values(aws_security_group.keycloak_sg)[*].id
}

output "tenant_security_group_ids" {
value = values(aws_security_group.tenant_sg)[*].id
}

output "admin_security_group_ids" {
value = values(aws_security_group.admin_sg)[*].id
}
4 changes: 4 additions & 0 deletions outputs.tf
@@ -241,3 +241,7 @@ output "eks_addons_gitops_metadata" {
description = ""
value = try(module.eks_blueprints_kubernetes_addons.gitops_metadata, null)
}

output "nlb_sg_id" {
  description = "ID of the optional backend NLB security group, or null when var.nlb_security_groups_required is false"
  value       = one(aws_security_group.nlb_sg[*].id)
}
