diff --git a/config/eip.tf b/config/eip.tf index b31904a..f3e0ee9 100644 --- a/config/eip.tf +++ b/config/eip.tf @@ -1,13 +1,15 @@ resource "aws_eip" "cluster_loadbalancer_eip" { + count = var.enable_eip ? 1 : 0 + vpc = true public_ipv4_pool = "amazon" tags = merge(tomap({ "Name" : "${var.eks_cluster_name}-loadbalancer-eip" }), var.common_tags) } output "radar_base_eip_allocation_id" { - value = aws_eip.cluster_loadbalancer_eip.allocation_id + value = var.enable_eip ? aws_eip.cluster_loadbalancer_eip[0].allocation_id : null } output "radar_base_eip_public_dns" { - value = aws_eip.cluster_loadbalancer_eip.public_dns + value = var.enable_eip ? aws_eip.cluster_loadbalancer_eip[0].public_dns : null } \ No newline at end of file diff --git a/config/karpenter.tf b/config/karpenter.tf index c444128..8822287 100644 --- a/config/karpenter.tf +++ b/config/karpenter.tf @@ -1,4 +1,6 @@ module "karpenter" { + count = var.enable_karpenter ? 1 : 0 + source = "terraform-aws-modules/eks/aws//modules/karpenter" version = "19.17.2" @@ -14,6 +16,8 @@ module "karpenter" { } resource "helm_release" "karpenter" { + count = var.enable_karpenter ? 1 : 0 + namespace = "karpenter" create_namespace = true @@ -34,21 +38,23 @@ resource "helm_release" "karpenter" { set { name = "serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn" - value = module.karpenter.irsa_arn + value = module.karpenter[0].irsa_arn } set { name = "settings.aws.defaultInstanceProfile" - value = module.karpenter.instance_profile_name + value = module.karpenter[0].instance_profile_name } set { name = "settings.aws.interruptionQueueName" - value = module.karpenter.queue_name + value = module.karpenter[0].queue_name } } resource "kubectl_manifest" "karpenter_provisioner" { + count = var.enable_karpenter ? 
1 : 0 + yaml_body = <<-YAML apiVersion: karpenter.sh/v1alpha5 kind: Provisioner @@ -89,6 +95,8 @@ resource "kubectl_manifest" "karpenter_provisioner" { } resource "kubectl_manifest" "karpenter_node_template" { + count = var.enable_karpenter ? 1 : 0 + yaml_body = <<-YAML apiVersion: karpenter.k8s.aws/v1alpha1 kind: AWSNodeTemplate diff --git a/config/msk.tf b/config/msk.tf index 29f7c7f..1c07f49 100644 --- a/config/msk.tf +++ b/config/msk.tf @@ -1,4 +1,6 @@ resource "aws_iam_role" "msk_role" { + count = var.enable_msk ? 1 : 0 + name = "${var.eks_cluster_name}-msk-role" assume_role_policy = jsonencode({ @@ -18,12 +20,17 @@ resource "aws_iam_role" "msk_role" { } resource "aws_iam_role_policy_attachment" "msk_policy_attachment" { + count = var.enable_msk ? 1 : 0 + policy_arn = "arn:aws:iam::aws:policy/AmazonMSKFullAccess" - role = aws_iam_role.msk_role.name + role = aws_iam_role.msk_role[0].name } resource "aws_security_group" "msk_cluster_access" { + count = var.enable_msk ? 1 : 0 + name_prefix = "${var.eks_cluster_name}-msk-" + description = "This security group is for accessing the MSK cluster" vpc_id = data.aws_vpc.main.id @@ -45,6 +52,8 @@ resource "aws_security_group" "msk_cluster_access" { } resource "aws_msk_configuration" "msk_configuration" { + count = var.enable_msk ? 1 : 0 + kafka_versions = [var.kafka_version] name = "${var.eks_cluster_name}-msk-configuration" @@ -66,7 +75,10 @@ PROPERTIES } resource "aws_msk_cluster" "msk_cluster" { - cluster_name = "${var.eks_cluster_name}-msk-cluster" + count = var.enable_msk ? 
1 : 0 + + cluster_name = "${var.eks_cluster_name}-msk-cluster" + kafka_version = var.kafka_version number_of_broker_nodes = 3 enhanced_monitoring = "DEFAULT" @@ -79,7 +91,7 @@ resource "aws_msk_cluster" "msk_cluster" { } } client_subnets = data.aws_subnets.private.ids - security_groups = [data.aws_security_group.vpc_default.id, aws_security_group.msk_cluster_access.id] + security_groups = [data.aws_security_group.vpc_default.id, aws_security_group.msk_cluster_access[0].id] } encryption_info { @@ -108,15 +120,15 @@ resource "aws_msk_cluster" "msk_cluster" { } configuration_info { - arn = aws_msk_configuration.msk_configuration.arn + arn = aws_msk_configuration.msk_configuration[0].arn revision = 1 } } output "radar_base_msk_bootstrap_brokers" { - value = aws_msk_cluster.msk_cluster.bootstrap_brokers_tls + value = var.enable_msk ? aws_msk_cluster.msk_cluster[0].bootstrap_brokers_tls : null } output "radar_base_msk_zookeeper_connect" { - value = aws_msk_cluster.msk_cluster.zookeeper_connect_string + value = var.enable_msk ? aws_msk_cluster.msk_cluster[0].zookeeper_connect_string : null } diff --git a/config/rds.tf b/config/rds.tf index 7880983..e03e79b 100644 --- a/config/rds.tf +++ b/config/rds.tf @@ -1,9 +1,13 @@ resource "aws_db_subnet_group" "rds_subnet" { + count = var.enable_rds ? 1 : 0 + name = "${var.eks_cluster_name}-rds-subnet" subnet_ids = data.aws_subnets.private.ids } resource "aws_security_group" "rds_access" { + count = var.enable_rds ? 1 : 0 + name_prefix = "${var.eks_cluster_name}-" description = "This security group is for accessing the RDS DB" vpc_id = data.aws_vpc.main.id @@ -34,6 +38,8 @@ resource "aws_security_group" "rds_access" { } resource "aws_db_instance" "radar_postgres" { + count = var.enable_rds ? 
1 : 0 + identifier = "${var.eks_cluster_name}-postgres" db_name = "radarbase" engine = "postgres" @@ -47,14 +53,16 @@ resource "aws_db_instance" "radar_postgres" { skip_final_snapshot = true publicly_accessible = false multi_az = false - db_subnet_group_name = aws_db_subnet_group.rds_subnet.name - vpc_security_group_ids = [aws_security_group.rds_access.id] + db_subnet_group_name = aws_db_subnet_group.rds_subnet[0].name + vpc_security_group_ids = [aws_security_group.rds_access[0].id] performance_insights_enabled = true tags = merge(tomap({ "Name" : "${var.eks_cluster_name}-postgres" }), var.common_tags) } resource "kubectl_manifest" "create_databases" { + count = var.enable_rds ? 1 : 0 + yaml_body = <<-YAML apiVersion: batch/v1 kind: Job @@ -70,9 +78,9 @@ resource "kubectl_manifest" "create_databases" { - "bash" - "-c" - | - PGPASSWORD=${var.radar_postgres_password} psql --host=${aws_db_instance.radar_postgres.address} --port=5432 --username=${aws_db_instance.radar_postgres.username} --dbname=radarbase -c 'CREATE DATABASE managementportal;' - PGPASSWORD=${var.radar_postgres_password} psql --host=${aws_db_instance.radar_postgres.address} --port=5432 --username=${aws_db_instance.radar_postgres.username} --dbname=radarbase -c 'CREATE DATABASE appserver;' - PGPASSWORD=${var.radar_postgres_password} psql --host=${aws_db_instance.radar_postgres.address} --port=5432 --username=${aws_db_instance.radar_postgres.username} --dbname=radarbase -c 'CREATE DATABASE rest_sources_auth;' + PGPASSWORD=${var.radar_postgres_password} psql --host=${aws_db_instance.radar_postgres[0].address} --port=5432 --username=${aws_db_instance.radar_postgres[0].username} --dbname=radarbase -c 'CREATE DATABASE managementportal;' + PGPASSWORD=${var.radar_postgres_password} psql --host=${aws_db_instance.radar_postgres[0].address} --port=5432 --username=${aws_db_instance.radar_postgres[0].username} --dbname=radarbase -c 'CREATE DATABASE appserver;' + PGPASSWORD=${var.radar_postgres_password} psql 
--host=${aws_db_instance.radar_postgres[0].address} --port=5432 --username=${aws_db_instance.radar_postgres[0].username} --dbname=radarbase -c 'CREATE DATABASE rest_sources_auth;' restartPolicy: Never YAML @@ -82,52 +90,52 @@ resource "kubectl_manifest" "create_databases" { } output "radar_base_rds_managementportal_host" { - value = aws_db_instance.radar_postgres.address + value = var.enable_rds ? aws_db_instance.radar_postgres[0].address : null } output "radar_base_rds_managementportal_port" { - value = aws_db_instance.radar_postgres.port + value = var.enable_rds ? aws_db_instance.radar_postgres[0].port : null } output "radar_base_rds_managementportal_username" { - value = aws_db_instance.radar_postgres.username + value = var.enable_rds ? aws_db_instance.radar_postgres[0].username : null } output "radar_base_rds_managementportal_password" { - value = aws_db_instance.radar_postgres.password + value = var.enable_rds ? aws_db_instance.radar_postgres[0].password : null sensitive = true } output "radar_base_rds_appserver_host" { - value = aws_db_instance.radar_postgres.address + value = var.enable_rds ? aws_db_instance.radar_postgres[0].address : null } output "radar_base_rds_appserver_port" { - value = aws_db_instance.radar_postgres.port + value = var.enable_rds ? aws_db_instance.radar_postgres[0].port : null } output "radar_base_rds_appserver_username" { - value = aws_db_instance.radar_postgres.username + value = var.enable_rds ? aws_db_instance.radar_postgres[0].username : null } output "radar_base_rds_appserver_password" { - value = aws_db_instance.radar_postgres.password + value = var.enable_rds ? aws_db_instance.radar_postgres[0].password : null sensitive = true } output "radar_base_rds_rest_sources_auth_host" { - value = aws_db_instance.radar_postgres.address + value = var.enable_rds ? aws_db_instance.radar_postgres[0].address : null } output "radar_base_rds_rest_sources_auth_port" { - value = aws_db_instance.radar_postgres.port + value = var.enable_rds ? 
aws_db_instance.radar_postgres[0].port : null } output "radar_base_rds_rest_sources_auth_username" { - value = aws_db_instance.radar_postgres.username + value = var.enable_rds ? aws_db_instance.radar_postgres[0].username : null } output "radar_base_rds_rest_sources_auth_password" { - value = aws_db_instance.radar_postgres.password + value = var.enable_rds ? aws_db_instance.radar_postgres[0].password : null sensitive = true } diff --git a/config/route53.tf b/config/route53.tf index c09b607..9092d30 100644 --- a/config/route53.tf +++ b/config/route53.tf @@ -1,18 +1,24 @@ resource "aws_route53_zone" "primary" { + count = var.enable_route53 ? 1 : 0 + name = var.domain_name tags = merge(tomap({ "Name" : "${var.eks_cluster_name}-primary-zone" }), var.common_tags) } resource "aws_route53_record" "main" { - zone_id = aws_route53_zone.primary.zone_id + count = var.enable_route53 && var.enable_eip ? 1 : 0 + + zone_id = aws_route53_zone.primary[0].zone_id name = "${var.environment}.${var.domain_name}" type = "CNAME" ttl = 300 - records = [aws_eip.cluster_loadbalancer_eip.public_dns] + records = [aws_eip.cluster_loadbalancer_eip[0].public_dns] } resource "aws_route53_record" "alertmanager" { - zone_id = aws_route53_zone.primary.zone_id + count = var.enable_route53 ? 1 : 0 + + zone_id = aws_route53_zone.primary[0].zone_id name = "alertmanager.${var.environment}.${var.domain_name}" type = "CNAME" ttl = 300 @@ -20,7 +26,9 @@ resource "aws_route53_record" "alertmanager" { } resource "aws_route53_record" "dashboard" { - zone_id = aws_route53_zone.primary.zone_id + count = var.enable_route53 ? 1 : 0 + + zone_id = aws_route53_zone.primary[0].zone_id name = "dashboard.${var.environment}.${var.domain_name}" type = "CNAME" ttl = 300 @@ -28,7 +36,9 @@ resource "aws_route53_record" "dashboard" { } resource "aws_route53_record" "grafana" { - zone_id = aws_route53_zone.primary.zone_id + count = var.enable_route53 ? 
1 : 0 + + zone_id = aws_route53_zone.primary[0].zone_id name = "grafana.${var.environment}.${var.domain_name}" type = "CNAME" ttl = 300 @@ -36,7 +46,9 @@ resource "aws_route53_record" "grafana" { } resource "aws_route53_record" "graylog" { - zone_id = aws_route53_zone.primary.zone_id + count = var.enable_route53 ? 1 : 0 + + zone_id = aws_route53_zone.primary[0].zone_id name = "graylog.${var.environment}.${var.domain_name}" type = "CNAME" ttl = 300 @@ -44,7 +56,9 @@ resource "aws_route53_record" "graylog" { } resource "aws_route53_record" "prometheus" { - zone_id = aws_route53_zone.primary.zone_id + count = var.enable_route53 ? 1 : 0 + + zone_id = aws_route53_zone.primary[0].zone_id name = "prometheus.${var.environment}.${var.domain_name}" type = "CNAME" ttl = 300 @@ -52,7 +66,9 @@ resource "aws_route53_record" "prometheus" { } resource "aws_route53_record" "s3" { - zone_id = aws_route53_zone.primary.zone_id + count = var.enable_route53 ? 1 : 0 + + zone_id = aws_route53_zone.primary[0].zone_id name = "s3.${var.environment}.${var.domain_name}" type = "CNAME" ttl = 300 @@ -60,12 +76,14 @@ resource "aws_route53_record" "s3" { } module "external_dns_irsa" { + count = var.enable_route53 ? 1 : 0 + source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks" version = "~> 5.0" role_name = "${var.eks_cluster_name}-external-dns-irsa" attach_external_dns_policy = true - external_dns_hosted_zone_arns = ["arn:aws:route53:::hostedzone/${aws_route53_zone.primary.id}"] + external_dns_hosted_zone_arns = ["arn:aws:route53:::hostedzone/${aws_route53_zone.primary[0].id}"] oidc_providers = { ex = { @@ -78,12 +96,14 @@ module "external_dns_irsa" { } module "cert_manager_irsa" { + count = var.enable_route53 ? 
1 : 0 + source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks" version = "~> 5.0" role_name = "${var.eks_cluster_name}-cert-manager-irsa" attach_cert_manager_policy = true - cert_manager_hosted_zone_arns = ["arn:aws:route53:::hostedzone/${aws_route53_zone.primary.id}"] + cert_manager_hosted_zone_arns = ["arn:aws:route53:::hostedzone/${aws_route53_zone.primary[0].id}"] oidc_providers = { main = { @@ -96,5 +116,5 @@ module "cert_manager_irsa" { } output "radar_base_route53_hosted_zone_id" { - value = aws_route53_zone.primary.zone_id + value = var.enable_route53 ? aws_route53_zone.primary[0].zone_id : null } diff --git a/config/s3.tf b/config/s3.tf index 2c10464..e1007a9 100644 --- a/config/s3.tf +++ b/config/s3.tf @@ -1,4 +1,6 @@ resource "aws_vpc_endpoint" "s3" { + count = var.enable_s3 ? 1 : 0 + vpc_id = data.aws_vpc.main.id service_name = "com.amazonaws.${var.AWS_REGION}.s3" @@ -6,18 +8,24 @@ resource "aws_vpc_endpoint" "s3" { } resource "aws_vpc_endpoint_route_table_association" "route_table_association" { + count = var.enable_s3 ? 1 : 0 + route_table_id = data.aws_vpc.main.main_route_table_id - vpc_endpoint_id = aws_vpc_endpoint.s3.id + vpc_endpoint_id = aws_vpc_endpoint.s3[0].id } resource "aws_s3_bucket" "intermediate_output_storage" { + count = var.enable_s3 ? 1 : 0 + bucket = "${var.eks_cluster_name}-intermediate-output-storage" tags = merge(tomap({ "Name" : "${var.eks_cluster_name}-intermediate-output-storage" }), var.common_tags) } resource "aws_s3_bucket_ownership_controls" "intermediate_output" { - bucket = aws_s3_bucket.intermediate_output_storage.id + count = var.enable_s3 ? 1 : 0 + + bucket = aws_s3_bucket.intermediate_output_storage[0].id rule { object_ownership = "BucketOwnerPreferred" } @@ -26,20 +34,26 @@ resource "aws_s3_bucket_ownership_controls" "intermediate_output" { } resource "aws_s3_bucket_acl" "intermediate_output" { - bucket = aws_s3_bucket.intermediate_output_storage.id + count = var.enable_s3 ? 
1 : 0 + + bucket = aws_s3_bucket.intermediate_output_storage[0].id acl = "private" depends_on = [aws_s3_bucket_ownership_controls.intermediate_output] } resource "aws_s3_bucket" "output_storage" { + count = var.enable_s3 ? 1 : 0 + bucket = "${var.eks_cluster_name}-output-storage" tags = merge(tomap({ "Name" : "${var.eks_cluster_name}-output-storage" }), var.common_tags) } resource "aws_s3_bucket_ownership_controls" "output" { - bucket = aws_s3_bucket.output_storage.id + count = var.enable_s3 ? 1 : 0 + + bucket = aws_s3_bucket.output_storage[0].id rule { object_ownership = "BucketOwnerPreferred" } @@ -48,20 +62,26 @@ resource "aws_s3_bucket_ownership_controls" "output" { } resource "aws_s3_bucket_acl" "output" { - bucket = aws_s3_bucket.output_storage.id + count = var.enable_s3 ? 1 : 0 + + bucket = aws_s3_bucket.output_storage[0].id acl = "private" depends_on = [aws_s3_bucket_ownership_controls.output] } resource "aws_s3_bucket" "velero_backups" { + count = var.enable_s3 ? 1 : 0 + bucket = "${var.eks_cluster_name}-velero-backups" tags = merge(tomap({ "Name" : "${var.eks_cluster_name}-velero-backups" }), var.common_tags) } resource "aws_s3_bucket_ownership_controls" "velero" { - bucket = aws_s3_bucket.velero_backups.id + count = var.enable_s3 ? 1 : 0 + + bucket = aws_s3_bucket.velero_backups[0].id rule { object_ownership = "BucketOwnerPreferred" } @@ -70,14 +90,18 @@ resource "aws_s3_bucket_ownership_controls" "velero" { } resource "aws_s3_bucket_acl" "velero" { - bucket = aws_s3_bucket.velero_backups.id + count = var.enable_s3 ? 1 : 0 + + bucket = aws_s3_bucket.velero_backups[0].id acl = "private" depends_on = [aws_s3_bucket_ownership_controls.velero] } resource "aws_s3_bucket_server_side_encryption_configuration" "intermediate_output_storage_encryption" { - bucket = aws_s3_bucket.intermediate_output_storage.id + count = var.enable_s3 ? 
1 : 0 + + bucket = aws_s3_bucket.intermediate_output_storage[0].id rule { apply_server_side_encryption_by_default { @@ -87,7 +111,9 @@ resource "aws_s3_bucket_server_side_encryption_configuration" "intermediate_outp } resource "aws_s3_bucket_server_side_encryption_configuration" "output_storage_encryption" { - bucket = aws_s3_bucket.output_storage.id + count = var.enable_s3 ? 1 : 0 + + bucket = aws_s3_bucket.output_storage[0].id rule { apply_server_side_encryption_by_default { @@ -97,7 +123,9 @@ resource "aws_s3_bucket_server_side_encryption_configuration" "output_storage_en } resource "aws_s3_bucket_server_side_encryption_configuration" "velero_backups_encryption" { - bucket = aws_s3_bucket.velero_backups.id + count = var.enable_s3 ? 1 : 0 + + bucket = aws_s3_bucket.velero_backups[0].id rule { apply_server_side_encryption_by_default { @@ -107,13 +135,13 @@ resource "aws_s3_bucket_server_side_encryption_configuration" "velero_backups_en } output "radar_base_s3_intermediate_output_bucket_name" { - value = aws_s3_bucket.intermediate_output_storage.bucket + value = var.enable_s3 ? aws_s3_bucket.intermediate_output_storage[0].bucket : null } output "radar_base_s3_output_bucket_name" { - value = aws_s3_bucket.output_storage.bucket + value = var.enable_s3 ? aws_s3_bucket.output_storage[0].bucket : null } output "radar_base_s3_velero_bucket_name" { - value = aws_s3_bucket.velero_backups.bucket + value = var.enable_s3 ? aws_s3_bucket.velero_backups[0].bucket : null } diff --git a/config/ses.tf b/config/ses.tf index 12ed46a..82d50c5 100644 --- a/config/ses.tf +++ b/config/ses.tf @@ -1,57 +1,71 @@ resource "aws_ses_domain_identity" "smtp_identity" { + count = var.enable_ses ? 1 : 0 + domain = var.domain_name } resource "aws_ses_domain_dkim" "smtp_dkim" { - domain = aws_ses_domain_identity.smtp_identity.domain + count = var.enable_ses ?
1 : 0 + + domain = aws_ses_domain_identity.smtp_identity[0].domain } resource "aws_route53_record" "smtp_dkim_record" { - count = 3 - zone_id = aws_route53_zone.primary.id - name = "${aws_ses_domain_dkim.smtp_dkim.dkim_tokens[count.index]}._domainkey" + count = var.enable_route53 && var.enable_ses ? 3 : 0 + zone_id = aws_route53_zone.primary[0].id + name = "${aws_ses_domain_dkim.smtp_dkim[0].dkim_tokens[count.index]}._domainkey" type = "CNAME" ttl = "600" - records = ["${aws_ses_domain_dkim.smtp_dkim.dkim_tokens[count.index]}.dkim.amazonses.com"] + records = ["${aws_ses_domain_dkim.smtp_dkim[0].dkim_tokens[count.index]}.dkim.amazonses.com"] - depends_on = [aws_route53_zone.primary] + depends_on = [aws_route53_zone.primary[0]] } resource "aws_ses_domain_mail_from" "smtp_mail_from" { - domain = aws_ses_domain_identity.smtp_identity.domain - mail_from_domain = "info.${var.environment}.${aws_ses_domain_identity.smtp_identity.domain}" + count = var.enable_ses ? 1 : 0 + + domain = aws_ses_domain_identity.smtp_identity[0].domain + mail_from_domain = "info.${var.environment}.${aws_ses_domain_identity.smtp_identity[0].domain}" } resource "aws_route53_record" "smtp_mail_from_mx" { - zone_id = aws_route53_zone.primary.id - name = aws_ses_domain_mail_from.smtp_mail_from.mail_from_domain + count = var.enable_route53 && var.enable_ses ? 1 : 0 + zone_id = aws_route53_zone.primary[0].id + name = aws_ses_domain_mail_from.smtp_mail_from[0].mail_from_domain type = "MX" ttl = "600" records = ["10 feedback-smtp.${var.AWS_REGION}.amazonses.com"] - depends_on = [aws_route53_zone.primary] + depends_on = [aws_route53_zone.primary[0]] } resource "aws_route53_record" "smtp_mail_from_txt" { - zone_id = aws_route53_zone.primary.id - name = aws_ses_domain_mail_from.smtp_mail_from.mail_from_domain + count = var.enable_route53 && var.enable_ses ? 
1 : 0 + zone_id = aws_route53_zone.primary[0].id + name = aws_ses_domain_mail_from.smtp_mail_from[0].mail_from_domain type = "TXT" ttl = "600" records = ["v=spf1 include:amazonses.com ~all"] - depends_on = [aws_route53_zone.primary] + depends_on = [aws_route53_zone.primary[0]] } resource "aws_iam_user" "smtp_user" { + count = var.enable_ses ? 1 : 0 + name = "${var.eks_cluster_name}-smtp-user" tags = merge(tomap({ "Name" : "${var.eks_cluster_name}-smtp-user" }), var.common_tags) } resource "aws_iam_access_key" "smtp_user_key" { - user = aws_iam_user.smtp_user.name + count = var.enable_ses ? 1 : 0 + + user = aws_iam_user.smtp_user[0].name } resource "aws_iam_policy" "smtp_user_policy" { + count = var.enable_ses ? 1 : 0 + name = "${var.eks_cluster_name}-smtp-user-policy" policy = jsonencode({ @@ -69,16 +83,18 @@ resource "aws_iam_policy" "smtp_user_policy" { } resource "aws_iam_user_policy_attachment" "smtp_user_policy_attach" { - user = aws_iam_user.smtp_user.name - policy_arn = aws_iam_policy.smtp_user_policy.arn + count = var.enable_ses ? 1 : 0 + + user = aws_iam_user.smtp_user[0].name + policy_arn = aws_iam_policy.smtp_user_policy[0].arn } output "radar_base_smtp_username" { - value = aws_iam_access_key.smtp_user_key.id + value = var.enable_ses ? aws_iam_access_key.smtp_user_key[0].id : null } output "radar_base_smtp_password" { - value = aws_iam_access_key.smtp_user_key.ses_smtp_password_v4 + value = var.enable_ses ? 
aws_iam_access_key.smtp_user_key[0].ses_smtp_password_v4 : null sensitive = true } diff --git a/config/variables.tf b/config/variables.tf index 2dc2cd1..1a652e4 100644 --- a/config/variables.tf +++ b/config/variables.tf @@ -76,6 +76,11 @@ variable "postgres_version" { } +variable "enable_karpenter" { + type = bool + default = false +} + variable "karpenter_version" { type = string default = "v0.29.0" @@ -87,3 +92,33 @@ variable "radar_postgres_password" { default = "change_me" sensitive = true } + +variable "enable_msk" { + type = bool + default = false +} + +variable "enable_rds" { + type = bool + default = false +} + +variable "enable_route53" { + type = bool + default = false +} + +variable "enable_ses" { + type = bool + default = false +} + +variable "enable_s3" { + type = bool + default = false +} + +variable "enable_eip" { + type = bool + default = false +} \ No newline at end of file