diff --git a/_sub/compute/k8s-blaster-namespace/dependencies.tf b/_sub/compute/k8s-blaster-namespace/dependencies.tf
new file mode 100644
index 000000000..0514ede4e
--- /dev/null
+++ b/_sub/compute/k8s-blaster-namespace/dependencies.tf
@@ -0,0 +1,67 @@
+
+data "aws_caller_identity" "workload_account" {
+}
+
+locals {
+  k8s_janitor_serviceaccount_name = "k8s-janitor-sa"
+}
+
+data "aws_iam_policy_document" "k8s_janitor" {
+  count = var.deploy ? 1 : 0
+  statement {
+    effect = "Allow"
+
+    actions = [
+      "s3:AbortMultipartUpload",
+      "s3:DeleteObject",
+      "s3:GetObject",
+      "s3:GetObjectAcl",
+      "s3:GetObjectTagging",
+      "s3:GetObjectTorrent",
+      "s3:GetObjectVersion",
+      "s3:GetObjectVersionAcl",
+      "s3:GetObjectVersionTagging",
+      "s3:GetObjectVersionTorrent",
+      "s3:ListBucketMultipartUploads",
+      "s3:ListMultipartUploadParts",
+      "s3:PutObject",
+      "s3:PutObjectAcl",
+      "s3:PutObjectTagging"
+    ]
+
+    resources = ["arn:aws:s3:::dfds-oxygen-k8s-hellman/*"]
+  }
+
+  statement {
+    effect = "Allow"
+
+    actions = [
+      "s3:ListBucket"
+    ]
+
+    resources = ["arn:aws:s3:::dfds-oxygen-k8s-hellman"]
+  }
+}
+
+data "aws_iam_policy_document" "k8s_janitor_trust" {
+  count = var.deploy ? 1 : 0
+  statement {
+    effect = "Allow"
+
+    principals {
+      type = "Federated"
+
+      identifiers = [
+        "arn:aws:iam::${data.aws_caller_identity.workload_account.account_id}:oidc-provider/${var.oidc_issuer}",
+      ]
+    }
+
+    condition {
+      test     = "StringEquals"
+      values   = ["system:serviceaccount:${kubernetes_namespace.self_service[0].metadata[0].name}:${local.k8s_janitor_serviceaccount_name}"]
+      variable = "${var.oidc_issuer}:sub"
+    }
+
+    actions = ["sts:AssumeRoleWithWebIdentity"]
+  }
+}
diff --git a/_sub/compute/k8s-blaster-namespace/main.tf b/_sub/compute/k8s-blaster-namespace/main.tf
index 812517482..0b590632a 100644
--- a/_sub/compute/k8s-blaster-namespace/main.tf
+++ b/_sub/compute/k8s-blaster-namespace/main.tf
@@ -145,3 +145,27 @@ resource "aws_iam_role_policy_attachment" "param-store" {
   role       = element(concat(aws_iam_role.self_service.*.name, [""]), 0)
   policy_arn = element(concat(aws_iam_policy.param_store.*.arn, [""]), 0)
 }
+
+# --------------------------------------------------
+# k8s-janitor IAM role
+# --------------------------------------------------
+
+locals {
+  k8s_janitor_iam_role_name = "k8s-janitor"
+}
+
+resource "aws_iam_role" "k8s_janitor" {
+  count                = var.deploy ? 1 : 0
+  name                 = local.k8s_janitor_iam_role_name
+  path                 = "/"
+  description          = "Role for k8s-janitor to manage S3 buckets within its path"
+  assume_role_policy   = data.aws_iam_policy_document.k8s_janitor_trust[0].json
+  max_session_duration = 3600
+}
+
+resource "aws_iam_role_policy" "k8s_janitor" {
+  count  = var.deploy ? 1 : 0
+  name   = local.k8s_janitor_iam_role_name
+  role   = aws_iam_role.k8s_janitor[0].id
+  policy = data.aws_iam_policy_document.k8s_janitor[0].json
+}
\ No newline at end of file
diff --git a/_sub/compute/k8s-blaster-namespace/vars.tf b/_sub/compute/k8s-blaster-namespace/vars.tf
index 8e2a87f71..60cda35dd 100644
--- a/_sub/compute/k8s-blaster-namespace/vars.tf
+++ b/_sub/compute/k8s-blaster-namespace/vars.tf
@@ -9,9 +9,18 @@ variable "cluster_name" {
 variable "blaster_configmap_bucket" {
 }
 
+variable "oidc_issuer" {
+  type        = string
+  description = "OIDC issuer URL, without the https:// scheme, used in the IAM trust policy"
+  validation {
+    condition     = substr(var.oidc_issuer, 0, 8) != "https://"
+    error_message = "The oidc_issuer value must not start with https://."
+  }
+}
+
 variable "extra_permitted_roles" {
-  type = list(string)
-  default = []
+  type        = list(string)
+  default     = []
   description = "Additional role ARNs that can be assumed from this namespace through KIAM"
   validation {
     condition = var.extra_permitted_roles == [] ? true : (
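
Note on usage: the validation above rejects issuer values that still carry the https:// scheme, matching how the OIDC provider ARN and the ":sub" condition key are assembled in dependencies.tf. A minimal caller-side sketch of deriving a compliant value — the data source and variable names here are illustrative, not part of this change:

    # Hypothetical caller-side wiring; "eks" and var.eks_cluster_name are
    # illustrative names, not defined in this diff.
    data "aws_eks_cluster" "eks" {
      name = var.eks_cluster_name
    }

    locals {
      # trimprefix drops the scheme, e.g.
      # "https://oidc.eks.eu-west-1.amazonaws.com/id/ABC123"
      # becomes "oidc.eks.eu-west-1.amazonaws.com/id/ABC123".
      oidc_issuer = trimprefix(data.aws_eks_cluster.eks.identity[0].oidc[0].issuer, "https://")
    }

    module "blaster_namespace" {
      source      = "../../_sub/compute/k8s-blaster-namespace"
      # ...remaining arguments as in compute/k8s-services/main.tf...
      oidc_issuer = local.oidc_issuer
    }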
diff --git a/compute/k8s-services/dependencies.tf b/compute/k8s-services/dependencies.tf
index ba6364955..486dab903 100644
--- a/compute/k8s-services/dependencies.tf
+++ b/compute/k8s-services/dependencies.tf
@@ -162,8 +162,8 @@ locals {
 # --------------------------------------------------
 
 locals {
-  grafana_iam_role_name = "${var.eks_cluster_name}-monitoring-grafana-cloudwatch"
-  grafana_iam_role_arn = "arn:aws:iam::${var.aws_workload_account_id}:role/${local.grafana_iam_role_name}"
+  grafana_iam_role_name = "${var.eks_cluster_name}-monitoring-grafana-cloudwatch"
+  grafana_iam_role_arn  = "arn:aws:iam::${var.aws_workload_account_id}:role/${local.grafana_iam_role_name}"
 }
 
 # --------------------------------------------------
@@ -209,13 +209,13 @@ data "aws_iam_policy_document" "cloudwatch_metrics_trust" {
   }
 
   condition {
-    test = "StringEquals"
-    values = ["system:serviceaccount:${local.monitoring_namespace_name}:${var.monitoring_kube_prometheus_stack_grafana_serviceaccount_name}"]
+    test     = "StringEquals"
+    values   = ["system:serviceaccount:${local.monitoring_namespace_name}:${var.monitoring_kube_prometheus_stack_grafana_serviceaccount_name}"]
     variable = "${local.oidc_issuer}:sub"
   }
 
   actions = ["sts:AssumeRoleWithWebIdentity"]
-}
+  }
 }
 
 # ---------------------------------------------------------------------
diff --git a/compute/k8s-services/main.tf b/compute/k8s-services/main.tf
index 6a70a10c5..d0b544c1b 100644
--- a/compute/k8s-services/main.tf
+++ b/compute/k8s-services/main.tf
@@ -330,6 +330,7 @@ module "blaster_namespace" {
   blaster_configmap_bucket = data.terraform_remote_state.cluster.outputs.blaster_configmap_bucket
   kiam_server_role_arn     = module.kiam_deploy.server_role_arn
   extra_permitted_roles    = var.blaster_namespace_extra_permitted_roles
+  oidc_issuer              = local.oidc_issuer
 }
 
 
@@ -365,9 +366,9 @@ module "cloudwatch_alarm_alb_targets_health" {
 # --------------------------------------------------
 
 module "monitoring_namespace" {
-  source = "../../_sub/compute/k8s-namespace"
-  count = var.monitoring_namespace_deploy ? 1 : 0
-  name = local.monitoring_namespace_name
+  source = "../../_sub/compute/k8s-namespace"
+  count  = var.monitoring_namespace_deploy ? 1 : 0
+  name   = local.monitoring_namespace_name
 }
 
 
@@ -392,31 +393,31 @@ module "monitoring_goldpinger" {
 # --------------------------------------------------
 
 module "monitoring_kube_prometheus_stack" {
-  source = "../../_sub/compute/helm-kube-prometheus-stack"
-  count = var.monitoring_kube_prometheus_stack_deploy ? 1 : 0
-  cluster_name = var.eks_cluster_name
-  chart_version = var.monitoring_kube_prometheus_stack_chart_version
-  namespace = module.monitoring_namespace[0].name
-  priority_class = var.monitoring_kube_prometheus_stack_priority_class
-  grafana_admin_password = var.monitoring_kube_prometheus_stack_grafana_admin_password
-  grafana_ingress_path = var.monitoring_kube_prometheus_stack_grafana_ingress_path
-  grafana_host = "grafana.${var.eks_cluster_name}.${var.workload_dns_zone_name}"
-  grafana_notifier_name = "${var.eks_cluster_name}-alerting"
-  grafana_iam_role_arn = local.grafana_iam_role_arn # Coming from locals to avoid circular dependency between KIAM and Prometheus
+  source                      = "../../_sub/compute/helm-kube-prometheus-stack"
+  count                       = var.monitoring_kube_prometheus_stack_deploy ? 1 : 0
+  cluster_name                = var.eks_cluster_name
+  chart_version               = var.monitoring_kube_prometheus_stack_chart_version
+  namespace                   = module.monitoring_namespace[0].name
+  priority_class              = var.monitoring_kube_prometheus_stack_priority_class
+  grafana_admin_password      = var.monitoring_kube_prometheus_stack_grafana_admin_password
+  grafana_ingress_path        = var.monitoring_kube_prometheus_stack_grafana_ingress_path
+  grafana_host                = "grafana.${var.eks_cluster_name}.${var.workload_dns_zone_name}"
+  grafana_notifier_name       = "${var.eks_cluster_name}-alerting"
+  grafana_iam_role_arn        = local.grafana_iam_role_arn # Coming from locals to avoid circular dependency between KIAM and Prometheus
   grafana_serviceaccount_name = var.monitoring_kube_prometheus_stack_grafana_serviceaccount_name
-  slack_webhook = var.monitoring_kube_prometheus_stack_slack_webhook
-  prometheus_storageclass = var.monitoring_kube_prometheus_stack_prometheus_storageclass
-  prometheus_storage_size = var.monitoring_kube_prometheus_stack_prometheus_storage_size
-  prometheus_retention = var.monitoring_kube_prometheus_stack_prometheus_retention
-  slack_channel = var.monitoring_kube_prometheus_stack_slack_channel
-  target_namespaces = var.monitoring_kube_prometheus_stack_target_namespaces
-  github_owner = var.monitoring_kube_prometheus_stack_github_owner
-  repo_name = var.monitoring_kube_prometheus_stack_repo_name
-  repo_branch = var.monitoring_kube_prometheus_stack_repo_branch
-  prometheus_request_memory = var.monitoring_kube_prometheus_stack_prometheus_request_memory
-  prometheus_request_cpu = var.monitoring_kube_prometheus_stack_prometheus_request_cpu
-  prometheus_limit_memory = var.monitoring_kube_prometheus_stack_prometheus_limit_memory
-  prometheus_limit_cpu = var.monitoring_kube_prometheus_stack_prometheus_limit_cpu
+  slack_webhook               = var.monitoring_kube_prometheus_stack_slack_webhook
+  prometheus_storageclass     = var.monitoring_kube_prometheus_stack_prometheus_storageclass
+  prometheus_storage_size     = var.monitoring_kube_prometheus_stack_prometheus_storage_size
+  prometheus_retention        = var.monitoring_kube_prometheus_stack_prometheus_retention
+  slack_channel               = var.monitoring_kube_prometheus_stack_slack_channel
+  target_namespaces           = var.monitoring_kube_prometheus_stack_target_namespaces
+  github_owner                = var.monitoring_kube_prometheus_stack_github_owner
+  repo_name                   = var.monitoring_kube_prometheus_stack_repo_name
+  repo_branch                 = var.monitoring_kube_prometheus_stack_repo_branch
+  prometheus_request_memory   = var.monitoring_kube_prometheus_stack_prometheus_request_memory
+  prometheus_request_cpu      = var.monitoring_kube_prometheus_stack_prometheus_request_cpu
+  prometheus_limit_memory     = var.monitoring_kube_prometheus_stack_prometheus_limit_memory
+  prometheus_limit_cpu        = var.monitoring_kube_prometheus_stack_prometheus_limit_cpu
 
   providers = {
     github = github.fluxcd
@@ -621,11 +622,11 @@ module "velero_flux_manifests" {
 # --------------------------------------------------
 
 module "aws_subnet_exporter" {
-  source = "../../_sub/compute/k8s-subnet-exporter"
-  count = var.monitoring_kube_prometheus_stack_deploy ? 1 : 0
+  source         = "../../_sub/compute/k8s-subnet-exporter"
+  count          = var.monitoring_kube_prometheus_stack_deploy ? 1 : 0
   namespace_name = module.monitoring_namespace[0].name
   aws_account_id = var.aws_workload_account_id
-  aws_region = var.aws_region
-  image_tag = "0.2"
-  oidc_issuer = local.oidc_issuer
+  aws_region     = var.aws_region
+  image_tag      = "0.2"
+  oidc_issuer    = local.oidc_issuer
 }
\ No newline at end of file
diff --git a/compute/k8s-services/vars.tf b/compute/k8s-services/vars.tf
index 63669538b..acd6d9942 100644
--- a/compute/k8s-services/vars.tf
+++ b/compute/k8s-services/vars.tf
@@ -237,9 +237,9 @@ variable "monitoring_kube_prometheus_stack_grafana_notifier_name" {
 }
 
 variable "monitoring_kube_prometheus_stack_grafana_serviceaccount_name" {
-  type = string
+  type        = string
   description = "Grafana serviceaccount to be used for pod"
-  default = "grafana-cloudwatch"
+  default     = "grafana-cloudwatch"
 }
 
 variable "monitoring_kube_prometheus_stack_slack_webhook" {
@@ -679,9 +679,9 @@ variable "crossplane_metrics_enabled" {
 }
 
 variable "crossplane_aws_iam_role_name" {
-  type = string
+  type        = string
   description = ""
-  default = "provider-aws"
+  default     = "provider-aws"
 }
 
 # -------------
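
Note on completing the IRSA setup: the trust policy added in dependencies.tf only admits the subject system:serviceaccount:<self-service namespace>:k8s-janitor-sa, so the pod's service account must carry the matching role-ARN annotation before sts:AssumeRoleWithWebIdentity succeeds. A sketch of what that companion resource could look like inside the same module — it is not part of this diff, and its placement is an assumption:

    # Hypothetical companion resource, not included in this change: the
    # service account named in the trust policy, annotated so the EKS pod
    # identity webhook injects web-identity credentials for the role.
    resource "kubernetes_service_account" "k8s_janitor" {
      count = var.deploy ? 1 : 0

      metadata {
        name      = "k8s-janitor-sa" # must match local.k8s_janitor_serviceaccount_name
        namespace = kubernetes_namespace.self_service[0].metadata[0].name
        annotations = {
          "eks.amazonaws.com/role-arn" = aws_iam_role.k8s_janitor[0].arn
        }
      }
    }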