Skip to content

Commit

Permalink
Added separate IAM role for k8s-janitor (#428)
Browse files Browse the repository at this point in the history
  • Loading branch information
SEQUOIIA authored Feb 18, 2022
1 parent c465295 commit 729d41e
Show file tree
Hide file tree
Showing 6 changed files with 144 additions and 43 deletions.
67 changes: 67 additions & 0 deletions _sub/compute/k8s-blaster-namespace/dependencies.tf
Original file line number Diff line number Diff line change
@@ -0,0 +1,67 @@

# Identity of the AWS account this configuration is applied to. The account ID
# is interpolated into the OIDC provider ARN inside the k8s-janitor trust
# policy below.
data "aws_caller_identity" "workload_account" {
}

locals {
  # Kubernetes service account permitted to assume the k8s-janitor IAM role
  # via IRSA; referenced by the StringEquals condition in the trust policy.
  k8s_janitor_serviceaccount_name = "k8s-janitor-sa"
}

# Permission policy for the k8s-janitor workload: object-level read/write on a
# single S3 bucket, plus ListBucket on the bucket itself (ListBucket applies to
# the bucket ARN, not the object ARNs, hence the separate statement).
data "aws_iam_policy_document" "k8s_janitor" {
  # Only render the document when this sub-module is deployed at all.
  count = var.deploy ? 1 : 0
  statement {
    effect = "Allow"

    actions = [
      "s3:AbortMultipartUpload",
      "s3:DeleteObject",
      "s3:GetObject",
      "s3:GetObjectAcl",
      "s3:GetObjectTagging",
      "s3:GetObjectTorrent",
      "s3:GetObjectVersion",
      "s3:GetObjectVersionAcl",
      "s3:GetObjectVersionTagging",
      "s3:GetObjectVersionTorrent",
      "s3:ListBucketMultipartUploads",
      "s3:ListMultipartUploadParts",
      "s3:PutObject",
      "s3:PutObjectAcl",
      "s3:PutObjectTagging"
    ]

    # NOTE(review): bucket name is hard-coded rather than derived from a
    # variable (e.g. the blaster_configmap_bucket input) — confirm this is
    # intentional before reusing the module for other clusters/accounts.
    resources = ["arn:aws:s3:::dfds-oxygen-k8s-hellman/*"]
  }

  statement {
    effect = "Allow"

    actions = [
      "s3:ListBucket"
    ]

    resources = ["arn:aws:s3:::dfds-oxygen-k8s-hellman"]
  }
}

# Trust (assume-role) policy implementing IAM Roles for Service Accounts
# (IRSA): the cluster's OIDC provider may assume the role, but only for tokens
# whose "sub" claim matches the k8s-janitor service account in the
# self-service namespace.
data "aws_iam_policy_document" "k8s_janitor_trust" {
  count = var.deploy ? 1 : 0
  statement {
    effect = "Allow"

    principals {
      type = "Federated"

      # OIDC provider ARN is built from the current account ID plus the
      # issuer host/path passed in via var.oidc_issuer (no "https://" prefix —
      # enforced by the variable's validation block).
      identifiers = [
        "arn:aws:iam::${data.aws_caller_identity.workload_account.account_id}:oidc-provider/${var.oidc_issuer}",
      ]
    }

    condition {
      test = "StringEquals"
      # Restrict assumption to exactly one service account:
      # system:serviceaccount:<namespace>:<serviceaccount-name>.
      # The namespace name comes from the kubernetes_namespace resource
      # managed elsewhere in this module.
      values   = ["system:serviceaccount:${kubernetes_namespace.self_service[0].metadata[0].name}:${local.k8s_janitor_serviceaccount_name}"]
      variable = "${var.oidc_issuer}:sub"
    }

    actions = ["sts:AssumeRoleWithWebIdentity"]
  }
}
24 changes: 24 additions & 0 deletions _sub/compute/k8s-blaster-namespace/main.tf
Original file line number Diff line number Diff line change
Expand Up @@ -145,3 +145,27 @@ resource "aws_iam_role_policy_attachment" "param-store" {
role = element(concat(aws_iam_role.self_service.*.name, [""]), 0)
policy_arn = element(concat(aws_iam_policy.param_store.*.arn, [""]), 0)
}

# --------------------------------------------------
# k8s-janitor IAM role
# --------------------------------------------------

locals {
  # Name shared by the k8s-janitor IAM role and its inline policy below.
  k8s_janitor_iam_role_name = "k8s-janitor"
}

# IAM role assumed by the k8s-janitor service account through IRSA; the trust
# policy (dependencies.tf) limits assumption to that single service account.
resource "aws_iam_role" "k8s_janitor" {
  count                = var.deploy ? 1 : 0
  name                 = local.k8s_janitor_iam_role_name
  path                 = "/"
  description          = "Role for k8s-janitor to manage S3 buckets within its path"
  assume_role_policy   = data.aws_iam_policy_document.k8s_janitor_trust[0].json
  # 3600 s = 1 h, the AWS default/maximum-by-default session length.
  max_session_duration = 3600
}

# Inline policy attaching the S3 permissions document (dependencies.tf) to the
# k8s-janitor role; inline (rather than a managed policy) keeps its lifecycle
# tied to the role.
resource "aws_iam_role_policy" "k8s_janitor" {
  count  = var.deploy ? 1 : 0
  name   = local.k8s_janitor_iam_role_name
  role   = aws_iam_role.k8s_janitor[0].id
  policy = data.aws_iam_policy_document.k8s_janitor[0].json
}
13 changes: 11 additions & 2 deletions _sub/compute/k8s-blaster-namespace/vars.tf
Original file line number Diff line number Diff line change
Expand Up @@ -9,9 +9,18 @@ variable "cluster_name" {
variable "blaster_configmap_bucket" {
}

# OIDC issuer of the EKS cluster, as host/path WITHOUT the "https://" scheme
# (e.g. "oidc.eks.eu-west-1.amazonaws.com/id/ABC123"). It is concatenated into
# an "arn:aws:iam::<account>:oidc-provider/<issuer>" ARN and used as the
# condition-variable prefix ("<issuer>:sub") in the IRSA trust policy, both of
# which require the scheme-less form.
variable "oidc_issuer" {
  type        = string
  description = "Used for iam policy oidc trust"
  validation {
    # Reject values that still carry the scheme; comparing the first 8
    # characters is equivalent to a startswith("https://") check.
    condition     = substr(var.oidc_issuer, 0, 8) != "https://"
    error_message = "The oidc_issuer value must not start with \"https://\"; pass the issuer host/path only."
  }
}

variable "extra_permitted_roles" {
type = list(string)
default = []
type = list(string)
default = []
description = "Additional role ARNs that can be assumed from this namespace through KIAM"
validation {
condition = var.extra_permitted_roles == [] ? true : (
Expand Down
10 changes: 5 additions & 5 deletions compute/k8s-services/dependencies.tf
Original file line number Diff line number Diff line change
Expand Up @@ -162,8 +162,8 @@ locals {
# --------------------------------------------------

locals {
grafana_iam_role_name = "${var.eks_cluster_name}-monitoring-grafana-cloudwatch"
grafana_iam_role_arn = "arn:aws:iam::${var.aws_workload_account_id}:role/${local.grafana_iam_role_name}"
grafana_iam_role_name = "${var.eks_cluster_name}-monitoring-grafana-cloudwatch"
grafana_iam_role_arn = "arn:aws:iam::${var.aws_workload_account_id}:role/${local.grafana_iam_role_name}"
}

# --------------------------------------------------
Expand Down Expand Up @@ -209,13 +209,13 @@ data "aws_iam_policy_document" "cloudwatch_metrics_trust" {
}

condition {
test = "StringEquals"
values = ["system:serviceaccount:${local.monitoring_namespace_name}:${var.monitoring_kube_prometheus_stack_grafana_serviceaccount_name}"]
test = "StringEquals"
values = ["system:serviceaccount:${local.monitoring_namespace_name}:${var.monitoring_kube_prometheus_stack_grafana_serviceaccount_name}"]
variable = "${local.oidc_issuer}:sub"
}

actions = ["sts:AssumeRoleWithWebIdentity"]
}
}
}

# ---------------------------------------------------------------------
Expand Down
65 changes: 33 additions & 32 deletions compute/k8s-services/main.tf
Original file line number Diff line number Diff line change
Expand Up @@ -330,6 +330,7 @@ module "blaster_namespace" {
blaster_configmap_bucket = data.terraform_remote_state.cluster.outputs.blaster_configmap_bucket
kiam_server_role_arn = module.kiam_deploy.server_role_arn
extra_permitted_roles = var.blaster_namespace_extra_permitted_roles
oidc_issuer = local.oidc_issuer
}


Expand Down Expand Up @@ -365,9 +366,9 @@ module "cloudwatch_alarm_alb_targets_health" {
# --------------------------------------------------

module "monitoring_namespace" {
source = "../../_sub/compute/k8s-namespace"
count = var.monitoring_namespace_deploy ? 1 : 0
name = local.monitoring_namespace_name
source = "../../_sub/compute/k8s-namespace"
count = var.monitoring_namespace_deploy ? 1 : 0
name = local.monitoring_namespace_name
}


Expand All @@ -392,31 +393,31 @@ module "monitoring_goldpinger" {
# --------------------------------------------------

module "monitoring_kube_prometheus_stack" {
source = "../../_sub/compute/helm-kube-prometheus-stack"
count = var.monitoring_kube_prometheus_stack_deploy ? 1 : 0
cluster_name = var.eks_cluster_name
chart_version = var.monitoring_kube_prometheus_stack_chart_version
namespace = module.monitoring_namespace[0].name
priority_class = var.monitoring_kube_prometheus_stack_priority_class
grafana_admin_password = var.monitoring_kube_prometheus_stack_grafana_admin_password
grafana_ingress_path = var.monitoring_kube_prometheus_stack_grafana_ingress_path
grafana_host = "grafana.${var.eks_cluster_name}.${var.workload_dns_zone_name}"
grafana_notifier_name = "${var.eks_cluster_name}-alerting"
grafana_iam_role_arn = local.grafana_iam_role_arn # Coming from locals to avoid circular dependency between KIAM and Prometheus
source = "../../_sub/compute/helm-kube-prometheus-stack"
count = var.monitoring_kube_prometheus_stack_deploy ? 1 : 0
cluster_name = var.eks_cluster_name
chart_version = var.monitoring_kube_prometheus_stack_chart_version
namespace = module.monitoring_namespace[0].name
priority_class = var.monitoring_kube_prometheus_stack_priority_class
grafana_admin_password = var.monitoring_kube_prometheus_stack_grafana_admin_password
grafana_ingress_path = var.monitoring_kube_prometheus_stack_grafana_ingress_path
grafana_host = "grafana.${var.eks_cluster_name}.${var.workload_dns_zone_name}"
grafana_notifier_name = "${var.eks_cluster_name}-alerting"
grafana_iam_role_arn = local.grafana_iam_role_arn # Coming from locals to avoid circular dependency between KIAM and Prometheus
grafana_serviceaccount_name = var.monitoring_kube_prometheus_stack_grafana_serviceaccount_name
slack_webhook = var.monitoring_kube_prometheus_stack_slack_webhook
prometheus_storageclass = var.monitoring_kube_prometheus_stack_prometheus_storageclass
prometheus_storage_size = var.monitoring_kube_prometheus_stack_prometheus_storage_size
prometheus_retention = var.monitoring_kube_prometheus_stack_prometheus_retention
slack_channel = var.monitoring_kube_prometheus_stack_slack_channel
target_namespaces = var.monitoring_kube_prometheus_stack_target_namespaces
github_owner = var.monitoring_kube_prometheus_stack_github_owner
repo_name = var.monitoring_kube_prometheus_stack_repo_name
repo_branch = var.monitoring_kube_prometheus_stack_repo_branch
prometheus_request_memory = var.monitoring_kube_prometheus_stack_prometheus_request_memory
prometheus_request_cpu = var.monitoring_kube_prometheus_stack_prometheus_request_cpu
prometheus_limit_memory = var.monitoring_kube_prometheus_stack_prometheus_limit_memory
prometheus_limit_cpu = var.monitoring_kube_prometheus_stack_prometheus_limit_cpu
slack_webhook = var.monitoring_kube_prometheus_stack_slack_webhook
prometheus_storageclass = var.monitoring_kube_prometheus_stack_prometheus_storageclass
prometheus_storage_size = var.monitoring_kube_prometheus_stack_prometheus_storage_size
prometheus_retention = var.monitoring_kube_prometheus_stack_prometheus_retention
slack_channel = var.monitoring_kube_prometheus_stack_slack_channel
target_namespaces = var.monitoring_kube_prometheus_stack_target_namespaces
github_owner = var.monitoring_kube_prometheus_stack_github_owner
repo_name = var.monitoring_kube_prometheus_stack_repo_name
repo_branch = var.monitoring_kube_prometheus_stack_repo_branch
prometheus_request_memory = var.monitoring_kube_prometheus_stack_prometheus_request_memory
prometheus_request_cpu = var.monitoring_kube_prometheus_stack_prometheus_request_cpu
prometheus_limit_memory = var.monitoring_kube_prometheus_stack_prometheus_limit_memory
prometheus_limit_cpu = var.monitoring_kube_prometheus_stack_prometheus_limit_cpu

providers = {
github = github.fluxcd
Expand Down Expand Up @@ -621,11 +622,11 @@ module "velero_flux_manifests" {
# --------------------------------------------------

module "aws_subnet_exporter" {
source = "../../_sub/compute/k8s-subnet-exporter"
count = var.monitoring_kube_prometheus_stack_deploy ? 1 : 0
source = "../../_sub/compute/k8s-subnet-exporter"
count = var.monitoring_kube_prometheus_stack_deploy ? 1 : 0
namespace_name = module.monitoring_namespace[0].name
aws_account_id = var.aws_workload_account_id
aws_region = var.aws_region
image_tag = "0.2"
oidc_issuer = local.oidc_issuer
aws_region = var.aws_region
image_tag = "0.2"
oidc_issuer = local.oidc_issuer
}
8 changes: 4 additions & 4 deletions compute/k8s-services/vars.tf
Original file line number Diff line number Diff line change
Expand Up @@ -237,9 +237,9 @@ variable "monitoring_kube_prometheus_stack_grafana_notifier_name" {
}

variable "monitoring_kube_prometheus_stack_grafana_serviceaccount_name" {
type = string
type = string
description = "Grafana serviceaccount to be used for pod"
default = "grafana-cloudwatch"
default = "grafana-cloudwatch"
}

variable "monitoring_kube_prometheus_stack_slack_webhook" {
Expand Down Expand Up @@ -679,9 +679,9 @@ variable "crossplane_metrics_enabled" {
}

variable "crossplane_aws_iam_role_name" {
type = string
type = string
description = ""
default = "provider-aws"
default = "provider-aws"
}

# -------------
Expand Down

0 comments on commit 729d41e

Please sign in to comment.