ENG-4153 | Document setting up GKE Workload Identity/EKS Pod Identity #200

Merged
6 changes: 6 additions & 0 deletions vcluster/integrations/pod-identity/_category_.json
@@ -0,0 +1,6 @@
{
  "label": "Pod Identity",
  "position": "4",
  "collapsible": true,
  "collapsed": false
}
176 changes: 176 additions & 0 deletions vcluster/integrations/pod-identity/_code/eks-pod-identity.sh
@@ -0,0 +1,176 @@
#!/bin/bash

# Set up environment variables
export AWS_REGION="eu-central-1" # Replace with your actual AWS region if different
export CLUSTER_NAME="pod-identity-1" # Replace with your actual EKS cluster name if different
export NODE_INSTANCE_TYPE="t3.medium" # Replace with your actual instance type if different
export SERVICE_ACCOUNT_NAME="demo-sa" # Replace with your actual service account name if different
export SERVICE_ACCOUNT_NAMESPACE="default" # Replace with your actual namespace if different
export VCLUSTER_NAME="my-vcluster" # Replace with your actual vCluster name if different
export HOST=https://your.loft.host # Replace with your actual host
export AUTH_TOKEN=abcd1234 # Replace with your actual auth token
export AWS_ACCOUNT_ID=$(aws sts get-caller-identity --query Account --output text)
export SA_ROLE_NAME="AmazonEKSTFEBSCSIRole-${CLUSTER_NAME}"

# Define the function to get the KSA name (the synced service account name on the host cluster) using curl
get_ksa_name() {
  local vcluster_ksa_name=$1
  local vcluster_ksa_namespace=$2
  local vcluster_name=$3
  local host=$4
  local auth_token=$5

  local resource_path="/kubernetes/management/apis/management.loft.sh/v1/translatevclusterresourcenames"
  local host_with_scheme=$([[ $host =~ ^(http|https):// ]] && echo "$host" || echo "https://$host")
  local sanitized_host="${host_with_scheme%/}"
  local full_url="${sanitized_host}${resource_path}"

  local response=$(curl -s -k -X POST "$full_url" \
    -H "Content-Type: application/json" \
    -H "Authorization: Bearer ${auth_token}" \
    -d @- <<EOF
{
  "spec": {
    "name": "${vcluster_ksa_name}",
    "namespace": "${vcluster_ksa_namespace}",
    "vclusterName": "${vcluster_name}"
  }
}
EOF
  )

  local status_name=$(echo "$response" | jq -r '.status.name')
  if [[ -z "$status_name" || "$status_name" == "null" ]]; then
    # Write the error to stderr so it is not captured by the caller's command substitution
    echo "Error: Unable to fetch KSA name from response: $response" >&2
    exit 1
  fi
  echo "$status_name"
}

# Get the KSA name; abort if the lookup failed (exit inside the command substitution only exits the subshell)
KSA_NAME=$(get_ksa_name "$SERVICE_ACCOUNT_NAME" "$SERVICE_ACCOUNT_NAMESPACE" "$VCLUSTER_NAME" "$HOST" "$AUTH_TOKEN") || exit 1

# Create the EKS cluster using eksctl
eksctl create cluster \
  --name ${CLUSTER_NAME} \
  --region ${AWS_REGION} \
  --node-type ${NODE_INSTANCE_TYPE}

# Associate IAM OIDC provider with the EKS cluster
eksctl utils associate-iam-oidc-provider --region=${AWS_REGION} --cluster=${CLUSTER_NAME} --approve
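
# Optional sanity check: print the cluster's OIDC issuer URL to confirm the provider association
aws eks describe-cluster --name ${CLUSTER_NAME} --region ${AWS_REGION} --query "cluster.identity.oidc.issuer" --output text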

# Create IAM role for the EBS CSI driver and associate policy
eksctl create iamserviceaccount \
  --name ebs-csi-controller-sa \
  --namespace kube-system \
  --cluster ${CLUSTER_NAME} \
  --attach-policy-arn arn:aws:iam::aws:policy/service-role/AmazonEBSCSIDriverPolicy \
  --approve \
  --role-only \
  --role-name ${SA_ROLE_NAME} \
  --region ${AWS_REGION}

# Install the AWS EBS CSI driver as an EKS managed add-on
eksctl create addon \
  --name aws-ebs-csi-driver \
  --cluster ${CLUSTER_NAME} \
  --service-account-role-arn arn:aws:iam::${AWS_ACCOUNT_ID}:role/${SA_ROLE_NAME} \
  --region ${AWS_REGION} \
  --force
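
# Optionally wait for the add-on to report ACTIVE before continuing
aws eks describe-addon --cluster-name ${CLUSTER_NAME} --addon-name aws-ebs-csi-driver --region ${AWS_REGION} --query "addon.status" --output text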

# Deploy the EKS Pod Identity Agent as an EKS managed add-on
aws eks create-addon --region ${AWS_REGION} --cluster-name ${CLUSTER_NAME} --addon-name eks-pod-identity-agent --addon-version v1.0.0-eksbuild.1

# Wait for the EKS Pod Identity Agent to be up and running
sleep 60

kubectl get pods -n kube-system | grep 'eks-pod-identity-agent'

# Create vcluster.yaml content dynamically. Syncing service accounts to the
# host cluster is required so the pod identity association can reference the
# translated service account name.
cat <<EOF > vcluster.yaml
sync:
  toHost:
    serviceAccounts:
      enabled: true
EOF

# Deploy the vCluster using the vCluster CLI
vcluster create ${VCLUSTER_NAME} --namespace ${VCLUSTER_NAME} -f vcluster.yaml

# Connect to the vCluster
vcluster connect ${VCLUSTER_NAME} --namespace ${VCLUSTER_NAME}

# Create example-workload.yaml content dynamically
cat <<EOF > example-workload.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: demo-sa
  namespace: default
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: s3-list-buckets
  namespace: default
spec:
  replicas: 1
  selector:
    matchLabels:
      app: s3-list-buckets
  template:
    metadata:
      labels:
        app: s3-list-buckets
    spec:
      serviceAccountName: demo-sa
      containers:
        - image: public.ecr.aws/aws-cli/aws-cli
          command:
            - "aws"
            - "s3"
            - "ls"
          name: aws-pod
EOF

# Apply the example workload inside the vCluster
kubectl apply -f example-workload.yaml

# The example workload runs "aws s3 ls", which requires s3:ListAllMyBuckets
cat >my-policy.json <<EOF
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": [
        "s3:ListAllMyBuckets",
        "s3:GetObject"
      ],
      "Resource": "*"
    }
  ]
}
EOF

aws iam create-policy --policy-name my-policy --policy-document file://my-policy.json

cat >trust-relationship.json <<EOF
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Sid": "AllowEksAuthToAssumeRoleForPodIdentity",
      "Effect": "Allow",
      "Principal": {
        "Service": "pods.eks.amazonaws.com"
      },
      "Action": [
        "sts:AssumeRole",
        "sts:TagSession"
      ]
    }
  ]
}
EOF

aws iam create-role --role-name my-role --assume-role-policy-document file://trust-relationship.json --description "my-role-description"

aws iam attach-role-policy --role-name my-role --policy-arn=arn:aws:iam::${AWS_ACCOUNT_ID}:policy/my-policy

aws eks create-pod-identity-association --cluster-name ${CLUSTER_NAME} --role-arn arn:aws:iam::${AWS_ACCOUNT_ID}:role/my-role --namespace ${VCLUSTER_NAME} --service-account ${KSA_NAME}
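
# Optionally confirm the association exists for the synced service account
aws eks list-pod-identity-associations --cluster-name ${CLUSTER_NAME} --namespace ${VCLUSTER_NAME} --region ${AWS_REGION}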

# Recreate the workload pods so they pick up credentials from the new association,
# then check the logs for the bucket listing
kubectl rollout restart deployment s3-list-buckets -n default

kubectl logs -l app=s3-list-buckets -n default
209 changes: 209 additions & 0 deletions vcluster/integrations/pod-identity/_code/eks-pod-identity.tf
@@ -0,0 +1,209 @@
provider "aws" {
region = var.aws_region
}

provider "http" {
alias = "default"
}

provider "helm" {
kubernetes {
config_path = "${path.module}/kubeconfig_${var.cluster_name}"
}
}


# Filter out local zones, which are not currently supported
# with managed node groups
data "aws_availability_zones" "available" {
  filter {
    name   = "opt-in-status"
    values = ["opt-in-not-required"]
  }
}

locals {
  cluster_name = var.cluster_name
}

resource "random_string" "suffix" {
  length  = 8
  special = false
}

module "vpc" {
source = "terraform-aws-modules/vpc/aws"
version = "5.8.1"

name = "education-vpc"

cidr = "10.0.0.0/16"
azs = slice(data.aws_availability_zones.available.names, 0, 3)

private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]

enable_nat_gateway = true
single_nat_gateway = true
enable_dns_hostnames = true

public_subnet_tags = {
"kubernetes.io/role/elb" = 1
}

private_subnet_tags = {
"kubernetes.io/role/internal-elb" = 1
}
}

module "eks" {
source = "terraform-aws-modules/eks/aws"
version = "20.8.5"

cluster_name = local.cluster_name
cluster_version = "1.29"

cluster_endpoint_public_access = true
enable_cluster_creator_admin_permissions = true

cluster_addons = {
aws-ebs-csi-driver = {
service_account_role_arn = module.irsa-ebs-csi.iam_role_arn
}

eks-pod-identity-agent = {}
}

vpc_id = module.vpc.vpc_id
subnet_ids = module.vpc.private_subnets

eks_managed_node_group_defaults = {
ami_type = "AL2_x86_64"

}

eks_managed_node_groups = {
one = {
name = "node-group-1"

instance_types = ["t3.small"]

min_size = 1
max_size = 3
desired_size = 2
}

two = {
name = "node-group-2"

instance_types = ["t3.small"]

min_size = 1
max_size = 2
desired_size = 1
}
}
}

data "aws_iam_policy_document" "assume_role" {
statement {
effect = "Allow"

principals {
type = "Service"
identifiers = ["pods.eks.amazonaws.com"]
}

actions = [
"sts:AssumeRole",
"sts:TagSession"
]
}
}

resource "aws_iam_role" "example" {
name = "eks-pod-identity-example"
assume_role_policy = data.aws_iam_policy_document.assume_role.json
}

resource "aws_iam_role_policy_attachment" "example_s3" {
policy_arn = "arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess"
role = aws_iam_role.example.name
}


# https://aws.amazon.com/blogs/containers/amazon-ebs-csi-driver-is-now-generally-available-in-amazon-eks-add-ons/
data "aws_iam_policy" "ebs_csi_policy" {
  arn = "arn:aws:iam::aws:policy/service-role/AmazonEBSCSIDriverPolicy"
}

module "irsa-ebs-csi" {
  source  = "terraform-aws-modules/iam/aws//modules/iam-assumable-role-with-oidc"
  version = "5.39.0"

  create_role                   = true
  role_name                     = "AmazonEKSTFEBSCSIRole-${module.eks.cluster_name}"
  provider_url                  = module.eks.oidc_provider
  role_policy_arns              = [data.aws_iam_policy.ebs_csi_policy.arn]
  oidc_fully_qualified_subjects = ["system:serviceaccount:kube-system:ebs-csi-controller-sa"]
}

resource "null_resource" "update_kubeconfig" {
provisioner "local-exec" {
command = <<EOT
eksctl utils write-kubeconfig --cluster=${module.eks.cluster_name} --region=${var.aws_region} --kubeconfig=${path.module}/kubeconfig_${module.eks.cluster_name}
EOT
}
depends_on = [ module.eks ]
}

module "synced_service_account_name" {
source = "github.com/loft-sh/vcluster-terraform-modules//single-namespace-rename"

providers = {
http.default = http.default
}


host = "https://localhost:8080"
auth_token = var.auth_token
resource_name = var.service_account_name
resource_namespace = var.service_account_namespace
vcluster_name = var.vcluster_name
}

resource "helm_release" "my_vcluster" {
name = var.vcluster_name
namespace = var.vcluster_name
create_namespace = true

repository = "https://charts.loft.sh"
chart = "vcluster"
version = "0.20.0-beta.6"

values = [
file("${path.module}/vcluster.yaml")
]

depends_on = [ null_resource.update_kubeconfig ]
}

resource "null_resource" "apply_example_workload" {
provisioner "local-exec" {
command = "vcluster connect ${var.vcluster_name} -n ${var.vcluster_name} -- kubectl apply -f ${path.module}/example-workload.yaml"
environment = {
"KUBECONFIG" = "${path.module}/kubeconfig_${module.eks.cluster_name}"
}
}

depends_on = [ helm_release.my_vcluster ]
}

resource "aws_eks_pod_identity_association" "example" {
cluster_name = module.eks.cluster_name
namespace = var.vcluster_name
service_account = module.synced_service_account_name.name
role_arn = aws_iam_role.example.arn

depends_on = [ null_resource.apply_example_workload ]
}
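
The configuration above references input variables that are declared outside this file. A minimal variables.tf sketch, with defaults assumed to mirror the shell-script walkthrough (adjust to your environment):

variable "aws_region" {
  type    = string
  default = "eu-central-1"
}

variable "cluster_name" {
  type    = string
  default = "pod-identity-1"
}

variable "auth_token" {
  type      = string
  sensitive = true
}

variable "service_account_name" {
  type    = string
  default = "demo-sa"
}

variable "service_account_namespace" {
  type    = string
  default = "default"
}

variable "vcluster_name" {
  type    = string
  default = "my-vcluster"
}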