diff --git a/.project_automation/functional_tests/entrypoint.sh b/.project_automation/functional_tests/entrypoint.sh index dc6068a..fda5377 100755 --- a/.project_automation/functional_tests/entrypoint.sh +++ b/.project_automation/functional_tests/entrypoint.sh @@ -41,6 +41,10 @@ run_test() { # Run taskcat e2e test run_test "cw-test" +run_test "cw-test-sra" + +run_test "cw-eks-test" + run_test "cw-test-ct" run_test "cw-test-ssm" diff --git a/.taskcat.yml b/.taskcat.yml index 5837eb0..c249bf1 100644 --- a/.taskcat.yml +++ b/.taskcat.yml @@ -18,6 +18,20 @@ tests: regions: - us-east-1 template: templates/crowdstrike_init_stack.yaml + cw-eks-test: + parameters: + FalconClientID: $[taskcat_ssm_/crowdstrike/falcon_client_id] + FalconSecret: $[taskcat_ssm_/crowdstrike/falcon_secret] + DockerAPIToken: $[taskcat_ssm_/crowdstrike/falcon_docker_api_token] + FalconCID: $[taskcat_ssm_/crowdstrike/falcon_cod] + EventBusAccount: $[taskcat_ssm_/crowdstrike/eventbus_account] + SourceS3BucketName: $[taskcat_autobucket] + S3BucketRegion: $[taskcat_current_region] + ProvisionOU: $[taskcat_ssm_/crowdstrike/provision-ou] + ExcludeRegions: $[taskcat_ssm_/crowdstrike/exclude_regions] + regions: + - us-east-1 + template: templates/crowdstrike_init_stack.yaml cw-test-trail: parameters: FalconClientID: $[taskcat_ssm_/crowdstrike/falcon_client_id] diff --git a/codebuild/source/buildspec.yml b/codebuild/source/buildspec.yml new file mode 100644 index 0000000..6fcf728 --- /dev/null +++ b/codebuild/source/buildspec.yml @@ -0,0 +1,34 @@ +version: 0.2 + +phases: + install: + on-failure: ABORT + commands: + - echo "Installing Prerequisites" + - apt-get -qq update + - apt-get -qq install -y python3 + - apt-get -qq install -y python3-pip + - pip3 install boto3 --quiet + - pip3 install botocore --quiet + - curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.28.5/2024-01-04/bin/linux/amd64/kubectl + - chmod +x ./kubectl + - mkdir -p $HOME/bin && cp ./kubectl $HOME/bin/kubectl && export PATH=$HOME/bin:$PATH + - curl https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash + - helm repo add crowdstrike https://crowdstrike.github.io/falcon-helm && helm repo update + - ARCH=amd64 + - PLATFORM=$(uname -s)_$ARCH + - curl -sLO "https://github.com/eksctl-io/eksctl/releases/latest/download/eksctl_$PLATFORM.tar.gz" + - tar -xzf eksctl_$PLATFORM.tar.gz -C /tmp && rm eksctl_$PLATFORM.tar.gz + - mv /tmp/eksctl /usr/local/bin + pre_build: + on-failure: ABORT + commands: + - python3 setup_cluster.py + - chmod +x setup_images.sh && ./setup_images.sh + - . 
/root/.bashrc + - chmod +x setup_manifests.sh && ./setup_manifests.sh + build: + on-failure: ABORT + commands: + - chmod +x install_sensor_$NODE_TYPE.sh + - ./install_sensor_$NODE_TYPE.sh \ No newline at end of file diff --git a/codebuild/source/falcon_admission.yaml b/codebuild/source/falcon_admission.yaml new file mode 100644 index 0000000..7663ff5 --- /dev/null +++ b/codebuild/source/falcon_admission.yaml @@ -0,0 +1,11 @@ +apiVersion: falcon.crowdstrike.com/v1alpha1 +kind: FalconAdmission +metadata: + name: falcon-admission +spec: + falcon_api: + client_id: FALCON_CLIENT_ID + client_secret: FALCON_CLIENT_SECRET + cloud_region: autodiscover + registry: + type: REGISTRY diff --git a/codebuild/source/install_sensor_fargate.sh b/codebuild/source/install_sensor_fargate.sh new file mode 100755 index 0000000..025ada0 --- /dev/null +++ b/codebuild/source/install_sensor_fargate.sh @@ -0,0 +1,57 @@ +#!/bin/bash + +role_arn=arn:aws:iam::${ACCOUNT_ID}:role/${SWITCH_ROLE} +OUT=$(aws sts assume-role --role-arn $role_arn --role-session-name crowdstrike-eks-codebuild);\ +export AWS_ACCESS_KEY_ID=$(echo $OUT | jq -r '.Credentials''.AccessKeyId');\ +export AWS_SECRET_ACCESS_KEY=$(echo $OUT | jq -r '.Credentials''.SecretAccessKey');\ +export AWS_SESSION_TOKEN=$(echo $OUT | jq -r '.Credentials''.SessionToken'); + +echo "Creating kubeconfig for $CLUSTER" +aws eks update-kubeconfig --region $AWS_REGION --name $CLUSTER + +export AWS_ACCESS_KEY_ID="" +export AWS_SECRET_ACCESS_KEY="" +export AWS_SESSION_TOKEN="" + +pods=$(kubectl get pods -A) +case "$pods" in + *kpagent*) + echo "Protection Agent already installed on cluster: $CLUSTER" + ;; + *) + echo "Installing Protection Agent..." + helm upgrade --install -f kpa_config.value --create-namespace -n falcon-kubernetes-protection kpagent crowdstrike/cs-k8s-protection-agent + ;; +esac +case "$pods" in + *falcon-operator*) + echo "Operator already installed on cluster: $CLUSTER" + ;; + *) + echo "Installing Operator..." + eksctl create fargateprofile --region $AWS_REGION --cluster $CLUSTER --name fp-falcon-operator --namespace falcon-operator + kubectl apply -f https://github.com/CrowdStrike/falcon-operator/releases/latest/download/falcon-operator.yaml + ;; +esac +case "$pods" in + *falcon-sidecar-sensor*) + echo "Sensor already installed on cluster: $CLUSTER" + ;; + *) + echo "Installing sensor..." + eksctl create fargateprofile --region $AWS_REGION --cluster $CLUSTER --name fp-falcon-system --namespace falcon-system + kubectl create -f sidecar_sensor.yaml + ;; +esac +if [ $ENABLE_KAC == "true" ]; then + case "$pods" in + *falcon-admission*) + echo "Admission Controller already installed on cluster: $CLUSTER" + ;; + *) + echo "Installing Admission Controller..." 
+ eksctl create fargateprofile --region $AWS_REGION --cluster $CLUSTER --name fp-falcon-kac --namespace falcon-kac + kubectl create -f falcon_admission.yaml + ;; + esac +fi diff --git a/codebuild/source/install_sensor_nodegroup.sh b/codebuild/source/install_sensor_nodegroup.sh new file mode 100755 index 0000000..c672fff --- /dev/null +++ b/codebuild/source/install_sensor_nodegroup.sh @@ -0,0 +1,65 @@ +#!/bin/bash + +role_arn=arn:aws:iam::${ACCOUNT_ID}:role/${SWITCH_ROLE} +OUT=$(aws sts assume-role --role-arn $role_arn --role-session-name crowdstrike-eks-codebuild);\ +export AWS_ACCESS_KEY_ID=$(echo $OUT | jq -r '.Credentials''.AccessKeyId');\ +export AWS_SECRET_ACCESS_KEY=$(echo $OUT | jq -r '.Credentials''.SecretAccessKey');\ +export AWS_SESSION_TOKEN=$(echo $OUT | jq -r '.Credentials''.SessionToken'); + +echo "Creating kubeconfig for $CLUSTER" +aws eks update-kubeconfig --region $AWS_REGION --name $CLUSTER + +export AWS_ACCESS_KEY_ID="" +export AWS_SECRET_ACCESS_KEY="" +export AWS_SESSION_TOKEN="" + +pods=$(kubectl get pods -A) +case "$pods" in + *kpagent*) + echo "Protection Agent already installed on cluster: $CLUSTER" + ;; + *) + echo "Installing Protection Agent..." + helm upgrade --install -f kpa_config.value --create-namespace -n falcon-kubernetes-protection kpagent crowdstrike/cs-k8s-protection-agent + ;; +esac +case "$pods" in + *falcon-operator*) + echo "Operator already installed on cluster: $CLUSTER" + ;; + *) + echo "Installing Operator..." + if [ $REGISTRY == "ecr" ]; then + eksctl utils associate-iam-oidc-provider --region $AWS_REGION --cluster $CLUSTER --approve + kubectl apply -f https://github.com/CrowdStrike/falcon-operator/releases/latest/download/falcon-operator.yaml + kubectl set env -n falcon-operator deployment/falcon-operator-controller-manager AWS_REGION=$IMAGE_REGION + else + kubectl apply -f https://github.com/CrowdStrike/falcon-operator/releases/latest/download/falcon-operator.yaml + fi + ;; +esac +case "$pods" in + *falcon-node-sensor*) + echo "Sensor already installed on cluster: $CLUSTER" + ;; + *) + + echo "Installing node sensor..." + if [ $REGISTRY == "ecr" ]; then + kubectl create -f node_sensor_ecr.yaml + else + kubectl create -f node_sensor.yaml + fi + ;; +esac +if [ $ENABLE_KAC == "true" ]; then + case "$pods" in + *falcon-admission*) + echo "Admission Controller already installed on cluster: $CLUSTER" + ;; + *) + echo "Installing Admission Controller..." 
+ kubectl create -f falcon_admission.yaml + ;; + esac +fi diff --git a/codebuild/source/kpa_config.value b/codebuild/source/kpa_config.value new file mode 100644 index 0000000..09d2fac --- /dev/null +++ b/codebuild/source/kpa_config.value @@ -0,0 +1,10 @@ +image: + repository: KPA_URI + tag: KPA_TAG +crowdstrikeConfig: + clientID: FALCON_CLIENT_ID + clientSecret: FALCON_CLIENT_SECRET + clusterName: CLUSTER_ARN + env: CROWDSTRIKE_CLOUD + cid: CID_LOWER + dockerAPIToken: DOCKER_API_TOKEN diff --git a/codebuild/source/node_sensor.yaml b/codebuild/source/node_sensor.yaml new file mode 100644 index 0000000..43e436e --- /dev/null +++ b/codebuild/source/node_sensor.yaml @@ -0,0 +1,14 @@ +apiVersion: falcon.crowdstrike.com/v1alpha1 +kind: FalconNodeSensor +metadata: + name: falcon-node-sensor +spec: + falcon_api: + client_id: FALCON_CLIENT_ID + client_secret: FALCON_CLIENT_SECRET + cloud_region: autodiscover + node: + backend: BACKEND + falcon: + tags: + - daemonset \ No newline at end of file diff --git a/codebuild/source/node_sensor_ecr.yaml b/codebuild/source/node_sensor_ecr.yaml new file mode 100644 index 0000000..3ca423b --- /dev/null +++ b/codebuild/source/node_sensor_ecr.yaml @@ -0,0 +1,21 @@ +apiVersion: falcon.crowdstrike.com/v1alpha1 +kind: FalconNodeSensor +metadata: + labels: + crowdstrike.com/component: sample + crowdstrike.com/created-by: falcon-operator + crowdstrike.com/instance: falcon-node-sensor + crowdstrike.com/managed-by: kustomize + crowdstrike.com/name: falconnodesensor + crowdstrike.com/part-of: Falcon + crowdstrike.com/provider: crowdstrike + name: falcon-node-sensor +spec: + node: + backend: BACKEND + image: NODE_SENSOR_URI:NODE_SENSOR_TAG + falcon: + cid: CID + trace: none + tags: + - daemonset \ No newline at end of file diff --git a/codebuild/source/setup_cluster.py b/codebuild/source/setup_cluster.py new file mode 100644 index 0000000..eb74b02 --- /dev/null +++ b/codebuild/source/setup_cluster.py @@ -0,0 +1,116 @@ +import os +import time +import boto3 +import botocore + +AWS_REGION = os.environ['AWS_REGION'] +PRINCIPAL_ARN = os.environ['PRINCIPAL_ARN'] +USERNAME = os.environ['USERNAME'] +CLUSTER = os.environ['CLUSTER'] +NODETYPE = os.environ['NODE_TYPE'] +ACCOUNT_ID = os.environ['ACCOUNT_ID'] +REGION = os.environ['REGION'] +SWITCH_ROLE = os.environ['SWITCH_ROLE'] +NAT_IP = os.environ['NAT_IP'] +ACCESS_POLICY = 'arn:aws:eks::aws:cluster-access-policy/AmazonEKSClusterAdminPolicy' + +def check_cluster(session): + client = session.client( + service_name='eks', + region_name=AWS_REGION + ) + + cluster_details = client.describe_cluster( + name=CLUSTER + ) + public_access_cidrs = cluster_details.get('cluster', {}).get('resourcesVpcConfig', {}).get('publicAccessCidrs') + while 'ACTIVE' not in cluster_details.get('cluster', {}).get('status'): + time.sleep(60) + cluster_details = client.describe_cluster( + name=CLUSTER + ) + else: + print(f'Cluster {CLUSTER} is now active') + return public_access_cidrs + +def setup_cluster(session, public_access_cidrs): + client = session.client( + service_name='eks', + region_name=AWS_REGION + ) + + try: + print(f'Adding access entry for {CLUSTER}') + client.create_access_entry( + clusterName=CLUSTER, + principalArn=PRINCIPAL_ARN, + username=USERNAME, + type='STANDARD' + ) + + except botocore.exceptions.ClientError as error: + if error.response['Error']['Code'] == "ResourceInUseException": + print(f'Skipping Access Entry for {CLUSTER}: {PRINCIPAL_ARN} already exists') + else: + print(error) + try: + print(f'Adding access policy for 
{CLUSTER}') + client.associate_access_policy( + clusterName=CLUSTER, + principalArn=PRINCIPAL_ARN, + policyArn=ACCESS_POLICY, + accessScope={ + 'type': 'cluster' + } + ) + except botocore.exceptions.ClientError as error: + print(error) + try: + print(f'Adding NAT IP for {CLUSTER}') + public_access_cidrs.append(f'{NAT_IP}/32') + response = session.update_cluster_config( + name=CLUSTER, + resourcesVpcConfig={ + 'publicAccessCidrs': public_access_cidrs + } + ) + update_id = response['update']['id'] + update_response = client.describe_update( + name=CLUSTER, + updateId=update_id + ) + while update_response['update']['status'] in 'InProgress': + print('waiting for update to complete...') + time.sleep(30) + update_response = client.describe_update( + name=CLUSTER, + updateId=update_id + ) + except botocore.exceptions.ClientError as error: + print(error) + print(f'Cluster: {CLUSTER} is now setup') + return + +def new_session(): + try: + sts_connection = boto3.client('sts') + credentials = sts_connection.assume_role( + RoleArn=f'arn:aws:iam::{ACCOUNT_ID}:role/{SWITCH_ROLE}', + RoleSessionName=f'crowdstrike-eks-{ACCOUNT_ID}' + ) + return boto3.session.Session( + aws_access_key_id=credentials['Credentials']['AccessKeyId'], + aws_secret_access_key=credentials['Credentials']['SecretAccessKey'], + aws_session_token=credentials['Credentials']['SessionToken'], + region_name=REGION + ) + except sts_connection.exceptions.ClientError as exc: + # Print the error and continue. + # Handle what to do with accounts that cannot be accessed + # due to assuming role errors. + print("Cannot access adjacent account: ", ACCOUNT_ID, exc) + return None + +session = new_session() +public_access_cidrs = check_cluster(session) +setup_cluster(session, public_access_cidrs) \ No newline at end of file diff --git a/codebuild/source/setup_images.sh b/codebuild/source/setup_images.sh new file mode 100644 index 0000000..71a4737 --- /dev/null +++ b/codebuild/source/setup_images.sh @@ -0,0 +1,36 @@ +#!/bin/bash +echo "Registry is $REGISTRY" +if [ $REGISTRY == "ecr" ]; then + + echo "Getting ECR login..." + aws ecr get-login-password --region $IMAGE_REGION | docker login --username AWS --password-stdin $ACCOUNT_ID.dkr.ecr.$IMAGE_REGION.amazonaws.com + ecr_uri="$ACCOUNT_ID.dkr.ecr.$IMAGE_REGION.amazonaws.com/crowdstrike" + + echo "Pushing Node Sensor image..." + + node_sensor_repo="crowdstrike/falcon-sensor" + curl https://raw.githubusercontent.com/CrowdStrike/falcon-scripts/main/bash/containers/falcon-container-sensor-pull/falcon-container-sensor-pull.sh | bash -s -- -u $FALCON_CLIENT_ID -s $FALCON_CLIENT_SECRET -t 'falcon-sensor' -c $ecr_uri + echo "export NODE_SENSOR_URI=$ACCOUNT_ID.dkr.ecr.$IMAGE_REGION.amazonaws.com/$node_sensor_repo" >> /root/.bashrc + node_sensor_tag=$(aws ecr list-images --repository-name $node_sensor_repo --query 'imageIds[*].imageTag' --output text) + echo "export NODE_SENSOR_TAG=$node_sensor_tag" >> /root/.bashrc + + echo "Pushing KPA image..." 
+ + kpa_repo="crowdstrike/kpagent" + curl https://raw.githubusercontent.com/CrowdStrike/falcon-scripts/main/bash/containers/falcon-container-sensor-pull/falcon-container-sensor-pull.sh | bash -s -- -u $FALCON_CLIENT_ID -s $FALCON_CLIENT_SECRET -t 'kpagent' -c $ecr_uri + echo "export KPA_URI=$ACCOUNT_ID.dkr.ecr.$IMAGE_REGION.amazonaws.com/$kpa_repo" >> /root/.bashrc + kpa_tag=$(aws ecr list-images --repository-name $kpa_repo --query 'imageIds[*].imageTag' --output text) + echo "export KPA_TAG=$kpa_tag" >> /root/.bashrc + +elif [ $REGISTRY == "crowdstrike" ]; then + + echo "Getting KPA image..." + + curl https://raw.githubusercontent.com/CrowdStrike/falcon-scripts/main/bash/containers/falcon-container-sensor-pull/falcon-container-sensor-pull.sh | bash -s -- -u $FALCON_CLIENT_ID -s $FALCON_CLIENT_SECRET -t 'kpagent' + echo "export KPA_URI=registry.crowdstrike.com/kubernetes_protection/kpagent" >> /root/.bashrc + kpa_tag=$(docker images registry.crowdstrike.com/kubernetes_protection/kpagent --format "{{.Tag}}") + echo "export KPA_TAG=$kpa_tag" >> /root/.bashrc + +else + echo "Missing env variable REGISTRY" +fi \ No newline at end of file diff --git a/codebuild/source/setup_manifests.sh b/codebuild/source/setup_manifests.sh new file mode 100644 index 0000000..13e8693 --- /dev/null +++ b/codebuild/source/setup_manifests.sh @@ -0,0 +1,28 @@ +#!/bin/bash +CID_LOWER=$(echo $CID | cut -d '-' -f 1 | tr '[:upper:]' '[:lower:]') +sed -i "s~FALCON_CLIENT_ID~$FALCON_CLIENT_ID~" kpa_config.value +sed -i "s~FALCON_CLIENT_SECRET~$FALCON_CLIENT_SECRET~" kpa_config.value +sed -i "s~KPA_URI~$KPA_URI~" kpa_config.value +sed -i "s~KPA_TAG~$KPA_TAG~" kpa_config.value +sed -i "s~CLUSTER_ARN~$CLUSTER_ARN~" kpa_config.value +sed -i "s~CROWDSTRIKE_CLOUD~$CROWDSTRIKE_CLOUD~" kpa_config.value +sed -i "s~CID_LOWER~$CID_LOWER~" kpa_config.value +sed -i "s~DOCKER_API_TOKEN~$DOCKER_API_TOKEN~" kpa_config.value + +if [ $REGISTRY == "ecr" ]; then + sed -i "s~NODE_SENSOR_URI~$NODE_SENSOR_URI~" node_sensor_ecr.yaml + sed -i "s~NODE_SENSOR_TAG~$NODE_SENSOR_TAG~" node_sensor_ecr.yaml + sed -i "s~BACKEND~$BACKEND~" node_sensor_ecr.yaml + sed -i "s~CID~$CID~" node_sensor_ecr.yaml +elif [ $REGISTRY == "crowdstrike" ]; then + sed -i "s~FALCON_CLIENT_ID~$FALCON_CLIENT_ID~" node_sensor.yaml + sed -i "s~FALCON_CLIENT_SECRET~$FALCON_CLIENT_SECRET~" node_sensor.yaml + sed -i "s~BACKEND~$BACKEND~" node_sensor.yaml +fi + +sed -i "s~REGISTRY~$REGISTRY~" sidecar_sensor.yaml +sed -i "s~REGISTRY~$REGISTRY~" falcon_admission.yaml +sed -i "s~FALCON_CLIENT_ID~$FALCON_CLIENT_ID~" sidecar_sensor.yaml +sed -i "s~FALCON_CLIENT_SECRET~$FALCON_CLIENT_SECRET~" sidecar_sensor.yaml +sed -i "s~FALCON_CLIENT_ID~$FALCON_CLIENT_ID~" falcon_admission.yaml +sed -i "s~FALCON_CLIENT_SECRET~$FALCON_CLIENT_SECRET~" falcon_admission.yaml diff --git a/codebuild/source/sidecar_sensor.yaml b/codebuild/source/sidecar_sensor.yaml new file mode 100644 index 0000000..4d9d113 --- /dev/null +++ b/codebuild/source/sidecar_sensor.yaml @@ -0,0 +1,14 @@ +apiVersion: falcon.crowdstrike.com/v1alpha1 +kind: FalconContainer +metadata: + name: falcon-sidecar-sensor +spec: + falcon_api: + client_id: FALCON_CLIENT_ID + client_secret: FALCON_CLIENT_SECRET + cloud_region: autodiscover + registry: + type: REGISTRY + injector: + disableDefaultNamespaceInjection: true + disableDefaultPodInjection: true \ No newline at end of file diff --git a/guide/content/architecture.md b/guide/content/architecture.md index 6c63fcf..4d6cf5e 100644 --- a/guide/content/architecture.md +++ 
b/guide/content/architecture.md @@ -6,13 +6,10 @@ description: Solution architecture. Deploying this ABI solution with default parameters builds the following architecture. -![CSPM Architecture diagram](/images/cspm_architecture.png) - -![SSM Distributor Architecture diagram](/images/distributor_architecture.png) - -![Sensor Management Architecture diagram](/images/sensor_architecture.png) - -As shown in the diagram, the solution sets up the following: +### CSPM Architecture +![CSPM Architecture diagram](/images/cspm_architecture.png) +### Sensor Management (OneClick) +![Sensor Management Architecture diagram](/images/sensor_architecture.png) * In all current accounts in your AWS organization: * IAM role that allows CrowdStrike to perform read-only activities. @@ -40,4 +37,31 @@ As shown in the diagram, the solution sets up the following: * IAM role that allows SSM Associations to retrive API Credentials from Secrets Manager. * SSM Associations to deploy Falcon Sensor via SSM Distributor Package against SSM-Managed instances. +### SSM Distributor +![SSM Distributor Architecture diagram](/images/distributor_architecture.png) +* In the child AWS accounts: + * Secrets Manager Secret to manage CrowdStrike API Credentials. + * IAM role that allows SSM Associations to retrive API Credentials from Secrets Manager. + * SSM Associations to deploy Falcon Sensor via SSM Distributor Package against SSM-Managed instances. + +### EKS Protection +![EKS Protection Diagram](/images/eks-protect-diagram.png) +* If you enable EKS Protection: + * In the centralized account: + * IAM Role for EventBridge to trigger Lambda + * IAM Role for Lambda Execution + * IAM Role for CodeBuild Execution + * EventBus to receive cluster events + * EventBridge Rule to trigger Lambda + * Lambda functions to process cluster events and trigger Codebuild + * CodeBuild project to apply Falcon Operator to EKS Clusters + * Secret to store Falcon API key + * Optional ECR repositories if registry = ecr + * VPC, NAT, EIP for CodeBuild project + * In the child accounts: + * IAM Role for EventBridge to trigger Lambda + * IAM Role for Lambda Execution + * IAM Role for CodeBuild Execution + * EventBridge Rule to send cluster events to centralized EventBus + **Next:** Choose [Deployment options](/deployment-options/index.html). \ No newline at end of file diff --git a/guide/content/deployment-steps.md b/guide/content/deployment-steps.md index 6c5338e..7f23c79 100644 --- a/guide/content/deployment-steps.md +++ b/guide/content/deployment-steps.md @@ -56,6 +56,21 @@ description: Deployment steps. * **Governed Regions**: If Create Default Org Trail = true: for AWS Control Tower, set to ct-regions (default). If not using AWS Control Tower, specify comma separated list of regions (e.g. us-west-2,us-east-1,ap-south-1) in lower case. * **Security Account Id**: If Create Default Org Trail = true: AWS Account ID of the Security Tooling account (ignored for AWS Control Tower environments). * **Log Archive Account Id**: If Create Default Org Trail = true: AWS Account ID of the Log Archive account (ignored for AWS Control Tower environments). + * EKS Protection + * **EKSProtection**: Enable CrowdStrike EKS Protection to automatically deploy Falcon Sensor against EKS Clusters. Allowed values include `true` or `false`. Default is `false` + * **FalconCID**: Your CrowdStrike Falcon CID with checksum. (eg. ********************************-ab) + * **DockerAPIToken**: Your Falcon Docker API Token + * **OrganizationId**: Your AWS Organization ID (eg. 
o-********) + * **EventBusAccount**: The account to centralize EKS Protection resources. This account must be the Organization Management Account or a Delegated Administrator. + * **EventBusName**: Name of the centralized EventBus. Default is `crowdstrike-eks-eventbus` + * **EventBridgeRoleName**: Name of the EventBridge IAM role. Default is `crowdstrike-eks-eventbridge-role` + * **EKSExecutionRoleName**: Name of the Target Execution IAM role. Default is `crowdstrike-eks-execution-role` + * **CodeBuildRoleName**: Name of the CodeBuild IAM role. Default is `crowdstrike-eks-codebuild-role` + * **CodeBuildProjectName**: Name of the CodeBuild Project. Default is `crowdstrike-eks-codebuild` + * **KubernetesUserName**: Name of the Kubernetes UserName. Default is `crowdstrike-eks` + * **Registry**: Source Falcon Image from CrowdStrike or mirror to ECR. Allowed values are `crowdstrike` or `ecr`. Default is `crowdstrike` + * **Backend**: kernel or bpf for Daemonset Sensor. Allowed Values are `kernel` or `bpf`. Default is `kernel` + * **EnableKAC**: Deploy Kubernetes Admission Controller (KAC). For more info see https://falcon.crowdstrike.com/documentation/page/aa4fccee/container-security#s41cbec3 3. Select both of the following capabilities and choose **Submit** to launch the stack. diff --git a/guide/content/images/eks-protect-diagram.png b/guide/content/images/eks-protect-diagram.png new file mode 100644 index 0000000..7dd88bd Binary files /dev/null and b/guide/content/images/eks-protect-diagram.png differ diff --git a/lambda_functions/source/codebuild/buildspec.yml b/lambda_functions/source/codebuild/buildspec.yml new file mode 100644 index 0000000..6fcf728 --- /dev/null +++ b/lambda_functions/source/codebuild/buildspec.yml @@ -0,0 +1,34 @@ +version: 0.2 + +phases: + install: + on-failure: ABORT + commands: + - echo "Installing Prerequisites" + - apt-get -qq update + - apt-get -qq install -y python3 + - apt-get -qq install -y python3-pip + - pip3 install boto3 --quiet + - pip3 install botocore --quiet + - curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/1.28.5/2024-01-04/bin/linux/amd64/kubectl + - chmod +x ./kubectl + - mkdir -p $HOME/bin && cp ./kubectl $HOME/bin/kubectl && export PATH=$HOME/bin:$PATH + - curl https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash + - helm repo add crowdstrike https://crowdstrike.github.io/falcon-helm && helm repo update + - ARCH=amd64 + - PLATFORM=$(uname -s)_$ARCH + - curl -sLO "https://github.com/eksctl-io/eksctl/releases/latest/download/eksctl_$PLATFORM.tar.gz" + - tar -xzf eksctl_$PLATFORM.tar.gz -C /tmp && rm eksctl_$PLATFORM.tar.gz + - mv /tmp/eksctl /usr/local/bin + pre_build: + on-failure: ABORT + commands: + - python3 setup_cluster.py + - chmod +x setup_images.sh && ./setup_images.sh + - . 
/root/.bashrc + - chmod +x setup_manifests.sh && ./setup_manifests.sh + build: + on-failure: ABORT + commands: + - chmod +x install_sensor_$NODE_TYPE.sh + - ./install_sensor_$NODE_TYPE.sh \ No newline at end of file diff --git a/lambda_functions/source/codebuild/falcon_admission.yaml b/lambda_functions/source/codebuild/falcon_admission.yaml new file mode 100644 index 0000000..7663ff5 --- /dev/null +++ b/lambda_functions/source/codebuild/falcon_admission.yaml @@ -0,0 +1,11 @@ +apiVersion: falcon.crowdstrike.com/v1alpha1 +kind: FalconAdmission +metadata: + name: falcon-admission +spec: + falcon_api: + client_id: FALCON_CLIENT_ID + client_secret: FALCON_CLIENT_SECRET + cloud_region: autodiscover + registry: + type: REGISTRY diff --git a/lambda_functions/source/codebuild/install_sensor_fargate.sh b/lambda_functions/source/codebuild/install_sensor_fargate.sh new file mode 100755 index 0000000..025ada0 --- /dev/null +++ b/lambda_functions/source/codebuild/install_sensor_fargate.sh @@ -0,0 +1,57 @@ +#!/bin/bash + +role_arn=arn:aws:iam::${ACCOUNT_ID}:role/${SWITCH_ROLE} +OUT=$(aws sts assume-role --role-arn $role_arn --role-session-name crowdstrike-eks-codebuild);\ +export AWS_ACCESS_KEY_ID=$(echo $OUT | jq -r '.Credentials''.AccessKeyId');\ +export AWS_SECRET_ACCESS_KEY=$(echo $OUT | jq -r '.Credentials''.SecretAccessKey');\ +export AWS_SESSION_TOKEN=$(echo $OUT | jq -r '.Credentials''.SessionToken'); + +echo "Creating kubeconfig for $CLUSTER" +aws eks update-kubeconfig --region $AWS_REGION --name $CLUSTER + +export AWS_ACCESS_KEY_ID="" +export AWS_SECRET_ACCESS_KEY="" +export AWS_SESSION_TOKEN="" + +pods=$(kubectl get pods -A) +case "$pods" in + *kpagent*) + echo "Protection Agent already installed on cluster: $CLUSTER" + ;; + *) + echo "Installing Protection Agent..." + helm upgrade --install -f kpa_config.value --create-namespace -n falcon-kubernetes-protection kpagent crowdstrike/cs-k8s-protection-agent + ;; +esac +case "$pods" in + *falcon-operator*) + echo "Operator already installed on cluster: $CLUSTER" + ;; + *) + echo "Installing Operator..." + eksctl create fargateprofile --region $AWS_REGION --cluster $CLUSTER --name fp-falcon-operator --namespace falcon-operator + kubectl apply -f https://github.com/CrowdStrike/falcon-operator/releases/latest/download/falcon-operator.yaml + ;; +esac +case "$pods" in + *falcon-sidecar-sensor*) + echo "Sensor already installed on cluster: $CLUSTER" + ;; + *) + echo "Installing sensor..." + eksctl create fargateprofile --region $AWS_REGION --cluster $CLUSTER --name fp-falcon-system --namespace falcon-system + kubectl create -f sidecar_sensor.yaml + ;; +esac +if [ $ENABLE_KAC == "true" ]; then + case "$pods" in + *falcon-admission*) + echo "Admission Controller already installed on cluster: $CLUSTER" + ;; + *) + echo "Installing Admission Controller..." 
+ eksctl create fargateprofile --region $AWS_REGION --cluster $CLUSTER --name fp-falcon-kac --namespace falcon-kac + kubectl create -f falcon_admission.yaml + ;; + esac +fi diff --git a/lambda_functions/source/codebuild/install_sensor_nodegroup.sh b/lambda_functions/source/codebuild/install_sensor_nodegroup.sh new file mode 100755 index 0000000..c672fff --- /dev/null +++ b/lambda_functions/source/codebuild/install_sensor_nodegroup.sh @@ -0,0 +1,65 @@ +#!/bin/bash + +role_arn=arn:aws:iam::${ACCOUNT_ID}:role/${SWITCH_ROLE} +OUT=$(aws sts assume-role --role-arn $role_arn --role-session-name crowdstrike-eks-codebuild);\ +export AWS_ACCESS_KEY_ID=$(echo $OUT | jq -r '.Credentials''.AccessKeyId');\ +export AWS_SECRET_ACCESS_KEY=$(echo $OUT | jq -r '.Credentials''.SecretAccessKey');\ +export AWS_SESSION_TOKEN=$(echo $OUT | jq -r '.Credentials''.SessionToken'); + +echo "Creating kubeconfig for $CLUSTER" +aws eks update-kubeconfig --region $AWS_REGION --name $CLUSTER + +export AWS_ACCESS_KEY_ID="" +export AWS_SECRET_ACCESS_KEY="" +export AWS_SESSION_TOKEN="" + +pods=$(kubectl get pods -A) +case "$pods" in + *kpagent*) + echo "Protection Agent already installed on cluster: $CLUSTER" + ;; + *) + echo "Installing Protection Agent..." + helm upgrade --install -f kpa_config.value --create-namespace -n falcon-kubernetes-protection kpagent crowdstrike/cs-k8s-protection-agent + ;; +esac +case "$pods" in + *falcon-operator*) + echo "Operator already installed on cluster: $CLUSTER" + ;; + *) + echo "Installing Operator..." + if [ $REGISTRY == "ecr" ]; then + eksctl utils associate-iam-oidc-provider --region $AWS_REGION --cluster $CLUSTER --approve + kubectl apply -f https://github.com/CrowdStrike/falcon-operator/releases/latest/download/falcon-operator.yaml + kubectl set env -n falcon-operator deployment/falcon-operator-controller-manager AWS_REGION=$IMAGE_REGION + else + kubectl apply -f https://github.com/CrowdStrike/falcon-operator/releases/latest/download/falcon-operator.yaml + fi + ;; +esac +case "$pods" in + *falcon-node-sensor*) + echo "Sensor already installed on cluster: $CLUSTER" + ;; + *) + + echo "Installing node sensor..." + if [ $REGISTRY == "ecr" ]; then + kubectl create -f node_sensor_ecr.yaml + else + kubectl create -f node_sensor.yaml + fi + ;; +esac +if [ $ENABLE_KAC == "true" ]; then + case "$pods" in + *falcon-admission*) + echo "Admission Controller already installed on cluster: $CLUSTER" + ;; + *) + echo "Installing Admission Controller..." 
+ kubectl create -f falcon_admission.yaml + ;; + esac +fi diff --git a/lambda_functions/source/codebuild/kpa_config.value b/lambda_functions/source/codebuild/kpa_config.value new file mode 100644 index 0000000..09d2fac --- /dev/null +++ b/lambda_functions/source/codebuild/kpa_config.value @@ -0,0 +1,10 @@ +image: + repository: KPA_URI + tag: KPA_TAG +crowdstrikeConfig: + clientID: FALCON_CLIENT_ID + clientSecret: FALCON_CLIENT_SECRET + clusterName: CLUSTER_ARN + env: CROWDSTRIKE_CLOUD + cid: CID_LOWER + dockerAPIToken: DOCKER_API_TOKEN diff --git a/lambda_functions/source/codebuild/node_sensor.yaml b/lambda_functions/source/codebuild/node_sensor.yaml new file mode 100644 index 0000000..43e436e --- /dev/null +++ b/lambda_functions/source/codebuild/node_sensor.yaml @@ -0,0 +1,14 @@ +apiVersion: falcon.crowdstrike.com/v1alpha1 +kind: FalconNodeSensor +metadata: + name: falcon-node-sensor +spec: + falcon_api: + client_id: FALCON_CLIENT_ID + client_secret: FALCON_CLIENT_SECRET + cloud_region: autodiscover + node: + backend: BACKEND + falcon: + tags: + - daemonset \ No newline at end of file diff --git a/lambda_functions/source/codebuild/node_sensor_ecr.yaml b/lambda_functions/source/codebuild/node_sensor_ecr.yaml new file mode 100644 index 0000000..3ca423b --- /dev/null +++ b/lambda_functions/source/codebuild/node_sensor_ecr.yaml @@ -0,0 +1,21 @@ +apiVersion: falcon.crowdstrike.com/v1alpha1 +kind: FalconNodeSensor +metadata: + labels: + crowdstrike.com/component: sample + crowdstrike.com/created-by: falcon-operator + crowdstrike.com/instance: falcon-node-sensor + crowdstrike.com/managed-by: kustomize + crowdstrike.com/name: falconnodesensor + crowdstrike.com/part-of: Falcon + crowdstrike.com/provider: crowdstrike + name: falcon-node-sensor +spec: + node: + backend: BACKEND + image: NODE_SENSOR_URI:NODE_SENSOR_TAG + falcon: + cid: CID + trace: none + tags: + - daemonset \ No newline at end of file diff --git a/lambda_functions/source/codebuild/setup_cluster.py b/lambda_functions/source/codebuild/setup_cluster.py new file mode 100644 index 0000000..eb74b02 --- /dev/null +++ b/lambda_functions/source/codebuild/setup_cluster.py @@ -0,0 +1,116 @@ +import os +import time +import boto3 +import botocore + +AWS_REGION = os.environ['AWS_REGION'] +PRINCIPAL_ARN = os.environ['PRINCIPAL_ARN'] +USERNAME = os.environ['USERNAME'] +CLUSTER = os.environ['CLUSTER'] +NODETYPE = os.environ['NODE_TYPE'] +ACCOUNT_ID = os.environ['ACCOUNT_ID'] +REGION = os.environ['REGION'] +SWITCH_ROLE = os.environ['SWITCH_ROLE'] +NAT_IP = os.environ['NAT_IP'] +ACCESS_POLICY = 'arn:aws:eks::aws:cluster-access-policy/AmazonEKSClusterAdminPolicy' + +def check_cluster(session): + client = session.client( + service_name='eks', + region_name=AWS_REGION + ) + + cluster_details = client.describe_cluster( + name=CLUSTER + ) + public_access_cidrs = cluster_details.get('cluster', {}).get('resourcesVpcConfig', {}).get('publicAccessCidrs') + while 'ACTIVE' not in cluster_details.get('cluster', {}).get('status'): + time.sleep(60) + cluster_details = client.describe_cluster( + name=CLUSTER + ) + else: + print(f'Cluster {CLUSTER} is now active') + return public_access_cidrs + +def setup_cluster(session, public_access_cidrs): + client = session.client( + service_name='eks', + region_name=AWS_REGION + ) + + try: + print(f'Adding access entry for {CLUSTER}') + client.create_access_entry( + clusterName=CLUSTER, + principalArn=PRINCIPAL_ARN, + username=USERNAME, + type='STANDARD' + ) + + except botocore.exceptions.ClientError as error: + if 
error.response['Error']['Code'] == "ResourceInUseException": + print(f'Skipping Access Entry for {CLUSTER}: {PRINCIPAL_ARN} already exists') + else: + print(error) + try: + print(f'Adding access policy for {CLUSTER}') + client.associate_access_policy( + clusterName=CLUSTER, + principalArn=PRINCIPAL_ARN, + policyArn=ACCESS_POLICY, + accessScope={ + 'type': 'cluster' + } + ) + except botocore.exceptions.ClientError as error: + print(error) + try: + print(f'Adding NAT IP for {CLUSTER}') + public_access_cidrs.append(f'{NAT_IP}/32') + response = session.update_cluster_config( + name=CLUSTER, + resourcesVpcConfig={ + 'publicAccessCidrs': public_access_cidrs + } + ) + update_id = response['update']['id'] + update_response = client.describe_update( + name=CLUSTER, + updateId=update_id + ) + while update_response['update']['status'] in 'InProgress': + print('waiting for update to complete...') + time.sleep(30) + update_response = client.describe_update( + name=CLUSTER, + updateId=update_id + ) + except botocore.exceptions.ClientError as error: + print(error) + print(f'Cluster: {CLUSTER} is now setup') + return + +def new_session(): + try: + sts_connection = boto3.client('sts') + credentials = sts_connection.assume_role( + RoleArn=f'arn:aws:iam::{ACCOUNT_ID}:role/{SWITCH_ROLE}', + RoleSessionName=f'crowdstrike-eks-{ACCOUNT_ID}' + ) + return boto3.session.Session( + aws_access_key_id=credentials['Credentials']['AccessKeyId'], + aws_secret_access_key=credentials['Credentials']['SecretAccessKey'], + aws_session_token=credentials['Credentials']['SessionToken'], + region_name=REGION + ) + except sts_connection.exceptions.ClientError as exc: + # Print the error and continue. + # Handle what to do with accounts that cannot be accessed + # due to assuming role errors. + print("Cannot access adjacent account: ", ACCOUNT_ID, exc) + return None + +session = new_session() +public_access_cidrs = check_cluster(session) +setup_cluster(session, public_access_cidrs) \ No newline at end of file diff --git a/lambda_functions/source/codebuild/setup_images.sh b/lambda_functions/source/codebuild/setup_images.sh new file mode 100644 index 0000000..71a4737 --- /dev/null +++ b/lambda_functions/source/codebuild/setup_images.sh @@ -0,0 +1,36 @@ +#!/bin/bash +echo "Registry is $REGISTRY" +if [ $REGISTRY == "ecr" ]; then + + echo "Getting ECR login..." + aws ecr get-login-password --region $IMAGE_REGION | docker login --username AWS --password-stdin $ACCOUNT_ID.dkr.ecr.$IMAGE_REGION.amazonaws.com + ecr_uri="$ACCOUNT_ID.dkr.ecr.$IMAGE_REGION.amazonaws.com/crowdstrike" + + echo "Pushing Node Sensor image..." + + node_sensor_repo="crowdstrike/falcon-sensor" + curl https://raw.githubusercontent.com/CrowdStrike/falcon-scripts/main/bash/containers/falcon-container-sensor-pull/falcon-container-sensor-pull.sh | bash -s -- -u $FALCON_CLIENT_ID -s $FALCON_CLIENT_SECRET -t 'falcon-sensor' -c $ecr_uri + echo "export NODE_SENSOR_URI=$ACCOUNT_ID.dkr.ecr.$IMAGE_REGION.amazonaws.com/$node_sensor_repo" >> /root/.bashrc + node_sensor_tag=$(aws ecr list-images --repository-name $node_sensor_repo --query 'imageIds[*].imageTag' --output text) + echo "export NODE_SENSOR_TAG=$node_sensor_tag" >> /root/.bashrc + + echo "Pushing KPA image..." 
+ + kpa_repo="crowdstrike/kpagent" + curl https://raw.githubusercontent.com/CrowdStrike/falcon-scripts/main/bash/containers/falcon-container-sensor-pull/falcon-container-sensor-pull.sh | bash -s -- -u $FALCON_CLIENT_ID -s $FALCON_CLIENT_SECRET -t 'kpagent' -c $ecr_uri + echo "export KPA_URI=$ACCOUNT_ID.dkr.ecr.$IMAGE_REGION.amazonaws.com/$kpa_repo" >> /root/.bashrc + kpa_tag=$(aws ecr list-images --repository-name $kpa_repo --query 'imageIds[*].imageTag' --output text) + echo "export KPA_TAG=$kpa_tag" >> /root/.bashrc + +elif [ $REGISTRY == "crowdstrike" ]; then + + echo "Getting KPA image..." + + curl https://raw.githubusercontent.com/CrowdStrike/falcon-scripts/main/bash/containers/falcon-container-sensor-pull/falcon-container-sensor-pull.sh | bash -s -- -u $FALCON_CLIENT_ID -s $FALCON_CLIENT_SECRET -t 'kpagent' + echo "export KPA_URI=registry.crowdstrike.com/kubernetes_protection/kpagent" >> /root/.bashrc + kpa_tag=$(docker images registry.crowdstrike.com/kubernetes_protection/kpagent --format "{{.Tag}}") + echo "export KPA_TAG=$kpa_tag" >> /root/.bashrc + +else + echo "Missing env variable REGISTRY" +fi \ No newline at end of file diff --git a/lambda_functions/source/codebuild/setup_manifests.sh b/lambda_functions/source/codebuild/setup_manifests.sh new file mode 100644 index 0000000..13e8693 --- /dev/null +++ b/lambda_functions/source/codebuild/setup_manifests.sh @@ -0,0 +1,28 @@ +#!/bin/bash +CID_LOWER=$(echo $CID | cut -d '-' -f 1 | tr '[:upper:]' '[:lower:]') +sed -i "s~FALCON_CLIENT_ID~$FALCON_CLIENT_ID~" kpa_config.value +sed -i "s~FALCON_CLIENT_SECRET~$FALCON_CLIENT_SECRET~" kpa_config.value +sed -i "s~KPA_URI~$KPA_URI~" kpa_config.value +sed -i "s~KPA_TAG~$KPA_TAG~" kpa_config.value +sed -i "s~CLUSTER_ARN~$CLUSTER_ARN~" kpa_config.value +sed -i "s~CROWDSTRIKE_CLOUD~$CROWDSTRIKE_CLOUD~" kpa_config.value +sed -i "s~CID_LOWER~$CID_LOWER~" kpa_config.value +sed -i "s~DOCKER_API_TOKEN~$DOCKER_API_TOKEN~" kpa_config.value + +if [ $REGISTRY == "ecr" ]; then + sed -i "s~NODE_SENSOR_URI~$NODE_SENSOR_URI~" node_sensor_ecr.yaml + sed -i "s~NODE_SENSOR_TAG~$NODE_SENSOR_TAG~" node_sensor_ecr.yaml + sed -i "s~BACKEND~$BACKEND~" node_sensor_ecr.yaml + sed -i "s~CID~$CID~" node_sensor_ecr.yaml +elif [ $REGISTRY == "crowdstrike" ]; then + sed -i "s~FALCON_CLIENT_ID~$FALCON_CLIENT_ID~" node_sensor.yaml + sed -i "s~FALCON_CLIENT_SECRET~$FALCON_CLIENT_SECRET~" node_sensor.yaml + sed -i "s~BACKEND~$BACKEND~" node_sensor.yaml +fi + +sed -i "s~REGISTRY~$REGISTRY~" sidecar_sensor.yaml +sed -i "s~REGISTRY~$REGISTRY~" falcon_admission.yaml +sed -i "s~FALCON_CLIENT_ID~$FALCON_CLIENT_ID~" sidecar_sensor.yaml +sed -i "s~FALCON_CLIENT_SECRET~$FALCON_CLIENT_SECRET~" sidecar_sensor.yaml +sed -i "s~FALCON_CLIENT_ID~$FALCON_CLIENT_ID~" falcon_admission.yaml +sed -i "s~FALCON_CLIENT_SECRET~$FALCON_CLIENT_SECRET~" falcon_admission.yaml diff --git a/lambda_functions/source/codebuild/sidecar_sensor.yaml b/lambda_functions/source/codebuild/sidecar_sensor.yaml new file mode 100644 index 0000000..4d9d113 --- /dev/null +++ b/lambda_functions/source/codebuild/sidecar_sensor.yaml @@ -0,0 +1,14 @@ +apiVersion: falcon.crowdstrike.com/v1alpha1 +kind: FalconContainer +metadata: + name: falcon-sidecar-sensor +spec: + falcon_api: + client_id: FALCON_CLIENT_ID + client_secret: FALCON_CLIENT_SECRET + cloud_region: autodiscover + registry: + type: REGISTRY + injector: + disableDefaultNamespaceInjection: true + disableDefaultPodInjection: true \ No newline at end of file diff --git 
a/lambda_functions/source/cw-helper/organizations.py b/lambda_functions/source/cw-helper/organizations.py index 2f8d143..3779e91 100644 --- a/lambda_functions/source/cw-helper/organizations.py +++ b/lambda_functions/source/cw-helper/organizations.py @@ -15,6 +15,8 @@ LOGGER = logging.getLogger(__name__) log_level: str = os.environ.get("LOG_LEVEL", "ERROR") LOGGER.setLevel(log_level) +EVENTBUS_ACCOUNT = os.environ.get('EVENTBUS_ACCOUNT') +EKS_PROTECTION = os.environ.get('EKS_PROTECTION') def get_org_id(): @@ -26,8 +28,22 @@ def get_org_id(): management_account_session = boto3.Session() org_client = management_account_session.client("organizations") response = org_client.describe_organization()["Organization"] + organization_id = response["Id"] LOGGER.debug({"API_Call": "organizations:DescribeOrganization", "API_Response": response}) - return {"OrganizationId": response["Id"]} + return organization_id + +def get_parents(): + """Get AWS Organization ID. + + Returns: + Response data for custom resource + """ + management_account_session = boto3.Session() + org_client = management_account_session.client("organizations") + response = org_client.list_parents(ChildId=EVENTBUS_ACCOUNT).get('Parents') + eventbus_account_ou = response[0].get('Id') + LOGGER.debug({"API_Call": "organizations:ListParents", "API_Response": response}) + return eventbus_account_ou def lambda_handler(event, context): """Lambda Handler. @@ -37,9 +53,14 @@ def lambda_handler(event, context): context: runtime information """ try: - data = get_org_id() - cfnresponse.send(event, context, cfnresponse.SUCCESS, data, data["OrganizationId"]) + data_dict = {} + organization_id = get_org_id() + data_dict['organization_id'] = organization_id + if EKS_PROTECTION == "true": + eventbus_account_ou = get_parents() + data_dict['eventbus_account_ou'] = eventbus_account_ou + cfnresponse.send(event, context, cfnresponse.SUCCESS, data_dict, data_dict["organization_id"]) except Exception: LOGGER.exception("Unexpected!") reason = f"See the details in CloudWatch Log Stream: '{context.log_group_name}'" - cfnresponse.send(event, context, cfnresponse.FAILED, {}, data["OrganizationId"], reason=reason) + cfnresponse.send(event, context, cfnresponse.FAILED, {}, data_dict, reason=reason) diff --git a/lambda_functions/source/eks-existing-clusters/lambda.py b/lambda_functions/source/eks-existing-clusters/lambda.py new file mode 100644 index 0000000..bc5ca85 --- /dev/null +++ b/lambda_functions/source/eks-existing-clusters/lambda.py @@ -0,0 +1,253 @@ +"""Function running CrowdStrike EKS Protection for existing EKS Clusters.""" +import json +import os +import logging +from datetime import date +import botocore +import boto3 +import requests + +logger = logging.getLogger() +logger.setLevel(logging.INFO) + +# CONSTANTS +SUCCESS = "SUCCESS" +FAILED = "FAILED" + +DATE = date.today() +PROJECT = os.environ['project_name'] +BUCKET = os.environ['artifact_bucket'] +REGION = os.environ['AWS_DEFAULT_REGION'] +SWITCH_ROLE = os.environ['lambda_switch_role'] + +def accounts(): + """Function getting AWS Account list.""" + try: + session = boto3.session.Session() + client = session.client( + service_name='organizations', + region_name=REGION + ) + response = client.list_accounts() + response_accounts = response['Accounts'] + next_token = response.get('NextToken', None) + + while next_token: + response = client.list_accounts(NextToken=next_token) + response_accounts += response['Accounts'] + next_token = response.get('NextToken', None) + + active_accounts = [a for a in 
response_accounts if a['Status'] == 'ACTIVE'] + return active_accounts + except client.exceptions.AccessDeniedException: + print("Cannot autodiscover adjacent accounts: \ + cannot list accounts within the AWS organization") + return None + +def new_session(account_id, region): + """Function establishing boto3 session.""" + try: + sts_connection = boto3.client('sts') + credentials = sts_connection.assume_role( + RoleArn=f'arn:aws:iam::{account_id}:role/{SWITCH_ROLE}', + RoleSessionName=f'crowdstrike-eks-{account_id}' + ) + return boto3.session.Session( + aws_access_key_id=credentials['Credentials']['AccessKeyId'], + aws_secret_access_key=credentials['Credentials']['SecretAccessKey'], + aws_session_token=credentials['Credentials']['SessionToken'], + region_name=region + ) + except sts_connection.exceptions.ClientError as exc: + # Print the error and continue. + # Handle what to do with accounts that cannot be accessed + # due to assuming role errors. + print("Cannot access adjacent account: ", account_id, exc) + return None + + +def regions(): + """Function getting active AWS Regions.""" + session = boto3.session.Session() + client = session.client( + service_name='ec2', + region_name=REGION + ) + + active_regions = client.describe_regions()['Regions'] + return active_regions + + +def clusters(session, region_name): + """Function getting EKS Clusters.""" + client = session.client( + service_name='eks', + region_name=region_name + ) + + response = client.list_clusters(maxResults=100) + eks_clusters = response['clusters'] + next_token = response['NextToken'] if 'NextToken' in response else None + + while next_token: + response = client.list_clusters(maxResults=100, NextToken=next_token) + eks_clusters += response['clusters'] + next_token = response['NextToken'] if 'NextToken' in response else None + + return eks_clusters + +def describe_cluster(session, region_name, cluster_name): + """Function checking EKS Cluster.""" + client = session.client( + service_name='eks', + region_name=region_name + ) + + response = client.describe_cluster(name=cluster_name) + cluster_arn = response.get('cluster', {}).get('arn') + auth_mode = response.get('cluster', {}).get('accessConfig', \ + {}).get('authenticationMode') + public_endpoint = response.get('cluster', {}).get('resourcesVpcConfig', \ + {}).get('endpointPublicAccess') + + return cluster_arn, auth_mode, public_endpoint + +def check_fargate(session, region_name, cluster_name): + """Function checking for Fargate.""" + client = session.client( + service_name='eks', + region_name=region_name + ) + + try: + response = client.list_fargate_profiles( + clusterName=cluster_name, + maxResults=10 + ) + if response['fargateProfileNames'] not in []: + logger.info('No fargate profiles found, setting node_type to nodegroup...') + node_type = 'nodegroup' + else: + node_type = 'fargate' + return node_type + except botocore.exceptions.ClientError as error: + logger.error(error) + return None + +def start_build(cluster_name, cluster_arn, node_type, account_id, region_name): + """Function running CodeBuild for EKS Protection.""" + try: + session = boto3.session.Session() + client = session.client( + service_name='codebuild', + region_name=REGION + ) + build = client.start_build( + projectName=PROJECT, + artifactsOverride={ + 'type': 'S3', + 'location': f'{BUCKET}', + 'path': 'BuildResults', + 'name': f'{cluster_name}-{DATE}', + 'packaging': 'ZIP' + }, + environmentVariablesOverride=[ + { + 'name': 'CLUSTER', + 'value': f'{cluster_name}', + 'type': 'PLAINTEXT' + }, + { + 
'name': 'NODE_TYPE', + 'value': f'{node_type}', + 'type': 'PLAINTEXT' + }, + { + 'name': 'CLUSTER_ARN', + 'value': f'{cluster_arn}', + 'type': 'PLAINTEXT' + }, + { + 'name': 'ACCOUNT_ID', + 'value': f'{account_id}', + 'type': 'PLAINTEXT' + }, + { + 'name': 'REGION', + 'value': f'{region_name}', + 'type': 'PLAINTEXT' + } + ] + ) + build_id = build.get('build', {}).get('id') + logger.info('Started build %s, buildId %s' % (PROJECT, build_id)) + except botocore.exceptions.ClientError as error: + logger.error(error) + +def cfnresponse_send(event, response_status, response_data, physical_resource_id=None): + """Function sending response to CloudFormation.""" + response_url = event['ResponseURL'] + print(response_url) + response_body = {} + response_body['Status'] = response_status + response_body['Reason'] = 'See the details in CloudWatch Log Stream: ' + response_body['PhysicalResourceId'] = physical_resource_id + response_body['StackId'] = event['StackId'] + response_body['RequestId'] = event['RequestId'] + response_body['LogicalResourceId'] = event['LogicalResourceId'] + response_body['Data'] = response_data + json_response_body = json.dumps(response_body) + print("Response body:\n" + json_response_body) + headers = { + 'content-type': '', + 'content-length': str(len(json_response_body)) + } + try: + response = requests.put(response_url, + data=json_response_body, + headers=headers, + timeout=5) + print("Status code: " + response.reason) + except Exception as e: + print("send(..) failed executing requests.put(..): " + str(e)) + +def lambda_handler(event, context): + """Function handler.""" + logger.info('Got event %s' % event) + logger.info('Context %s' % context) + logger.info('Gathering Event Details...') + response_d = {} + if event["RequestType"] in ["Create"]: + try: + for account in accounts(): + account_id = account['Id'] + for region in regions(): + region_name = region["RegionName"] + session = new_session(account_id, region_name) + if session: + for cluster_name in clusters(session, region_name): + + cluster_arn, auth_mode, public_endpoint = describe_cluster(session, + region_name, + cluster_name) + if public_endpoint and 'API' in auth_mode: + node_type = check_fargate(session, region_name, cluster_name) + if node_type: + start_build(cluster_name, + cluster_arn, + node_type, + account_id, + region_name) + else: + logger.info('Access denied for cluster %s. 
\ + Please verify that API Access and Public \ + Endpoint are enabled' % cluster_name) + response_d['status'] = "success" + cfnresponse_send(event, SUCCESS, response_d, "CustomResourcePhysicalID") + except botocore.exceptions.ClientError as error: + logger.error(error) + response_d['error'] = error + cfnresponse_send(event, SUCCESS, response_d, "CustomResourcePhysicalID") + else: + response = {"Status": "Complete"} + cfnresponse_send(event, "SUCCESS", response, "CustomResourcePhysicalID") diff --git a/lambda_functions/source/eks-existing-clusters/requirements.txt b/lambda_functions/source/eks-existing-clusters/requirements.txt new file mode 100644 index 0000000..c3d4b5d --- /dev/null +++ b/lambda_functions/source/eks-existing-clusters/requirements.txt @@ -0,0 +1,2 @@ +urllib3<2 +requests==2.31.0 \ No newline at end of file diff --git a/lambda_functions/source/eks-new-clusters/lambda.py b/lambda_functions/source/eks-new-clusters/lambda.py new file mode 100644 index 0000000..8235735 --- /dev/null +++ b/lambda_functions/source/eks-new-clusters/lambda.py @@ -0,0 +1,121 @@ +"""Function running CrowdStrike EKS Protection for new EKS Clusters.""" +import os +import logging +from datetime import date +import botocore +import boto3 + +logger = logging.getLogger() +logger.setLevel(logging.INFO) + +DATE = date.today() +PROJECT = os.environ['project_name'] +BUCKET = os.environ['artifact_bucket'] +SWITCH_ROLE = os.environ['lambda_switch_role'] + +def new_session(account_id, region_name): + """Function establishing boto3 session.""" + try: + sts_connection = boto3.client('sts') + credentials = sts_connection.assume_role( + RoleArn=f'arn:aws:iam::{account_id}:role/{SWITCH_ROLE}', + RoleSessionName=account_id + ) + return boto3.session.Session( + aws_access_key_id=credentials['Credentials']['AccessKeyId'], + aws_secret_access_key=credentials['Credentials']['SecretAccessKey'], + aws_session_token=credentials['Credentials']['SessionToken'], + region_name=region_name + ) + except sts_connection.exceptions.ClientError as exc: + # Print the error and continue. + # Handle what to do with accounts that cannot be accessed + # due to assuming role errors. 
+ print("Cannot access adjacent account: ", account_id, exc) + return None + +def start_build(region, cluster_name, cluster_arn, node_type, account_id, region_name): + """Function running CodeBuild for EKS Protection.""" + try: + session = boto3.session.Session() + client = session.client( + service_name='codebuild', + region_name=region + ) + build = client.start_build( + projectName=PROJECT, + artifactsOverride={ + 'type': 'S3', + 'location': f'{BUCKET}', + 'path': 'BuildResults', + 'name': f'{cluster_name}-{DATE}', + 'packaging': 'ZIP' + }, + environmentVariablesOverride=[ + { + 'name': 'CLUSTER', + 'value': f'{cluster_name}', + 'type': 'PLAINTEXT' + }, + { + 'name': 'NODE_TYPE', + 'value': f'{node_type}', + 'type': 'PLAINTEXT' + }, + { + 'name': 'CLUSTER_ARN', + 'value': f'{cluster_arn}', + 'type': 'PLAINTEXT' + }, + { + 'name': 'ACCOUNT_ID', + 'value': f'{account_id}', + 'type': 'PLAINTEXT' + }, + { + 'name': 'REGION', + 'value': f'{region_name}', + 'type': 'PLAINTEXT' + } + ] + ) + build_id = build.get('build', {}).get('id') + logger.info('Started build %s, buildId %s' % (PROJECT, build_id)) + except botocore.exceptions.ClientError as error: + logger.error(error) + +def lambda_handler(event,context): + """Function handler.""" + logger.info('Got event %s' % event) + logger.info('Context %s' % context) + logger.info('Gathering Event Details...') + region_name = event['region'] + account_id = event['detail']['userIdentity']['accountId'] + cluster_name = event['detail']['requestParameters']['name'] + event_name = event['detail']['eventName'] + if 'CreateCluster' in event_name: + node_type = 'nodegroup' + elif 'CreateFargateProfile' in event_name: + node_type = 'fargate' + else: + node_type = 'nodegroup' + logger.info('Checking EKS Cluster for API Access Config..') + try: + session = new_session(account_id, region_name) + client = session.client( + service_name='eks' + ) + cluster_details = client.describe_cluster( + name=cluster_name + ) + cluster_arn = cluster_details.get('cluster', {}).get('arn') + auth_mode = cluster_details.get('cluster', {}).get('accessConfig',\ + {}).get('authenticationMode') + public_endpoint = cluster_details.get('cluster', {}).get('resourcesVpcConfig',\ + {}).get('endpointPublicAccess') + if public_endpoint and 'API' in auth_mode: + start_build(region_name, cluster_name, cluster_arn, node_type, account_id, region_name) + else: + logger.info('API Access not enabled on cluster %s' % cluster_name) + except botocore.exceptions.ClientError as error: + logger.error(error) diff --git a/lambda_functions/source/register-organization/lambda.py b/lambda_functions/source/register-organization/lambda.py index 4cd0c8d..d7f58fd 100644 --- a/lambda_functions/source/register-organization/lambda.py +++ b/lambda_functions/source/register-organization/lambda.py @@ -1,11 +1,13 @@ +"""Function to register AWS Organization with CrowdStrike""" +# pylint: disable=line-too-long import json import logging import os import sys +import base64 import subprocess import boto3 import requests -import base64 from botocore.exceptions import ClientError # pip install falconpy package to /tmp/ and add to path @@ -21,8 +23,8 @@ FAILED = "FAILED" VERSION = "1.0.0" -name = "crowdstrike-cloud-abi" -useragent = ("%s/%s" % (name, VERSION)) +NAME = "crowdstrike-cloud-abi" +USERAGENT = ("%s/%s" % (NAME, VERSION)) SECRET_STORE_NAME = os.environ['secret_name'] SECRET_STORE_REGION = os.environ['secret_region'] @@ -34,6 +36,7 @@ FALCON_ACCOUNT_TYPE = os.environ['falcon_account_type'] def get_secret(): + 
"""Function to get secret""" session = boto3.session.Session() client = session.client( service_name='secretsmanager', @@ -44,33 +47,25 @@ def get_secret(): SecretId=SECRET_STORE_NAME ) except ClientError as e: - if e.response['Error']['Code'] == 'DecryptionFailureException': - raise e - elif e.response['Error']['Code'] == 'InternalServiceErrorException': - raise e - elif e.response['Error']['Code'] == 'InvalidParameterException': - raise e - elif e.response['Error']['Code'] == 'InvalidRequestException': - raise e - elif e.response['Error']['Code'] == 'ResourceNotFoundException': - raise e + raise e + if 'SecretString' in get_secret_value_response: + secret = get_secret_value_response['SecretString'] else: - if 'SecretString' in get_secret_value_response: - secret = get_secret_value_response['SecretString'] - else: - secret = base64.b64decode(get_secret_value_response['SecretBinary']) - return secret + secret = base64.b64decode(get_secret_value_response['SecretBinary']) + return secret def get_management_id(): - ORG = boto3.client('organizations') + """Function to get Organization Id""" + org = boto3.client('organizations') try: - orgIDstr = ORG.list_roots()['Roots'][0]['Arn'].rsplit('/')[1] - return orgIDstr - except Exception as e: + org_id = org.list_roots()['Roots'][0]['Arn'].rsplit('/')[1] + return org_id + except Exception: logger.error('This stack runs only on the management of the AWS Organization') return False def get_active_regions(): + """Function to get active Regions""" session = boto3.session.Session() client = session.client( service_name='ec2', @@ -119,73 +114,76 @@ def get_active_regions(): return my_regions, comm_gov_eb_regions, ssm_regions except Exception as e: return e - -def cfnresponse_send(event, context, responseStatus, responseData, physicalResourceId=None, noEcho=False): - responseUrl = event['ResponseURL'] - print(responseUrl) - responseBody = {} - responseBody['Status'] = responseStatus - responseBody['Reason'] = 'See the details in CloudWatch Log Stream: ' - responseBody['PhysicalResourceId'] = physicalResourceId - responseBody['StackId'] = event['StackId'] - responseBody['RequestId'] = event['RequestId'] - responseBody['LogicalResourceId'] = event['LogicalResourceId'] - responseBody['Data'] = responseData - json_responseBody = json.dumps(responseBody) - print("Response body:\n" + json_responseBody) + +def cfnresponse_send(event, response_status, response_data, physical_resource_id=None): + """Function sending response to CloudFormation.""" + response_url = event['ResponseURL'] + print(response_url) + response_body = {} + response_body['Status'] = response_status + response_body['Reason'] = 'See the details in CloudWatch Log Stream: ' + response_body['PhysicalResourceId'] = physical_resource_id + response_body['StackId'] = event['StackId'] + response_body['RequestId'] = event['RequestId'] + response_body['LogicalResourceId'] = event['LogicalResourceId'] + response_body['Data'] = response_data + json_response_body = json.dumps(response_body) + print("Response body:\n" + json_response_body) headers = { 'content-type': '', - 'content-length': str(len(json_responseBody)) + 'content-length': str(len(json_response_body)) } try: - response = requests.put(responseUrl, - data=json_responseBody, - headers=headers) + response = requests.put(response_url, + data=json_response_body, + headers=headers, + timeout=5) print("Status code: " + response.reason) except Exception as e: print("send(..) 
failed executing requests.put(..): " + str(e)) def lambda_handler(event, context): - logger.info('Got event {}'.format(event)) - logger.info('Context {}'.format(context)) + """Function handler""" + logger.info('Got event %s' % event) + logger.info('Context %s' % context) aws_account_id = context.invoked_function_arn.split(":")[4] regions, comm_gov_eb_regions, ssm_regions = get_active_regions() - OrgId = get_management_id() + org_id = get_management_id() try: secret_str = get_secret() if secret_str: secrets_dict = json.loads(secret_str) - FalconClientId = secrets_dict['FalconClientId'] - FalconSecret = secrets_dict['FalconSecret'] - falcon = CSPMRegistration(client_id=FalconClientId, - client_secret=FalconSecret, + falcon_client_id = secrets_dict['FalconClientId'] + falcon_secret = secrets_dict['FalconSecret'] + falcon = CSPMRegistration(client_id=falcon_client_id, + client_secret=falcon_secret, base_url=CS_CLOUD, - user_agent=useragent + user_agent=USERAGENT ) if event['RequestType'] in ['Create']: - logger.info('Event = {}'.format(event)) + logger.info('Event = %s' % event) if EXISTING_CLOUDTRAIL: response = falcon.create_aws_account(account_id=aws_account_id, - organization_id=OrgId, + organization_id=org_id, behavior_assessment_enabled=True, sensor_management_enabled=True, use_existing_cloudtrail=EXISTING_CLOUDTRAIL, - user_agent=useragent, + user_agent=USERAGENT, is_master=True, account_type=AWS_ACCOUNT_TYPE - ) + ) else: response = falcon.create_aws_account(account_id=aws_account_id, - organization_id=OrgId, + organization_id=org_id, behavior_assessment_enabled=True, sensor_management_enabled=True, use_existing_cloudtrail=EXISTING_CLOUDTRAIL, cloudtrail_region=AWS_REGION, - user_agent=useragent, + user_agent=USERAGENT, is_master=True, account_type=AWS_ACCOUNT_TYPE ) - logger.info('Response: {}'.format(response)) + logger.info('Response: %s' % response) if response['status_code'] == 201: cs_account = response['body']['resources'][0]['intermediate_role_arn'].rsplit('::')[1] response_d = { @@ -210,13 +208,13 @@ def lambda_handler(event, context): response_d['comm_gov_eb_regions'] = comm_gov_eb_regions response_d['my_regions'] = regions response_d['ssm_regions'] = ssm_regions - cfnresponse_send(event, context, SUCCESS, response_d, "CustomResourcePhysicalID") + cfnresponse_send(event, SUCCESS, response_d, "CustomResourcePhysicalID") elif 'already exists' in response['body']['errors'][0]['message']: logger.info(response['body']['errors'][0]['message']) logger.info('Getting existing registration data...') - response = falcon.get_aws_account(organization_ids=OrgId, - user_agent=useragent) - logger.info('Existing Registration Response: {}'.format(response)) + response = falcon.get_aws_account(organization_ids=org_id, + user_agent=USERAGENT) + logger.info('Existing Registration Response: %s' % response) cs_account = response['body']['resources'][0]['intermediate_role_arn'].rsplit('::')[1] response_d = { "cs_account_id": cs_account.rsplit(':')[0], @@ -240,25 +238,25 @@ def lambda_handler(event, context): response_d['comm_gov_eb_regions'] = comm_gov_eb_regions response_d['my_regions'] = regions response_d['ssm_regions'] = ssm_regions - cfnresponse_send(event, context, SUCCESS, response_d, "CustomResourcePhysicalID") + cfnresponse_send(event, SUCCESS, response_d, "CustomResourcePhysicalID") else: error = response['body']['errors'][0]['message'] - logger.info('Account Registration Failed with reason....{}'.format(error)) + logger.info('Account Registration Failed with reason....%s' % error) 
response_d = { "reason": response['body']['errors'][0]['message'] } - cfnresponse_send(event, context, FAILED, response_d, "CustomResourcePhysicalID") + cfnresponse_send(event, FAILED, response_d, "CustomResourcePhysicalID") elif event['RequestType'] in ['Update']: response_d = {} - logger.info('Event = ' + event['RequestType']) - cfnresponse_send(event, context, SUCCESS, response_d, "CustomResourcePhysicalID") + logger.info('Event = %s' % event['RequestType']) + cfnresponse_send(event, SUCCESS, response_d, "CustomResourcePhysicalID") elif event['RequestType'] in ['Delete']: - logger.info('Event = ' + event['RequestType']) - response = falcon.delete_aws_account(organization_ids=OrgId, - user_agent=useragent + logger.info('Event = %s' % event['RequestType']) + response = falcon.delete_aws_account(organization_ids=org_id, + user_agent=USERAGENT ) - cfnresponse_send(event, context, 'SUCCESS', response['body'], "CustomResourcePhysicalID") + cfnresponse_send(event, 'SUCCESS', response['body'], "CustomResourcePhysicalID") except Exception as err: # noqa: E722 # We can't communicate with the endpoint - logger.info('Registration Failed {}'.format(err)) - cfnresponse_send(event, context, FAILED, err, "CustomResourcePhysicalID") \ No newline at end of file + logger.info('Registration Failed %s' % err) + cfnresponse_send(event, FAILED, err, "CustomResourcePhysicalID") diff --git a/templates/crowdstrike_init_stack.yaml b/templates/crowdstrike_init_stack.yaml index f9d1b59..058f582 100644 --- a/templates/crowdstrike_init_stack.yaml +++ b/templates/crowdstrike_init_stack.yaml @@ -71,6 +71,22 @@ Metadata: - pGovernedRegions - pSecurityAccountId - pLogArchiveAccountId + - Label: + default: EKS Protection + Parameters: + - EKSProtection + - FalconCID + - DockerAPIToken + - EventBusAccount + - EventBusName + - EventBridgeRoleName + - EKSExecutionRoleName + - CodeBuildRoleName + - CodeBuildProjectName + - KubernetesUserName + - Registry + - Backend + - EnableKAC ParameterLabels: # Account Type @@ -161,6 +177,34 @@ Metadata: pLogArchiveAccountId: default: LogArchive Account Id + # EKS Protection + EKSProtection: + default: Enable EKS Protection + FalconCID: + default: Falcon CID + DockerAPIToken: + default: Falcon Docker API Token + EventBusAccount: + default: EKS Protection Account + EventBusName: + default: Name of EventBus + EventBridgeRoleName: + default: Name of EventBridge Role + EKSExecutionRoleName: + default: Name of Execution Role + CodeBuildProjectName: + default: CodeBuild Project Name + CodeBuildRoleName: + default: CodeBuild Role Name + KubernetesUserName: + default: Kubernetes User Name + Registry: + default: Registry + Backend: + default: Backend + EnableKAC: + default: Enable Kubernetes Admission Controller + Parameters: # Account Type FalconAccountType: @@ -368,6 +412,70 @@ Parameters: Description: SRA version to tag Type: String + # EKS Protection + EKSProtection: + Type: String + AllowedValues: ['true', 'false'] + Description: Enable CrowdStrike EKS Protection to automatically deploy Falcon Sensor against EKS Clusters. + Default: 'false' + FalconCID: + Type: String + Description: Your Falcon CID with checksum (eg. ********************************-**) + Default: "FalconCID" + DockerAPIToken: + Type: String + NoEcho: true + Description: Your Falcon Docker API Token + Default: "DockerAPIToken" + EventBusAccount: + Description: The account to centralize EKS Protection resources. This account must be the Organization Management Account or a Delegated Administrator. 
+ Type: String + Default: "111111111111" + EventBusName: + Type: String + Description: Name of the centralized EventBus + Default: crowdstrike-eks-eventbus + EventBridgeRoleName: + Type: String + Description: Name of the EventBridge IAM role + Default: crowdstrike-eks-eventbridge-role + EKSExecutionRoleName: + Type: String + Description: Name of the Target Execution IAM role + Default: crowdstrike-eks-execution-role + CodeBuildProjectName: + Type: String + Description: Name of the CodeBuild Project + Default: crowdstrike-eks-codebuild + CodeBuildRoleName: + Type: String + Description: Name of the CodeBuild IAM role + Default: crowdstrike-eks-codebuild-role + KubernetesUserName: + Type: String + Description: Name of the Kubernetes UserName + Default: crowdstrike-eks + Registry: + Type: String + Description: Source Falcon Image from CrowdStrike or mirror to ECR + AllowedValues: + - 'crowdstrike' + - 'ecr' + Default: crowdstrike + Backend: + Type: String + Description: kernel or bpf for Daemonset Sensor + AllowedValues: + - 'kernel' + - 'bpf' + Default: kernel + EnableKAC: + Type: String + Description: Deploy Kubernetes Admission Controller (KAC). For more info see https://falcon.crowdstrike.com/documentation/page/aa4fccee/container-security#s41cbec3 + AllowedValues: + - true + - false + Default: true Mappings: CloudMap: @@ -395,6 +503,7 @@ Conditions: NotDelegatedAdminCommIOA: !And [ !Equals [ !Ref 'DelegatedAdmin', false ], !Equals [ !Ref 'AWSAccountType', commercial ], !Equals [ !Ref 'FalconAccountType', commercial ], !Equals [!Ref 'EnableIOA', true ]] NotDelegatedAdminGovIOA: !And [ !Equals [ !Ref 'DelegatedAdmin', false ], !Equals [ !Ref 'AWSAccountType', govcloud ], !Equals [ !Ref 'FalconAccountType', govcloud ], !Equals [!Ref 'EnableIOA', true ]] NotDelegatedAdminCommGovIOA: !And [!Equals [ !Ref 'DelegatedAdmin', false ], !Equals [ !Ref 'AWSAccountType', commercial ], !Equals [ !Ref 'FalconAccountType', govcloud ], !Equals [!Ref 'EnableIOA', true ]] + EnableEKSProtection: !Equals [ !Ref 'EKSProtection', true ] Resources: # Optional Default Organization CloudTrail @@ -1085,7 +1194,7 @@ Resources: - !Sub arn:${AWS::Partition}:s3:::${StagingS3Bucket}/* Condition: StringNotEquals: - aws:PrincipalOrgID: !GetAtt OrgIdLambdaCustomResource.OrganizationId + aws:PrincipalOrgID: !GetAtt OrgIdLambdaCustomResource.organization_id - Sid: SecureTransport Effect: Deny @@ -1110,6 +1219,9 @@ Resources: - lambda_functions/packages/register-organization/lambda.zip - lambda_functions/packages/cw-helper/lambda.zip - templates/crowdstrike_init_stack.yaml + - lambda_functions/packages/eks-existing-clusters/lambda.zip + - lambda_functions/packages/eks-new-clusters/lambda.zip + - lambda_functions/packages/codebuild/lambda.zip CopyZipsRole: Type: AWS::IAM::Role @@ -1276,6 +1388,8 @@ Resources: Environment: Variables: LOG_LEVEL: "INFO" + EVENTBUS_ACCOUNT: !Ref EventBusAccount + EKS_PROTECTION: !Ref EKSProtection Code: S3Bucket: !Ref StagingS3Bucket S3Key: !Sub ${SourceS3BucketNamePrefix}/lambda_functions/packages/cw-helper/lambda.zip @@ -1312,7 +1426,9 @@ Resources: Statement: - Sid: OrganizationRead Effect: Allow - Action: organizations:DescribeOrganization + Action: + - organizations:DescribeOrganization + - organizations:ListParents Resource: '*' - PolicyName: CloudWatchLogGroup PolicyDocument: @@ -1326,6 +1442,171 @@ Resources: - logs:PutLogEvents Resource: !Sub arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/${OrgIdLambdaFunctionName}:log-stream:* + # EKS Protection + 
RootRolesStackSet: + Condition: EnableEKSProtection + Type: AWS::CloudFormation::StackSet + Properties: + StackSetName: crowdstrike-eks-protection-root-roles + Capabilities: + - CAPABILITY_NAMED_IAM + AdministrationRoleARN: !GetAtt StackSetAdministrationRole.Arn + ExecutionRoleName: !Ref StackSetExecRole + Parameters: + - ParameterKey: CodeBuildProjectName + ParameterValue: !Ref CodeBuildProjectName + - ParameterKey: CodeBuildRoleName + ParameterValue: !Ref CodeBuildRoleName + - ParameterKey: EventBridgeRoleName + ParameterValue: !Ref EventBridgeRoleName + - ParameterKey: EKSExecutionRoleName + ParameterValue: !Ref EKSExecutionRoleName + - ParameterKey: StagingS3Bucket + ParameterValue: !Ref StagingS3Bucket + - ParameterKey: EventBusName + ParameterValue: !Ref EventBusName + - ParameterKey: PermissionsBoundary + ParameterValue: !Ref PermissionsBoundary + PermissionModel: SELF_MANAGED + StackInstancesGroup: + - DeploymentTargets: + AccountFilterType: NONE + Accounts: + - !Ref EventBusAccount + Regions: + - !Ref AWS::Region + TemplateURL: !Sub https://${SourceS3BucketName}.s3.${S3BucketRegion}.amazonaws.com/${SourceS3BucketNamePrefix}/templates/eks-root-roles.yml + + EKSTargetRolesStackSet: + Condition: EnableEKSProtection + DependsOn: RootRolesStackSet + Type: AWS::CloudFormation::StackSet + Properties: + StackSetName: crowdstrike-eks-protection-target-roles + Capabilities: + - CAPABILITY_NAMED_IAM + Parameters: + - ParameterKey: EKSExecutionRoleName + ParameterValue: !Ref EKSExecutionRoleName + - ParameterKey: EventBridgeRoleName + ParameterValue: !Ref EventBridgeRoleName + - ParameterKey: EventBusAccount + ParameterValue: !Ref EventBusAccount + - ParameterKey: EventBusName + ParameterValue: !Ref EventBusName + - ParameterKey: CodeBuildRoleName + ParameterValue: !Ref CodeBuildRoleName + - ParameterKey: PermissionsBoundary + ParameterValue: !Ref PermissionsBoundary + PermissionModel: SERVICE_MANAGED + AutoDeployment: + Enabled: true + RetainStacksOnAccountRemoval: false + OperationPreferences: + MaxConcurrentPercentage: 100 + FailureTolerancePercentage: 50 + RegionConcurrencyType: PARALLEL + StackInstancesGroup: + - DeploymentTargets: + AccountFilterType: NONE + OrganizationalUnitIds: !Ref ProvisionOU + Regions: + - !Ref AWS::Region + TemplateURL: !Sub https://${SourceS3BucketName}.s3.${S3BucketRegion}.amazonaws.com/${SourceS3BucketNamePrefix}/templates/eks-target-roles-stackset.yml + + RootEKSProtectionStackSet: + Condition: EnableEKSProtection + DependsOn: EKSTargetRolesStackSet + Type: AWS::CloudFormation::StackSet + Properties: + StackSetName: crowdstrike-eks-protection-root-protection + Capabilities: + - CAPABILITY_NAMED_IAM + AdministrationRoleARN: !GetAtt StackSetAdministrationRole.Arn + ExecutionRoleName: !Ref StackSetExecRole + Parameters: + - ParameterKey: StagingS3Bucket + ParameterValue: !Ref StagingS3Bucket + - ParameterKey: Backend + ParameterValue: !Ref Backend + - ParameterKey: Registry + ParameterValue: !Ref Registry + - ParameterKey: EnableKAC + ParameterValue: !Ref EnableKAC + - ParameterKey: CrowdStrikeCloud + ParameterValue: !Ref CSCloud + - ParameterKey: EventBusName + ParameterValue: !Ref EventBusName + - ParameterKey: OrganizationId + ParameterValue: !GetAtt OrgIdLambdaCustomResource.organization_id + - ParameterKey: FalconClientId + ParameterValue: !Ref FalconClientID + - ParameterKey: FalconClientSecret + ParameterValue: !Ref FalconSecret + - ParameterKey: KubernetesUserName + ParameterValue: !Ref KubernetesUserName + - ParameterKey: FalconCID + ParameterValue: 
!Ref FalconCID + - ParameterKey: DockerAPIToken + ParameterValue: !Ref DockerAPIToken + - ParameterKey: EKSExecutionRoleName + ParameterValue: !Ref EKSExecutionRoleName + - ParameterKey: CodeBuildProjectName + ParameterValue: !Ref CodeBuildProjectName + - ParameterKey: CodeBuildRoleName + ParameterValue: !Ref CodeBuildRoleName + - ParameterKey: VpcCIDR + ParameterValue: '10.192.0.0/16' + - ParameterKey: PublicSubnet1CIDR + ParameterValue: '10.192.10.0/24' + - ParameterKey: PublicSubnet2CIDR + ParameterValue: '10.192.11.0/24' + - ParameterKey: PrivateSubnet1CIDR + ParameterValue: '10.192.20.0/24' + - ParameterKey: PrivateSubnet2CIDR + ParameterValue: '10.192.21.0/24' + PermissionModel: SELF_MANAGED + StackInstancesGroup: + - DeploymentTargets: + AccountFilterType: NONE + Accounts: + - !Ref EventBusAccount + OrganizationalUnitIds: + - !GetAtt OrgIdLambdaCustomResource.eventbus_account_ou + Regions: + - !Ref AWS::Region + TemplateURL: !Sub https://${SourceS3BucketName}.s3.${S3BucketRegion}.amazonaws.com/${SourceS3BucketNamePrefix}/templates/eks-protection-stack.yml + + EKSEventBridgeStackSet: + Condition: EnableEKSProtection + DependsOn: RootEKSProtectionStackSet + Type: AWS::CloudFormation::StackSet + Properties: + StackSetName: crowdstrike-eks-protection-eventbridge + Parameters: + - ParameterKey: EventBusName + ParameterValue: !Ref EventBusName + - ParameterKey: EventBusAccount + ParameterValue: !Ref EventBusAccount + - ParameterKey: EventBusRegion + ParameterValue: !Ref AWS::Region + - ParameterKey: EventBridgeRoleName + ParameterValue: !Ref EventBridgeRoleName + PermissionModel: SERVICE_MANAGED + AutoDeployment: + Enabled: true + RetainStacksOnAccountRemoval: true + OperationPreferences: + MaxConcurrentPercentage: 100 + FailureTolerancePercentage: 50 + RegionConcurrencyType: PARALLEL + StackInstancesGroup: + - DeploymentTargets: + AccountFilterType: NONE + OrganizationalUnitIds: !Ref ProvisionOU + Regions: !GetAtt TriggerRegisterAccountLambda.my_regions + TemplateURL: !Sub https://${SourceS3BucketName}.s3.${S3BucketRegion}.amazonaws.com/${SourceS3BucketNamePrefix}/templates/eks-eventbridge-stackset.yml + Outputs: CSBucket: Condition: CreateIOATrail diff --git a/templates/eks-eventbridge-stackset.yml b/templates/eks-eventbridge-stackset.yml new file mode 100644 index 0000000..12f63a4 --- /dev/null +++ b/templates/eks-eventbridge-stackset.yml @@ -0,0 +1,35 @@ +--- +AWSTemplateFormatVersion: '2010-09-09' + +Parameters: + EventBusRegion: + Type: String + EventBusAccount: + Type: String + EventBusName: + Type: String + EventBridgeRoleName: + Type: String + +Resources: + EKSRule: + Type: AWS::Events::Rule + Properties: + Description: "Invoke CrowdStrike EKS Lambda when clusters or Fargate profiles are created." 
+ EventPattern: + source: + - "aws.eks" + detail-type: + - "AWS API Call via CloudTrail" + detail: + eventSource: + - "eks.amazonaws.com" + eventName: + - "CreateCluster" + - "CreateFargateProfile" + State: "ENABLED" + Name: crowdstrike-eks-rule + Targets: + - Arn: !Sub arn:${AWS::Partition}:events:${EventBusRegion}:${EventBusAccount}:event-bus/${EventBusName} + Id: "CrowdStrikeEKSEventBus" + RoleArn: !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/${EventBridgeRoleName} diff --git a/templates/eks-protection-stack.yml b/templates/eks-protection-stack.yml new file mode 100644 index 0000000..ee348b7 --- /dev/null +++ b/templates/eks-protection-stack.yml @@ -0,0 +1,543 @@ +--- +AWSTemplateFormatVersion: '2010-09-09' + +Parameters: + StagingS3Bucket: + Type: String + Backend: + Type: String + Description: kernel or bpf for Daemonset Sensor + AllowedValues: + - 'kernel' + - 'bpf' + Default: 'kernel' + Registry: + Type: String + Description: Source Falcon Image from CrowdStrike or mirror to ECR + AllowedValues: + - 'crowdstrike' + - 'ecr' + Default: 'crowdstrike' + EnableKAC: + Type: String + Description: Deploy Kubernetes Admission Controller (KAC). For more info see https://falcon.crowdstrike.com/documentation/page/aa4fccee/container-security#s41cbec3 + AllowedValues: + - 'true' + - 'false' + Default: 'true' + CrowdStrikeCloud: + Type: String + Description: Cloud for your Falcon CID (eg. us-1, us-2 or eu-1) + AllowedValues: + - 'us-1' + - 'us-2' + - 'eu-1' + Default: 'us-1' + EventBusName: + Type: String + Default: "crowdstrike-eks-eventbus" + OrganizationId: + Type: String + FalconClientId: + Type: String + NoEcho: true + FalconClientSecret: + Type: String + NoEcho: true + KubernetesUserName: + Type: String + Default: crowdstrike-eks + FalconCID: + Type: String + DockerAPIToken: + Type: String + NoEcho: true + EKSExecutionRoleName: + Type: String + Default: crowdstrike-eks-execution-role + CodeBuildProjectName: + Type: String + Default: crowdstrike-eks-codebuild + CodeBuildRoleName: + Type: String + Default: crowdstrike-eks-codebuild-role + EnvironmentName: + Description: An environment name that is prefixed to resource names + Type: String + Default: crowdstrike-eks + VpcCIDR: + Description: Please enter the IP range (CIDR notation) for this VPC + Type: String + Default: 10.192.0.0/16 + PublicSubnet1CIDR: + Description: Please enter the IP range (CIDR notation) for the public subnet in the first Availability Zone + Type: String + Default: 10.192.10.0/24 + PublicSubnet2CIDR: + Description: Please enter the IP range (CIDR notation) for the public subnet in the second Availability Zone + Type: String + Default: 10.192.11.0/24 + PrivateSubnet1CIDR: + Description: Please enter the IP range (CIDR notation) for the private subnet in the first Availability Zone + Type: String + Default: 10.192.20.0/24 + PrivateSubnet2CIDR: + Description: Please enter the IP range (CIDR notation) for the private subnet in the second Availability Zone + Type: String + Default: 10.192.21.0/24 + SourceS3BucketNamePrefix: + Description: + Staging S3 bucket name prefix for the artifacts relevant to the solutions. (e.g., lambda zips, CloudFormation templates). The account + and region are added to the prefix --. Example = staging-123456789012-us-east-1. 
+ Type: String + +Conditions: + UseECR: !Equals [ !Ref 'Registry', 'ecr' ] + +Resources: + # EventBridge Resources to Invoke Lambda + EKSEventBus: + Type: AWS::Events::EventBus + Properties: + Name: !Ref EventBusName + Policy: !Sub | + { + "Version": "2012-10-17", + "Statement": [{ + "Sid": "AllowAllAccountsFromOrganizationToPutEvents", + "Effect": "Allow", + "Principal": "*", + "Action": "events:PutEvents", + "Resource": "arn:aws:events:${AWS::Region}:${AWS::AccountId}:event-bus/${EventBusName}", + "Condition": { + "StringEquals": { + "aws:PrincipalOrgID": "${OrganizationId}" + } + } + }] + } + EKSRule: + Type: AWS::Events::Rule + Properties: + Description: "EventRule" + EventBusName: !GetAtt EKSEventBus.Arn + EventPattern: + source: + - "aws.eks" + detail-type: + - "AWS API Call via CloudTrail" + detail: + eventSource: + - "eks.amazonaws.com" + eventName: + - "CreateCluster" + - "CreateFargateProfile" + State: "ENABLED" + Name: crowdstrike-eks-rule + Targets: + - Arn: !GetAtt EKSLambda.Arn + Id: "CrowdStrikeEKSFunction" + + # Lambda Function to process event, check for EKS API Access and StartBuild + EKSLambda: + Type: AWS::Lambda::Function + Metadata: + cfn_nag: + rules_to_suppress: + - id: W89 + reason: Lambda custom resource only run during stack lifecycle events. + - id: W92 + reason: Lambda custom resource only run during stack lifecycle events. + checkov: + skip: + - id: CKV_AWS_115 + comment: Lambda does not need reserved concurrent executions. + - id: CKV_AWS_116 + comment: DLQ not needed, as Lambda function only triggered by CloudFormation events. + - id: CKV_AWS_117 + comment: Lambda does not need to communicate with VPC resources. + - id: CKV_AWS_173 + comment: Environment variables are not sensitive. + Properties: + Environment: + Variables: + project_name: !Ref CodeBuildProjectName + artifact_bucket: !Ref StagingS3Bucket + lambda_switch_role: !Ref EKSExecutionRoleName + Handler: lambda_function.lambda_handler + MemorySize: 128 + Role: !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/${EKSExecutionRoleName} + Runtime: python3.11 + Timeout: 300 + FunctionName: crowdstrike-abi-eks-events-function + Code: + S3Bucket: !Ref StagingS3Bucket + S3Key: !Sub ${SourceS3BucketNamePrefix}/lambda_functions/packages/eks-new-clusters/lambda.zip + EKSInvokeLambdaPermission: + Type: AWS::Lambda::Permission + Properties: + FunctionName: !Ref EKSLambda + Action: "lambda:InvokeFunction" + Principal: "events.amazonaws.com" + SourceArn: !GetAtt EKSRule.Arn + ExistingEKSLambda: + Type: AWS::Lambda::Function + Metadata: + cfn_nag: + rules_to_suppress: + - id: W89 + reason: Lambda custom resource only run during stack lifecycle events. + - id: W92 + reason: Lambda custom resource only run during stack lifecycle events. + checkov: + skip: + - id: CKV_AWS_115 + comment: Lambda does not need reserved concurrent executions. + - id: CKV_AWS_116 + comment: DLQ not needed, as Lambda function only triggered by CloudFormation events. + - id: CKV_AWS_117 + comment: Lambda does not need to communicate with VPC resources. + - id: CKV_AWS_173 + comment: Environment variables are not sensitive. 
+ Properties: + Environment: + Variables: + project_name: !Ref CodeBuildProjectName + artifact_bucket: !Ref StagingS3Bucket + lambda_switch_role: !Ref EKSExecutionRoleName + Handler: lambda_function.lambda_handler + MemorySize: 128 + Role: !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/${EKSExecutionRoleName} + Runtime: python3.11 + Timeout: 300 + FunctionName: crowdstrike-abi-eks-init-function + Code: + S3Bucket: !Ref StagingS3Bucket + S3Key: !Sub ${SourceS3BucketNamePrefix}/lambda_functions/packages/eks-existing-clusters/lambda.zip + # Trigger Lambda Function + TriggerExistingEKSLambda: + Type: 'Custom::TriggerLambda' + DependsOn: + - SMFalconAPIKey + Properties: + ServiceToken: !GetAtt + - ExistingEKSLambda + - Arn + + # CodeBuild Project to deploy Falcon Operator and Sensor + EKSCodeBuild: + Type: AWS::CodeBuild::Project + Properties: + Description: "Triggered by CrowdStrike EKS Lambda to provision Falcon Operator and Sensor" + Environment: + ComputeType: BUILD_GENERAL1_SMALL + EnvironmentVariables: + - Name: FALCON_CLIENT_ID + Value: !Sub + - '${arn}:client_id' + - { arn: !Ref SMFalconAPIKey } + Type: SECRETS_MANAGER + - Name: FALCON_CLIENT_SECRET + Value: !Sub + - '${arn}:client_secret' + - { arn: !Ref SMFalconAPIKey } + Type: SECRETS_MANAGER + - Name: CS_CLOUD + Value: !Sub + - '${arn}:cs_cloud' + - { arn: !Ref SMFalconAPIKey } + Type: SECRETS_MANAGER + - Name: DOCKER_API_TOKEN + Value: !Sub + - '${arn}:docker_api_token' + - { arn: !Ref SMFalconAPIKey } + Type: SECRETS_MANAGER + - Name: PRINCIPAL_ARN + Type: PLAINTEXT + Value: !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/${CodeBuildRoleName} + - Name: S3_BUCKET + Type: PLAINTEXT + Value: !Ref StagingS3Bucket + - Name: USERNAME + Type: PLAINTEXT + Value: !Ref KubernetesUserName + - Name: CLUSTER + Type: PLAINTEXT + Value: lambda + - Name: CLUSTER_ARN + Type: PLAINTEXT + Value: lambda + - Name: NODE_TYPE + Type: PLAINTEXT + Value: lambda + - Name: CID + Type: PLAINTEXT + Value: !Ref FalconCID + - Name: ENABLE_KAC + Type: PLAINTEXT + Value: !Ref EnableKAC + - Name: REGISTRY + Type: PLAINTEXT + Value: !Ref Registry + - Name: CROWDSTRIKE_CLOUD + Type: PLAINTEXT + Value: !Ref CrowdStrikeCloud + - Name: BACKEND + Type: PLAINTEXT + Value: !Ref Backend + - Name: IMAGE_REGION + Type: PLAINTEXT + Value: !If [ UseECR, !Ref AWS::Region, 'CrowdStrike' ] + - Name: NODE_SENSOR_REPO + Type: PLAINTEXT + Value: !If [ UseECR, !Ref NodeSensorRepo, 'CrowdStrike' ] + - Name: NODE_SENSOR_URI + Type: PLAINTEXT + Value: !If [ UseECR, !GetAtt NodeSensorRepo.RepositoryUri, 'CrowdStrike' ] + - Name: ACCOUNT_ID + Type: PLAINTEXT + Value: lambda + - Name: REGION + Type: PLAINTEXT + Value: lambda + - Name: SWITCH_ROLE + Type: PLAINTEXT + Value: !Ref EKSExecutionRoleName + - Name: NAT_IP + Type: PLAINTEXT + Value: !GetAtt NatGatewayEIP.PublicIp + Image: aws/codebuild/standard:7.0 + PrivilegedMode: true + Type: LINUX_CONTAINER + VpcConfig: + SecurityGroupIds: + - !GetAtt NoIngressSecurityGroup.GroupId + Subnets: + - !GetAtt PrivateSubnet1.SubnetId + VpcId: !GetAtt PrivateSubnet1.VpcId + Artifacts: + Type: NO_ARTIFACTS + Name: !Ref CodeBuildProjectName + ServiceRole: !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/${CodeBuildRoleName} + Source: + Location: !Sub '${StagingS3Bucket}/${SourceS3BucketNamePrefix}/lambda_functions/packages/codebuild/lambda.zip' + Type: S3 + TimeoutInMinutes: 90 + Visibility: PRIVATE + +# Secret + SMFalconAPIKey: + Type: AWS::SecretsManager::Secret + Metadata: + checkov: + skip: + - id: CKV_AWS_149 + comment: The 
default key aws/secretsmanager is sufficient to secure this resource + Properties: + Name: crowdstrike-eks-secret + Description: 'Client ID and Secret for the Falcon API' + SecretString: !Sub | + { + "client_id": "${FalconClientId}", + "client_secret": "${FalconClientSecret}", + "cid": "${FalconCID}", + "cs_cloud":"${CrowdStrikeCloud}", + "docker_api_token":"${DockerAPIToken}" + } + + # Create ECR Repositories for Falcon Images + NodeSensorRepo: + Type: AWS::ECR::Repository + Condition: UseECR + Properties: + ImageScanningConfiguration: + ScanOnPush: true + EmptyOnDelete: true + RepositoryName: crowdstrike/falcon-sensor + RepositoryPolicyText: + Version: "2012-10-17" + Statement: + - + Sid: AllowPushPull + Effect: Allow + Principal: + AWS: + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/${CodeBuildRoleName} + Action: + - "ecr:GetDownloadUrlForLayer" + - "ecr:BatchGetImage" + - "ecr:BatchCheckLayerAvailability" + - "ecr:PutImage" + - "ecr:InitiateLayerUpload" + - "ecr:UploadLayerPart" + - "ecr:CompleteLayerUpload" + KPAgentRepo: + Type: AWS::ECR::Repository + Condition: UseECR + Properties: + ImageScanningConfiguration: + ScanOnPush: true + EmptyOnDelete: true + RepositoryName: crowdstrike/kpagent + RepositoryPolicyText: + Version: "2012-10-17" + Statement: + - + Sid: AllowPushPull + Effect: Allow + Principal: + AWS: + - !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/${CodeBuildRoleName} + Action: + - "ecr:GetDownloadUrlForLayer" + - "ecr:BatchGetImage" + - "ecr:BatchCheckLayerAvailability" + - "ecr:PutImage" + - "ecr:InitiateLayerUpload" + - "ecr:UploadLayerPart" + - "ecr:CompleteLayerUpload" + + # Network + VPC: + Type: AWS::EC2::VPC + Properties: + CidrBlock: !Ref VpcCIDR + EnableDnsSupport: true + EnableDnsHostnames: true + Tags: + - Key: Name + Value: !Ref EnvironmentName + + InternetGateway: + Type: AWS::EC2::InternetGateway + Properties: + Tags: + - Key: Name + Value: !Ref EnvironmentName + + InternetGatewayAttachment: + Type: AWS::EC2::VPCGatewayAttachment + Properties: + InternetGatewayId: !Ref InternetGateway + VpcId: !Ref VPC + + PublicSubnet1: + Type: AWS::EC2::Subnet + Properties: + VpcId: !Ref VPC + AvailabilityZone: !Select [ 0, !GetAZs '' ] + CidrBlock: !Ref PublicSubnet1CIDR + MapPublicIpOnLaunch: false + Tags: + - Key: Name + Value: !Sub ${EnvironmentName} Public Subnet (AZ1) + + PublicSubnet2: + Type: AWS::EC2::Subnet + Properties: + VpcId: !Ref VPC + AvailabilityZone: !Select [ 1, !GetAZs '' ] + CidrBlock: !Ref PublicSubnet2CIDR + MapPublicIpOnLaunch: false + Tags: + - Key: Name + Value: !Sub ${EnvironmentName} Public Subnet (AZ2) + + PrivateSubnet1: + Type: AWS::EC2::Subnet + Properties: + VpcId: !Ref VPC + AvailabilityZone: !Select [ 0, !GetAZs '' ] + CidrBlock: !Ref PrivateSubnet1CIDR + MapPublicIpOnLaunch: false + Tags: + - Key: Name + Value: !Sub ${EnvironmentName} Private Subnet (AZ1) + + PrivateSubnet2: + Type: AWS::EC2::Subnet + Properties: + VpcId: !Ref VPC + AvailabilityZone: !Select [ 1, !GetAZs '' ] + CidrBlock: !Ref PrivateSubnet2CIDR + MapPublicIpOnLaunch: false + Tags: + - Key: Name + Value: !Sub ${EnvironmentName} Private Subnet (AZ2) + + NatGatewayEIP: + Type: AWS::EC2::EIP + DependsOn: InternetGatewayAttachment + Properties: + Domain: vpc + + NatGateway: + Type: AWS::EC2::NatGateway + Properties: + AllocationId: !GetAtt NatGatewayEIP.AllocationId + SubnetId: !Ref PublicSubnet1 + + PublicRouteTable: + Type: AWS::EC2::RouteTable + Properties: + VpcId: !Ref VPC + Tags: + - Key: Name + Value: !Sub ${EnvironmentName} Public Routes + + 
DefaultPublicRoute: + Type: AWS::EC2::Route + DependsOn: InternetGatewayAttachment + Properties: + RouteTableId: !Ref PublicRouteTable + DestinationCidrBlock: 0.0.0.0/0 + GatewayId: !Ref InternetGateway + + PublicSubnet1RouteTableAssociation: + Type: AWS::EC2::SubnetRouteTableAssociation + Properties: + RouteTableId: !Ref PublicRouteTable + SubnetId: !Ref PublicSubnet1 + + PublicSubnet2RouteTableAssociation: + Type: AWS::EC2::SubnetRouteTableAssociation + Properties: + RouteTableId: !Ref PublicRouteTable + SubnetId: !Ref PublicSubnet2 + + PrivateRouteTable: + Type: AWS::EC2::RouteTable + Properties: + VpcId: !Ref VPC + Tags: + - Key: Name + Value: !Sub ${EnvironmentName} Private Routes (AZ1) + + DefaultPrivateRoute: + Type: AWS::EC2::Route + Properties: + RouteTableId: !Ref PrivateRouteTable + DestinationCidrBlock: 0.0.0.0/0 + NatGatewayId: !Ref NatGateway + + PrivateSubnet1RouteTableAssociation: + Type: AWS::EC2::SubnetRouteTableAssociation + Properties: + RouteTableId: !Ref PrivateRouteTable + SubnetId: !Ref PrivateSubnet1 + + PrivateSubnet2RouteTableAssociation: + Type: AWS::EC2::SubnetRouteTableAssociation + Properties: + RouteTableId: !Ref PrivateRouteTable + SubnetId: !Ref PrivateSubnet2 + + NoIngressSecurityGroup: + Type: AWS::EC2::SecurityGroup + Properties: + GroupDescription: "Security group with no ingress rule" + VpcId: !Ref VPC + SecurityGroupEgress: + - IpProtocol: -1 + CidrIp: 0.0.0.0/0 + Description: "Allow all egress" \ No newline at end of file diff --git a/templates/eks-root-roles.yml b/templates/eks-root-roles.yml new file mode 100644 index 0000000..d8b2f7c --- /dev/null +++ b/templates/eks-root-roles.yml @@ -0,0 +1,237 @@ +--- +AWSTemplateFormatVersion: '2010-09-09' + +Parameters: + CodeBuildProjectName: + Type: String + Default: "crowdstrike-eks-codebuild" + CodeBuildRoleName: + Type: String + Default: "crowdstrike-eks-codebuild-role" + EventBridgeRoleName: + Type: String + Default: "crowdstrike-eks-eventbridge-role" + EKSExecutionRoleName: + Type: String + Default: "crowdstrike-eks-execution-role" + StagingS3Bucket: + Type: String + EventBusName: + Type: String + Default: "crowdstrike-eks-eventbus" + PermissionsBoundary: + Type: String + Description: The name of the policy used to set the permissions boundary for IAM roles. 
+ Default: '' + +Conditions: + SetPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundary, '' ] ] + +Resources: + EKSEventBridgeRole: + Type: AWS::IAM::Role + Properties: + RoleName: !Ref EventBridgeRoleName + PermissionsBoundary: + Fn::If: + - SetPermissionsBoundary + - !Sub 'arn:${AWS::Partition}:iam::${AWS::AccountId}:policy/${PermissionsBoundary}' + - Ref: AWS::NoValue + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: events.amazonaws.com + Action: sts:AssumeRole + Policies: + - PolicyName: crowdstrike-eks-eb-role + PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Action: events:PutEvents + Resource: !Sub arn:${AWS::Partition}:events:${AWS::Region}:${AWS::AccountId}:event-bus/${EventBusName} + + EKSExecutionRole: + Type: 'AWS::IAM::Role' + Properties: + RoleName: !Ref EKSExecutionRoleName + PermissionsBoundary: + Fn::If: + - SetPermissionsBoundary + - !Sub 'arn:${AWS::Partition}:iam::${AWS::AccountId}:policy/${PermissionsBoundary}' + - Ref: AWS::NoValue + AssumeRolePolicyDocument: + Version: "2012-10-17" + Statement: + - Effect: Allow + Principal: + Service: + - lambda.amazonaws.com + Action: + - 'sts:AssumeRole' + ManagedPolicyArns: + - !Sub 'arn:${AWS::Partition}:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole' + Policies: + - PolicyName: crowdstrike-eks-execution-policy + PolicyDocument: + Version: "2012-10-17" + Statement: + - Effect: Allow + Action: + - organizations:ListAccounts + Resource: '*' + - Effect: Allow + Action: + - ec2:DescribeRegions + Resource: '*' + - Effect: Allow + Action: + - codebuild:StartBuild + Resource: + - !Sub 'arn:${AWS::Partition}:codebuild:*:${AWS::AccountId}:project/${CodeBuildProjectName}' + - Effect: Allow + Action: + - sts:AssumeRole + Resource: + - !Sub arn:${AWS::Partition}:iam::*:role/${EKSExecutionRoleName} + + EKSCodeBuildRole: + Type: 'AWS::IAM::Role' + Metadata: + cfn-lint: + config: + ignore_checks: + - EIAMPolicyWildcardResource # Role has * to allow for future service monitoring without stack updates + - EIAMPolicyActionWildcard # Role has * to allow for future service monitoring without stack updates + checkov: + skip: + - id: CKV_AWS_109 + comment: IAM PassRole action is constrained by resource ARN. + - id: CKV_AWS_111 + comment: IAM PassRole action is constrained by resource ARN. 
+ Properties: + RoleName: !Ref CodeBuildRoleName + PermissionsBoundary: + Fn::If: + - SetPermissionsBoundary + - !Sub 'arn:${AWS::Partition}:iam::${AWS::AccountId}:policy/${PermissionsBoundary}' + - Ref: AWS::NoValue + AssumeRolePolicyDocument: + Version: "2012-10-17" + Statement: + - Effect: Allow + Principal: + Service: + - codebuild.amazonaws.com + Action: + - 'sts:AssumeRole' + Policies: + - PolicyName: codebuild-base-policy + PolicyDocument: + Version: "2012-10-17" + Statement: + - Effect: Allow + Action: + - logs:CreateLogGroup + - logs:CreateLogStream + - logs:PutLogEvents + Resource: + - !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/codebuild/${CodeBuildProjectName}' + - !Sub 'arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/codebuild/${CodeBuildProjectName}:*' + - Effect: Allow + Action: + - s3:PutObject + - s3:GetObject + - s3:GetObjectVersion + - s3:GetBucketAcl + - s3:GetBucketLocation + Resource: + - !Sub 'arn:${AWS::Partition}:s3:::codepipeline-${AWS::Region}-*' + - Effect: Allow + Action: + - codebuild:CreateReportGroup + - codebuild:CreateReport + - codebuild:UpdateReport + - codebuild:BatchPutTestCases + - codebuild:BatchPutCodeCoverages + Resource: + - !Sub 'arn:${AWS::Partition}:codebuild:${AWS::Region}:${AWS::AccountId}:report-group/${CodeBuildProjectName}-*' + - PolicyName: codebuild-s3-readonly-policy + PolicyDocument: + Version: "2012-10-17" + Statement: + - Effect: Allow + Action: + - s3:GetObject + - s3:GetObjectVersion + Resource: + - !Sub 'arn:${AWS::Partition}:s3:::${StagingS3Bucket}/buildspec.yml' + - !Sub 'arn:${AWS::Partition}:s3:::${StagingS3Bucket}/*' + - Effect: Allow + Action: + - s3:ListBucket + - s3:GetBucketAcl + - s3:GetBucketLocation + Resource: + - !Sub 'arn:${AWS::Partition}:s3:::${StagingS3Bucket}' + - PolicyName: codebuild-eks-policy + PolicyDocument: + Version: "2012-10-17" + Statement: + - Effect: Allow + Action: + - eks:DescribeFargateProfile + - eks:ListAccessEntries + - eks:ListAssociatedAccessPolicies + - eks:DescribeNodegroup + - eks:ListIdentityProviderConfigs + - eks:ListNodegroups + - eks:DescribeAccessEntry + - eks:ListFargateProfiles + - eks:DescribeIdentityProviderConfig + - eks:DescribeUpdate + - eks:AccessKubernetesApi + - eks:DescribeCluster + - eks:ListClusters + - eks:ListAccessPolicies + - eks:AssociateIdentityProviderConfig + - eks:AssociateAccessPolicy + - eks:DisassociateAccessPolicy + - ecr:GetAuthorizationToken + - ecr:ListImages + - ecr:InitiateLayerUpload + - ecr:CreateRepository + - ecr:CompleteLayerUpload + - ecr:PutImage + - iam:CreateOpenIDConnectProvider + - iam:GetOpenIDConnectProvider + - ec2:CreateNetworkInterface + - ec2:CreateNetworkInterfacePermission + - ec2:DeleteNetworkInterface + - ec2:DescribeDhcpOptions + - ec2:DescribeNetworkInterfaces + - ec2:DeleteNetworkInterface + - ec2:DescribeSubnets + - ec2:DescribeSecurityGroups + - ec2:DescribeVpcs + Resource: '*' + - Effect: Allow + Action: + - eks:UpdateAccessEntry + - eks:CreateAccessEntry + - eks:DeleteAccessEntry + Resource: + - !Sub 'arn:${AWS::Partition}:eks:*:*:cluster/*' + - !Sub 'arn:${AWS::Partition}:eks:*:*:access-entry/*/${CodeBuildRoleName}/${AWS::AccountId}/$/*' + - Effect: Allow + Action: + - secretsmanager:GetSecretValue + Resource: + - !Sub 'arn:aws:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:crowdstrike-eks-secret-*' + - Effect: Allow + Action: + - sts:AssumeRole + Resource: + - !Sub arn:${AWS::Partition}:iam::*:role/${EKSExecutionRoleName} diff --git 
a/templates/eks-target-roles-stackset.yml b/templates/eks-target-roles-stackset.yml new file mode 100644 index 0000000..d7ab1b7 --- /dev/null +++ b/templates/eks-target-roles-stackset.yml @@ -0,0 +1,113 @@ +--- +AWSTemplateFormatVersion: '2010-09-09' + +Parameters: + EKSExecutionRoleName: + Type: String + Default: "crowdstrike-eks-execution-role" + EventBridgeRoleName: + Type: String + Default: "crowdstrike-eks-eventbridge-role" + EventBusAccount: + Type: String + EventBusName: + Type: String + Default: "crowdstrike-eks-eventbus" + CodeBuildRoleName: + Type: String + Default: "crowdstrike-eks-codebuild-role" + PermissionsBoundary: + Type: String + Description: The name of the policy used to set the permissions boundary for IAM roles. + Default: '' + +Conditions: + SetPermissionsBoundary: !Not [ !Equals [ !Ref PermissionsBoundary, '' ] ] + +Resources: + EventBridgeRole: + Type: 'AWS::IAM::Role' + Properties: + RoleName: !Ref EventBridgeRoleName + PermissionsBoundary: + Fn::If: + - SetPermissionsBoundary + - !Sub 'arn:${AWS::Partition}:iam::${AWS::AccountId}:policy/${PermissionsBoundary}' + - Ref: AWS::NoValue + AssumeRolePolicyDocument: + Version: "2012-10-17" + Statement: + - Effect: Allow + Principal: + Service: + - events.amazonaws.com + Action: + - 'sts:AssumeRole' + Policies: + - PolicyName: crowdstrike-eks-eventbridge-policy + PolicyDocument: + Version: "2012-10-17" + Statement: + - Effect: Allow + Action: + - events:PutEvents + Resource: + - !Sub arn:${AWS::Partition}:events:${AWS::Region}:${EventBusAccount}:event-bus/${EventBusName} + + EKSExecutionRole: + Type: 'AWS::IAM::Role' + Metadata: + cfn-lint: + config: + ignore_checks: + - EIAMPolicyWildcardResource # Role has * to allow for future service monitoring without stack updates + - EIAMPolicyActionWildcard # Role has * to allow for future service monitoring without stack updates + checkov: + skip: + - id: CKV_AWS_111 + comment: constraints must be suppressed due to resource arns only known at execution + Properties: + RoleName: !Ref EKSExecutionRoleName + PermissionsBoundary: + Fn::If: + - SetPermissionsBoundary + - !Sub 'arn:${AWS::Partition}:iam::${AWS::AccountId}:policy/${PermissionsBoundary}' + - Ref: AWS::NoValue + AssumeRolePolicyDocument: + Version: "2012-10-17" + Statement: + - Effect: Allow + Principal: + AWS: + - !Sub arn:${AWS::Partition}:iam::${EventBusAccount}:role/${EKSExecutionRoleName} + - !Sub arn:${AWS::Partition}:sts::${EventBusAccount}:role/${CodeBuildRoleName} + Action: + - 'sts:AssumeRole' + Policies: + - PolicyName: crowdstrike-eks-execution-policy + PolicyDocument: + Version: "2012-10-17" + Statement: + - Effect: Allow + Action: + - eks:DescribeCluster + - eks:ListClusters + - eks:ListFargateProfiles + - ec2:DescribeRegions + - eks:UpdateClusterConfig + - eks:DescribeUpdate + - eks:AssociateAccessPolicy + Resource: '*' + - Effect: Allow + Action: + - ec2:DescribeRegions + Resource: '*' + - Effect: Allow + Action: + - eks:UpdateAccessEntry + - eks:CreateAccessEntry + - eks:DeleteAccessEntry + - eks:DescribeAccessEntry + Resource: + - !Sub 'arn:${AWS::Partition}:eks:*:${AWS::AccountId}:cluster/*' + - !Sub 'arn:${AWS::Partition}:eks:*:${AWS::AccountId}:access-entry/*/${CodeBuildRoleName}/${EventBusAccount}/$/*'
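
Editor's note: for reviewers who want to exercise the new-cluster path without deploying the full EKS Protection stack, the sketch below approximates what the eks-new-clusters Lambda and the CodeBuild project in this change do together: describe an EKS cluster, confirm that the public endpoint and API access entries are enabled, and then start the CodeBuild project with the same environment-variable overrides the Lambda passes (CLUSTER, CLUSTER_ARN, NODE_TYPE, ACCOUNT_ID, REGION). This is an illustrative sketch only, not part of the patch; PROJECT_NAME, check_and_build, and the example cluster/account/region values are hypothetical placeholders, and credentials with eks:DescribeCluster and codebuild:StartBuild permissions in the target account are assumed.

```python
# Minimal local sketch (not part of this change set). It mirrors the flow of the
# eks-new-clusters Lambda: inspect the cluster's access configuration, then kick
# off the CodeBuild project with the same environment-variable overrides.
import boto3

# Matches the CodeBuildProjectName default in the templates; adjust if overridden.
PROJECT_NAME = "crowdstrike-eks-codebuild"


def check_and_build(cluster_name: str, account_id: str, region: str) -> None:
    """Describe the cluster and start the sensor-deployment build if eligible."""
    eks = boto3.client("eks", region_name=region)
    codebuild = boto3.client("codebuild", region_name=region)

    cluster = eks.describe_cluster(name=cluster_name)["cluster"]
    auth_mode = cluster.get("accessConfig", {}).get("authenticationMode", "")
    public = cluster.get("resourcesVpcConfig", {}).get("endpointPublicAccess", False)

    # The Lambda only proceeds when the cluster exposes a public endpoint and its
    # authentication mode includes access entries (API or API_AND_CONFIG_MAP).
    if not (public and "API" in auth_mode):
        print(f"API access not enabled on cluster {cluster_name}; skipping")
        return

    build = codebuild.start_build(
        projectName=PROJECT_NAME,
        environmentVariablesOverride=[
            {"name": "CLUSTER", "value": cluster_name, "type": "PLAINTEXT"},
            {"name": "CLUSTER_ARN", "value": cluster["arn"], "type": "PLAINTEXT"},
            {"name": "NODE_TYPE", "value": "nodegroup", "type": "PLAINTEXT"},
            {"name": "ACCOUNT_ID", "value": account_id, "type": "PLAINTEXT"},
            {"name": "REGION", "value": region, "type": "PLAINTEXT"},
        ],
    )
    print("Started build:", build["build"]["id"])


if __name__ == "__main__":
    # Example invocation; replace with a real cluster name, account ID, and region.
    check_and_build("my-test-cluster", "111111111111", "us-east-1")
```

Running this against a freshly created cluster should produce the same CodeBuild execution the EventBridge rule would trigger, which makes it a convenient way to validate the buildspec and IAM wiring in isolation before relying on the event-driven path.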