# Introduced in PR #9 — "feat: add eks cluster gha".
---
# Scheduled / on-demand test that creates an Aurora cluster via the
# aurora-manage-cluster composite action, then tears it down.
name: Aurora Cluster creation and destruction test

on:
  schedule:
    - cron: '0 2 * * 1' # At 02:00 on Monday.
  workflow_dispatch:
    inputs:
      cluster_name:
        description: "Aurora Cluster name."
        required: false
        type: string
      delete_cluster:
        description: "Whether to delete the Aurora cluster."
        required: false
        type: boolean
        default: true
      db_username:
        description: "Database username."
        required: false
        type: string
      db_password:
        description: "Database password."
        required: false
        type: string
      vpc-id:
        description: "VPC ID to create the cluster in."
        required: false
        type: string
        default: ''
      subnet-ids:
        description: "List of subnet IDs to create the cluster in."
        required: false
        type: string
        default: ''
      cidr-blocks:
        description: "CIDR blocks to allow access from and to."
        required: false
        type: string
        default: ''
  pull_request:
    paths:
      - modules/fixtures/backend.tf
      - modules/fixtures/fixtures.default.aurora.tfvars
      - modules/aurora/**.tf
      - .tool-versions
      - .github/workflows/test-gha-aurora-manage-cluster.yml
      - .github/actions/aurora-manage-cluster/*.yml
      - justfile

# One run per ref at a time; a newer push cancels the in-flight run.
concurrency:
  group: "${{ github.workflow }}-${{ github.ref }}"
  cancel-in-progress: true

env:
  AWS_PROFILE: "infex"
  AWS_REGION: "eu-west-2"
  # please keep those synced with tests.yml
  TF_STATE_BUCKET: "tests-eks-tf-state-eu-central-1"
  TF_STATE_BUCKET_REGION: "eu-central-1"

jobs:
  action-test:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          ref: ${{ github.head_ref }}
          # Full history is needed for git rev-parse of the tested revision.
          fetch-depth: 0

      - name: Install tooling using asdf
        uses: asdf-vm/actions/install@05e0d2ed97b598bfce82fd30daf324ae0c4570e6 # v3

      - name: Import Secrets
        id: secrets
        # NOTE(review): consider pinning to a commit SHA for consistency with
        # the asdf action above.
        uses: hashicorp/vault-action@v3
        with:
          url: ${{ secrets.VAULT_ADDR }}
          method: approle
          roleId: ${{ secrets.VAULT_ROLE_ID }}
          secretId: ${{ secrets.VAULT_SECRET_ID }}
          exportEnv: false
          secrets: |
            secret/data/products/infrastructure-experience/ci/common AWS_ACCESS_KEY;
            secret/data/products/infrastructure-experience/ci/common AWS_SECRET_KEY;

      - name: Add profile credentials to ~/.aws/credentials
        run: |
          aws configure set aws_access_key_id ${{ steps.secrets.outputs.AWS_ACCESS_KEY }} --profile ${{ env.AWS_PROFILE }}
          aws configure set aws_secret_access_key ${{ steps.secrets.outputs.AWS_SECRET_KEY }} --profile ${{ env.AWS_PROFILE }}
          aws configure set region ${{ env.AWS_REGION }} --profile ${{ env.AWS_PROFILE }}

      - name: Get Cluster Info
        id: commit_info
        # Inputs are passed through `env:` rather than interpolated with
        # ${{ }} inside the script: a crafted dispatch input can otherwise
        # inject arbitrary shell commands (GitHub security-hardening guidance).
        env:
          INPUT_CLUSTER_NAME: ${{ inputs.cluster_name }}
          INPUT_DB_USERNAME: ${{ inputs.db_username }}
          INPUT_DB_PASSWORD: ${{ inputs.db_password }}
          INPUT_VPC_ID: ${{ inputs.vpc-id }}
          INPUT_SUBNET_IDS: ${{ inputs.subnet-ids }}
          INPUT_CIDR_BLOCKS: ${{ inputs.cidr-blocks }}
        run: |
          set -euo pipefail

          # Each value falls back to a derived default when the matching
          # workflow_dispatch input is empty (always the case on schedule/PR).
          if [[ -n "$INPUT_CLUSTER_NAME" ]]; then
            cluster_name="$INPUT_CLUSTER_NAME"
          else
            cluster_name="aurora-$(git rev-parse --short HEAD)"
          fi

          if [[ -n "$INPUT_DB_USERNAME" ]]; then
            db_username="$INPUT_DB_USERNAME"
          else
            db_username="user$(openssl rand -hex 4)"
          fi

          if [[ -n "$INPUT_DB_PASSWORD" ]]; then
            db_password="$INPUT_DB_PASSWORD"
          else
            db_password="$(openssl rand -base64 12)"
          fi

          if [[ -n "$INPUT_VPC_ID" ]]; then
            vpc_id="$INPUT_VPC_ID"
          else
            vpc_id="$(aws ec2 describe-vpcs --query 'Vpcs[?IsDefault].VpcId' --output text --profile "${AWS_PROFILE}")"
          fi

          if [[ -n "$INPUT_SUBNET_IDS" ]]; then
            subnet_ids="$INPUT_SUBNET_IDS"
          else
            subnet_ids=$(aws ec2 describe-subnets --filters "Name=vpc-id,Values=${vpc_id}" --query "Subnets[*].SubnetId" --output json --profile "${AWS_PROFILE}")
          fi

          if [[ -n "$INPUT_CIDR_BLOCKS" ]]; then
            cidr_blocks="$INPUT_CIDR_BLOCKS"
          else
            cidr_blocks='["0.0.0.0/0"]' # Default CIDR to allow access from anywhere
          fi

          echo "cluster_name=$cluster_name" | tee -a "$GITHUB_OUTPUT"
          echo "db_username=$db_username" | tee -a "$GITHUB_OUTPUT"
          # Mask the password and write it to the output file only — the
          # previous `tee` printed the generated password in the step log.
          echo "::add-mask::$db_password"
          echo "db_password=$db_password" >> "$GITHUB_OUTPUT"
          echo "vpc_id=$vpc_id" | tee -a "$GITHUB_OUTPUT"
          echo "subnet_ids=$subnet_ids" | tee -a "$GITHUB_OUTPUT"
          echo "cidr_blocks=$cidr_blocks" | tee -a "$GITHUB_OUTPUT"

          tf_modules_revision=$(git rev-parse HEAD)
          echo "tf_modules_revision=$tf_modules_revision" | tee -a "$GITHUB_OUTPUT"

      - name: Create Aurora Cluster
        timeout-minutes: 125
        uses: ./.github/actions/aurora-manage-cluster
        id: create_cluster
        with:
          cluster-name: ${{ steps.commit_info.outputs.cluster_name }}
          username: ${{ steps.commit_info.outputs.db_username }}
          password: ${{ steps.commit_info.outputs.db_password }}
          aws-region: ${{ env.AWS_REGION }}
          s3-backend-bucket: ${{ env.TF_STATE_BUCKET }}
          s3-bucket-region: ${{ env.TF_STATE_BUCKET_REGION }}
          tf-modules-revision: ${{ steps.commit_info.outputs.tf_modules_revision }}
          vpc-id: ${{ steps.commit_info.outputs.vpc_id }}
          subnet-ids: ${{ steps.commit_info.outputs.subnet_ids }}
          cidr-blocks: ${{ steps.commit_info.outputs.cidr_blocks }}

      - name: Delete Aurora Cluster
        timeout-minutes: 125
        # Always clean up, unless a manual dispatch explicitly opted out.
        # (workflow_dispatch boolean inputs surface as strings in
        # github.event.inputs, hence the comparison against 'false'.)
        if: always() && !(github.event_name == 'workflow_dispatch' && github.event.inputs.delete_cluster == 'false')
        uses: ./.github/actions/eks-cleanup-resources
        with:
          tf-bucket: ${{ env.TF_STATE_BUCKET }}
          tf-bucket-region: ${{ env.TF_STATE_BUCKET_REGION }}
          # max-age-hours 0 forces immediate deletion of this run's cluster.
          max-age-hours: 0
          target: ${{ steps.commit_info.outputs.cluster_name }}

      - name: Notify in Slack in case of failure
        id: slack-notification
        if: failure() && github.event_name == 'schedule'
        uses: camunda/infraex-common-config/.github/actions/report-failure-on-slack@main
        with:
          vault_addr: ${{ secrets.VAULT_ADDR }}
          vault_role_id: ${{ secrets.VAULT_ROLE_ID }}
          vault_secret_id: ${{ secrets.VAULT_SECRET_ID }}