#
# This is an e2e test to deploy PostHog on Amazon Web Services using Helm.
#
# TODO:
# - run k8s spec test
#
name: e2e - Amazon Web Services (install)
on:
  workflow_dispatch:
  # Please do not add 'pull_request' here: without the proper GitHub settings,
  # it could let 3rd party users run commands against our cloud account
  # while testing.
  push:
    branches:
      - main
jobs:
  aws-install:
    runs-on: ubuntu-20.04
    if: github.repository == 'PostHog/charts-clickhouse'
    #
    # These permissions are needed to interact with GitHub's OIDC Token endpoint.
    # We use OpenID Connect (OIDC) to allow this GitHub Action to access and manage
    # AWS resources without needing to store the AWS credentials as long-lived GitHub secrets.
    #
    # see: https://docs.github.com/en/actions/deployment/security-hardening-your-deployments/configuring-openid-connect-in-amazon-web-services
    #
    permissions:
      id-token: write
      contents: write
    steps:
      - name: Checkout
        uses: actions/checkout@v3
      - name: Configure AWS credentials
        uses: aws-actions/configure-aws-credentials@v1
        with:
          role-to-assume: arn:aws:iam::${{ secrets.AWS_ACCOUNT_ID }}:role/${{ secrets.AWS_ROLE_TO_ASSUME }}
          aws-region: us-east-1
          role-duration-seconds: 7200
      - name: Install doctl to manage 'posthog.cc' DNS
        uses: digitalocean/action-doctl@v2
        with:
          token: ${{ secrets.DIGITALOCEAN_ACCESS_TOKEN }}
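      # Note: the step below downloads a pinned eksctl release binary into the
      # workspace; later steps invoke it as ./eksctl rather than from PATH.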
      - name: Install eksctl to easily manage EKS clusters
        env:
          EKSCTL_VERSION: 0.115.0
        run: |
          curl --silent --location "https://github.com/weaveworks/eksctl/releases/download/v${EKSCTL_VERSION}/eksctl_$(uname -s)_amd64.tar.gz" | tar xz
          ./eksctl version
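      # Note: the test name below doubles as both the EKS cluster name and the
      # DNS record, so it embeds the date and the short commit SHA to keep each
      # run unique.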
      - name: Declare variables that we can share across steps
        id: vars
        run: |
          TEST_NAME="helm-test-e2e-aws-$(date '+%F')-$(git rev-parse --short HEAD)"
          echo "k8s_cluster_name=${TEST_NAME}" >> $GITHUB_OUTPUT
          echo "dns_record=${TEST_NAME}" >> $GITHUB_OUTPUT
          echo "fqdn_record=${TEST_NAME}.posthog.cc" >> $GITHUB_OUTPUT
      - name: Deploy a new k8s cluster
        id: k8s_cluster_creation
        run: |
          # note: we manually specify all the AZs in the region to reduce the
          # possibility that AWS runs out of capacity in a single one, which
          # would make this test fail.
          ./eksctl create cluster \
            --name ${{ steps.vars.outputs.k8s_cluster_name }} \
            --region=us-east-1 \
            --zones="us-east-1a,us-east-1b,us-east-1c,us-east-1d,us-east-1f" \
            --instance-types="m6i.xlarge" \
            --tags="provisioned_by=github_action" \
            --version 1.24 \
            --nodes 3
          # EKS clusters use IAM users and roles to control access to the cluster.
          # The rules are implemented in a config map called aws-auth.
          # eksctl provides commands to read and edit this config map.
          ./eksctl create iamidentitymapping \
            --cluster ${{ steps.vars.outputs.k8s_cluster_name }} \
            --region us-east-1 \
            --arn arn:aws:iam::${{ secrets.AWS_ACCOUNT_ID }}:role/OrganizationAccountAccessRole \
            --username admin \
            --group system:masters
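      # --wait and --wait-for-jobs make Helm block (up to --timeout) until the
      # chart's resources and hook jobs report ready; see the comment block
      # after this step for why that alone is not sufficient here.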
      - name: Install PostHog using the Helm chart
        id: helm_install
        run: |
          helm upgrade --install \
            -f ci/values/amazon_web_services.yaml \
            --set "ingress.hostname=${{ steps.vars.outputs.fqdn_record }}" \
            --timeout 30m \
            --create-namespace \
            --namespace posthog \
            posthog ./charts/posthog \
            --wait-for-jobs \
            --wait
      #
      # Wait for all k8s resources to be ready.
      #
      # Despite the --wait flag used in the command above,
      # there is no guarantee that all the resources will be deployed
      # when the command returns.
      #
      # Why can't we directly use the 'action-k8s-await-workloads' step below?
      # Because it doesn't work for this use case.
      #
      # ref: https://github.com/jupyterhub/action-k8s-await-workloads/issues/38
      #
      - name: Workaround - wait for all the PostHog resources in k8s to be ready
        timeout-minutes: 15
        run: ./ci/wait_for_all_the_posthog_resources_to_be_ready.sh
      - name: Workaround - wait for the AWS load balancer to be ready
        timeout-minutes: 15
        run: |
          echo "Waiting for the AWS Load Balancer to be ready..."
          load_balancer_external_hostname=""
          while [ -z "$load_balancer_external_hostname" ];
          do
            echo " sleeping 10 seconds" && sleep 10
            load_balancer_external_hostname=$(kubectl get ingress -n posthog posthog -o jsonpath="{.status.loadBalancer.ingress[0].hostname}")
          done
          echo "The AWS Load Balancer is now ready!"
      - name: Wait until all the resources are fully deployed in k8s
        uses: jupyterhub/action-k8s-await-workloads@main
        with:
          namespace: "posthog"
          timeout: 300
          max-restarts: 10
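      # AWS load balancers expose a DNS hostname rather than a stable IP, so the
      # record created below is a CNAME pointing at that hostname (hence the
      # trailing dot in --record-data).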
      - name: Create the DNS record
        id: dns_creation
        run: |
          # Get the load balancer external hostname
          load_balancer_external_hostname=$(kubectl get ingress -n posthog posthog -o jsonpath="{.status.loadBalancer.ingress[0].hostname}")
          # Create the DNS record
          doctl compute domain records create \
            posthog.cc \
            --record-type CNAME \
            --record-ttl 60 \
            --record-name "${{ steps.vars.outputs.dns_record }}" \
            --record-data "${load_balancer_external_hostname}."
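      # The k6 ingestion test below sends events to the freshly deployed stack
      # through the public FQDN that was just wired up via DNS.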
      - name: Setup PostHog for the ingestion test
        run: ./ci/setup_ingestion_test.sh
      - name: Set PostHog endpoints to use for the ingestion test
        run: |
          echo "POSTHOG_API_ENDPOINT=http://${{ steps.vars.outputs.fqdn_record }}" | tee -a "$GITHUB_ENV"
          echo "POSTHOG_EVENT_ENDPOINT=http://${{ steps.vars.outputs.fqdn_record }}" | tee -a "$GITHUB_ENV"
      - name: Run ingestion test using k6
        uses: k6io/[email protected]
        with:
          filename: ci/k6/ingestion-test.js
      - name: Emit namespace report
        uses: jupyterhub/action-k8s-namespace-report@v1
        if: always()
        with:
          namespace: posthog
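      # Teardown: both cleanup steps run even when the job fails, but only if the
      # corresponding resource was actually created (see the 'outcome' checks).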
      - name: Delete the k8s cluster and all the associated resources
        if: ${{ always() && steps.k8s_cluster_creation.outcome == 'success' }}
        run: |
          ./eksctl delete cluster \
            --region=us-east-1 \
            --name=${{ steps.vars.outputs.k8s_cluster_name }} \
            --force \
            --wait
      - name: Delete the DNS record
        if: ${{ always() && steps.dns_creation.outcome == 'success' }}
        run: |
          DNS_RECORD_ID=$(doctl compute domain records list posthog.cc --no-header --format ID,Name | grep ${{ steps.vars.outputs.dns_record }} | awk '{print $1}')
          doctl compute domain records delete \
            posthog.cc \
            --force \
            "$DNS_RECORD_ID"