# test-digitalocean-install.yaml
#
# This is an e2e test to deploy PostHog on DigitalOcean using Helm.
#
# TODO:
# - run k8s spec test
#
name: e2e - DigitalOcean (install)
on:
  workflow_dispatch:
  # Please do not add 'pull_request' here: without the proper GitHub settings,
  # it could allow third-party users to run commands against the cloud account
  # we use for testing.
  push:
    branches:
      - main
jobs:
  do-install:
    runs-on: ubuntu-20.04
    if: github.repository == 'PostHog/charts-clickhouse'
    steps:
      - name: Checkout
        uses: actions/checkout@v3
      - name: Install doctl to manage 'posthog.cc' DNS
        uses: digitalocean/action-doctl@v2
        with:
          token: ${{ secrets.DIGITALOCEAN_ACCESS_TOKEN }}
      - name: Declare variables that we can share across steps
        id: vars
        run: |
          TEST_NAME="helm-test-e2e-do-$(date '+%F')-$(git rev-parse --short HEAD)"
          echo "k8s_cluster_name=${TEST_NAME}" >> $GITHUB_OUTPUT
          echo "dns_record=${TEST_NAME}" >> $GITHUB_OUTPUT
          echo "fqdn_record=${TEST_NAME}.posthog.cc" >> $GITHUB_OUTPUT
      - name: Deploy a new k8s cluster
        id: k8s_cluster_creation
        run: |
          # Pick the first available DigitalOcean Kubernetes 1.24.x version slug
          # (slugs look like '1.24.4-do.0')
          DO_K8S_VERSION=$(doctl k8s options versions -ojson | jq --raw-output 'first(.[] | select(.slug | test("^1.24.[0-9]+-do.[0-9]+$")) | .slug)')
          doctl k8s clusters create \
            ${{ steps.vars.outputs.k8s_cluster_name }} \
            --version "$DO_K8S_VERSION" \
            --tag="provisioned_by:github_action" \
            --size s-4vcpu-8gb \
            --count 3 \
            --wait
      - name: Install PostHog using the Helm chart
        id: helm_install
        run: |
          # Note: the dots in the annotation keys are escaped so that Helm's --set treats
          # 'service.beta.kubernetes.io/...' as a single key rather than a nested path.
          helm upgrade --install \
            -f ci/values/digital_ocean.yaml \
            --set ingress.hostname="${{ steps.vars.outputs.fqdn_record }}" \
            --set ingress-nginx.controller.service.annotations."service\.beta\.kubernetes\.io/do-loadbalancer-name"="${{ steps.vars.outputs.k8s_cluster_name }}" \
            --set ingress-nginx.controller.service.annotations."service\.beta\.kubernetes\.io/do-loadbalancer-hostname"="${{ steps.vars.outputs.fqdn_record }}" \
            --timeout 30m \
            --create-namespace \
            --namespace posthog \
            posthog ./charts/posthog \
            --wait-for-jobs \
            --wait
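      # (When debugging locally, 'helm status posthog --namespace posthog' shows the
      # release state; the CI steps below rely on explicit waits instead.)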
      #
      # Wait for all k8s resources to be ready.
      #
      # Despite the --wait flag used in the command above,
      # there is no guarantee that all the resources will be deployed
      # when the command returns.
      #
      # Why can't we directly use the 'action-k8s-await-workloads' step below?
      # Because it doesn't work for this use case.
      #
      # ref: https://github.com/jupyterhub/action-k8s-await-workloads/issues/38
      #
      - name: Workaround - wait for all the PostHog resources in k8s to be ready
        timeout-minutes: 15
        run: ./ci/wait_for_all_the_posthog_resources_to_be_ready.sh
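      # The script above is not reproduced here; as a rough sketch (assumptions: the
      # release lives in the 'posthog' namespace and pod status is a good enough proxy
      # for readiness), such a wait loop could look like:
      #
      #   while kubectl get pods --namespace posthog --no-headers \
      #       | awk '$3 != "Running" && $3 != "Completed"' | grep -q .; do
      #     echo "waiting for pods..." && sleep 10
      #   done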
      - name: Workaround - wait for the DO load balancer to be ready
        timeout-minutes: 15
        run: |
          echo "Waiting for the DigitalOcean Load Balancer to be ready..."
          load_balancer_external_hostname=""
          while [ -z "$load_balancer_external_hostname" ];
          do
            echo " sleeping 10 seconds" && sleep 10
            load_balancer_external_hostname=$(kubectl get ingress -n posthog posthog -o jsonpath="{.status.loadBalancer.ingress[0].hostname}")
          done
          echo "The DigitalOcean Load Balancer is now ready!"
      - name: Wait until all the resources are fully deployed in k8s
        uses: jupyterhub/action-k8s-await-workloads@main
        with:
          namespace: "posthog"
          timeout: 300
          max-restarts: 10
      - name: Create the DNS record
        id: dns_creation
        run: |
          #
          # We need to get the load balancer IP address from 'doctl' instead of 'kubectl':
          # when the LB is provisioned with the 'service.beta.kubernetes.io/do-loadbalancer-hostname'
          # annotation, the '.status.loadBalancer.ingress' value reports that hostname instead of
          # the IP, and we can't create a DNS record that points to itself.
          #
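          # For reference, querying k8s directly would return the FQDN rather than an IP
          # (illustrative command only, not used by this step):
          #   kubectl get ingress -n posthog posthog -o jsonpath='{.status.loadBalancer.ingress[0].hostname}'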
          load_balancer_external_ip=$(doctl compute load-balancer list --no-header --format Name,IP | grep ${{ steps.vars.outputs.k8s_cluster_name }} | awk '{print $2}')
          # Create the DNS record
          doctl compute domain records create \
            posthog.cc \
            --record-type A \
            --record-ttl 60 \
            --record-name "${{ steps.vars.outputs.dns_record }}" \
            --record-data "${load_balancer_external_ip}"
      - name: Setup PostHog for the ingestion test
        run: ./ci/setup_ingestion_test.sh
      - name: Set PostHog endpoints to use for the ingestion test
        run: |
          echo "POSTHOG_API_ENDPOINT=http://${{ steps.vars.outputs.fqdn_record }}" | tee -a "$GITHUB_ENV"
          echo "POSTHOG_EVENT_ENDPOINT=http://${{ steps.vars.outputs.fqdn_record }}" | tee -a "$GITHUB_ENV"
      - name: Run ingestion test using k6
        uses: k6io/[email protected]
        with:
          filename: ci/k6/ingestion-test.js
      - name: Emit namespace report
        uses: jupyterhub/action-k8s-namespace-report@v1
        if: always()
        with:
          namespace: posthog
      - name: Delete the k8s cluster and all the associated resources
        if: ${{ always() && steps.k8s_cluster_creation.outcome == 'success' }}
        run: |
          doctl k8s cluster delete \
            --dangerous \
            --force \
            ${{ steps.vars.outputs.k8s_cluster_name }}
      - name: Delete the DNS record
        if: ${{ always() && steps.dns_creation.outcome == 'success' }}
        run: |
          DNS_RECORD_ID=$(doctl compute domain records list posthog.cc --no-header --format ID,Name | grep ${{ steps.vars.outputs.dns_record }} | awk '{print $1}')
          doctl compute domain records delete \
            posthog.cc \
            --force \
            "$DNS_RECORD_ID"