test-k6.yaml
name: e2e - k6 ingestion test

on:
  workflow_dispatch:
  pull_request:
    paths:
      - ci/**
      - charts/**
      - .github/workflows/test-k6.yaml
  push:
    branches:
      - main

jobs:
  test-chart:
    runs-on: ubuntu-20.04
    timeout-minutes: 60
    strategy:
      # Keep running even if one variation of the job fails
      fail-fast: false
      matrix:
        # We run this job multiple times with the parameterizations specified
        # below; these parameters have no meaning on their own and gain
        # meaning from how the job steps use them.
        include:
          - k3s-channel: v1.26
            test: install
            values: ci/values/k3s.yaml
          - k3s-channel: v1.25
            test: install
            values: ci/values/k3s.yaml
          - k3s-channel: v1.24
            test: install
            values: ci/values/k3s.yaml
          # We run an upgrade test where we first install an already released
          # Helm chart version and then upgrade to the version we are now
          # testing. We test upgrading from the latest main version.
          #
          # It can be very useful to see the "Helm diff" step's output from the
          # latest dev version.
          - k3s-channel: v1.26
            test: upgrade
            values: ci/values/k3s.yaml
          # We run one install test with split plugin-server ingestion/async
          - k3s-channel: v1.26
            test: install
            values: ci/values/k3s-plugin-server-ingestion-async-split.yaml
          # We run one install test with split plugin-server ingestion/exports/jobs/scheduler
          - k3s-channel: v1.26
            test: install
            values: ci/values/k3s-plugin-server-split.yaml
    steps:
      - uses: actions/checkout@v3
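
      # Install a lightweight single-node Kubernetes cluster (k3s) on the
      # runner, with traefik and metrics-server disabled, and export its
      # kubeconfig so kubectl/helm in the following steps can reach the cluster.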
      - name: Setup k3s
        run: |
          curl -sfL https://get.k3s.io | INSTALL_K3S_CHANNEL=${{ matrix.k3s-channel }} INSTALL_K3S_EXEC="--disable=traefik --disable=metrics-server" sh -
          mkdir ~/.kube
          sudo k3s kubectl config view --raw > ~/.kube/config

      - uses: actions/checkout@v3
        if: matrix.test == 'upgrade'
        with:
          ref: main
          path: main
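
      # For the upgrade variant, the `main` branch checkout above (placed in
      # ./main) provides the chart that is installed first, so installing the
      # local chart afterwards exercises a real upgrade path.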
      - name: (Upgrade) Install our helm chart from main branch
        if: matrix.test == 'upgrade'
        run: |
          helm upgrade --install -f ${{ matrix.values }} --timeout 30m --create-namespace --namespace posthog posthog ./main/charts/posthog --wait --wait-for-jobs --debug

      - name: "(Upgrade) Install helm diff"
        if: matrix.test == 'upgrade'
        run: |
          helm plugin install https://github.com/databus23/helm-diff --version 3.1.3

      - name: "(Upgrade) Helm diff - 'main' branch VS local version"
        if: matrix.test == 'upgrade'
        run: |
          helm diff upgrade --install -f ${{ matrix.values }} --namespace posthog posthog ./charts/posthog --debug

      - name: "(Upgrade) Await chart install"
        if: matrix.test == 'upgrade'
        uses: jupyterhub/action-k8s-await-workloads@main
        with:
          namespace: "posthog"
          timeout: 300
          max-restarts: 10
      - name: Install our helm chart
        run: |
          helm upgrade --install -f ${{ matrix.values }} --timeout 30m --create-namespace --namespace posthog posthog ./charts/posthog --wait --wait-for-jobs --debug

      #
      # Wait for all k8s resources to be ready.
      #
      # Despite the --wait flag used in the command above,
      # there is no guarantee that all the resources will be deployed
      # when the command returns.
      #
      # Why can't we directly use the 'action-k8s-await-workloads' step below?
      # Because it's not working for this use case.
      #
      # ref: https://github.com/jupyterhub/action-k8s-await-workloads/issues/38
      #
      - name: Workaround - wait for all the PostHog resources in k8s to be ready
        timeout-minutes: 15
        run: ./ci/wait_for_all_the_posthog_resources_to_be_ready.sh
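
      # A rough equivalent of the script above, assuming the default kubeconfig
      # points at the k3s cluster, would be:
      #   kubectl wait --namespace posthog --for=condition=Ready pods --all --timeout=900s
      # The actual checks live in ci/wait_for_all_the_posthog_resources_to_be_ready.sh.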
      - name: Wait until all the resources are fully deployed in k8s
        uses: jupyterhub/action-k8s-await-workloads@main
        with:
          namespace: "posthog"
          timeout: 300
          max-restarts: 10

      - name: Setup PostHog for the ingestion test
        run: ./ci/setup_ingestion_test.sh
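
      # Values appended to "$GITHUB_ENV" become environment variables for all
      # subsequent steps, which is how the k6 step below picks up the PostHog
      # endpoints to target.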
      - name: Set PostHog endpoints to use for the ingestion test
        run: |
          POSTHOG_INGRESS=$(kubectl get --namespace posthog ingress posthog -o jsonpath="{.status.loadBalancer.ingress[0].ip}")
          echo "POSTHOG_API_ENDPOINT=http://${POSTHOG_INGRESS}" | tee -a "$GITHUB_ENV"
          echo "POSTHOG_EVENT_ENDPOINT=http://${POSTHOG_INGRESS}" | tee -a "$GITHUB_ENV"
          # Skip the source ip address check in ingestion-test.js
          echo "SKIP_SOURCE_IP_ADDRESS_CHECK=true" | tee -a "$GITHUB_ENV"

      - name: Run ingestion test using k6
        uses: k6io/[email protected]
        with:
          filename: ci/k6/ingestion-test.js
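
      # ingestion-test.js is expected to send events to POSTHOG_EVENT_ENDPOINT
      # and check via POSTHOG_API_ENDPOINT that they were ingested; see
      # ci/k6/ingestion-test.js for the actual assertions.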
      - name: Emit namespace report
        uses: jupyterhub/action-k8s-namespace-report@v1
        if: always()
        with:
          namespace: posthog
          important-workloads: deployments/posthog-plugins