# test-suite.yaml
name: Lagoon tests
on: pull_request
jobs:
# runs for lagoon-core, lagoon-remote, lagoon-test
test-suite:
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
test:
- active-standby-kubernetes
- api
- deploytarget
- features-kubernetes
- features-kubernetes-2
- features-variables
- services
- tasks
## Re-enable any of these tests in your branch for specific testing
## - bitbucket
## - bulk-deployment
## - drush
## - generic
## - github
## - gitlab
## - image-cache
## - nginx
## - node
## - python
## - ssh-legacy
## - workflows
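# Each test above runs as a separate matrix job; fail-fast is disabled so one
# failing suite does not cancel the others.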
steps:
# Continue after getting a shell via: `touch continue`
- name: Setup tmate session
uses: mxschmitt/action-tmate@v3
timeout-minutes: 1
continue-on-error: true
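# The tmate session above times out after one minute if unused, and
# continue-on-error ensures it never fails the job.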
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: "0"
- name: Set up chart-testing dependencies
run: sudo apt-get -y install python3-wheel
- name: Set up chart-testing
uses: helm/[email protected]
- name: Run chart-testing (list-changed)
id: list-changed
run: |
changed=$(ct list-changed --config ./test-suite-lint.ct.yaml)
if [[ "$changed" ]]; then
echo "changed=true" >> $GITHUB_OUTPUT
echo "$changed"
fi
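# The remaining setup and install steps only run when ct detects changed charts,
# or when the PR is labelled needs-testing or next-release.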
- name: Configure node IP in kind-config.yaml
if: |
(steps.list-changed.outputs.changed == 'true') ||
(contains(github.event.pull_request.labels.*.name, 'needs-testing')) ||
(contains(github.event.pull_request.labels.*.name, 'next-release'))
run: |
docker network create kind
LAGOON_KIND_CIDR_BLOCK=$(docker network inspect kind | jq '. [0].IPAM.Config[0].Subnet' | tr -d '"')
export KIND_NODE_IP=$(echo ${LAGOON_KIND_CIDR_BLOCK%???} | awk -F'.' '{print $1,$2,$3,240}' OFS='.')
envsubst < test-suite.kind-config.yaml.tpl > test-suite.kind-config.yaml
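# The step above pins KIND_NODE_IP to the .240 address of the kind docker network:
# e.g. a subnet of 172.18.0.0/16 yields KIND_NODE_IP=172.18.0.240.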
- name: Create kind cluster
uses: helm/[email protected]
if: |
(steps.list-changed.outputs.changed == 'true') ||
(contains(github.event.pull_request.labels.*.name, 'needs-testing')) ||
(contains(github.event.pull_request.labels.*.name, 'next-release'))
with:
version: v0.24.0
node_image: kindest/node:v1.30.4@sha256:976ea815844d5fa93be213437e3ff5754cd599b040946b5cca43ca45c2047114
kubectl_version: v1.30.4
config: test-suite.kind-config.yaml
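# kind (v0.24.0) and the node image are pinned, the latter by digest, so the
# cluster is reproducible between runs.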
- name: Check node IP matches kind configuration
if: |
(steps.list-changed.outputs.changed == 'true') ||
(contains(github.event.pull_request.labels.*.name, 'needs-testing')) ||
(contains(github.event.pull_request.labels.*.name, 'next-release'))
run: |
LAGOON_KIND_CIDR_BLOCK=$(docker network inspect kind | jq '. [0].IPAM.Config[0].Subnet' | tr -d '"')
NODE_IP=$(echo ${LAGOON_KIND_CIDR_BLOCK%???} | awk -F'.' '{print $1,$2,$3,240}' OFS='.')
echo Checking for NODE_IP "$NODE_IP"
grep $NODE_IP test-suite.kind-config.yaml
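# grep exits non-zero if the rendered kind config no longer contains the expected
# node IP, failing the job before any charts are installed.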
- name: Add dependency chart repos
if: |
(steps.list-changed.outputs.changed == 'true') ||
(contains(github.event.pull_request.labels.*.name, 'needs-testing')) ||
(contains(github.event.pull_request.labels.*.name, 'next-release'))
run: |
helm repo add harbor https://helm.goharbor.io
helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
helm repo add stable https://charts.helm.sh/stable
helm repo add bitnami https://charts.bitnami.com/bitnami
helm repo add amazeeio https://amazeeio.github.io/charts/
helm repo add lagoon https://uselagoon.github.io/lagoon-charts/
helm repo add nats https://nats-io.github.io/k8s/helm/charts/
helm repo add metallb https://metallb.github.io/metallb
helm repo add jetstack https://charts.jetstack.io
helm repo add jouve https://jouve.github.io/charts/
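# These repositories provide the dependency charts (harbor, ingress-nginx,
# cert-manager via jetstack, nats, metallb, etc.) pulled in during the install.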
- name: Install gojq
if: |
(steps.list-changed.outputs.changed == 'true') ||
(contains(github.event.pull_request.labels.*.name, 'needs-testing')) ||
(contains(github.event.pull_request.labels.*.name, 'next-release'))
run: |
cd /tmp
curl -sSLO https://github.com/itchyny/gojq/releases/download/v0.12.16/gojq_v0.12.16_linux_amd64.tar.gz
tar -xf ./gojq_v0.12.16_linux_amd64.tar.gz
sudo cp /tmp/gojq_v0.12.16_linux_amd64/gojq /usr/local/bin/jq
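# gojq (a Go implementation of jq) is copied to /usr/local/bin/jq, shadowing the
# preinstalled jq so later jq invocations in this workflow resolve to it.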
- name: Install kubens and kubectl alias
run: |
cd /tmp
curl -sSLO https://github.com/ahmetb/kubectx/releases/download/v0.9.5/kubens_v0.9.5_linux_x86_64.tar.gz
tar -xf ./kubens_v0.9.5_linux_x86_64.tar.gz
sudo cp /tmp/kubens /usr/local/bin/kubens
sudo ln -s $(which kubectl) /usr/local/bin/kc
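# kubens and the kc shorthand above are mainly conveniences for interactive
# (tmate) debugging; the scripted steps below call kubectl directly.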
- name: Helm-install the test fixtures and fill lagoon-test/ci/linter-values.yaml (needs-testing)
if: |
(steps.list-changed.outputs.changed == 'true' && !contains(github.event.pull_request.labels.*.name, 'next-release')) ||
(contains(github.event.pull_request.labels.*.name, 'needs-testing'))
run: make -j8 -O fill-test-ci-values TESTS=[${{ matrix.test }}]
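# TESTS is a single-element list here, so each matrix job prepares and runs only
# its own test suite.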
- name: Helm-install the test fixtures and fill lagoon-test/ci/linter-values.yaml (next-release)
if: |
(steps.list-changed.outputs.changed == 'true') &&
(contains(github.event.pull_request.labels.*.name, 'next-release'))
run: |
yq eval-all --inplace 'select(fileIndex == 0) * select(fileIndex == 1)' ./charts/lagoon-core/ci/linter-values.yaml ./charts/lagoon-core/ci/testlagoon-main-override.yaml
make -j8 -O fill-test-ci-values TESTS=[${{ matrix.test }}] IMAGE_REGISTRY=testlagoon IMAGE_TAG=main OVERRIDE_BUILD_DEPLOY_DIND_IMAGE=uselagoon/build-deploy-image:main OVERRIDE_ACTIVE_STANDBY_TASK_IMAGE=testlagoon/task-activestandby:main
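# For next-release PRs the testlagoon-main overrides are merged into the linter
# values and the charts are installed with images tagged `main` from the
# testlagoon registry, plus matching build-deploy and active-standby task images.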
- name: Free up some disk space
if: |
(steps.list-changed.outputs.changed == 'true') ||
(contains(github.event.pull_request.labels.*.name, 'needs-testing')) ||
(contains(github.event.pull_request.labels.*.name, 'next-release'))
run: docker system prune -f -a --volumes
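# Pruning unused images and volumes frees disk space on the runner before the
# resource-heavy chart installs.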
- name: Run chart-testing (install) on lagoon-test
if: |
(steps.list-changed.outputs.changed == 'true') ||
(contains(github.event.pull_request.labels.*.name, 'needs-testing')) ||
(contains(github.event.pull_request.labels.*.name, 'next-release'))
run: |
ct lint --config ./test-suite-run.ct.yaml
ct install --config ./test-suite-run.ct.yaml --helm-extra-args "--timeout 60m"
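# ct lint validates the changed charts, then ct install deploys them into the
# kind cluster and runs their helm tests, allowing helm operations up to 60 minutes.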
# the following steps gather various debug information on test failure
- name: Inspect lagoon-test pods
if: failure()
run: |
kubectl get pods -A --selector=app.kubernetes.io/name=lagoon-test
kubectl describe pods --namespace=lagoon-core --selector=app.kubernetes.io/name=lagoon-test
kubectl logs --namespace=lagoon-core --prefix --timestamps --tail=-1 --all-containers --selector=app.kubernetes.io/name=lagoon-test
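# --tail=-1 lifts kubectl's default 10-line cap that applies when logs are
# selected by label, so the full container logs are captured.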
- name: Inspect lagoon-remote and lagoon-build-deploy pods
if: failure()
run: |
kubectl get pods -A -l 'app.kubernetes.io/instance in (lagoon-remote, lagoon-build-deploy)'
kubectl describe pods --namespace=lagoon -l 'app.kubernetes.io/instance in (lagoon-remote, lagoon-build-deploy)'
kubectl logs --namespace=lagoon --prefix --timestamps --tail=-1 --all-containers -l 'app.kubernetes.io/instance in (lagoon-remote, lagoon-build-deploy)'
- name: Inspect lagoon-core pods
if: failure()
run: |
kubectl get pods -A --selector=app.kubernetes.io/instance=lagoon-core
kubectl describe pods --namespace=lagoon-core --selector=app.kubernetes.io/instance=lagoon-core
kubectl logs --namespace=lagoon-core --prefix --timestamps --tail=-1 --all-containers --selector=app.kubernetes.io/instance=lagoon-core
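# Namespaces created by the test deployments are prefixed with "ci"; the remaining
# steps iterate over them using the jq match("^ci") filter.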
- name: Inspect any remaining CI namespaces
if: failure()
run: |
for ns in $(kubectl get ns -o json | jq -r '.items[].metadata.name | select(match("^ci"))'); do
kubectl get events --sort-by=metadata.creationTimestamp --namespace=$ns
kubectl get pods --output=wide --namespace=$ns
kubectl describe pods --namespace=$ns
done
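# Lagoon labels build/task pods with lagoon.sh/jobType and the workloads they
# deploy with lagoon.sh/version; the next two steps split the logs accordingly.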
- name: Gather build logs of any remaining CI namespaces
if: failure()
run: |
for ns in $(kubectl get ns -o json | jq -r '.items[].metadata.name | select(match("^ci"))'); do
kubectl logs --tail=80 --namespace=$ns --prefix --timestamps --all-containers --selector=lagoon.sh/jobType
done
- name: Gather workload logs of any remaining CI namespaces
if: failure()
run: |
for ns in $(kubectl get ns -o json | jq -r '.items[].metadata.name | select(match("^ci"))'); do
kubectl logs --tail=80 --namespace=$ns --prefix --timestamps --all-containers --selector=lagoon.sh/version
done