add variable to control ssh portal and nats #1424

Workflow file for this run

name: Lagoon tests
on: pull_request
jobs:
  # runs for lagoon-core, lagoon-remote, lagoon-test
  test-suite:
    runs-on: ubuntu-20.04
    strategy:
      fail-fast: false
      matrix:
        test: [
          { test: "active-standby-kubernetes", sshportal: false },
          { test: "api", sshportal: false },
          { test: "deploytarget", sshportal: false },
          { test: "features-kubernetes", sshportal: true },
          { test: "features-kubernetes-2", sshportal: false },
          { test: "features-variables", sshportal: false },
          { test: "services", sshportal: false },
          { test: "tasks", sshportal: false }
        ]
        ## Re-enable any of these tests in your branch for specific testing
        ## { test: "bitbucket", sshportal: false },
        ## { test: "bulk-deployment", sshportal: false },
        ## { test: "drush", sshportal: true },
        ## { test: "generic", sshportal: false },
        ## { test: "github", sshportal: false },
        ## { test: "gitlab", sshportal: false },
        ## { test: "image-cache", sshportal: false },
        ## { test: "nginx", sshportal: false },
        ## { test: "node", sshportal: false },
        ## { test: "python", sshportal: false },
        ## { test: "ssh-legacy", sshportal: false },
        ## { test: "workflows", sshportal: false },
    steps:
      # Continue after getting a shell via: `touch continue`
      - name: Setup tmate session
        uses: mxschmitt/action-tmate@v3
        timeout-minutes: 1
        continue-on-error: true
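      # With timeout-minutes: 1 and continue-on-error: true, this step pauses
      # the job for at most a minute; if nobody attaches over SSH in that
      # window the step times out and the workflow simply moves on, so the
      # debug hook is cheap to leave enabled.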
      - name: Checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: "0"
      - name: Set up chart-testing dependencies
        run: sudo apt-get -y install python3-wheel
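      # python3-wheel is presumably here so the chart-testing action can
      # pip-install its Python lint dependencies (yamllint/yamale) without
      # building wheels from source -- an assumption, not documented in this
      # workflow.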
      - name: Set up chart-testing
        uses: helm/chart-testing-action@v2.6.1 # assumed version pin; the original was mangled
      - name: Run chart-testing (list-changed)
        id: list-changed
        run: |
          changed=$(ct list-changed --config ./test-suite-lint.ct.yaml)
          if [[ "$changed" ]]; then
            echo "changed=true" >> $GITHUB_OUTPUT
            echo "$changed"
          fi
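      # The list-changed output gates all the expensive steps below: they run
      # only when a chart actually changed, or when the PR carries the
      # `needs-testing` label.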
      - name: Configure node IP in kind-config.yaml
        if: |
          (steps.list-changed.outputs.changed == 'true') ||
          (contains(github.event.pull_request.labels.*.name, 'needs-testing'))
        run: |
          docker network create kind
          export KIND_NODE_IP=$(docker run --network kind --rm alpine ip -o addr show eth0 | sed -nE 's/.* ([0-9.]{7,})\/.*/\1/p')
          envsubst < test-suite.kind-config.yaml.tpl > test-suite.kind-config.yaml
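      # The throwaway alpine container joins the pre-created `kind` docker
      # network, and the sed expression captures its eth0 IPv4 address, e.g.
      # "2: eth0    inet 172.18.0.2/16 ..." yields "172.18.0.2" (address is
      # illustrative). Kind should assign the same address to the first
      # cluster node started on that network, and envsubst bakes it into the
      # rendered kind config.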
      - name: Create kind cluster
        uses: helm/kind-action@v1.8.0 # assumed version pin; the original was mangled
        if: |
          (steps.list-changed.outputs.changed == 'true') ||
          (contains(github.event.pull_request.labels.*.name, 'needs-testing'))
        with:
          version: v0.20.0
          node_image: kindest/node:v1.27.3@sha256:3966ac761ae0136263ffdb6cfd4db23ef8a83cba8a463690e98317add2c9ba72
          kubectl_version: v1.27.3
          config: test-suite.kind-config.yaml
      - name: Check node IP matches kind configuration
        if: |
          (steps.list-changed.outputs.changed == 'true') ||
          (contains(github.event.pull_request.labels.*.name, 'needs-testing'))
        run: |
          NODE_IP="$(kubectl get nodes -o jsonpath='{.items[0].status.addresses[0].address}')"
          echo Checking for NODE_IP "$NODE_IP"
          grep -F "$NODE_IP" test-suite.kind-config.yaml
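      # Sanity check: the node must have received exactly the IP templated
      # into the kind config above, otherwise IP-dependent fixtures would
      # silently point at the wrong address. grep -F matches the address
      # literally so its dots are not treated as regex wildcards.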
      - name: Add dependency chart repos
        if: |
          (steps.list-changed.outputs.changed == 'true') ||
          (contains(github.event.pull_request.labels.*.name, 'needs-testing'))
        run: |
          helm repo add harbor https://helm.goharbor.io
          helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
          helm repo add stable https://charts.helm.sh/stable
          helm repo add bitnami https://charts.bitnami.com/bitnami
          helm repo add amazeeio https://amazeeio.github.io/charts/
          helm repo add lagoon https://uselagoon.github.io/lagoon-charts/
          helm repo add nats https://nats-io.github.io/k8s/helm/charts/
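      # The nats repo presumably supplies the NATS chart backing the
      # ssh-portal components that this PR's variable toggles (an inference
      # from the PR title; the dependency wiring lives in the charts
      # themselves).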
      - name: Install gojq
        if: |
          (steps.list-changed.outputs.changed == 'true') ||
          (contains(github.event.pull_request.labels.*.name, 'needs-testing'))
        run: |
          cd /tmp
          curl -sSLO https://github.com/itchyny/gojq/releases/download/v0.12.13/gojq_v0.12.13_linux_amd64.tar.gz
          tar -xf ./gojq_v0.12.13_linux_amd64.tar.gz
          sudo cp /tmp/gojq_v0.12.13_linux_amd64/gojq /usr/local/bin/jq
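      # Note the rename: gojq is installed as /usr/local/bin/jq, which takes
      # PATH precedence over the distro jq, so every later `jq` invocation in
      # this workflow actually runs gojq.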
      - name: Install kubens and kubectl alias
        run: |
          cd /tmp
          curl -sSLO https://github.com/ahmetb/kubectx/releases/download/v0.9.5/kubens_v0.9.5_linux_x86_64.tar.gz
          tar -xf ./kubens_v0.9.5_linux_x86_64.tar.gz
          sudo cp /tmp/kubens /usr/local/bin/kubens
          sudo ln -s $(which kubectl) /usr/local/bin/kc
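      # kubens and the short `kc` symlink are conveniences for anyone
      # debugging interactively through the tmate session above; none of the
      # scripted steps below depend on them.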
      - name: Set SSH Portal environment variable
        run: |
          echo "ENABLE_SSH_PORTAL=${{ matrix.test.sshportal }}" >> $GITHUB_ENV
      - name: Helm-install the test fixtures and fill lagoon-test/ci/linter-values.yaml
        if: |
          (steps.list-changed.outputs.changed == 'true') ||
          (contains(github.event.pull_request.labels.*.name, 'needs-testing'))
        run: make -j8 -O fill-test-ci-values TESTS=[${{ matrix.test.test }}] IMAGE_REGISTRY=testlagoon IMAGE_TAG=main
        # run: make -j8 -O fill-test-ci-values TESTS=[${{ matrix.test.test }}]
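      # make -j8 -O runs the fixture installs in parallel with output grouped
      # per target; TESTS narrows the run to the single matrix test, and the
      # commented-out variant keeps the charts' default image registry/tag
      # instead of pinning testlagoon/main.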
      - name: Free up some disk space
        if: |
          (steps.list-changed.outputs.changed == 'true') ||
          (contains(github.event.pull_request.labels.*.name, 'needs-testing'))
        run: docker system prune -f -a --volumes
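      # Reclaims host disk for the long test install; running containers
      # (including the kind node itself) and the images they use are never
      # pruned, so the cluster is unaffected.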
      - name: Run chart-testing (install) on lagoon-test
        if: |
          (steps.list-changed.outputs.changed == 'true') ||
          (contains(github.event.pull_request.labels.*.name, 'needs-testing'))
        run: |
          ct lint --config ./test-suite-run.ct.yaml
          ct install --config ./test-suite-run.ct.yaml --helm-extra-args "--timeout 60m"
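      # `ct install` installs the lagoon-test chart and runs its helm tests;
      # the 60m helm timeout allows for the full Lagoon deploy-and-test
      # cycle, which is far slower than a typical chart install.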
      # the following steps gather various debug information on test failure
      - name: Inspect lagoon-test pods
        if: failure()
        run: |
          kubectl get pods -A --selector=app.kubernetes.io/name=lagoon-test
          kubectl describe pods --namespace=lagoon --selector=app.kubernetes.io/name=lagoon-test
          kubectl logs --namespace=lagoon --prefix --timestamps --tail=-1 --all-containers --selector=app.kubernetes.io/name=lagoon-test
      - name: Inspect lagoon-remote and lagoon-build-deploy pods
        if: failure()
        run: |
          kubectl get pods -A -l 'app.kubernetes.io/instance in (lagoon-remote, lagoon-build-deploy)'
          kubectl describe pods --namespace=lagoon -l 'app.kubernetes.io/instance in (lagoon-remote, lagoon-build-deploy)'
          kubectl logs --namespace=lagoon --prefix --timestamps --tail=-1 --all-containers -l 'app.kubernetes.io/instance in (lagoon-remote, lagoon-build-deploy)'
      - name: Inspect lagoon-core pods
        if: failure()
        run: |
          kubectl get pods -A --selector=app.kubernetes.io/instance=lagoon-core
          kubectl describe pods --namespace=lagoon --selector=app.kubernetes.io/instance=lagoon-core
          kubectl logs --namespace=lagoon --prefix --timestamps --tail=-1 --all-containers --selector=app.kubernetes.io/instance=lagoon-core
      - name: Inspect any remaining CI namespaces
        if: failure()
        run: |
          for ns in $(kubectl get ns -o json | jq -r '.items[].metadata.name | select(match("^ci"))'); do
            kubectl get events --sort-by=metadata.creationTimestamp --namespace=$ns
            kubectl get pods --output=wide --namespace=$ns
            kubectl describe pods --namespace=$ns
          done
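      # Namespaces prefixed "ci" are presumably the per-test environments the
      # suite deploys into; the label selectors below distinguish build/task
      # pods (lagoon.sh/jobType) from deployed workloads (lagoon.sh/version).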
      - name: Gather build logs of any remaining CI namespaces
        if: failure()
        run: |
          for ns in $(kubectl get ns -o json | jq -r '.items[].metadata.name | select(match("^ci"))'); do
            kubectl logs --tail=80 --namespace=$ns --prefix --timestamps --all-containers --selector=lagoon.sh/jobType
          done
      - name: Gather workload logs of any remaining CI namespaces
        if: failure()
        run: |
          for ns in $(kubectl get ns -o json | jq -r '.items[].metadata.name | select(match("^ci"))'); do
            kubectl logs --tail=80 --namespace=$ns --prefix --timestamps --all-containers --selector=lagoon.sh/version
          done