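# docker-compose.test.yml
#
# Boots a throwaway single-node k3s cluster plus the y-stack proxy, then runs
# the test suite in the "sut" (system under test) service, following the
# docker-compose.test.yml convention where sut's exit code decides pass/fail.
# A typical invocation (an assumption, not mandated by this file):
#
#   docker-compose -f docker-compose.test.yml up --build --exit-code-from sut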
version: '2.4'
services:
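  # cleanup: removes any stale kubeconfig from the shared "admin" volume so
  # that sut and ystack-proxy wait for the one written by a fresh master1.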
cleanup:
image: busybox@sha256:95cf004f559831017cdf4628aaf1bb30133677be8702a8c5f2994629f637a209
command:
- sh
- -ce
- |
[ ! -f /admin/.kube/kubeconfig.yaml ] || rm -v /admin/.kube/kubeconfig.yaml
echo "Done. Exiting."
volumes:
- admin:/admin
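  # master1: single-node k3s server. The apiserver listens on the custom port
  # 17143 (--https-listen-port); 8472/udp is flannel VXLAN and 10250 is the
  # kubelet, both standard k3s ports. Memory is capped at ~1.8 GB.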
master1:
hostname: master1
build:
context: .
dockerfile: ./k3s/docker-image/Dockerfile
image: yolean/ystack-runner:${GIT_COMMIT:-latest}-k3s
command:
- server
- --https-listen-port=17143
- --tls-san=ystack.local
- --node-name=master1
- --no-deploy=servicelb
- --kube-apiserver-arg
- service-node-port-range=31710-31719
tmpfs:
- /run
- /var/run
privileged: true
environment:
- K3S_TOKEN=somethingtotallyrandom
- K3S_KUBECONFIG_OUTPUT=/admin/.kube/kubeconfig.yaml
- K3S_KUBECONFIG_MODE=666
expose:
- 17143
- 8472
- 10250
volumes:
- k3s-server:/var/lib/rancher/k3s
- admin:/admin
mem_limit: 1800000000
memswap_limit: 0
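  # ystack-proxy: waits for the kubeconfig, installs the y-stack essentials
  # into the cluster, and exposes in-cluster endpoints to the compose network.
  # 9090/9093 are likely Prometheus/Alertmanager (the conventional ports; an
  # assumption here), and 80 presumably fronts the builds registry.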
ystack-proxy:
hostname: ystack-proxy
depends_on:
- master1
build:
context: .
dockerfile: ./k3s/docker-ystack-proxy/Dockerfile
image: yolean/ystack-runner:${GIT_COMMIT:-latest}-proxy
environment:
- KUBECONFIG_WAIT=30
      # One buildkitd replica for now; the longer-term plan is automated scale
      # to demand, idling at zero replicas by default
      - BUILDKITD_REPLICAS=1
expose:
- 80
- 8547
- 9090
- 9093
volumes:
- admin:/admin
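  # sut: runs the actual tests. The links below alias the in-cluster DNS names
  # (*.ystack.svc.cluster.local) to ystack-proxy, so the same URLs resolve both
  # inside the cluster and from this container.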
sut:
links:
- ystack-proxy:builds-registry.ystack.svc.cluster.local
- ystack-proxy:buildkitd.ystack.svc.cluster.local
- ystack-proxy:monitoring.ystack.svc.cluster.local
build:
context: .
      # Note: with the current state of https://github.com/docker/docker-py/issues/2230,
      # the build-specific dockerignore must be symlinked to ./.dockerignore
dockerfile: runner.Dockerfile
image: yolean/ystack-runner:${GIT_COMMIT:-latest}
environment:
- KUBECONFIG_WAIT=30
- KEEP_RUNNING=false
- EXAMPLES=sync-only sync-to-runtime in-cluster-build basic-dev-inner-loop
- CI=true
volumes:
- admin:/admin
- ./examples:/usr/local/src/ystack/examples
- ./specs:/usr/local/src/ystack/specs
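    # In the inline script below, $$ is compose's escape for a literal $, so
    # expansion happens in the container shell rather than at compose time;
    # bash -cx traces each command into the test output.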
entrypoint:
- /bin/bash
- -cx
command:
- |
[ "$$KEEP_RUNNING" = "true" ] || set -e
        mkdir -p ~/.kube
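        # Wait (up to KUBECONFIG_WAIT seconds) for master1 to publish the
        # kubeconfig on the shared admin volume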
until test -f /admin/.kube/kubeconfig.yaml; do
          [ $$KUBECONFIG_WAIT -gt 0 ] || exit ${BUILD_EXIT_CODE_ON_NO_CLUSTER:-0}
KUBECONFIG_WAIT=$$(( $$KUBECONFIG_WAIT - 1 ))
echo "Waiting for a kubeconfig ..." && sleep 1
done
        sed 's|127.0.0.1|master1|' /admin/.kube/kubeconfig.yaml > ~/.kube/config
kubectl-waitretry --for=condition=Ready node --all
kubectl get nodes
echo "Our ystack-proxy image should install ystack essentials ..."
kubectl-waitretry --for=condition=Ready -n ystack pod -l app=minio
kubectl-waitretry --for=condition=Ready -n ystack pod -l ystack-builds-registry=http
kubectl-waitretry --for=condition=Ready -n ystack pod -l app=buildkitd
curl -f --retry 5 http://builds-registry.ystack.svc.cluster.local/v2/
y-cluster-assert-install --context=default
for EXAMPLE in $$EXAMPLES; do
echo "# Running example $$EXAMPLE ..."
cd /usr/local/src/ystack/examples/$$EXAMPLE
y-skaffold run
done
echo "# Running main specs ..."
kubectl create namespace ystack-specs
kubectl config set-context --current --namespace=ystack-specs
(cd /usr/local/src/ystack/specs && y-assert)
if [ "$$KEEP_RUNNING" = "true" ]; then
echo "Will stay running for manual work"
sleep infinity
fi
mem_limit: 80000000
memswap_limit: 0
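# Named volumes: k3s-server persists k3s state; admin carries the generated
# kubeconfig between containers (written by master1, wiped by cleanup, read by
# ystack-proxy and sut).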
volumes:
k3s-server: {}
admin: {}