-
Notifications
You must be signed in to change notification settings - Fork 46
/
docker-compose.yml
203 lines (202 loc) · 7.73 KB
/
docker-compose.yml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
---
# Local development stack for swatch: PostgreSQL, Kafka (Kraft), Kafka REST/bridge/UI,
# host-based inventory (API + MQ consumer), and an Unleash feature-flag server.
# All inter-service hostnames (db, kafka, unleash, kafka-rest) are aliases on the
# user-defined bridge network "swatch-network" declared at the bottom of this file.
version: '3.8'
services:
  db:
    # Use the same version as in https://gitlab.cee.redhat.com/service/app-interface/-/blob/ff61d457898da76ebd4abf21fe3ce7b5c74c87a5/data/services/insights/rhsm/namespaces/rhsm-prod.yml#L179
    # When updating the image, remember to also update the following locations:
    # - SwatchPostgreSQLContainer.POSTGRESQL_IMAGE
    # - .github/workflows/validate-floorplan-queries.yaml (step "Setup Postgresql Database")
    image: quay.io/centos7/postgresql-12-centos7:centos7
    environment:
      - POSTGRES_HOST_AUTH_METHOD=trust
      - POSTGRESQL_MAX_CONNECTIONS=5000
      - POSTGRESQL_ADMIN_PASSWORD=admin
    healthcheck:
      test: ["CMD", "pg_isready", "--username=postgres", "--host=127.0.0.1", "--port=5432"]
      interval: 2s
      timeout: 1m
      retries: 5
      start_period: 10s
    volumes:
      - ./init_dbs.sh:/usr/share/container-scripts/postgresql/init/init_dbs.sh:z
      - ./postgresql.conf:/opt/app-root/src/postgresql-cfg/postgresql.conf:z
      - ./pg_hba.conf:/pg_hba.conf:z
    ports:
      - "127.0.0.1:5432:5432"
    networks:
      swatch-network:
        aliases:
          - db
  kafka:
    container_name: swatch-kafka
    image: quay.io/strimzi/kafka:latest-kafka-3.1.0
    command: sh /init_kafka.sh
    environment:
      # Enable SSL debugging
      # - KAFKA_OPTS=-Djavax.net.debug=ssl,handshake,data,trustmanager,keymanager
      - KAFKA_LISTENER_SECURITY_PROTOCOL_MAP=CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT,SASL_SSL:SASL_SSL
      - KAFKA_LISTENERS=PLAINTEXT://:29092,PLAINTEXT_HOST://:9092,CONTROLLER://:9093,SASL_SSL://:9094
      - KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://kafka:29092,PLAINTEXT_HOST://localhost:9092,SASL_SSL://localhost:9094
      - LOG_DIR=/tmp/logs
      - KAFKA_SSL_CLIENT_AUTH=required
      - KAFKA_SSL_KEYSTORE_LOCATION=/etc/kafka/secrets/certs/server.jks
      - KAFKA_SSL_KEYSTORE_PASSWORD=password
      - KAFKA_SSL_TRUSTSTORE_LOCATION=/etc/kafka/secrets/certs/test-ca.jks
      - KAFKA_SSL_TRUSTSTORE_PASSWORD=password
      # Passed directly to the JVM. Place SSL debug options here (e.g. -Djavax.net.debug=ssl)
      - KAFKA_OPTS=-Djava.security.auth.login.config=/etc/kafka/kafka_server_jaas.conf
      # Note that as of this writing (20 Jul 2022), Kraft mode does not support SCRAM
      - KAFKA_SASL_MECHANISM=PLAIN
    healthcheck:
      test: ./bin/kafka-cluster.sh cluster-id --bootstrap-server 127.0.0.1:9092 || exit 1
      interval: 2s
      timeout: 1m
      retries: 5
      start_period: 10s
    ports:
      - "127.0.0.1:9092:9092"
      - "[::1]:9092:9092"
      # Port 9093 is used by the Kraft configuration
      - "127.0.0.1:9094:9094"
      - "127.0.0.1:29092:29092"
    volumes:
      - ./config/kafka/init_kafka.sh:/init_kafka.sh:z
      - ./config/kafka/kafka_server_jaas.conf:/etc/kafka/kafka_server_jaas.conf:z
      - ./config/kafka/:/etc/kafka/secrets/certs:z
    networks:
      swatch-network:
        aliases:
          - kafka
    user: root
  kafka-rest:
    image: docker.io/confluentinc/cp-kafka-rest
    environment:
      - KAFKA_REST_BOOTSTRAP_SERVERS=kafka:29092
    # NOTE(review): attached to swatch-network so the "kafka" alias resolves; "db" and
    # "kafka" are only reachable on that network, not on the compose default network.
    networks:
      - swatch-network
    depends_on:
      kafka:
        condition: service_healthy
  kafka-setup:
    # One-shot job: creates the topics the inventory services consume/produce, then exits.
    image: quay.io/strimzi/kafka:latest-kafka-3.1.0
    command: |
      /bin/bash -c "
      bin/kafka-topics.sh --bootstrap-server=kafka:29092 --create --if-not-exists --partitions 1 --replication-factor 1 --topic platform.inventory.host-ingress
      bin/kafka-topics.sh --bootstrap-server=kafka:29092 --create --if-not-exists --partitions 1 --replication-factor 1 --topic platform.inventory.events
      bin/kafka-topics.sh --bootstrap-server=kafka:29092 --create --if-not-exists --partitions 1 --replication-factor 1 --topic platform.notifications.ingress
      "
    # NOTE(review): see kafka-rest — needed to resolve "kafka:29092".
    networks:
      - swatch-network
    depends_on:
      kafka:
        condition: service_healthy
  kafka-topics-ui:
    image: docker.io/landoop/kafka-topics-ui
    environment:
      - KAFKA_REST_PROXY_URL=http://kafka-rest:8082
      - PROXY=true
    ports:
      - "127.0.0.1:3030:8000"
    restart: on-failure
    # NOTE(review): must share a network with kafka-rest for the proxy URL to resolve.
    networks:
      - swatch-network
    depends_on:
      - kafka-rest
  kafka-bridge:
    image: quay.io/strimzi/kafka-bridge:latest
    entrypoint: /opt/strimzi/bin/kafka_bridge_run.sh
    command: --config-file=config/application-kafka-bridge.properties
    ports:
      - "127.0.0.1:9080:8080"
    volumes:
      - ./config/application-kafka-bridge.properties:/opt/strimzi/config/application-kafka-bridge.properties:Z
    networks:
      - swatch-network
    user: root
    depends_on:
      kafka:
        condition: service_healthy
  inventory:
    # Main inventory deployment: runs the DB migrations, then serves the API.
    image: quay.io/cloudservices/insights-inventory
    entrypoint: /bin/bash -c
    command: ["make upgrade_db && ./run_gunicorn.py"]
    environment:
      - INVENTORY_LOG_LEVEL=DEBUG
      - INVENTORY_DB_HOST=db
      - KAFKA_BOOTSTRAP_SERVERS=kafka:29092
      - prometheus_multiproc_dir=/tmp
      - BYPASS_RBAC=true
      - FLASK_APP=./manage.py
      - UNLEASH_TOKEN=default:development.unleash-insecure-api-token
      - UNLEASH_URL=http://unleash:4242/api
      - UNLEASH_CACHE_DIR=/tmp/.unleashcache
    depends_on:
      kafka:
        condition: service_healthy
      db:
        condition: service_healthy
      unleash:
        condition: service_healthy
    restart: unless-stopped
    ports:
      - "127.0.0.1:8050:8000"
    # NOTE(review): see kafka-rest — needed to resolve "db", "kafka", and "unleash".
    networks:
      - swatch-network
    user: root
  inventory-mq:
    # Kafka consumer side of insights-inventory (same image, different entry command).
    image: quay.io/cloudservices/insights-inventory
    entrypoint: /bin/bash -c
    command: ["make run_inv_mq_service"]
    environment:
      - INVENTORY_LOG_LEVEL=DEBUG
      - INVENTORY_DB_HOST=db
      - KAFKA_BOOTSTRAP_SERVERS=kafka:29092
      - prometheus_multiproc_dir=/tmp
      - BYPASS_RBAC=true
      - FLASK_APP=./manage.py
      - UNLEASH_TOKEN=default:development.unleash-insecure-api-token
      # NOTE(review): was http://unleash:4242 (no /api); aligned with the "inventory"
      # service above, which targets the Unleash API base path — confirm against the image.
      - UNLEASH_URL=http://unleash:4242/api
      - UNLEASH_CACHE_DIR=/tmp/.unleashcache
    depends_on:
      kafka:
        condition: service_healthy
      db:
        condition: service_healthy
      kafka-setup:
        # kafka-setup is a one-shot job with no healthcheck, so service_healthy could
        # never be satisfied; wait for it to exit successfully instead.
        condition: service_completed_successfully
      inventory: # main inventory deployment runs the db migrations
        condition: service_healthy
    # NOTE(review): see kafka-rest — needed to resolve "db", "kafka", and "unleash".
    networks:
      - swatch-network
    user: root
  unleash:
    image: quay.io/cloudservices/unleash-server:5.8.2
    environment:
      - INIT_CLIENT_API_TOKENS=default:development.unleash-insecure-api-token
      - INIT_ADMIN_API_TOKENS=*:*.unleash-insecure-admin-api-token
      - CHECK_VERSION=false
      - DATABASE_HOST=db
      - DATABASE_NAME=unleash
      - DATABASE_USERNAME=${DATABASE_USER:-unleash}
      - DATABASE_PASSWORD=${DATABASE_PASSWORD:-unleash}
      - DATABASE_SSL=false
      - IMPORT_FILE=/.unleash/flags.json
      # NOTE(review): this key appeared twice (false, then true); last-wins, so the
      # effective value "true" is kept and the shadowed duplicate was removed.
      - IMPORT_DROP_BEFORE_IMPORT=true
      - LOG_LEVEL=INFO
    healthcheck:
      test: wget --no-verbose --tries=1 --spider http://localhost:4242/health || exit 1
      interval: 2s
      timeout: 1m
      retries: 5
      start_period: 10s
    # Why do we do this? Because there is an issue around the postgresql container where it
    # starts, runs some initialization scripts, and then restarts itself. See
    # https://github.com/sclorg/postgresql-container/blob/master/12/root/usr/bin/run-postgresql#L54-L58
    # This fools the healthcheck into reporting that the container is ready, only for the
    # postgresql process to stop and restart. Meanwhile, the unleash container tries to connect,
    # fails, and doesn't start. So we add this shim script to wait until the connection is stable.
    # The script itself is sourced from the https://github.com/Unleash/unleash-docker container
    command: ["/bin/sh", "/unleash/wait-for", "db:5432", "--", "node", "index.js"]
    depends_on:
      db:
        condition: service_healthy
    ports:
      - "127.0.0.1:4242:4242"
    volumes:
      - './bin/wait-for:/unleash/wait-for:z'
      - './.unleash:/.unleash:z'
    # NOTE(review): see kafka-rest — needed to resolve "db:5432" in the wait-for shim.
    networks:
      - swatch-network
networks:
  swatch-network:
    name: swatch-network
    driver: bridge