forked from supaglue-labs/supaglue
-
Notifications
You must be signed in to change notification settings - Fork 0
/
docker-compose.yml
121 lines (114 loc) · 3.49 KB
/
docker-compose.yml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
# Environment shared by every backend service (api, sync-worker, init)
# via the `<<: *common-env` merge key.
x-common-env: &common-env
  NODE_ENV: development
  SUPAGLUE_ENVIRONMENT: development
  SUPAGLUE_DATABASE_URL: ${SUPAGLUE_DATABASE_URL:-postgres://postgres:supaglue@postgres:5432/postgres?schema=api}
  SUPAGLUE_MANAGED_DATABASE_URL: ${SUPAGLUE_MANAGED_DATABASE_URL:-postgres://postgres:supaglue@postgres:5432/postgres}
  # Quoted so the value reaches the app as the string '1', not YAML integer 1.
  SUPAGLUE_PRETTY_LOGS: '1'
  SUPAGLUE_API_ENCRYPTION_SECRET: ${SUPAGLUE_API_ENCRYPTION_SECRET:-some-per-customer-secret-salt}
  # NOTE(review): a Svix test-mode key is committed as the default here —
  # confirm it is not a live credential before shipping.
  SVIX_API_TOKEN: ${SVIX_API_TOKEN:-testsk_sLr_YXpHsUKnVZFQSPH17YIXbBGn9Nl3.us}
  SUPAGLUE_LOG_LEVEL: debug
  # Bare keys (no value) pass the variable through from the host environment.
  SUPAGLUE_DEPLOYMENT_ID:
  SALESFORCE_CLIENT_ID:
  SALESFORCE_CLIENT_SECRET:
  HUBSPOT_CLIENT_ID:
  HUBSPOT_CLIENT_SECRET:
  GONG_CLIENT_ID:
  GONG_CLIENT_SECRET:
  OUTREACH_CLIENT_ID:
  OUTREACH_CLIENT_SECRET:
  SALESLOFT_CLIENT_ID:
  SALESLOFT_CLIENT_SECRET:
  # Edit this when using ngrok
  SUPAGLUE_SERVER_URL: ${SUPAGLUE_SERVER_URL:-http://localhost:8080}
  SUPAGLUE_MAGIC_LINK_URL:
  CLERK_JWKS_URL: ${CLERK_JWKS_URL:-https://witty-eft-29.clerk.accounts.dev/.well-known/jwks.json}
  # For debugging only
  GLOBAL_AGENT_HTTP_PROXY: ${GLOBAL_AGENT_HTTP_PROXY}
  # Quoted string '0', not integer 0. For https debugging proxy to work....
  NODE_TLS_REJECT_UNAUTHORIZED: '0'
  HUBSPOT_WEBHOOK_TARGET_URL:
# Extra environment for services that serve the management UI/API surface;
# merged alongside *common-env in the api service.
x-fe-api-common-env: &fe-api-common-env
  SUPAGLUE_INTERNAL_TOKEN: some-internal-token  # dev-only placeholder token
  NEXT_PUBLIC_CLERK_PUBLISHABLE_KEY: ${NEXT_PUBLIC_CLERK_PUBLISHABLE_KEY:-some-key}
  CLERK_SECRET_KEY: ${CLERK_SECRET_KEY:-some-key}
  NEXT_PUBLIC_SUPAGLUE_ENVIRONMENT: docker
  # Quoted so the flag reaches the app as the string '1', not YAML integer 1.
  NEXT_PUBLIC_SUPAGLUE_DISABLE_ERROR_REPORTING: '1'
services:
  # Postgres 15 instance backing both the API schema (`?schema=api`) and the
  # managed database URL from *common-env.
  postgres:
    image: postgres:15
    ports:
      - '5432:5432'
    volumes:
      # Named volume so database state survives container recreation.
      - pgdata:/var/lib/postgresql/data
    restart: on-failure
    environment:
      POSTGRES_DB: ${POSTGRES_DATABASE:-postgres}
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-supaglue}
  # REST API server. The repo root is bind-mounted into a stock Node 18 image
  # and started via the dev script (no baked application image).
  api:
    image: node:18-bullseye
    ports:
      - '8080:8080'
    depends_on:
      postgres:
        condition: service_started
      # Wait for the one-shot `init` service (migrations/seed) to finish.
      init:
        condition: service_completed_successfully
    volumes:
      - .:/app
    working_dir: /app
    environment:
      # Merge both shared env blocks; keys written explicitly below override
      # any merged-in key of the same name.
      <<: [*common-env, *fe-api-common-env]
      SUPAGLUE_API_PORT: ${SUPAGLUE_API_PORT:-8080}
      SUPAGLUE_CORS_ORIGIN: ${SUPAGLUE_CORS_ORIGIN:-http://localhost:3000}
      # Bare key: passed through from the host environment when set.
      SUPAGLUE_SYNC_PERIOD_MS:
      ADMIN_PASSWORD: ${ADMIN_PASSWORD:-admin}
    restart: on-failure
    command: /bin/sh -c "./apps/api/scripts/start_dev.sh"
  # Background sync worker. Same bind-mount/dev-script pattern as `api`,
  # but exposes no ports and only needs the common env block.
  sync-worker:
    image: node:18-bullseye
    depends_on:
      postgres:
        condition: service_started
      # Wait for the one-shot `init` service (migrations/seed) to finish.
      init:
        condition: service_completed_successfully
    volumes:
      - .:/app
    working_dir: /app
    environment:
      <<: *common-env
    restart: on-failure
    command: /bin/sh -c "./apps/sync-worker/scripts/start_dev.sh"
temporal:
image: alpine:3.18.0
ports:
- 7233:7233
- 8233:8233
restart: on-failure
command:
- /bin/sh
- -c
- |
apk add --update curl
rm -rf /var/cache/apk/*
curl -sSf https://temporal.download/cli.sh | sh
/root/.temporalio/bin/temporal server start-dev -n default --ip 0.0.0.0 -f /data/temporal.db
volumes:
- temporalitedata:/data
  # One-shot bootstrap job: installs workspace deps, applies Prisma
  # migrations, runs the seed script, and initializes Temporal. `api` and
  # `sync-worker` wait on its successful completion via depends_on.
  init:
    image: node:18-bullseye
    environment:
      <<: *common-env
      # Explicit key overrides any DO_SEED merged in from *common-env.
      DO_SEED: '0'
    volumes:
      - .:/app
    working_dir: /app
    command:
      - sh
      - -c
      - |
        yarn install
        yarn workspace @supaglue/db prisma migrate dev
        yarn workspace @supaglue/db prisma db seed
        yarn workspace api init-temporal
# Named volumes so Postgres data and the Temporal SQLite file persist
# across container restarts.
volumes:
  pgdata:
  temporalitedata: