From ea9203cbe2ed683ad98f67effe69860f28c3b730 Mon Sep 17 00:00:00 2001 From: Bart <57799908+crypt0rr@users.noreply.github.com> Date: Tue, 24 Dec 2024 15:20:38 +0100 Subject: [PATCH] Move existing services to .env (#40) * AdguardHome Move to .env * Bazarr Move to .env * Beszel-Hub Move to .env * Comment port * Beszel-Agent move to .env * Excalidraw Move to .env * Homarr move to .env * Jellyfin move to .env * Port comment * LanguageTool move to .env * Nextcloud move to .env * Pihole move to .env * Plex move to .env * Portainer move to .env * Ports * Qbittorrent move to .env * Radarr move to .env * Resilio-sync move to .env * Searxng move to .env * Sonarr move to .env * Stirlingpdf move to .env * Tailscale Exit Node move to .env * Tautulli move to .env * Uptime-kuma move to .env * Vaultwarden move to .env --- services/adguardhome/.env | 8 +++ services/adguardhome/docker-compose.yml | 39 ++++++++------ services/bazarr/.env | 8 +++ services/bazarr/docker-compose.yml | 40 +++++++------- services/beszel/agent/.env | 8 +++ services/beszel/agent/docker-compose.yml | 44 ++++++++++------ services/beszel/hub/.env | 8 +++ services/beszel/hub/docker-compose.yml | 45 ++++++++++------ services/excalidraw/.env | 8 +++ services/excalidraw/docker-compose.yml | 43 +++++++++------ services/homarr/.env | 8 +++ services/homarr/docker-compose.yml | 38 ++++++++------ services/jellyfin/.env | 8 +++ services/jellyfin/docker-compose.yml | 37 +++++++------ services/languagetool/.env | 8 +++ services/languagetool/docker-compose.yml | 37 +++++++------ services/nextcloud/.env | 10 ++++ services/nextcloud/docker-compose.yml | 48 +++++++++-------- services/pihole/.env | 8 +++ services/pihole/docker-compose.yml | 47 +++++++++-------- services/plex/.env | 8 +++ services/plex/docker-compose.yml | 41 ++++++++------- services/portainer/.env | 8 +++ services/portainer/docker-compose.yml | 52 +++++++++++-------- services/qbittorrent/.env | 8 +++ services/qbittorrent/docker-compose.yml | 38 +++++++------- 
services/radarr/.env | 8 +++ services/radarr/docker-compose.yml | 41 ++++++++------- services/resilio-sync/.env | 8 +++ services/resilio-sync/docker-compose.yml | 46 +++++++++------- services/searxng/.env | 8 +++ services/searxng/docker-compose.yml | 42 +++++++++------ services/sonarr/.env | 8 +++ services/sonarr/docker-compose.yml | 40 +++++++------- services/stirlingpdf/.env | 8 +++ services/stirlingpdf/docker-compose.yml | 48 +++++++++-------- services/tailscale-exit-node/.env | 8 +++ .../tailscale-exit-node/docker-compose.yml | 21 ++++---- services/tautulli/.env | 8 +++ services/tautulli/docker-compose.yml | 46 ++++++++-------- services/uptime-kuma/.env | 8 +++ services/uptime-kuma/docker-compose.yml | 43 ++++++++------- services/vaultwarden/.env | 8 +++ services/vaultwarden/docker-compose.yml | 33 ++++++------ 44 files changed, 686 insertions(+), 401 deletions(-) create mode 100644 services/adguardhome/.env create mode 100644 services/bazarr/.env create mode 100644 services/beszel/agent/.env create mode 100644 services/beszel/hub/.env create mode 100644 services/excalidraw/.env create mode 100644 services/homarr/.env create mode 100644 services/jellyfin/.env create mode 100644 services/languagetool/.env create mode 100644 services/nextcloud/.env create mode 100644 services/pihole/.env create mode 100644 services/plex/.env create mode 100644 services/portainer/.env create mode 100644 services/qbittorrent/.env create mode 100644 services/radarr/.env create mode 100644 services/resilio-sync/.env create mode 100644 services/searxng/.env create mode 100644 services/sonarr/.env create mode 100644 services/stirlingpdf/.env create mode 100644 services/tailscale-exit-node/.env create mode 100644 services/tautulli/.env create mode 100644 services/uptime-kuma/.env create mode 100644 services/vaultwarden/.env diff --git a/services/adguardhome/.env b/services/adguardhome/.env new file mode 100644 index 0000000..d17fd5d --- /dev/null +++ b/services/adguardhome/.env @@ -0,0 
+1,8 @@ +#version=1.0 +#url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs +#COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra +SERVICE=adguardhome +IMAGE_URL=adguard/adguardhome:latest +SERVICEPORT=53 +TS_AUTHKEY= //Insert Tailscale key here from the Admin Portal +DNS_SERVER=1.1.1.1 \ No newline at end of file diff --git a/services/adguardhome/docker-compose.yml b/services/adguardhome/docker-compose.yml index 8bffd15..6468ab8 100644 --- a/services/adguardhome/docker-compose.yml +++ b/services/adguardhome/docker-compose.yml @@ -1,18 +1,19 @@ services: - +# Make sure you have updated/checked the .env file with the correct variables. +# All the ${ xx } need to be defined there. # Tailscale Sidecar Configuration - tailscale-adguardhome: + tailscale: image: tailscale/tailscale:latest # Image to be used - container_name: tailscale-adguardhome # Name for local container management - hostname: dns # Name used within your Tailscale environment + container_name: ${SERVICE} # Name for local container management + hostname: ${SERVICE} # Name used within your Tailscale environment environment: - - TS_AUTHKEY=tskey-auth- + - TS_AUTHKEY=${TS_AUTHKEY} - TS_STATE_DIR=/var/lib/tailscale - TS_SERVE_CONFIG=/config/serve.json # Tailsacale Serve configuration to expose the web interface on your local Tailnet - remove this line if not required - TS_USERSPACE=false volumes: - - ${PWD}/adguardhome/config:/config # Config folder used to store Tailscale files - you may need to change the path - - ${PWD}/adguardhome/tailscale-adguardhome/state:/var/lib/tailscale # Tailscale requirement - you may need to change the path + - ${PWD}/${SERVICE}/ts/config:/config # Config folder used to store Tailscale files - you may need to change the path + - ${PWD}/${SERVICE}/ts/state:/var/lib/tailscale # Tailscale requirement - you may need to change the path - /dev/net/tun:/dev/net/tun # Network configuration for Tailscale to work cap_add: - net_admin # Tailscale 
requirement @@ -20,6 +21,9 @@ services: ports: - "0.0.0.0:53:53/udp" # Binding port 53/udp to the local network - may be removed if only exposure to your Tailnet is required - "0.0.0.0:53:53/tcp" # Binding port 53/tcp to the local network - may be removed if only exposure to your Tailnet is required + # If any DNS issues arise, use your preferred DNS provider by uncommenting the config below + # dns: + # - ${DNS_SERVER} healthcheck: test: ["CMD", "tailscale", "status"] # Check if Tailscale is running interval: 1m # How often to perform the check @@ -27,20 +31,21 @@ services: retries: 3 # Number of retries before marking as unhealthy start_period: 10s # Time to wait before starting health checks restart: always - - # AdGuard Home - adguardhome: - image: adguard/adguardhome:latest # Image to be used - network_mode: service:tailscale-adguardhome # Sidecar configuration to route Adguard Home through Tailscale - container_name: adguardhome # Name for local container management + # ${SERVICE} + application: + image: ${IMAGE_URL} # Image to be used + network_mode: service:tailscale # Sidecar configuration to route ${SERVICE} through Tailscale + container_name: app-${SERVICE} # Name for local container management + environment: + - TZ=Europe/Amsterdam volumes: - - ${PWD}/adguardhome/workdir:/opt/adguardhome/work # Work directory for Adguard Home - you may need to change the path - - ${PWD}/adguardhome/configdir:/opt/adguardhome/conf # Config directory for Adguard Home - you may need to change the path + - ${PWD}/${SERVICE}/workdir:/opt/adguardhome/work # Work directory for Adguard Home - you may need to change the path + - ${PWD}/${SERVICE}/configdir:/opt/adguardhome/conf # Config directory for Adguard Home - you may need to change the path depends_on: - - tailscale-adguardhome + - tailscale healthcheck: - test: ["CMD", "pgrep", "-f", "AdGuardHome"] # Check if AdGuard Home process is running + test: ["CMD", "pgrep", "-f", "${SERVICE}"] # Check if ${SERVICE} process is 
running interval: 1m # How often to perform the check timeout: 10s # Time to wait for the check to succeed retries: 3 # Number of retries before marking as unhealthy diff --git a/services/bazarr/.env b/services/bazarr/.env new file mode 100644 index 0000000..3ad37df --- /dev/null +++ b/services/bazarr/.env @@ -0,0 +1,8 @@ +#version=1.0 +#url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs +#COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra +SERVICE=bazarr +IMAGE_URL=lscr.io/linuxserver/bazarr:latest +SERVICEPORT=6767 +TS_AUTHKEY= //Insert Tailscale key here from the Admin Portal +DNS_SERVER=1.1.1.1 \ No newline at end of file diff --git a/services/bazarr/docker-compose.yml b/services/bazarr/docker-compose.yml index f2d888f..df16545 100644 --- a/services/bazarr/docker-compose.yml +++ b/services/bazarr/docker-compose.yml @@ -1,26 +1,28 @@ services: +# Make sure you have updated/checked the .env file with the correct variables. +# All the ${ xx } need to be defined there. 
# Tailscale Sidecar Configuration - tailscale-bazarr: + tailscale: image: tailscale/tailscale:latest # Image to be used - container_name: tailscale-bazarr # Name for local container management - hostname: bazarr # Name used within your Tailscale environment + container_name: ${SERVICE} # Name for local container management + hostname: ${SERVICE} # Name used within your Tailscale environment environment: - - TS_AUTHKEY=tskey-auth- + - TS_AUTHKEY=${TS_AUTHKEY} - TS_STATE_DIR=/var/lib/tailscale - TS_SERVE_CONFIG=/config/serve.json # Tailsacale Serve configuration to expose the web interface on your local Tailnet - remove this line if not required - TS_USERSPACE=false volumes: - - ${PWD}/bazarr/config:/config # Config folder used to store Tailscale files - you may need to change the path - - ${PWD}/bazarr/tailscale-bazarr/state:/var/lib/tailscale # Tailscale requirement - you may need to change the path + - ${PWD}/${SERVICE}/ts/config:/config # Config folder used to store Tailscale files - you may need to change the path + - ${PWD}/${SERVICE}/ts/state:/var/lib/tailscale # Tailscale requirement - you may need to change the path - /dev/net/tun:/dev/net/tun # Network configuration for Tailscale to work cap_add: - net_admin # Tailscale requirement - sys_module # Tailscale requirement - ports: - - 0.0.0.0:6767:6767 # Binding port 6767 to the local network - may be removed if only exposure to your Tailnet is required + #ports: + # - 0.0.0.0:${SERVICEPORT}:${SERVICEPORT} # Binding port ${SERVICE}PORT to the local network - may be removed if only exposure to your Tailnet is required # If any DNS issues arise, use your preferred DNS provider by uncommenting the config below # dns: - # - 1.1.1.1 + # - ${DNS_SERVER} healthcheck: test: ["CMD", "tailscale", "status"] # Check if Tailscale is running interval: 1m # How often to perform the check @@ -29,23 +31,23 @@ services: start_period: 10s # Time to wait before starting health checks restart: always - # bazarr - bazarr: - image: 
lscr.io/linuxserver/bazarr:latest # Image to be used - network_mode: service:tailscale-bazarr # Sidecar configuration to route bazarr through Tailscale - container_name: bazarr # Name for local container management + # ${SERVICE} + application: + image: ${IMAGE_URL} # Image to be used + network_mode: service:tailscale # Sidecar configuration to route ${SERVICE} through Tailscale + container_name: app-${SERVICE} # Name for local container management environment: - PUID=1000 - PGID=1000 - TZ=Europe/Amsterdam volumes: - - ${PWD}/bazarr/config:/config - - ${PWD}/media/movies:/movies - - ${PWD}/media/tvseries:/tv + - ${PWD}/${SERVICE}/config:/config + - ${PWD}/${SERVICE}/media/movies:/movies + - ${PWD}/${SERVICE}/media/tvseries:/tv depends_on: - - tailscale-bazarr + - tailscale healthcheck: - test: ["CMD", "pgrep", "-f", "bazarr"] # Check if bazarr process is running + test: ["CMD", "pgrep", "-f", "${SERVICE}"] # Check if ${SERVICE} process is running interval: 1m # How often to perform the check timeout: 10s # Time to wait for the check to succeed retries: 3 # Number of retries before marking as unhealthy diff --git a/services/beszel/agent/.env b/services/beszel/agent/.env new file mode 100644 index 0000000..da9ef93 --- /dev/null +++ b/services/beszel/agent/.env @@ -0,0 +1,8 @@ +#version=1.0 +#url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs +#COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra +SERVICE=beszel-agent +IMAGE_URL=henrygd/beszel-agent:latest +SERVICEPORT=45876 +TS_AUTHKEY= //Insert Tailscale key here from the Admin Portal +DNS_SERVER=1.1.1.1 diff --git a/services/beszel/agent/docker-compose.yml b/services/beszel/agent/docker-compose.yml index 874120f..fdeba2a 100644 --- a/services/beszel/agent/docker-compose.yml +++ b/services/beszel/agent/docker-compose.yml @@ -1,24 +1,28 @@ services: - +# Make sure you have updated/checked the .env file with the correct variables. +# All the ${ xx } need to be defined there. 
# Tailscale Sidecar Configuration - tailscale-beszel-agent: - image: tailscale/tailscale:latest # Image to be used - container_name: tailscale-beszel-agent # Name for local container management - hostname: monitor # Name used within your Tailscale environment + tailscale: + image: tailscale/tailscale:latest # Image to be used + container_name: ${SERVICE} # Name for local container management + hostname: ${SERVICE} # Name used within your Tailscale environment environment: - - TS_AUTHKEY=tskey-auth- + - TS_AUTHKEY=${TS_AUTHKEY} - TS_STATE_DIR=/var/lib/tailscale + - TS_SERVE_CONFIG=/config/serve.json # Tailsacale Serve configuration to expose the web interface on your local Tailnet - remove this line if not required - TS_USERSPACE=false volumes: - - ${PWD}/beszel-agent/config:/config # Config folder used to store Tailscale files - you may need to change the path - - ${PWD}/beszel-agent/tailscale-beszel-agent/state:/var/lib/tailscale # Tailscale requirement - you may need to change the path + - ${PWD}/${SERVICE}/ts/config:/config # Config folder used to store Tailscale files - you may need to change the path + - ${PWD}/${SERVICE}/ts/state:/var/lib/tailscale # Tailscale requirement - you may need to change the path - /dev/net/tun:/dev/net/tun # Network configuration for Tailscale to work cap_add: - net_admin # Tailscale requirement - sys_module # Tailscale requirement + #ports: + # - 0.0.0.0:${SERVICEPORT}:${SERVICEPORT} # Binding port ${SERVICE}PORT to the local network - may be removed if only exposure to your Tailnet is required # If any DNS issues arise, use your preferred DNS provider by uncommenting the config below # dns: - # - 1.1.1.1 + # - ${DNS_SERVER} healthcheck: test: ["CMD", "tailscale", "status"] # Check if Tailscale is running interval: 1m # How often to perform the check @@ -27,16 +31,22 @@ services: start_period: 10s # Time to wait before starting health checks restart: always - # Beszel Agent - beszel-agent: - image: henrygd/beszel-agent:latest # 
Image to be used - network_mode: service:tailscale-beszel-agent # Sidecar configuration to route Beszel Agent through Tailscale - container_name: beszel-agent # Name for local container management - volumes: - - /var/run/docker.sock:/var/run/docker.sock:ro # Read-only access to the docker.sock + # ${SERVICE} + application: + image: ${IMAGE_URL} # Image to be used + network_mode: service:tailscale # Sidecar configuration to route ${SERVICE} through Tailscale + container_name: app-${SERVICE} # Name for local container management environment: PORT: 45876 KEY: "ssh-ed25519 " + volumes: + - /var/run/docker.sock:/var/run/docker.sock:ro # Read-only access to the docker.sock depends_on: - - tailscale-beszel-agent + - tailscale + healthcheck: + test: ["CMD", "pgrep", "-f", "${SERVICE}"] # Check if ${SERVICE} process is running + interval: 1m # How often to perform the check + timeout: 10s # Time to wait for the check to succeed + retries: 3 # Number of retries before marking as unhealthy + start_period: 30s # Time to wait before starting health checks restart: always \ No newline at end of file diff --git a/services/beszel/hub/.env b/services/beszel/hub/.env new file mode 100644 index 0000000..62b07ae --- /dev/null +++ b/services/beszel/hub/.env @@ -0,0 +1,8 @@ +#version=1.0 +#url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs +#COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra +SERVICE=beszel-hub +IMAGE_URL=henrygd/beszel:latest +SERVICEPORT=8090 +TS_AUTHKEY= //Insert Tailscale key here from the Admin Portal +DNS_SERVER=1.1.1.1 \ No newline at end of file diff --git a/services/beszel/hub/docker-compose.yml b/services/beszel/hub/docker-compose.yml index 06ec458..9be206c 100644 --- a/services/beszel/hub/docker-compose.yml +++ b/services/beszel/hub/docker-compose.yml @@ -1,25 +1,28 @@ services: - +# Make sure you have updated/checked the .env file with the correct variables. +# All the ${ xx } need to be defined there. 
# Tailscale Sidecar Configuration - tailscale-beszel-hub: - image: tailscale/tailscale:latest # Image to be used - container_name: tailscale-beszel-hub # Name for local container management - hostname: monitor # Name used within your Tailscale environment + tailscale: + image: tailscale/tailscale:latest # Image to be used + container_name: ${SERVICE} # Name for local container management + hostname: ${SERVICE} # Name used within your Tailscale environment environment: - - TS_AUTHKEY=tskey-auth- + - TS_AUTHKEY=${TS_AUTHKEY} - TS_STATE_DIR=/var/lib/tailscale - TS_SERVE_CONFIG=/config/serve.json # Tailsacale Serve configuration to expose the web interface on your local Tailnet - remove this line if not required - TS_USERSPACE=false volumes: - - ${PWD}/beszel-hub/config:/config # Config folder used to store Tailscale files - you may need to change the path - - ${PWD}/beszel-hub/tailscale-beszel-hub/state:/var/lib/tailscale # Tailscale requirement - you may need to change the path + - ${PWD}/${SERVICE}/ts/config:/config # Config folder used to store Tailscale files - you may need to change the path + - ${PWD}/${SERVICE}/ts/state:/var/lib/tailscale # Tailscale requirement - you may need to change the path - /dev/net/tun:/dev/net/tun # Network configuration for Tailscale to work cap_add: - net_admin # Tailscale requirement - sys_module # Tailscale requirement + #ports: + # - 0.0.0.0:${SERVICEPORT}:${SERVICEPORT} # Binding port ${SERVICE}PORT to the local network - may be removed if only exposure to your Tailnet is required # If any DNS issues arise, use your preferred DNS provider by uncommenting the config below # dns: - # - 1.1.1.1 + # - ${DNS_SERVER} healthcheck: test: ["CMD", "tailscale", "status"] # Check if Tailscale is running interval: 1m # How often to perform the check @@ -28,13 +31,23 @@ services: start_period: 10s # Time to wait before starting health checks restart: always - # Beszel Hub - beszel-hub: - image: henrygd/beszel:latest # Image to be used - 
network_mode: service:tailscale-beszel-hub # Sidecar configuration to route Beszel Hub through Tailscale - container_name: beszel-hub # Name for local container management + # ${SERVICE} + application: + image: ${IMAGE_URL} # Image to be used + network_mode: service:tailscale # Sidecar configuration to route ${SERVICE} through Tailscale + container_name: app-${SERVICE} # Name for local container management + environment: + - PUID=1000 + - PGID=1000 + - TZ=Europe/Amsterdam volumes: - - ${PWD}/beszel/hub/beszel_data:/beszel_data # Work directory for Beszel Hub - you may need to change the path + - ${PWD}/${SERVICE}/beszel_data:/beszel_data # Work directory for Beszel Hub - you may need to change the path depends_on: - - tailscale-beszel-hub + - tailscale + healthcheck: + test: ["CMD", "pgrep", "-f", "${SERVICE}"] # Check if ${SERVICE} process is running + interval: 1m # How often to perform the check + timeout: 10s # Time to wait for the check to succeed + retries: 3 # Number of retries before marking as unhealthy + start_period: 30s # Time to wait before starting health checks restart: always \ No newline at end of file diff --git a/services/excalidraw/.env b/services/excalidraw/.env new file mode 100644 index 0000000..4cb48de --- /dev/null +++ b/services/excalidraw/.env @@ -0,0 +1,8 @@ +#version=1.0 +#url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs +#COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra +SERVICE=excalidraw +IMAGE_URL=excalidraw/excalidraw +SERVICEPORT=80 +TS_AUTHKEY= //Insert Tailscale key here from the Admin Portal +DNS_SERVER=1.1.1.1 diff --git a/services/excalidraw/docker-compose.yml b/services/excalidraw/docker-compose.yml index 1078f22..d1dcfad 100644 --- a/services/excalidraw/docker-compose.yml +++ b/services/excalidraw/docker-compose.yml @@ -1,24 +1,28 @@ services: +# Make sure you have updated/checked the .env file with the correct variables. +# All the ${ xx } need to be defined there. 
# Tailscale Sidecar Configuration - tailscale-excalidraw: + tailscale: image: tailscale/tailscale:latest # Image to be used - container_name: tailscale-excalidraw # Name for local container management - hostname: excalidraw # Name used within your Tailscale environment + container_name: ${SERVICE} # Name for local container management + hostname: ${SERVICE} # Name used within your Tailscale environment environment: - - TS_AUTHKEY=tskey-auth- + - TS_AUTHKEY=${TS_AUTHKEY} - TS_STATE_DIR=/var/lib/tailscale - TS_SERVE_CONFIG=/config/serve.json # Tailsacale Serve configuration to expose the web interface on your local Tailnet - remove this line if not required - TS_USERSPACE=false volumes: - - ${PWD}/excalidraw/config:/config # Config folder used to store Tailscale files - you may need to change the path - - ${PWD}/excalidraw/tailscale-excalidraw/state:/var/lib/tailscale # Tailscale requirement - you may need to change the path + - ${PWD}/${SERVICE}/ts/config:/config # Config folder used to store Tailscale files - you may need to change the path + - ${PWD}/${SERVICE}/ts/state:/var/lib/tailscale # Tailscale requirement - you may need to change the path - /dev/net/tun:/dev/net/tun # Network configuration for Tailscale to work cap_add: - net_admin # Tailscale requirement - sys_module # Tailscale requirement + #ports: + # - 0.0.0.0:${SERVICEPORT}:${SERVICEPORT} # Binding port ${SERVICE}PORT to the local network - may be removed if only exposure to your Tailnet is required # If any DNS issues arise, use your preferred DNS provider by uncommenting the config below # dns: - # - 1.1.1.1 + # - ${DNS_SERVER} healthcheck: test: ["CMD", "tailscale", "status"] # Check if Tailscale is running interval: 1m # How often to perform the check @@ -27,14 +31,23 @@ services: start_period: 10s # Time to wait before starting health checks restart: always - # excalidraw - excalidraw: - image: excalidraw/excalidraw:latest # Image to be used - network_mode: service:tailscale-excalidraw # Sidecar 
configuration to route excalidraw through Tailscale - container_name: excalidraw # Name for local container management - stdin_open: true + # ${SERVICE} + application: + image: ${IMAGE_URL} # Image to be used + network_mode: service:tailscale # Sidecar configuration to route ${SERVICE} through Tailscale + container_name: app-${SERVICE} # Name for local container management environment: - NODE_ENV=production + - TZ=Europe/Amsterdam + stdin_open: true + volumes: + - ${PWD}/${SERVICE}/app/config:/config depends_on: - - tailscale-excalidraw - restart: always + - tailscale + healthcheck: + test: ["CMD", "pgrep", "-f", "${SERVICE}"] # Check if ${SERVICE} process is running + interval: 1m # How often to perform the check + timeout: 10s # Time to wait for the check to succeed + retries: 3 # Number of retries before marking as unhealthy + start_period: 30s # Time to wait before starting health checks + restart: always \ No newline at end of file diff --git a/services/homarr/.env b/services/homarr/.env new file mode 100644 index 0000000..66497d3 --- /dev/null +++ b/services/homarr/.env @@ -0,0 +1,8 @@ +#version=1.0 +#url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs +#COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra +SERVICE=homarr +IMAGE_URL=ghcr.io/ajnart/homarr +SERVICEPORT=7575 +TS_AUTHKEY= //Insert Tailscale key here from the Admin Portal +DNS_SERVER=1.1.1.1 diff --git a/services/homarr/docker-compose.yml b/services/homarr/docker-compose.yml index c41e51d..c3215e5 100644 --- a/services/homarr/docker-compose.yml +++ b/services/homarr/docker-compose.yml @@ -1,26 +1,28 @@ services: +# Make sure you have updated/checked the .env file with the correct variables. +# All the ${ xx } need to be defined there. 
# Tailscale Sidecar Configuration - tailscale-homarr: + tailscale: image: tailscale/tailscale:latest # Image to be used - container_name: tailscale-homarr # Name for local container management - hostname: homarr # Name used within your Tailscale environment + container_name: ${SERVICE} # Name for local container management + hostname: ${SERVICE} # Name used within your Tailscale environment environment: - - TS_AUTHKEY=tskey-auth- + - TS_AUTHKEY=${TS_AUTHKEY} - TS_STATE_DIR=/var/lib/tailscale - TS_SERVE_CONFIG=/config/serve.json # Tailsacale Serve configuration to expose the web interface on your local Tailnet - remove this line if not required - TS_USERSPACE=false volumes: - - ${PWD}/homarr/config:/config # Config folder used to store Tailscale files - you may need to change the path - - ${PWD}/homarr/tailscale-homarr/state:/var/lib/tailscale # Tailscale requirement - you may need to change the path + - ${PWD}/${SERVICE}/ts/config:/config # Config folder used to store Tailscale files - you may need to change the path + - ${PWD}/${SERVICE}/ts/state:/var/lib/tailscale # Tailscale requirement - you may need to change the path - /dev/net/tun:/dev/net/tun # Network configuration for Tailscale to work cap_add: - net_admin # Tailscale requirement - sys_module # Tailscale requirement - ports: - - 0.0.0.0:7575:7575 # Binding port 7575 to the local network - may be removed if only exposure to your Tailnet is required + #ports: + # - 0.0.0.0:${SERVICEPORT}:${SERVICEPORT} # Binding port ${SERVICE}PORT to the local network - may be removed if only exposure to your Tailnet is required # If any DNS issues arise, use your preferred DNS provider by uncommenting the config below # dns: - # - 1.1.1.1 + # - ${DNS_SERVER} healthcheck: test: ["CMD", "tailscale", "status"] # Check if Tailscale is running interval: 1m # How often to perform the check @@ -29,20 +31,24 @@ services: start_period: 10s # Time to wait before starting health checks restart: always - # homarr - homarr: - image: 
ghcr.io/ajnart/homarr:latest # Image to be used - network_mode: service:tailscale-homarr # Sidecar configuration to route homarr through Tailscale - container_name: homarr # Name for local container management + # ${SERVICE} + application: + image: ${IMAGE_URL} # Image to be used + network_mode: service:tailscale # Sidecar configuration to route ${SERVICE} through Tailscale + container_name: app-${SERVICE} # Name for local container management + environment: + - PUID=1000 + - PGID=1000 + - TZ=Europe/Amsterdam volumes: # - /var/run/docker.sock:/var/run/docker.sock # Optional, only if you want docker integration - ${PWD}/homarr/configs:/app/data/configs - ${PWD}/homarr/icons:/app/public/icons - ${PWD}/homarr/data:/data depends_on: - - tailscale-homarr + - tailscale healthcheck: - test: ["CMD", "pgrep", "-f", "homarr"] # Check if homarr process is running + test: ["CMD", "pgrep", "-f", "${SERVICE}"] # Check if ${SERVICE} process is running interval: 1m # How often to perform the check timeout: 10s # Time to wait for the check to succeed retries: 3 # Number of retries before marking as unhealthy diff --git a/services/jellyfin/.env b/services/jellyfin/.env new file mode 100644 index 0000000..d8aec3d --- /dev/null +++ b/services/jellyfin/.env @@ -0,0 +1,8 @@ +#version=1.0 +#url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs +#COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra +SERVICE=jellyfin +IMAGE_URL=lscr.io/linuxserver/jellyfin +SERVICEPORT=8096 +TS_AUTHKEY= //Insert Tailscale key here from the Admin Portal +DNS_SERVER=1.1.1.1 diff --git a/services/jellyfin/docker-compose.yml b/services/jellyfin/docker-compose.yml index 80a4145..71611c2 100644 --- a/services/jellyfin/docker-compose.yml +++ b/services/jellyfin/docker-compose.yml @@ -1,29 +1,28 @@ services: +# Make sure you have updated/checked the .env file with the correct variables. +# All the ${ xx } need to be defined there. 
# Tailscale Sidecar Configuration - tailscale-jellyfin: + tailscale: image: tailscale/tailscale:latest # Image to be used - container_name: tailscale-jellyfin # Name for local container management - hostname: jellyfin # Name used within your Tailscale environment + container_name: ${SERVICE} # Name for local container management + hostname: ${SERVICE} # Name used within your Tailscale environment environment: - - TS_AUTHKEY=tskey-auth- + - TS_AUTHKEY=${TS_AUTHKEY} - TS_STATE_DIR=/var/lib/tailscale - TS_SERVE_CONFIG=/config/serve.json # Tailsacale Serve configuration to expose the web interface on your local Tailnet - remove this line if not required - TS_USERSPACE=false volumes: - - ${PWD}/jellyfin/config:/config # Config folder used to store Tailscale files - you may need to change the path - - ${PWD}/jellyfin/tailscale-jellyfin/state:/var/lib/tailscale # Tailscale requirement - you may need to change the path + - ${PWD}/${SERVICE}/ts/config:/config # Config folder used to store Tailscale files - you may need to change the path + - ${PWD}/${SERVICE}/ts/state:/var/lib/tailscale # Tailscale requirement - you may need to change the path - /dev/net/tun:/dev/net/tun # Network configuration for Tailscale to work cap_add: - net_admin # Tailscale requirement - sys_module # Tailscale requirement - ports: - - 0.0.0.0:8096:8096 # Binding port 8096 to the local network - may be removed if only exposure to your Tailnet is required - # - "0.0.0.0:7359:7359/udp" #Optional - Allows clients to discover Jellyfin on the local network - # - "0.0.0.0:1900:1900/udp" #Optional - Service discovery used by DNLA and clients - + #ports: + # - 0.0.0.0:${SERVICEPORT}:${SERVICEPORT} # Binding port ${SERVICE}PORT to the local network - may be removed if only exposure to your Tailnet is required # If any DNS issues arise, use your preferred DNS provider by uncommenting the config below # dns: - # - 1.1.1.1 + # - ${DNS_SERVER} healthcheck: test: ["CMD", "tailscale", "status"] # Check if Tailscale 
is running interval: 1m # How often to perform the check @@ -32,11 +31,11 @@ services: start_period: 10s # Time to wait before starting health checks restart: always - # jellyfin - jellyfin: - image: lscr.io/linuxserver/jellyfin:latest # Image to be used - network_mode: service:tailscale-jellyfin # Sidecar configuration to route jellyfin through Tailscale - container_name: jellyfin # Name for local container management + # ${SERVICE} + application: + image: ${IMAGE_URL} # Image to be used + network_mode: service:tailscale # Sidecar configuration to route ${SERVICE} through Tailscale + container_name: app-${SERVICE} # Name for local container management environment: - PUID=1000 - PGID=1000 @@ -48,9 +47,9 @@ services: - ${PWD}/media/tvseries:/data/tvshows - ${PWD}/media/movies:/data/movies depends_on: - - tailscale-jellyfin + - tailscale healthcheck: - test: ["CMD", "pgrep", "-f", "jellyfin"] # Check if jellyfin process is running + test: ["CMD", "pgrep", "-f", "${SERVICE}"] # Check if ${SERVICE} process is running interval: 1m # How often to perform the check timeout: 10s # Time to wait for the check to succeed retries: 3 # Number of retries before marking as unhealthy diff --git a/services/languagetool/.env b/services/languagetool/.env new file mode 100644 index 0000000..926d36a --- /dev/null +++ b/services/languagetool/.env @@ -0,0 +1,8 @@ +#version=1.0 +#url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs +#COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra +SERVICE=languagetool +IMAGE_URL=erikvl87/languagetool +SERVICEPORT=8010 +TS_AUTHKEY= //Insert Tailscale key here from the Admin Portal +DNS_SERVER=1.1.1.1 diff --git a/services/languagetool/docker-compose.yml b/services/languagetool/docker-compose.yml index e6bf54f..cb86d20 100644 --- a/services/languagetool/docker-compose.yml +++ b/services/languagetool/docker-compose.yml @@ -1,26 +1,28 @@ services: +# Make sure you have updated/checked the .env file with the correct 
variables. +# All the ${ xx } need to be defined there. # Tailscale Sidecar Configuration - tailscale-languagetool: + tailscale: image: tailscale/tailscale:latest # Image to be used - container_name: tailscale-languagetool # Name for local container management - hostname: languagetool # Name used within your Tailscale environment + container_name: ${SERVICE} # Name for local container management + hostname: ${SERVICE} # Name used within your Tailscale environment environment: - - TS_AUTHKEY=tskey-auth- + - TS_AUTHKEY=${TS_AUTHKEY} - TS_STATE_DIR=/var/lib/tailscale - TS_SERVE_CONFIG=/config/serve.json # Tailsacale Serve configuration to expose the web interface on your local Tailnet - remove this line if not required - TS_USERSPACE=false volumes: - - ${PWD}/languagetool/config:/config # Config folder used to store Tailscale files - you may need to change the path - - ${PWD}/languagetool/tailscale-languagetool/state:/var/lib/tailscale # Tailscale requirement - you may need to change the path + - ${PWD}/${SERVICE}/ts/config:/config # Config folder used to store Tailscale files - you may need to change the path + - ${PWD}/${SERVICE}/ts/state:/var/lib/tailscale # Tailscale requirement - you may need to change the path - /dev/net/tun:/dev/net/tun # Network configuration for Tailscale to work cap_add: - net_admin # Tailscale requirement - sys_module # Tailscale requirement - ports: - - 0.0.0.0:8010:8010 # Binding port 8010 to the local network - may be removed if only exposure to your Tailnet is required + #ports: + # - 0.0.0.0:${SERVICEPORT}:${SERVICEPORT} # Binding port ${SERVICE}PORT to the local network - may be removed if only exposure to your Tailnet is required # If any DNS issues arise, use your preferred DNS provider by uncommenting the config below # dns: - # - 1.1.1.1 + # - ${DNS_SERVER} healthcheck: test: ["CMD", "tailscale", "status"] # Check if Tailscale is running interval: 1m # How often to perform the check @@ -29,21 +31,22 @@ services: start_period: 10s 
# Time to wait before starting health checks restart: always - # languagetool - languagetool: - image: erikvl87/languagetool:latest # Image to be used - network_mode: service:tailscale-languagetool # Sidecar configuration to route languagetool through Tailscale - container_name: languagetool # Name for local container management + # ${SERVICE} + application: + image: ${IMAGE_URL} # Image to be used + network_mode: service:tailscale # Sidecar configuration to route ${SERVICE} through Tailscale + container_name: app-${SERVICE} # Name for local container management environment: - langtool_languageModel=/ngrams # OPTIONAL: Using ngrams data - Java_Xms=512m # OPTIONAL: Setting a minimal Java heap size of 512 mib - Java_Xmx=1g # OPTIONAL: Setting a maximum Java heap size of 1 Gib + - TZ=Europe/Amsterdam volumes: - - ${PWD}/languagetool/ngrams:/ngrams + - ${PWD}/${SERVICE}/ngrams:/ngrams depends_on: - - tailscale-languagetool + - tailscale healthcheck: - test: ["CMD", "pgrep", "-f", "languagetool"] # Check if languagetool process is running + test: ["CMD", "pgrep", "-f", "${SERVICE}"] # Check if ${SERVICE} process is running interval: 1m # How often to perform the check timeout: 10s # Time to wait for the check to succeed retries: 3 # Number of retries before marking as unhealthy diff --git a/services/nextcloud/.env b/services/nextcloud/.env new file mode 100644 index 0000000..a83f738 --- /dev/null +++ b/services/nextcloud/.env @@ -0,0 +1,10 @@ +#version=1.0 +#url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs +#COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra +SERVICE=nextcloud +IMAGE_URL=nextcloud +SERVICEPORT=80 +TS_AUTHKEY= //Insert Tailscale key here from the Admin Portal +DNS_SERVER=1.1.1.1 +MYSQL_ROOT_PASSWORD= //Insert super root strong password +MYSQL_PASSWORD= //Insert super strong password \ No newline at end of file diff --git a/services/nextcloud/docker-compose.yml b/services/nextcloud/docker-compose.yml index 
a95a53e..09a64ca 100644 --- a/services/nextcloud/docker-compose.yml +++ b/services/nextcloud/docker-compose.yml @@ -1,25 +1,28 @@ services: - +# Make sure you have updated/checked the .env file with the correct variables. +# All the ${ xx } need to be defined there. # Tailscale Sidecar Configuration - tailscale-nextcloud: + tailscale: image: tailscale/tailscale:latest # Image to be used - container_name: tailscale-nextcloud # Name for local container management - hostname: nextcloud # Name used within your Tailscale environment + container_name: ${SERVICE} # Name for local container management + hostname: ${SERVICE} # Name used within your Tailscale environment environment: - - TS_AUTHKEY=tskey-auth- + - TS_AUTHKEY=${TS_AUTHKEY} - TS_STATE_DIR=/var/lib/tailscale - TS_SERVE_CONFIG=/config/serve.json # Tailsacale Serve configuration to expose the web interface on your local Tailnet - remove this line if not required - TS_USERSPACE=false volumes: - - ${PWD}/nextcloud/config:/config # Config folder used to store Tailscale files - you may need to change the path - - ${PWD}/nextcloud/tailscale-nextcloud/state:/var/lib/tailscale # Tailscale requirement - you may need to change the path + - ${PWD}/${SERVICE}/ts/config:/config # Config folder used to store Tailscale files - you may need to change the path + - ${PWD}/${SERVICE}/ts/state:/var/lib/tailscale # Tailscale requirement - you may need to change the path - /dev/net/tun:/dev/net/tun # Network configuration for Tailscale to work cap_add: - - net_admin - - sys_module + - net_admin # Tailscale requirement + - sys_module # Tailscale requirement + #ports: + # - 0.0.0.0:${SERVICEPORT}:${SERVICEPORT} # Binding port ${SERVICE}PORT to the local network - may be removed if only exposure to your Tailnet is required # If any DNS issues arise, use your preferred DNS provider by uncommenting the config below # dns: - # - 1.1.1.1 + # - ${DNS_SERVER} healthcheck: test: ["CMD", "tailscale", "status"] # Check if Tailscale is running 
interval: 1m # How often to perform the check @@ -28,23 +31,24 @@ services: start_period: 10s # Time to wait before starting health checks restart: always - # NextCloud Server - nextcloud: - image: nextcloud:latest # Image to be used - network_mode: service:tailscale-nextcloud # Sidecar configuration to route NextCloud through Tailscale - container_name: nextcloud # Name for local container management - volumes: - - /mnt/nextcloud_data/:/var/www/html + # ${SERVICE} + application: + image: ${IMAGE_URL} # Image to be used + network_mode: service:tailscale # Sidecar configuration to route ${SERVICE} through Tailscale + container_name: app-${SERVICE} # Name for local container management environment: - - MYSQL_PASSWORD= + - MYSQL_PASSWORD=${MYSQL_PASSWORD} - MYSQL_DATABASE=nextcloud - MYSQL_USER=nextcloud - MYSQL_HOST=db # Please note, this variable should reflect the service name of the database itself, in this case db + - TZ=Europe/Amsterdam + volumes: + - ${PWD}/${SERVICE}/nextcloud_data/:/var/www/html depends_on: + - tailscale - db - - tailscale-nextcloud healthcheck: - test: ["CMD", "pgrep", "-f", "apache2"] # Check if Apache (used by Nextcloud) is running + test: ["CMD", "pgrep", "-f", "apache2"] # Check if ${SERVICE} process is running interval: 1m # How often to perform the check timeout: 10s # Time to wait for the check to succeed retries: 3 # Number of retries before marking as unhealthy @@ -56,8 +60,8 @@ services: image: mariadb:latest # Image to be used container_name: nextcloud_db # Name for local container management environment: - - MYSQL_ROOT_PASSWORD= - - MYSQL_PASSWORD= + - MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD} + - MYSQL_PASSWORD=${MYSQL_PASSWORD} - MYSQL_DATABASE=nextcloud - MYSQL_USER=nextcloud volumes: diff --git a/services/pihole/.env b/services/pihole/.env new file mode 100644 index 0000000..eccd931 --- /dev/null +++ b/services/pihole/.env @@ -0,0 +1,8 @@ +#version=1.0
+#url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs +#COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra +SERVICE=pihole +IMAGE_URL=pihole/pihole +SERVICEPORT=80 +TS_AUTHKEY= //Insert Tailscale key here from the Admin Portal +DNS_SERVER=1.1.1.1 diff --git a/services/pihole/docker-compose.yml b/services/pihole/docker-compose.yml index 879e4ee..00a380e 100644 --- a/services/pihole/docker-compose.yml +++ b/services/pihole/docker-compose.yml @@ -1,25 +1,28 @@ services: - +# Make sure you have updated/checked the .env file with the correct variables. +# All the ${ xx } need to be defined there. # Tailscale Sidecar Configuration - tailscale-pihole: + tailscale: image: tailscale/tailscale:latest # Image to be used - container_name: tailscale-pihole # Name for local container management - hostname: dns # Name used within your Tailscale environment + container_name: ${SERVICE} # Name for local container management + hostname: ${SERVICE} # Name used within your Tailscale environment environment: - - TS_AUTHKEY=tskey-auth- + - TS_AUTHKEY=${TS_AUTHKEY} - TS_STATE_DIR=/var/lib/tailscale - TS_SERVE_CONFIG=/config/serve.json # Tailsacale Serve configuration to expose the web interface on your local Tailnet - remove this line if not required - TS_USERSPACE=false volumes: - - ${PWD}/pihole/config:/config # Config folder used to store Tailscale files - you may need to change the path - - ${PWD}/pihole/tailscale-pihole/state:/var/lib/tailscale # Tailscale requirement - you may need to change the path + - ${PWD}/${SERVICE}/ts/config:/config # Config folder used to store Tailscale files - you may need to change the path + - ${PWD}/${SERVICE}/ts/state:/var/lib/tailscale # Tailscale requirement - you may need to change the path - /dev/net/tun:/dev/net/tun # Network configuration for Tailscale to work cap_add: - net_admin # Tailscale requirement - sys_module # Tailscale requirement - ports: - - "0.0.0.0:53:53/udp" # Binding port 53/udp to the 
local network - may be removed if only exposure to your Tailnet is required - - "0.0.0.0:53:53/tcp" # Binding port 53/tcp to the local network - may be removed if only exposure to your Tailnet is required + #ports: + # - 0.0.0.0:${SERVICEPORT}:${SERVICEPORT} # Binding port ${SERVICE}PORT to the local network - may be removed if only exposure to your Tailnet is required + # If any DNS issues arise, use your preferred DNS provider by uncommenting the config below + # dns: + # - ${DNS_SERVER} healthcheck: test: ["CMD", "tailscale", "status"] # Check if Tailscale is running interval: 1m # How often to perform the check @@ -28,23 +31,23 @@ services: start_period: 10s # Time to wait before starting health checks restart: always - # Pi-Hole - pihole: - image: pihole/pihole:latest - network_mode: service:tailscale-pihole # Sidecar configuration to route Pi-hole through Tailscale - container_name: pihole + # ${SERVICE} + application: + image: ${IMAGE_URL} # Image to be used + network_mode: service:tailscale # Sidecar configuration to route ${SERVICE} through Tailscale + container_name: app-${SERVICE} # Name for local container management environment: - TZ: 'Europe/Amsterdam' - # WEBPASSWORD: 'set a secure password here or it will be random' - # Volumes store your data between container upgrades + - PUID=1000 + - PGID=1000 + - TZ=Europe/Amsterdam volumes: - - ${PWD}/pihole/etc-pihole:/etc/pihole - - ${PWD}/pihole/etc-dnsmasq.d:/etc/dnsmasq.d + - ${PWD}/${SERVICE}/etc-pihole:/etc/pihole + - ${PWD}/${SERVICE}/etc-dnsmasq.d:/etc/dnsmasq.d # https://github.com/pi-hole/docker-pi-hole#note-on-capabilities depends_on: - - tailscale-pihole + - tailscale healthcheck: - test: ["CMD", "pgrep", "-f", "pihole-FTL"] # Check if AdGuard Home process is running + test: ["CMD", "pgrep", "-f", "pihole-FTL"] # Check if ${SERVICE} process is running interval: 1m # How often to perform the check timeout: 10s # Time to wait for the check to succeed retries: 3 # Number of retries before marking as 
unhealthy diff --git a/services/plex/.env b/services/plex/.env new file mode 100644 index 0000000..d5d813f --- /dev/null +++ b/services/plex/.env @@ -0,0 +1,8 @@ +#version=1.0 +#url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs +#COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra +SERVICE=plex +IMAGE_URL=lscr.io/linuxserver/plex +SERVICEPORT=32400 +TS_AUTHKEY= //Insert Tailscale key here from the Admin Portal +DNS_SERVER=1.1.1.1 diff --git a/services/plex/docker-compose.yml b/services/plex/docker-compose.yml index 62df3ad..099ed27 100644 --- a/services/plex/docker-compose.yml +++ b/services/plex/docker-compose.yml @@ -1,27 +1,28 @@ services: - +# Make sure you have updated/checked the .env file with the correct variables. +# All the ${ xx } need to be defined there. # Tailscale Sidecar Configuration - tailscale-plex: + tailscale: image: tailscale/tailscale:latest # Image to be used - container_name: tailscale-plex # Name for local container management - hostname: plex # Name used within your Tailscale environment + container_name: ${SERVICE} # Name for local container management + hostname: ${SERVICE} # Name used within your Tailscale environment environment: - - TS_AUTHKEY=tskey-auth- + - TS_AUTHKEY=${TS_AUTHKEY} - TS_STATE_DIR=/var/lib/tailscale - TS_SERVE_CONFIG=/config/serve.json # Tailsacale Serve configuration to expose the web interface on your local Tailnet - remove this line if not required - TS_USERSPACE=false volumes: - - ${PWD}/plex/config:/config # Config folder used to store Tailscale files - you may need to change the path - - ${PWD}/plex/tailscale-plex/state:/var/lib/tailscale # Tailscale requirement - you may need to change the path + - ${PWD}/${SERVICE}/ts/config:/config # Config folder used to store Tailscale files - you may need to change the path + - ${PWD}/${SERVICE}/ts/state:/var/lib/tailscale # Tailscale requirement - you may need to change the path - /dev/net/tun:/dev/net/tun # Network 
configuration for Tailscale to work cap_add: - net_admin # Tailscale requirement - sys_module # Tailscale requirement - ports: - - 0.0.0.0:32400:32400 # Binding port 32400 to the local network - may be removed if only exposure to your Tailnet is required + #ports: + # - 0.0.0.0:${SERVICEPORT}:${SERVICEPORT} # Binding port ${SERVICE}PORT to the local network - may be removed if only exposure to your Tailnet is required # If any DNS issues arise, use your preferred DNS provider by uncommenting the config below # dns: - # - 1.1.1.1 + # - ${DNS_SERVER} healthcheck: test: ["CMD", "tailscale", "status"] # Check if Tailscale is running interval: 1m # How often to perform the check @@ -30,11 +31,11 @@ services: start_period: 10s # Time to wait before starting health checks restart: always - # Plex Media Server - plex: - image: lscr.io/linuxserver/plex:latest # Image to be used - network_mode: service:tailscale-plex # Sidecar configuration to route Plex through Tailscale - container_name: plex # Name for local container management + # ${SERVICE} + application: + image: ${IMAGE_URL} # Image to be used + network_mode: service:tailscale # Sidecar configuration to route ${SERVICE} through Tailscale + container_name: app-${SERVICE} # Name for local container management environment: - PUID=1000 - PGID=1000 @@ -42,13 +43,13 @@ services: - VERSION=docker - PLEX_CLAIM= #optional volumes: - - ${PWD}/plex/config:/config - - ${PWD}/media/tvseries:/tv - - ${PWD}/media/movies:/movies + - ${PWD}/${SERVICE}/config:/config + - ${PWD}/${SERVICE}/media/tvseries:/tv + - ${PWD}/${SERVICE}/media/movies:/movies depends_on: - - tailscale-plex + - tailscale healthcheck: - test: ["CMD", "pgrep", "-f", "Plex Media Server"] # Check if Plex process is running + test: ["CMD", "pgrep", "-f", "Plex Media Server"] # Check if ${SERVICE} process is running interval: 1m # How often to perform the check timeout: 10s # Time to wait for the check to succeed retries: 3 # Number of retries before marking as 
unhealthy diff --git a/services/portainer/.env b/services/portainer/.env new file mode 100644 index 0000000..92b523e --- /dev/null +++ b/services/portainer/.env @@ -0,0 +1,8 @@ +#version=1.0 +#url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs +#COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra +SERVICE=portainer +IMAGE_URL=portainer/portainer-ce +SERVICEPORT=9000 +TS_AUTHKEY= //Insert Tailscale key here from the Admin Portal +DNS_SERVER=1.1.1.1 diff --git a/services/portainer/docker-compose.yml b/services/portainer/docker-compose.yml index 4da8a7c..d9a8e8b 100644 --- a/services/portainer/docker-compose.yml +++ b/services/portainer/docker-compose.yml @@ -1,25 +1,28 @@ services: - +# Make sure you have updated/checked the .env file with the correct variables. +# All the ${ xx } need to be defined there. # Tailscale Sidecar Configuration - tailscale-portainer: + tailscale: image: tailscale/tailscale:latest # Image to be used - container_name: tailscale-portainer # Name for local container management - hostname: portainer # Name used within your Tailscale environment + container_name: ${SERVICE} # Name for local container management + hostname: ${SERVICE} # Name used within your Tailscale environment environment: - - TS_AUTHKEY=tskey-auth- + - TS_AUTHKEY=${TS_AUTHKEY} - TS_STATE_DIR=/var/lib/tailscale - TS_SERVE_CONFIG=/config/serve.json # Tailsacale Serve configuration to expose the web interface on your local Tailnet - remove this line if not required - TS_USERSPACE=false volumes: - - ${PWD}/portainer/config:/config # Config folder used to store Tailscale files - you may need to change the path - - ${PWD}/portainer/tailscale-portainer/state:/var/lib/tailscale # Tailscale requirement - you may need to change the path + - ${PWD}/${SERVICE}/ts/config:/config # Config folder used to store Tailscale files - you may need to change the path + - ${PWD}/${SERVICE}/ts/state:/var/lib/tailscale # Tailscale requirement - you may need to 
change the path - /dev/net/tun:/dev/net/tun # Network configuration for Tailscale to work cap_add: - - net_admin - - sys_module + - net_admin # Tailscale requirement + - sys_module # Tailscale requirement + #ports: + # - 0.0.0.0:${SERVICEPORT}:${SERVICEPORT} # Binding port ${SERVICE}PORT to the local network - may be removed if only exposure to your Tailnet is required # If any DNS issues arise, use your preferred DNS provider by uncommenting the config below # dns: - # - 1.1.1.1 + # - ${DNS_SERVER} healthcheck: test: ["CMD", "tailscale", "status"] # Check if Tailscale is running interval: 1m # How often to perform the check @@ -28,17 +31,24 @@ services: start_period: 10s # Time to wait before starting health checks restart: always - # Portainer - portainer: - image: portainer/portainer-ce:latest # Image to be used - network_mode: service:tailscale-portainer # Sidecar configuration to route Portainer through Tailscale - container_name: portainer # Name for local container management - restart: always + # ${SERVICE} + application: + image: ${IMAGE_URL} # Image to be used + network_mode: service:tailscale # Sidecar configuration to route ${SERVICE} through Tailscale + container_name: app-${SERVICE} # Name for local container management + environment: + - PUID=1000 + - PGID=1000 + - TZ=Europe/Amsterdam volumes: - /var/run/docker.sock:/var/run/docker.sock - - portainer_data:/data + - ${PWD}/${SERVICE}/portainer_data:/data depends_on: - - tailscale-portainer - -volumes: - portainer_data: \ No newline at end of file + - tailscale + healthcheck: + test: ["CMD", "pgrep", "-f", "${SERVICE}"] # Check if ${SERVICE} process is running + interval: 1m # How often to perform the check + timeout: 10s # Time to wait for the check to succeed + retries: 3 # Number of retries before marking as unhealthy + start_period: 30s # Time to wait before starting health checks + restart: always \ No newline at end of file diff --git a/services/qbittorrent/.env b/services/qbittorrent/.env new 
file mode 100644 index 0000000..db9006a --- /dev/null +++ b/services/qbittorrent/.env @@ -0,0 +1,8 @@ +#version=1.0 +#url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs +#COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra +SERVICE=qbittorrent +IMAGE_URL=lscr.io/linuxserver/qbittorrent +SERVICEPORT=8080 +TS_AUTHKEY= //Insert Tailscale key here from the Admin Portal +DNS_SERVER=1.1.1.1 diff --git a/services/qbittorrent/docker-compose.yml b/services/qbittorrent/docker-compose.yml index 3a99783..f744376 100644 --- a/services/qbittorrent/docker-compose.yml +++ b/services/qbittorrent/docker-compose.yml @@ -1,26 +1,28 @@ services: +# Make sure you have updated/checked the .env file with the correct variables. +# All the ${ xx } need to be defined there. # Tailscale Sidecar Configuration - tailscale-qbittorrent: + tailscale: image: tailscale/tailscale:latest # Image to be used - container_name: tailscale-qbittorrent # Name for local container management - hostname: qbittorrent # Name used within your Tailscale environment + container_name: ${SERVICE} # Name for local container management + hostname: ${SERVICE} # Name used within your Tailscale environment environment: - - TS_AUTHKEY=tskey-auth- + - TS_AUTHKEY=${TS_AUTHKEY} - TS_STATE_DIR=/var/lib/tailscale - TS_SERVE_CONFIG=/config/serve.json # Tailsacale Serve configuration to expose the web interface on your local Tailnet - remove this line if not required - TS_USERSPACE=false volumes: - - ${PWD}/qbittorrent/config:/config # Config folder used to store Tailscale files - you may need to change the path - - ${PWD}/qbittorrent/tailscale-qbittorrent/state:/var/lib/tailscale # Tailscale requirement - you may need to change the path + - ${PWD}/${SERVICE}/ts/config:/config # Config folder used to store Tailscale files - you may need to change the path + - ${PWD}/${SERVICE}/ts/state:/var/lib/tailscale # Tailscale requirement - you may need to change the path - /dev/net/tun:/dev/net/tun # 
Network configuration for Tailscale to work cap_add: - net_admin # Tailscale requirement - sys_module # Tailscale requirement - ports: - - 0.0.0.0:8080:8080 # Binding port 8080 to the local network - may be removed if only exposure to your Tailnet is required + #ports: + # - 0.0.0.0:${SERVICEPORT}:${SERVICEPORT} # Binding port ${SERVICE}PORT to the local network - may be removed if only exposure to your Tailnet is required # If any DNS issues arise, use your preferred DNS provider by uncommenting the config below # dns: - # - 1.1.1.1 + # - ${DNS_SERVER} healthcheck: test: ["CMD", "tailscale", "status"] # Check if Tailscale is running interval: 1m # How often to perform the check @@ -29,11 +31,11 @@ services: start_period: 10s # Time to wait before starting health checks restart: always - # qbittorrent - qbittorrent: - image: lscr.io/linuxserver/qbittorrent:latest # Image to be used - network_mode: service:tailscale-qbittorrent # Sidecar configuration to route qbittorrent through Tailscale - container_name: qbittorrent # Name for local container management + # ${SERVICE} + application: + image: ${IMAGE_URL} # Image to be used + network_mode: service:tailscale # Sidecar configuration to route ${SERVICE} through Tailscale + container_name: app-${SERVICE} # Name for local container management environment: - PUID=1000 - PGID=1000 @@ -41,12 +43,12 @@ services: - WEBUI_PORT=8080 # - TORRENTING_PORT=6881 #optional volumes: - - ${PWD}/qbittorrent/config:/config - - ${PWD}/downloadclient-downloads:/downloads + - ${PWD}/${SERVICE}/config:/config + - ${PWD}/${SERVICE}/downloads:/downloads depends_on: - - tailscale-qbittorrent + - tailscale healthcheck: - test: ["CMD", "pgrep", "-f", "qbittorrent"] # Check if qbittorrent process is running + test: ["CMD", "pgrep", "-f", "${SERVICE}"] # Check if ${SERVICE} process is running interval: 1m # How often to perform the check timeout: 10s # Time to wait for the check to succeed retries: 3 # Number of retries before marking as 
unhealthy diff --git a/services/radarr/.env b/services/radarr/.env new file mode 100644 index 0000000..6252dcf --- /dev/null +++ b/services/radarr/.env @@ -0,0 +1,8 @@ +#version=1.0 +#url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs +#COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra +SERVICE=radarr +IMAGE_URL=radarr/server +SERVICEPORT=7878 +TS_AUTHKEY= //Insert Tailscale key here from the Admin Portal +DNS_SERVER=1.1.1.1 diff --git a/services/radarr/docker-compose.yml b/services/radarr/docker-compose.yml index d1283d2..1f23799 100644 --- a/services/radarr/docker-compose.yml +++ b/services/radarr/docker-compose.yml @@ -1,25 +1,28 @@ services: - +# Make sure you have updated/checked the .env file with the correct variables. +# All the ${ xx } need to be defined there. # Tailscale Sidecar Configuration - tailscale-radarr: + tailscale: image: tailscale/tailscale:latest # Image to be used - container_name: tailscale-radarr # Name for local container management - hostname: radarr # Name used within your Tailscale environment + container_name: ${SERVICE} # Name for local container management + hostname: ${SERVICE} # Name used within your Tailscale environment environment: - - TS_AUTHKEY=tskey-auth- + - TS_AUTHKEY=${TS_AUTHKEY} - TS_STATE_DIR=/var/lib/tailscale - TS_SERVE_CONFIG=/config/serve.json # Tailsacale Serve configuration to expose the web interface on your local Tailnet - remove this line if not required - TS_USERSPACE=false volumes: - - ${PWD}/radarr/config:/config # Config folder used to store Tailscale files - you may need to change the path - - ${PWD}/radarr/tailscale-radarr/state:/var/lib/tailscale # Tailscale requirement - you may need to change the path + - ${PWD}/${SERVICE}/ts/config:/config # Config folder used to store Tailscale files - you may need to change the path + - ${PWD}/${SERVICE}/ts/state:/var/lib/tailscale # Tailscale requirement - you may need to change the path - /dev/net/tun:/dev/net/tun # 
Network configuration for Tailscale to work cap_add: - net_admin # Tailscale requirement - sys_module # Tailscale requirement + #ports: + # - 0.0.0.0:${SERVICEPORT}:${SERVICEPORT} # Binding port ${SERVICE}PORT to the local network - may be removed if only exposure to your Tailnet is required # If any DNS issues arise, use your preferred DNS provider by uncommenting the config below # dns: - # - 1.1.1.1 + # - ${DNS_SERVER} healthcheck: test: ["CMD", "tailscale", "status"] # Check if Tailscale is running interval: 1m # How often to perform the check @@ -28,23 +31,23 @@ services: start_period: 10s # Time to wait before starting health checks restart: always - # radarr - radarr: - image: radarr/server:latest - network_mode: service:tailscale-radarr # Sidecar configuration to route radarr through Tailscale - container_name: radarr + # ${SERVICE} + application: + image: ${IMAGE_URL} # Image to be used + network_mode: service:tailscale # Sidecar configuration to route ${SERVICE} through Tailscale + container_name: app-${SERVICE} # Name for local container management environment: - PUID=1000 - PGID=1000 - - TZ=TZ=Europe/Amsterdam + - TZ=Europe/Amsterdam volumes: - - ${PWD}/radarr/config:/config - - ${PWD}/media/movies:/movies #Optional - - ${PWD}/downloadclient-downloads:/downloads' #Optional + - ${PWD}/${SERVICE}/config:/config + - ${PWD}/${SERVICE}/media/movies:/movies #Optional + - ${PWD}/${SERVICE}/downloads:/downloads #Optional depends_on: - - tailscale-radarr + - tailscale healthcheck: - test: ["CMD", "pgrep", "-f", "Radarr"] # Check if Radarr process is running + test: ["CMD", "pgrep", "-f", "${SERVICE}"] # Check if ${SERVICE} process is running interval: 1m # How often to perform the check timeout: 10s # Time to wait for the check to succeed retries: 3 # Number of retries before marking as unhealthy diff --git a/services/resilio-sync/.env b/services/resilio-sync/.env new file mode 100644 index 0000000..9573b3f --- /dev/null +++ b/services/resilio-sync/.env @@ -0,0 
+1,8 @@ +#version=1.0 +#url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs +#COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra +SERVICE=resilio-sync +IMAGE_URL=linuxserver/resilio-sync +SERVICEPORT=8888 +TS_AUTHKEY= //Insert Tailscale key here from the Admin Portal +DNS_SERVER=1.1.1.1 diff --git a/services/resilio-sync/docker-compose.yml b/services/resilio-sync/docker-compose.yml index 4a10228..5795a44 100644 --- a/services/resilio-sync/docker-compose.yml +++ b/services/resilio-sync/docker-compose.yml @@ -1,22 +1,28 @@ services: - +# Make sure you have updated/checked the .env file with the correct variables. +# All the ${ xx } need to be defined there. # Tailscale Sidecar Configuration - tailscale-resilio-sync: + tailscale: image: tailscale/tailscale:latest # Image to be used - container_name: tailscale-resilio-sync # Name for local container management - hostname: resilio-sync # Name used within your Tailscale environment + container_name: ${SERVICE} # Name for local container management + hostname: ${SERVICE} # Name used within your Tailscale environment environment: - - TS_AUTHKEY=tskey-auth- + - TS_AUTHKEY=${TS_AUTHKEY} - TS_STATE_DIR=/var/lib/tailscale - TS_SERVE_CONFIG=/config/serve.json # Tailsacale Serve configuration to expose the web interface on your local Tailnet - remove this line if not required - TS_USERSPACE=false volumes: - - ${PWD}/resilio-sync/config:/config # Config folder used to store Tailscale files - you may need to change the path - - ${PWD}/resilio-sync/tailscale-resilio-sync/state:/var/lib/tailscale # Tailscale requirement - you may need to change the path + - ${PWD}/${SERVICE}/ts/config:/config # Config folder used to store Tailscale files - you may need to change the path + - ${PWD}/${SERVICE}/ts/state:/var/lib/tailscale # Tailscale requirement - you may need to change the path - /dev/net/tun:/dev/net/tun # Network configuration for Tailscale to work cap_add: - - net_admin - - sys_module + - 
net_admin # Tailscale requirement + - sys_module # Tailscale requirement + #ports: + # - 0.0.0.0:${SERVICEPORT}:${SERVICEPORT} # Binding port ${SERVICE}PORT to the local network - may be removed if only exposure to your Tailnet is required + # If any DNS issues arise, use your preferred DNS provider by uncommenting the config below + # dns: + # - ${DNS_SERVER} healthcheck: test: ["CMD", "tailscale", "status"] # Check if Tailscale is running interval: 1m # How often to perform the check @@ -25,24 +31,24 @@ services: start_period: 10s # Time to wait before starting health checks restart: always - # Resilio Sync - resilio-sync: - image: linuxserver/resilio-sync:latest - network_mode: service:tailscale-resilio-sync - container_name: resilio-sync + # ${SERVICE} + application: + image: ${IMAGE_URL} # Image to be used + network_mode: service:tailscale # Sidecar configuration to route ${SERVICE} through Tailscale + container_name: app-${SERVICE} # Name for local container management environment: - PUID=1000 - PGID=1000 - TZ=Europe/Amsterdam volumes: - - ${PWD}/resilio-sync-data/config:/config - - ${PWD}/resilio-sync-data/downloads:/downloads - - ${PWD}/resilio-sync-data:/sync + - ${PWD}/${SERVICE}/config:/config + - ${PWD}/${SERVICE}/downloads:/downloads + - ${PWD}/${SERVICE}/data:/sync depends_on: - - tailscale-resilio-sync + - tailscale healthcheck: - test: ["CMD", "pgrep", "-f", "rslsync"] # Check if Resilio Sync (rslsync process) is running - interval: 5m # How often to perform the check + test: ["CMD", "pgrep", "-f", "rslsync"] # Check if ${SERVICE} process is running + interval: 1m # How often to perform the check timeout: 10s # Time to wait for the check to succeed retries: 3 # Number of retries before marking as unhealthy start_period: 30s # Time to wait before starting health checks diff --git a/services/searxng/.env b/services/searxng/.env new file mode 100644 index 0000000..5d47c2c --- /dev/null +++ b/services/searxng/.env @@ -0,0 +1,8 @@ +#version=1.0 
+#url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs +#COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra +SERVICE=searxng +IMAGE_URL=docker.io/searxng/searxng +SERVICEPORT=8080 +TS_AUTHKEY= //Insert Tailscale key here from the Admin Portal +DNS_SERVER=1.1.1.1 diff --git a/services/searxng/docker-compose.yml b/services/searxng/docker-compose.yml index 7c3937f..4b8dc4a 100644 --- a/services/searxng/docker-compose.yml +++ b/services/searxng/docker-compose.yml @@ -1,27 +1,28 @@ services: -# Make sure you replace SERVICE and SERVICEPORT. Environmentvariables may differ, check documentation of container maintaner. +# Make sure you have updated/checked the .env file with the correct variables. +# All the ${ xx } need to be defined there. # Tailscale Sidecar Configuration - tailscale-searxng: + tailscale: image: tailscale/tailscale:latest # Image to be used - container_name: tailscale-searxng # Name for local container management - hostname: searxng # Name used within your Tailscale environment + container_name: ${SERVICE} # Name for local container management + hostname: ${SERVICE} # Name used within your Tailscale environment environment: - - TS_AUTHKEY=tskey-auth- + - TS_AUTHKEY=${TS_AUTHKEY} - TS_STATE_DIR=/var/lib/tailscale - TS_SERVE_CONFIG=/config/serve.json # Tailsacale Serve configuration to expose the web interface on your local Tailnet - remove this line if not required - TS_USERSPACE=false volumes: - - ${PWD}/searxng/config:/config # Config folder used to store Tailscale files - you may need to change the path - - ${PWD}/searxng/tailscale-searxng/state:/var/lib/tailscale # Tailscale requirement - you may need to change the path + - ${PWD}/${SERVICE}/ts/config:/config # Config folder used to store Tailscale files - you may need to change the path + - ${PWD}/${SERVICE}/ts/state:/var/lib/tailscale # Tailscale requirement - you may need to change the path - /dev/net/tun:/dev/net/tun # Network configuration for Tailscale to 
work cap_add: - net_admin # Tailscale requirement - sys_module # Tailscale requirement - # ports: - # - 0.0.0.0:SERVICEPORT:SERVICEPORT # Binding port SERVICEPORT to the local network - may be removed if only exposure to your Tailnet is required + #ports: + # - 0.0.0.0:${SERVICEPORT}:${SERVICEPORT} # Binding port ${SERVICE}PORT to the local network - may be removed if only exposure to your Tailnet is required # If any DNS issues arise, use your preferred DNS provider by uncommenting the config below # dns: - # - 1.1.1.1 + # - ${DNS_SERVER} healthcheck: test: ["CMD", "tailscale", "status"] # Check if Tailscale is running interval: 1m # How often to perform the check @@ -30,26 +31,33 @@ services: start_period: 10s # Time to wait before starting health checks restart: always - searxng: - container_name: searxng - image: docker.io/searxng/searxng:latest - network_mode: service:tailscale-searxng - restart: unless-stopped - volumes: - - ./searxng:/etc/searxng:rw + # ${SERVICE} + application: + image: ${IMAGE_URL} # Image to be used + network_mode: service:tailscale # Sidecar configuration to route ${SERVICE} through Tailscale + container_name: app-${SERVICE} # Name for local container management environment: + - PUID=1000 + - PGID=1000 + - TZ=Europe/Amsterdam - SEARXNG_BASE_URL=https://searxng..ts.net/ + volumes: + - ./searxng:/etc/searxng:rw cap_drop: - ALL cap_add: - CHOWN - SETGID - SETUID + restart: always logging: driver: "json-file" options: max-size: "1m" max-file: "1" + depends_on: + - tailscale + - redis redis: container_name: redis-searxng diff --git a/services/sonarr/.env b/services/sonarr/.env new file mode 100644 index 0000000..347484a --- /dev/null +++ b/services/sonarr/.env @@ -0,0 +1,8 @@ +#version=1.0 +#url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs +#COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra +SERVICE=sonarr +IMAGE_URL=lscr.io/linuxserver/sonarr +SERVICEPORT=8989 +TS_AUTHKEY= //Insert Tailscale key 
here from the Admin Portal +DNS_SERVER=1.1.1.1 diff --git a/services/sonarr/docker-compose.yml b/services/sonarr/docker-compose.yml index 43f67e7..b76995f 100644 --- a/services/sonarr/docker-compose.yml +++ b/services/sonarr/docker-compose.yml @@ -1,26 +1,28 @@ services: +# Make sure you have updated/checked the .env file with the correct variables. +# All the ${ xx } need to be defined there. # Tailscale Sidecar Configuration - tailscale-sonarr: + tailscale: image: tailscale/tailscale:latest # Image to be used - container_name: tailscale-sonarr # Name for local container management - hostname: sonarr # Name used within your Tailscale environment + container_name: ${SERVICE} # Name for local container management + hostname: ${SERVICE} # Name used within your Tailscale environment environment: - - TS_AUTHKEY=tskey-auth- + - TS_AUTHKEY=${TS_AUTHKEY} - TS_STATE_DIR=/var/lib/tailscale - TS_SERVE_CONFIG=/config/serve.json # Tailsacale Serve configuration to expose the web interface on your local Tailnet - remove this line if not required - TS_USERSPACE=false volumes: - - ${PWD}/sonarr/config:/config # Config folder used to store Tailscale files - you may need to change the path - - ${PWD}/sonarr/tailscale-sonarr/state:/var/lib/tailscale # Tailscale requirement - you may need to change the path + - ${PWD}/${SERVICE}/ts/config:/config # Config folder used to store Tailscale files - you may need to change the path + - ${PWD}/${SERVICE}/ts/state:/var/lib/tailscale # Tailscale requirement - you may need to change the path - /dev/net/tun:/dev/net/tun # Network configuration for Tailscale to work cap_add: - net_admin # Tailscale requirement - sys_module # Tailscale requirement - ports: - - 0.0.0.0:8989:8989 # Binding port 8989 to the local network - may be removed if only exposure to your Tailnet is required + #ports: + # - 0.0.0.0:${SERVICEPORT}:${SERVICEPORT} # Binding port ${SERVICEPORT} to the local network - may be removed if only exposure to your Tailnet is required # If
any DNS issues arise, use your preferred DNS provider by uncommenting the config below # dns: - # - 1.1.1.1 + # - ${DNS_SERVER} healthcheck: test: ["CMD", "tailscale", "status"] # Check if Tailscale is running interval: 1m # How often to perform the check @@ -29,23 +31,23 @@ services: start_period: 10s # Time to wait before starting health checks restart: always - # sonarr - sonarr: - image: lscr.io/linuxserver/sonarr:latest # Image to be used - network_mode: service:tailscale-sonarr # Sidecar configuration to route Sonarr through Tailscale - container_name: sonarr # Name for local container management + # ${SERVICE} + application: + image: ${IMAGE_URL} # Image to be used + network_mode: service:tailscale # Sidecar configuration to route ${SERVICE} through Tailscale + container_name: app-${SERVICE} # Name for local container management environment: - PUID=1000 - PGID=1000 - TZ=Europe/Amsterdam volumes: - - ${PWD}/sonarr/config:/config - - ${PWD}/media/tvseries:/tv - - ${PWD}/downloadclient-downloads:/downloads + - ${PWD}/${SERVICE}/config:/config + - ${PWD}/${SERVICE}/media/tvseries:/tv + - ${PWD}/${SERVICE}/downloads:/downloads depends_on: - - tailscale-sonarr + - tailscale healthcheck: - test: ["CMD", "pgrep", "-f", "Sonarr"] # Check if Sonarr process is running + test: ["CMD", "pgrep", "-f", "${SERVICE}"] # Check if ${SERVICE} process is running interval: 1m # How often to perform the check timeout: 10s # Time to wait for the check to succeed retries: 3 # Number of retries before marking as unhealthy diff --git a/services/stirlingpdf/.env b/services/stirlingpdf/.env new file mode 100644 index 0000000..29d49de --- /dev/null +++ b/services/stirlingpdf/.env @@ -0,0 +1,8 @@ +#version=1.0 +#url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs +#COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra +SERVICE=stirlingpdf +IMAGE_URL=frooodle/s-pdf +SERVICEPORT=8080 +TS_AUTHKEY= //Insert Tailscale key here from the Admin Portal 
+DNS_SERVER=1.1.1.1 diff --git a/services/stirlingpdf/docker-compose.yml b/services/stirlingpdf/docker-compose.yml index 0a27ae4..af50209 100644 --- a/services/stirlingpdf/docker-compose.yml +++ b/services/stirlingpdf/docker-compose.yml @@ -1,22 +1,28 @@ services: - +# Make sure you have updated/checked the .env file with the correct variables. +# All the ${ xx } need to be defined there. # Tailscale Sidecar Configuration - tailscale-stirlingpdf: + tailscale: image: tailscale/tailscale:latest # Image to be used - container_name: tailscale-stirlingpdf # Name for local container management - hostname: pdf # Name used within your Tailscale environment + container_name: ${SERVICE} # Name for local container management + hostname: ${SERVICE} # Name used within your Tailscale environment environment: - - TS_AUTHKEY=tskey-auth- + - TS_AUTHKEY=${TS_AUTHKEY} - TS_STATE_DIR=/var/lib/tailscale - TS_SERVE_CONFIG=/config/serve.json # Tailsacale Serve configuration to expose the web interface on your local Tailnet - remove this line if not required - TS_USERSPACE=false volumes: - - ${PWD}/stirlingpdf/config:/config # Config folder used to store Tailscale files - you may need to change the path - - ${PWD}/stirlingpdf/tailscale-stirlingpdf/state:/var/lib/tailscale # Tailscale requirement - you may need to change the path + - ${PWD}/${SERVICE}/ts/config:/config # Config folder used to store Tailscale files - you may need to change the path + - ${PWD}/${SERVICE}/ts/state:/var/lib/tailscale # Tailscale requirement - you may need to change the path - /dev/net/tun:/dev/net/tun # Network configuration for Tailscale to work cap_add: - - net_admin - - sys_module + - net_admin # Tailscale requirement + - sys_module # Tailscale requirement + #ports: + # - 0.0.0.0:${SERVICEPORT}:${SERVICEPORT} # Binding port ${SERVICE}PORT to the local network - may be removed if only exposure to your Tailnet is required + # If any DNS issues arise, use your preferred DNS provider by uncommenting the config 
below + # dns: + # - ${DNS_SERVER} healthcheck: test: ["CMD", "tailscale", "status"] # Check if Tailscale is running interval: 1m # How often to perform the check @@ -25,24 +31,24 @@ services: start_period: 10s # Time to wait before starting health checks restart: always - # Stirling-PDF - stirling-pdf: - image: frooodle/s-pdf:latest # Image to be used - network_mode: service:tailscale-stirlingpdf # Sidecar configuration to route Stirling-PDF through Tailscale - container_name: stirlingpdf # Name for local container management - volumes: - - ${PWD}/stirlingpdf/trainingData:/usr/share/tessdata # Required for extra OCR languages - - ${PWD}/stirlingpdf/extraConfigs:/configs -# - ${PWD}/stirlingpdf/customFiles:/customFiles/ # May be enabled if desired -# - ${PWD}/stirlingpdf/logs:/logs/ # May be enabled if desired + # ${SERVICE} + application: + image: ${IMAGE_URL} # Image to be used + network_mode: service:tailscale # Sidecar configuration to route ${SERVICE} through Tailscale + container_name: app-${SERVICE} # Name for local container management environment: - DOCKER_ENABLE_SECURITY=false - INSTALL_BOOK_AND_ADVANCED_HTML_OPS=false - LANGS=en_GB + volumes: + - ${PWD}/${SERVICE}/trainingData:/usr/share/tessdata # Required for extra OCR languages + - ${PWD}/${SERVICE}/extraConfigs:/configs +# - ${PWD}/${SERVICE}/customFiles:/customFiles/ # May be enabled if desired +# - ${PWD}/${SERVICE}/logs:/logs/ # May be enabled if desired depends_on: - - tailscale-stirlingpdf + - tailscale healthcheck: - test: ["CMD", "pgrep", "-f", "app.jar"] # Check if Stirling-PDF process is running + test: ["CMD", "pgrep", "-f", "app.jar"] # Check if ${SERVICE} process is running interval: 1m # How often to perform the check timeout: 10s # Time to wait for the check to succeed retries: 3 # Number of retries before marking as unhealthy diff --git a/services/tailscale-exit-node/.env b/services/tailscale-exit-node/.env new file mode 100644 index 0000000..4de9511 --- /dev/null +++ 
b/services/tailscale-exit-node/.env @@ -0,0 +1,8 @@ +#version=1.0 +#url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs +#COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra +SERVICE=tailscale-exit-node +IMAGE_URL=tailscale/tailscale +#SERVICEPORT=80 +TS_AUTHKEY= # Insert Tailscale key here from the Admin Portal +DNS_SERVER=1.1.1.1 diff --git a/services/tailscale-exit-node/docker-compose.yml b/services/tailscale-exit-node/docker-compose.yml index add0547..c05d144 100644 --- a/services/tailscale-exit-node/docker-compose.yml +++ b/services/tailscale-exit-node/docker-compose.yml @@ -1,27 +1,28 @@ services: - - # Tailscale Exit Node +# Make sure you have updated/checked the .env file with the correct variables. +# All the ${ xx } need to be defined there. + # Tailscale Sidecar Configuration tailscale: image: tailscale/tailscale:latest # Image to be used - container_name: tailscale-exit-node # Name for local container management - hostname: tailscale-exit-node # Name used within your Tailscale environment + container_name: ${SERVICE} # Name for local container management + hostname: ${SERVICE} # Name used within your Tailscale environment environment: - - TS_AUTHKEY=tskey-auth- + - TS_AUTHKEY=${TS_AUTHKEY} - TS_STATE_DIR=/var/lib/tailscale - TS_EXTRA_ARGS=--advertise-exit-node - TS_USERSPACE=false volumes: - - ${PWD}/tailscale-exit-node/config:/config - - ${PWD}/tailscale-exit-node/tailscale-node/state:/var/lib/tailscale - - /dev/net/tun:/dev/net/tun + - ${PWD}/${SERVICE}/ts/config:/config # Config folder used to store Tailscale files - you may need to change the path + - ${PWD}/${SERVICE}/ts/state:/var/lib/tailscale # Tailscale requirement - you may need to change the path + - /dev/net/tun:/dev/net/tun # Network configuration for Tailscale to work dns: - 1.1.1.1 # Can be changed to your desired DNS provider sysctls: net.ipv4.ip_forward: 1 net.ipv6.conf.all.forwarding: 1 cap_add: - - NET_ADMIN - - SYS_MODULE + - net_admin #
Tailscale requirement + - sys_module # Tailscale requirement devices: - /dev/net/tun network_mode: bridge diff --git a/services/tautulli/.env b/services/tautulli/.env new file mode 100644 index 0000000..22fa0aa --- /dev/null +++ b/services/tautulli/.env @@ -0,0 +1,8 @@ +#version=1.0 +#url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs +#COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra +SERVICE=tautulli +IMAGE_URL=lscr.io/linuxserver/tautulli +SERVICEPORT=8181 +TS_AUTHKEY= //Insert Tailscale key here from the Admin Portal +DNS_SERVER=1.1.1.1 diff --git a/services/tautulli/docker-compose.yml b/services/tautulli/docker-compose.yml index 2278ef5..32129e6 100644 --- a/services/tautulli/docker-compose.yml +++ b/services/tautulli/docker-compose.yml @@ -1,27 +1,28 @@ services: - +# Make sure you have updated/checked the .env file with the correct variables. +# All the ${ xx } need to be defined there. # Tailscale Sidecar Configuration - tailscale-tautulli: + tailscale: image: tailscale/tailscale:latest # Image to be used - container_name: tailscale-tautulli # Name for local container management - hostname: tautulli # Name used within your Tailscale environment + container_name: ${SERVICE} # Name for local container management + hostname: ${SERVICE} # Name used within your Tailscale environment environment: - - TS_AUTHKEY=tskey-auth- + - TS_AUTHKEY=${TS_AUTHKEY} - TS_STATE_DIR=/var/lib/tailscale - TS_SERVE_CONFIG=/config/serve.json # Tailsacale Serve configuration to expose the web interface on your local Tailnet - remove this line if not required - TS_USERSPACE=false volumes: - - ${PWD}/tautulli/config:/config # Config folder used to store Tailscale files - you may need to change the path - - ${PWD}/tautulli/tailscale-tautulli/state:/var/lib/tailscale # Tailscale requirement - you may need to change the path + - ${PWD}/${SERVICE}/ts/config:/config # Config folder used to store Tailscale files - you may need to change the path + - 
${PWD}/${SERVICE}/ts/state:/var/lib/tailscale # Tailscale requirement - you may need to change the path - /dev/net/tun:/dev/net/tun # Network configuration for Tailscale to work cap_add: - - net_admin - - sys_module - ports: - - 0.0.0.0:8181:8181 # Binding port 8181 to the local network - may be removed if only exposure to your Tailnet is required + - net_admin # Tailscale requirement + - sys_module # Tailscale requirement + #ports: + # - 0.0.0.0:${SERVICEPORT}:${SERVICEPORT} # Binding port ${SERVICEPORT} to the local network - may be removed if only exposure to your Tailnet is required # If any DNS issues arise, use your preferred DNS provider by uncommenting the config below # dns: - # - 1.1.1.1 + # - ${DNS_SERVER} healthcheck: test: ["CMD", "tailscale", "status"] # Check if Tailscale is running interval: 1m # How often to perform the check @@ -30,24 +31,23 @@ services: start_period: 10s # Time to wait before starting health checks restart: always - # Tautulli - tautulli: - image: lscr.io/linuxserver/tautulli:latest - network_mode: service:tailscale-tautulli # Sidecar configuration to route Tautulli through Tailscale - container_name: tautulli + # ${SERVICE} + application: + image: ${IMAGE_URL} # Image to be used + network_mode: service:tailscale # Sidecar configuration to route ${SERVICE} through Tailscale + container_name: app-${SERVICE} # Name for local container management environment: - PUID=1000 - PGID=1000 - - TZ=Europe/Amsterdam + - TZ=Europe/Amsterdam volumes: - - ${PWD}/tautulli/config:/config + - ${PWD}/${SERVICE}/app/config:/config depends_on: - - tailscale-tautulli + - tailscale healthcheck: - test: ["CMD", "pgrep", "-f", "Tautulli"] # Check if Tautilli process is running + test: ["CMD", "pgrep", "-f", "${SERVICE}"] # Check if ${SERVICE} process is running interval: 1m # How often to perform the check timeout: 10s # Time to wait for the check to succeed retries: 3 # Number of retries before marking as unhealthy start_period: 30s # Time to wait before
starting health checks - restart: always - \ No newline at end of file + restart: always \ No newline at end of file diff --git a/services/uptime-kuma/.env b/services/uptime-kuma/.env new file mode 100644 index 0000000..6208062 --- /dev/null +++ b/services/uptime-kuma/.env @@ -0,0 +1,8 @@ +#version=1.0 +#url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs +#COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra +SERVICE=uptime-kuma +IMAGE_URL=louislam/uptime-kuma +SERVICEPORT=3001 +TS_AUTHKEY= //Insert Tailscale key here from the Admin Portal +DNS_SERVER=1.1.1.1 diff --git a/services/uptime-kuma/docker-compose.yml b/services/uptime-kuma/docker-compose.yml index ab45107..0fd0c7a 100644 --- a/services/uptime-kuma/docker-compose.yml +++ b/services/uptime-kuma/docker-compose.yml @@ -1,25 +1,28 @@ services: - +# Make sure you have updated/checked the .env file with the correct variables. +# All the ${ xx } need to be defined there. # Tailscale Sidecar Configuration - tailscale-uptime-kuma: + tailscale: image: tailscale/tailscale:latest # Image to be used - container_name: tailscale-uptime-kuma # Name for local container management - hostname: uptime # Name used within your Tailscale environment + container_name: ${SERVICE} # Name for local container management + hostname: ${SERVICE} # Name used within your Tailscale environment environment: - - TS_AUTHKEY=tskey-auth- + - TS_AUTHKEY=${TS_AUTHKEY} - TS_STATE_DIR=/var/lib/tailscale - TS_SERVE_CONFIG=/config/serve.json # Tailsacale Serve configuration to expose the web interface on your local Tailnet - remove this line if not required - TS_USERSPACE=false volumes: - - ${PWD}/uptime-kuma/config:/config # Config folder used to store Tailscale files - you may need to change the path - - ${PWD}/uptime-kuma/tailscale-uptime-kuma/state:/var/lib/tailscale # Tailscale requirement - you may need to change the path + - ${PWD}/${SERVICE}/ts/config:/config # Config folder used to store Tailscale 
files - you may need to change the path + - ${PWD}/${SERVICE}/ts/state:/var/lib/tailscale # Tailscale requirement - you may need to change the path - /dev/net/tun:/dev/net/tun # Network configuration for Tailscale to work cap_add: - - net_admin - - sys_module + - net_admin # Tailscale requirement + - sys_module # Tailscale requirement + #ports: + # - 0.0.0.0:${SERVICEPORT}:${SERVICEPORT} # Binding port ${SERVICE}PORT to the local network - may be removed if only exposure to your Tailnet is required # If any DNS issues arise, use your preferred DNS provider by uncommenting the config below # dns: - # - 1.1.1.1 + # - ${DNS_SERVER} healthcheck: test: ["CMD", "tailscale", "status"] # Check if Tailscale is running interval: 1m # How often to perform the check @@ -28,14 +31,18 @@ services: start_period: 10s # Time to wait before starting health checks restart: always - # uptime-kuma - uptime-kuma: - image: louislam/uptime-kuma:latest # Image to be used - network_mode: service:tailscale-uptime-kuma # Sidecar configuration to route uptime-kuma through Tailscale - container_name: uptime-kuma # Name for local container management + # ${SERVICE} + application: + image: ${IMAGE_URL} # Image to be used + network_mode: service:tailscale # Sidecar configuration to route ${SERVICE} through Tailscale + container_name: app-${SERVICE} # Name for local container management + environment: + - PUID=1000 + - PGID=1000 + - TZ=Europe/Amsterdam volumes: - - ${PWD}/uptime-kuma/uptime-kuma-data:/app/data # uptime-kuma data/configuration folder + - ${PWD}/${SERVICE}/uptime-kuma-data:/app/data # uptime-kuma data/configuration folder - /var/run/docker.sock:/var/run/docker.sock:ro # Read-only access to the docker.sock depends_on: - - tailscale-uptime-kuma - restart: always \ No newline at end of file + - tailscale + restart: always diff --git a/services/vaultwarden/.env b/services/vaultwarden/.env new file mode 100644 index 0000000..7e87f4b --- /dev/null +++ b/services/vaultwarden/.env @@ -0,0 
+1,8 @@ +#version=1.0 +#url=https://github.com/2Tiny2Scale/tailscale-docker-sidecar-configs +#COMPOSE_PROJECT_NAME= // only use in multiple deployments on the same infra +SERVICE=vaultwarden +IMAGE_URL=vaultwarden/server +SERVICEPORT=80 +TS_AUTHKEY= //Insert Tailscale key here from the Admin Portal +DNS_SERVER=1.1.1.1 diff --git a/services/vaultwarden/docker-compose.yml b/services/vaultwarden/docker-compose.yml index 02f324d..b0d4a05 100644 --- a/services/vaultwarden/docker-compose.yml +++ b/services/vaultwarden/docker-compose.yml @@ -1,25 +1,28 @@ services: - +# Make sure you have updated/checked the .env file with the correct variables. +# All the ${ xx } need to be defined there. # Tailscale Sidecar Configuration - tailscale-vaultwarden: + tailscale: image: tailscale/tailscale:latest # Image to be used - container_name: tailscale-vaultwarden # Name for local container management - hostname: vaultwarden # Name used within your Tailscale environment + container_name: ${SERVICE} # Name for local container management + hostname: ${SERVICE} # Name used within your Tailscale environment environment: - - TS_AUTHKEY=tskey-auth- + - TS_AUTHKEY=${TS_AUTHKEY} - TS_STATE_DIR=/var/lib/tailscale - TS_SERVE_CONFIG=/config/serve.json # Tailsacale Serve configuration to expose the web interface on your local Tailnet - remove this line if not required - TS_USERSPACE=false volumes: - - ${PWD}/vaultwarden/config:/config # Config folder used to store Tailscale files - you may need to change the path - - ${PWD}/vaultwarden/tailscale-vaultwarden/state:/var/lib/tailscale # Tailscale requirement - you may need to change the path + - ${PWD}/${SERVICE}/ts/config:/config # Config folder used to store Tailscale files - you may need to change the path + - ${PWD}/${SERVICE}/ts/state:/var/lib/tailscale # Tailscale requirement - you may need to change the path - /dev/net/tun:/dev/net/tun # Network configuration for Tailscale to work cap_add: - net_admin # Tailscale requirement - sys_module # 
Tailscale requirement + #ports: + # - 0.0.0.0:${SERVICEPORT}:${SERVICEPORT} # Binding port ${SERVICE}PORT to the local network - may be removed if only exposure to your Tailnet is required # If any DNS issues arise, use your preferred DNS provider by uncommenting the config below # dns: - # - 1.1.1.1 + # - ${DNS_SERVER} healthcheck: test: ["CMD", "tailscale", "status"] # Check if Tailscale is running interval: 1m # How often to perform the check @@ -28,16 +31,16 @@ services: start_period: 10s # Time to wait before starting health checks restart: always - # Vaultwarden - vaultwarden: - image: vaultwarden/server:latest - network_mode: service:tailscale-vaultwarden # Sidecar configuration to route Vaultwarden through Tailscale - container_name: vaultwarden + # ${SERVICE} + application: + image: ${IMAGE_URL} # Image to be used + network_mode: service:tailscale # Sidecar configuration to route ${SERVICE} through Tailscale + container_name: app-${SERVICE} # Name for local container management environment: # DOMAIN: "https://vaultwarden.example.com" # required when using a reverse proxy; your domain; vaultwarden needs to know it's https to work properly with attachments SIGNUPS_ALLOWED: "true" # Deactivate this with "false" after you have created your account so that no strangers can register volumes: - - ${PWD}/vaultwarden/vw-data:/data + - ${PWD}/${SERVICE}/vw-data:/data depends_on: - - tailscale-vaultwarden + - tailscale restart: always \ No newline at end of file