TEST: Complex docker-compose rework. Not tested yet
@@ -14,11 +14,14 @@ services:
    volumes:
      - ${PATH_TO_CONFIG}:/config
    ports:
      - 6875:80
      - "6875:80"
    restart: unless-stopped
    depends_on:
      - bookstack_database

      bookstack_database:
        condition: service_healthy
    networks:
      - bookstack-net

  bookstack_database:
    image: lscr.io/linuxserver/mariadb
    container_name: bookstack_database
@@ -32,4 +35,15 @@ services:
      - MYSQL_PASSWORD=${DB_USER_PASS}
    volumes:
      - ${PATH_TO_DB}:/config
    restart: unless-stopped
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "mysqladmin", "ping", "-h", "localhost"]
      interval: 10s
      timeout: 5s
      retries: 5
    networks:
      - bookstack-net

networks:
  bookstack-net:
    driver: bridge

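For reference, the long-form depends_on used above only takes effect when the dependency defines a healthcheck; a minimal sketch of the pairing (service and image names are placeholders, not part of the commit):

services:
  app:
    image: example/app:latest
    depends_on:
      db:
        condition: service_healthy   # wait until the db healthcheck reports healthy
  db:
    image: lscr.io/linuxserver/mariadb
    healthcheck:
      test: ["CMD", "mysqladmin", "ping", "-h", "localhost"]
      interval: 10s
      timeout: 5s
      retries: 5

Without the healthcheck block on the dependency, `condition: service_healthy` keeps the dependent container from starting at all.
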
@@ -3,35 +3,13 @@ services:
    image: favonia/cloudflare-ddns:latest
    container_name: cloudflare-ddns
    env_file: .env
    # Choose the appropriate tag based on your need:
    # - "latest" for the latest stable version (which could become 2.x.y
    #   in the future and break things)
    # - "1" for the latest stable version whose major version is 1
    # - "1.x.y" to pin the specific version 1.x.y
    network_mode: host
    # This bypasses network isolation and makes IPv6 easier (optional; see below)
    restart: always
    # Restart the updater after reboot
    user: "1000:1000"
    # Run the updater with specific user and group IDs (in that order).
    # You can change the two numbers based on your need.
    read_only: true
    # Make the container filesystem read-only (optional but recommended)
    cap_drop: [all]
    # Drop all Linux capabilities (optional but recommended)
    security_opt: [no-new-privileges:true]
    # Another protection to restrict superuser privileges (optional but recommended)
    environment:
      - CLOUDFLARE_API_TOKEN=${CLOUDFLARE_API_TOKEN}
      # Your Cloudflare API token
      - DOMAINS=${DOMAINS}
      # Your domains (separated by commas)
      - PROXIED=true
      # Tell Cloudflare to cache webpages and hide your IP (optional)
#networks:
#  LAN0:
#    external: true
#    name: LAN0
# Introduce custom Docker networks to the 'services' in this file. A common use case
# for this is binding one of the 'services' to a specific network interface available on
# Docker's host. This section is required for the 'networks' section of each of the 'services'.

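For reference, this is how the commented-out block would look if re-enabled; a minimal sketch assuming a pre-created Docker network named LAN0 (created beforehand with `docker network create LAN0`), which is not part of the commit:

services:
  cloudflare-ddns:
    # network_mode: host must be removed first; 'networks' and 'network_mode' cannot be combined
    networks:
      - LAN0

networks:
  LAN0:
    external: true   # use the existing network instead of creating a new one
    name: LAN0
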
garmin-grafana/.env.example (new file, 15 lines)
@@ -0,0 +1,15 @@
# InfluxDB credentials
INFLUXDB_PASSWORD=influxdb_secret_password

# Grafana credentials
GF_SECURITY_ADMIN_USER=admin
GF_SECURITY_ADMIN_PASSWORD=admin

# Garmin Connect credentials
GARMINCONNECT_EMAIL=your_garmin_email@example.com
GARMINCONNECT_PASSWORD=your_garmin_password_base64_encoded

# Paths for persistent data
GARMINCONNECT_TOKENS=./garminconnect-tokens
PATH_TO_INFLUXDB_DATA=./influxdb_data
PATH_TO_GRAFANA_DATA=./grafana_data

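GARMINCONNECT_PASSWORD has to hold the base64-encoded form of the real password (the compose file below passes it as GARMINCONNECT_BASE64_PASSWORD). A quick way to produce it, assuming a POSIX shell; the password shown is a placeholder:

$ echo -n 'my_real_password' | base64
bXlfcmVhbF9wYXNzd29yZA==

The -n flag matters: without it the trailing newline gets encoded too.
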
@@ -4,49 +4,65 @@ services:
    image: thisisarpanghosh/garmin-fetch-data:latest
    container_name: garmin-fetch-data
    depends_on:
      - influxdb
      influxdb:
        condition: service_healthy
    volumes:
      - ${GARMINCONNECT_TOKENS}:/home/appuser/.garminconnect # (persistent tokens storage - garminconnect-tokens folder must be owned by 1000:1000)
    environment:
      - INFLUXDB_HOST=influxdb
      - INFLUXDB_PORT=8086
      - INFLUXDB_USERNAME=influxdb_user
      - INFLUXDB_PASSWORD=influxdb_secret_password
      - INFLUXDB_PASSWORD=${INFLUXDB_PASSWORD}
      - INFLUXDB_DATABASE=GarminStats
      - UPDATE_INTERVAL_SECONDS=300
      - LOG_LEVEL=INFO
      - GARMINCONNECT_EMAIL=${GARMINCONNECT_EMAIL}
      - GARMINCONNECT_BASE64_PASSWORD=${GARMINCONNECT_PASSWORD} # (must be base64 encoded)

    networks:
      - garmin-grafana-net

  influxdb:
    restart: unless-stopped
    container_name: influxdb
    hostname: influxdb
    image: influxdb:latest
    environment:
      - INFLUXDB_DB=GarminStats
      - INFLUXDB_USER=influxdb_user
      - INFLUXDB_USER_PASSWORD=influxdb_secret_password
      - INFLUXDB_USER_PASSWORD=${INFLUXDB_PASSWORD}
      - INFLUXDB_DATA_INDEX_VERSION=tsi1
    ports:
      - '8086:8086'
    volumes:
      - influxdb_data:/var/lib/influxdb
    image: 'influxdb:1.11'
      - ${PATH_TO_INFLUXDB_DATA}:/var/lib/influxdb
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8086/ping"]
      interval: 10s
      timeout: 5s
      retries: 5
    networks:
      - garmin-grafana-net

  grafana:
    restart: unless-stopped
    container_name: grafana
    hostname: grafana
    image: grafana/grafana:latest
    environment:
      - GF_SECURITY_ADMIN_USER=admin
      - GF_SECURITY_ADMIN_PASSWORD=admin
      - GF_SECURITY_ADMIN_USER=${GF_SECURITY_ADMIN_USER}
      - GF_SECURITY_ADMIN_PASSWORD=${GF_SECURITY_ADMIN_PASSWORD}
    volumes:
      - grafana_data:/var/lib/grafana
      - ${PATH_TO_GRAFANA_DATA}:/var/lib/grafana
    ports:
      - '3000:3000'
    image: 'grafana/grafana:latest'
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:3000/api/health"]
      interval: 10s
      timeout: 5s
      retries: 5
    networks:
      - garmin-grafana-net

volumes:
  influxdb_data:
  grafana_data:
networks:
  garmin-grafana-net:
    driver: bridge

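Once these healthchecks are in place, the reported state can be checked from the host; a small sketch using the standard Docker CLI, with the container name taken from the file above:

$ docker inspect --format '{{.State.Health.Status}}' influxdb
healthy
$ docker ps --format '{{.Names}}\t{{.Status}}'   # "(healthy)" appears next to "Up" once the check passes
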
@@ -1,12 +1,23 @@
# An image from abesnier that works as an all-in-one and does not require database initialization.
# I don't know if it has any limitations. For my needs it fits perfectly.

services:
  guacamole:
    image: abesnier/guacamole:latest
    restart: unless-stopped
    container_name: guacamole
    volumes:
      - ${PATH_TO_CONFIG}:/config
    ports:
      - 8080:8080
# An image from abesnier that works as an all-in-one and does not require database initialization.
# I don't know if it has any limitations. For my needs it fits perfectly.

services:
  guacamole:
    image: abesnier/guacamole:latest
    restart: unless-stopped
    container_name: guacamole
    volumes:
      - ${PATH_TO_CONFIG}:/config
    ports:
      - 8080:8080
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8080/"]
      interval: 30s
      timeout: 10s
      retries: 3
    networks:
      - guacamole-net

networks:
  guacamole-net:
    driver: bridge

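Since most of these files now pull ${...} values from a sibling .env file, the substituted result can be checked before starting anything; a minimal sketch using the standard Compose CLI, run from the directory holding the compose file and its .env:

$ docker compose config            # prints the fully resolved YAML with variables substituted
$ docker compose config --quiet    # validates only; exits non-zero on bad YAML or substitution errors
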
heimdall/.env.example (new file, 2 lines)
@@ -0,0 +1,2 @@
# Path to the Heimdall config folder
PATH_TO_CONFIG=/home/mbuz/docker/heimdall/config

@@ -1,5 +1,3 @@
version: "2.1"

services:
  heimdall:
    image: lscr.io/linuxserver/heimdall:latest
@@ -9,8 +7,19 @@ services:
      - PGID=1000
      - TZ=Europe/Warsaw
    volumes:
      - /home/mbuz/docker/heimdall/config:/config
      - ${PATH_TO_CONFIG}:/config
    ports:
      - 80:80
      - 443:443
    restart: unless-stopped
      - "80:80"
      - "443:443"
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost/"]
      interval: 30s
      timeout: 10s
      retries: 3
    networks:
      - heimdall-net

networks:
  heimdall-net:
    driver: bridge

@@ -1,12 +1,23 @@
services:
  homepage:
    image: ghcr.io/gethomepage/homepage:latest
    container_name: homepage
    ports:
      - 3001:3000
    volumes:
      - ${PATH_TO_CONFIG}:/app/config # Make sure your local config directory exists
      - /var/run/docker.sock:/var/run/docker.sock # (optional) For docker integrations
    env_file:
      - .env
    restart: unless-stopped
services:
  homepage:
    image: ghcr.io/gethomepage/homepage:latest
    container_name: homepage
    ports:
      - "3001:3000"
    volumes:
      - ${PATH_TO_CONFIG}:/app/config # Make sure your local config directory exists
      - /var/run/docker.sock:/var/run/docker.sock # (optional) For docker integrations
    env_file:
      - .env
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:3000"]
      interval: 30s
      timeout: 10s
      retries: 3
    networks:
      - homepage-net

networks:
  homepage-net:
    driver: bridge

@@ -30,6 +30,8 @@ services:
    restart: unless-stopped
    healthcheck:
      disable: false
    networks:
      - immich-net

  immich-machine-learning:
    container_name: immich_machine_learning
@@ -46,6 +48,8 @@ services:
    restart: unless-stopped
    healthcheck:
      disable: false
    networks:
      - immich-net

  redis:
    container_name: immich_redis
@@ -53,6 +57,8 @@ services:
    healthcheck:
      test: redis-cli ping || exit 1
    restart: unless-stopped
    networks:
      - immich-net

  database:
    container_name: immich_postgres
@@ -84,6 +90,12 @@ services:
      -c shared_buffers=512MB
      -c wal_compression=on
    restart: unless-stopped
    networks:
      - immich-net

volumes:
  model-cache:

networks:
  immich-net:
    driver: bridge

@@ -4,4 +4,15 @@ services:
    ports:
      - '8182:80' # change if needed
    restart: unless-stopped
    container_name: it-tools
    container_name: it-tools
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost/"]
      interval: 30s
      timeout: 10s
      retries: 3
    networks:
      - it-tools-net

networks:
  it-tools-net:
    driver: bridge

@@ -19,4 +19,15 @@ services:
      PUID: 1000
      PGID: 1000
      TZ: Europe/Warsaw
      BASE_URL: ${YOUR_DOMAIN:-https://mealie.yourdomain.com}
      BASE_URL: ${YOUR_DOMAIN:-https://mealie.yourdomain.com}
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:9000"]
      interval: 30s
      timeout: 10s
      retries: 3
    networks:
      - mealie-net

networks:
  mealie-net:
    driver: bridge

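BASE_URL uses Compose's default-value substitution: ${YOUR_DOMAIN:-https://mealie.yourdomain.com} resolves to YOUR_DOMAIN when it is set and non-empty, and to the literal after ":-" otherwise. A small sketch of the two cases (the example domain is a placeholder):

environment:
  BASE_URL: ${YOUR_DOMAIN:-https://mealie.yourdomain.com}
# With YOUR_DOMAIN=https://recipes.example.org in .env:
#   docker compose config | grep BASE_URL   ->   BASE_URL: https://recipes.example.org
# With YOUR_DOMAIN unset or empty, the default after ':-' is used instead.
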
n8n/.env.example (new file, 8 lines)
@@ -0,0 +1,8 @@
# Domain for n8n
DOMAIN=n8n.example.com

# Your timezone
GENERIC_TIMEZONE=Europe/Warsaw

# Directory where n8n will store files
PATH_TO_FILES=/path/to/n8n/files

n8n/docker-compose.yml (new file, 24 lines)
@@ -0,0 +1,24 @@
services:
  n8n:
    image: docker.n8n.io/n8nio/n8n
    container_name: n8n
    restart: unless-stopped
    ports:
      - 5678:5678
    environment:
      - N8N_HOST=${DOMAIN}
      - N8N_PORT=5678
      - N8N_PROTOCOL=https
      - NODE_ENV=production
      - WEBHOOK_URL=https://${DOMAIN}/
      - GENERIC_TIMEZONE=${GENERIC_TIMEZONE}
    volumes:
      - n8n_data:/home/node/.n8n
      - ${PATH_TO_FILES}:/files
    healthcheck:
      test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://localhost:5678/healthz || exit 1"]
      interval: 1m
      timeout: 10s
      retries: 3
volumes:
  n8n_data:

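The same /healthz endpoint the healthcheck polls can also be hit from the host once the stack is up; a quick sanity check, assuming the default 5678 port mapping above:

$ docker compose up -d
$ wget -qO- http://localhost:5678/healthz   # returns a small JSON status object once n8n is ready
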
@@ -1,31 +1,35 @@
version: '3.3'
services:
  nextcloud:
  nextcloud:
    image: lscr.io/linuxserver/nextcloud:latest
    container_name: nextcloud
    env_file:
      - stack.env
      - .env
    environment:
      - PUID=1000
      - PGID=1000
      - PHP_MEMORY_LIMIT=${PHP_MEMORY_LIMIT}
      - PHP_UPLOAD_LIMIT=${PHP_UPLOAD_LIMIT}
      - TZ=${TZ}

    volumes:
      - ${CONFIG}:/config
      - ${DATA}:/data
    ports:
      - 5443:443
      - "5443:443"
    restart: unless-stopped
    links:
      - nextcloud-mariadb
    depends_on:
      - nextcloud-mariadb

      nextcloud-mariadb:
      nextcloud-mariadb:
        condition: service_healthy
    networks:
      - nextcloud
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost/status.php"]
      interval: 30s
      timeout: 10s
      retries: 3

  nextcloud-mariadb:
    image: lscr.io/linuxserver/mariadb:latest
    container_name: nextloud-mariadb
    container_name: nextcloud-mariadb
    environment:
      - PUID=1000
      - PGID=1000
@@ -34,15 +38,20 @@ services:
      - MYSQL_DATABASE=nextcloud
      - MYSQL_USER=nextcloud
      - MYSQL_PASSWORD=${MYSQL_PASSWORD}

    volumes:
      - ${MARIADB}:/config
    ports:
      - 5306:3306
      - "5306:3306"
    restart: unless-stopped
    networks:
      - nextcloud
    healthcheck:
      test: ["CMD", "mysqladmin", "ping", "-h", "localhost"]
      interval: 10s
      timeout: 5s
      retries: 5


networks:
  default:
  nextcloud:
    name: nextcloud
    driver: bridge
    driver: bridge

@@ -11,4 +11,9 @@ services:
    volumes:
      - ${PGADMIN_DATA}:/var/lib/pgadmin
    extra_hosts:
      - "host.docker.internal:host-gateway"
      - "host.docker.internal:host-gateway"
    healthcheck:
      test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://localhost/misc/ping || exit 1"]
      interval: 1m
      timeout: 10s
      retries: 3

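The extra_hosts entry maps host.docker.internal to the Docker host's gateway address, which is what lets pgAdmin reach services running directly on the host. A hypothetical use, assuming a PostgreSQL instance listening on the host's port 5432 (not part of this commit):

# In pgAdmin's server registration form (values are placeholders):
#   Host name/address: host.docker.internal
#   Port:              5432
#   Username:          postgres
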
@@ -8,4 +8,9 @@ services:
    volumes:
      - ${PORTAINER_DATA}:/data
      - /var/run/docker.sock:/var/run/docker.sock
    restart: unless-stopped
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:9000/api/status"]
      interval: 1m
      timeout: 10s
      retries: 3

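The healthchecks in this commit mix two test forms, and the difference matters: the exec form ("CMD") runs the binary directly, while "CMD-SHELL" wraps the string in /bin/sh -c so shell operators like || work. A short side-by-side using the probe above:

healthcheck:
  test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:9000/api/status"]  # exec form: no shell involved
# versus
healthcheck:
  test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://localhost:9000/api/status || exit 1"]  # shell form: '||' needs a shell

Either way, the probe binary (wget or curl) has to exist inside the image being checked.
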
@@ -1,17 +1,33 @@
services:
  semaphore:
    ports:
      - 3030:3000
    image: semaphoreui/semaphore:v2.16.18
    environment:
      SEMAPHORE_DB_DIALECT: sqlite
      SEMAPHORE_ADMIN: ${ADMIN_USER}
      SEMAPHORE_ADMIN_PASSWORD: ${ADMIN_PASS}
      SEMAPHORE_ADMIN_NAME: ${ADMIN_NAME}
      SEMAPHORE_ADMIN_EMAIL: ${ADMIN_EMAIL}
    volumes:
      - semaphore-data:/var/lib/semaphore
      - semaphore-config:/etc/semaphore
  semaphore:
    container_name: semaphore-ui
    image: semaphoreui/semaphore:latest
    restart: unless-stopped
    ports:
      - "3030:3000"
    environment:
      SEMAPHORE_DB_DIALECT: sqlite
      SEMAPHORE_ADMIN: ${ADMIN_USER}
      SEMAPHORE_ADMIN_PASSWORD: ${ADMIN_PASS}
      SEMAPHORE_ADMIN_NAME: ${ADMIN_NAME}
      SEMAPHORE_ADMIN_EMAIL: ${ADMIN_EMAIL}
    volumes:
      - semaphore-data:/var/lib/semaphore
      - semaphore-config:/etc/semaphore
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:3000/api/health"]
      interval: 30s
      timeout: 10s
      retries: 3
    networks:
      - semaphore-net

volumes:
  semaphore-data:
  semaphore-config:
    driver: local
  semaphore-config:
    driver: local

networks:
  semaphore-net:
    driver: bridge

@@ -32,6 +32,11 @@ services:
        max-size: 10m
    ports:
      - '9091:9091'
    healthcheck:
      test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://localhost:9091 && ls /data || exit 1"]
      interval: 1m
      timeout: 10s
      retries: 3

# Not all countries and servers support p2p, so you need to choose the right server. Here's a hint:
# https://support.nordvpn.com/hc/en-us/articles/20465085067665-NordVPN-proxy-setup-for-BitTorrent

@@ -1,4 +1,3 @@
---
services:
  transmission:
    image: lscr.io/linuxserver/transmission:latest
@@ -22,3 +21,8 @@ services:
      - 51413:51413
      - 51413:51413/udp
    restart: unless-stopped
    healthcheck:
      test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://localhost:9091 || exit 1"]
      interval: 1m
      timeout: 10s
      retries: 3

@@ -8,4 +8,9 @@ services:
    volumes:
      - ${PATH_TO_DATA}:/data
    ports:
      - 8033:80
      - 8033:80
    healthcheck:
      test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://localhost/ || exit 1"]
      interval: 1m
      timeout: 10s
      retries: 3

watchtower/.env.example (new file, 6 lines)
@@ -0,0 +1,6 @@
# Timezone for watchtower
TZ=Europe/Warsaw

# A space-separated list of container names for Watchtower to monitor.
# For example: WATCHTOWER_CONTAINERS="nginx-proxy-manager bookstack"
WATCHTOWER_CONTAINERS=""

watchtower/docker-compose.yaml (new file, 13 lines)
@@ -0,0 +1,13 @@
services:
  watchtower:
    image: containrrr/watchtower
    container_name: watchtower
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
    environment:
      - WATCHTOWER_CLEANUP=true
      - WATCHTOWER_INCLUDE_STOPPED=true
      - WATCHTOWER_POLL_INTERVAL=3600
      - TZ=${TZ}
    command: ${WATCHTOWER_CONTAINERS}
    restart: unless-stopped

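Watchtower treats its command-line arguments as the list of containers to watch, so `command: ${WATCHTOWER_CONTAINERS}` narrows monitoring to whatever the variable holds; left empty, it falls back to watching every running container. A sketch of the two settings in watchtower/.env, reusing the names from the .env.example above:

# Watch only these two containers:
WATCHTOWER_CONTAINERS="nginx-proxy-manager bookstack"

# Watch everything (the default):
WATCHTOWER_CONTAINERS=""
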
@@ -13,4 +13,9 @@ services:
    # devices:
    #   - /dev/dri:/dev/dri #optional
    shm_size: "2gb" #optional
    restart: unless-stopped
    restart: unless-stopped
    healthcheck:
      test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://localhost:3000/ || exit 1"]
      interval: 1m
      timeout: 10s
      retries: 3