Compare commits
3 Commits
docker-ref...main
| Author | SHA1 | Date |
|---|---|---|
|  | 8a0c432db6 |  |
|  | 5a9bd17e13 |  |
|  | 617ec624cb |  |
@@ -14,14 +14,11 @@ services:
volumes:
- ${PATH_TO_CONFIG}:/config
ports:
- "6875:80"
- 6875:80
restart: unless-stopped
depends_on:
bookstack_database:
condition: service_healthy
networks:
- bookstack-net

- bookstack_database

bookstack_database:
image: lscr.io/linuxserver/mariadb
container_name: bookstack_database
@@ -35,15 +32,4 @@ services:
- MYSQL_PASSWORD=${DB_USER_PASS}
volumes:
- ${PATH_TO_DB}:/config
restart: unless-stopped
healthcheck:
test: ["CMD", "mysqladmin", "ping", "-h", "localhost"]
interval: 10s
timeout: 5s
retries: 5
networks:
- bookstack-net

networks:
bookstack-net:
driver: bridge
restart: unless-stopped

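Several stacks in this compare (BookStack above, garmin-grafana and Nextcloud below) introduce the same startup-ordering pattern: the database service gets a healthcheck, and the app declares `depends_on` with `condition: service_healthy`. A minimal sketch of how the two halves fit together, using a hypothetical `app` service name:

```yaml
services:
  app:
    image: example/app:latest          # hypothetical application image
    depends_on:
      db:
        condition: service_healthy     # start only after the db healthcheck passes

  db:
    image: lscr.io/linuxserver/mariadb
    healthcheck:
      test: ["CMD", "mysqladmin", "ping", "-h", "localhost"]
      interval: 10s
      timeout: 5s
      retries: 5
```
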
@@ -3,13 +3,35 @@ services:
image: favonia/cloudflare-ddns:latest
container_name: cloudflare-ddns
env_file: .env
# Choose the appropriate tag based on your need:
# - "latest" for the latest stable version (which could become 2.x.y
# in the future and break things)
# - "1" for the latest stable version whose major version is 1
# - "1.x.y" to pin the specific version 1.x.y
network_mode: host
# This bypasses network isolation and makes IPv6 easier (optional; see below)
restart: always
# Restart the updater after reboot
user: "1000:1000"
# Run the updater with specific user and group IDs (in that order).
# You can change the two numbers based on your need.
read_only: true
# Make the container filesystem read-only (optional but recommended)
cap_drop: [all]
# Drop all Linux capabilities (optional but recommended)
security_opt: [no-new-privileges:true]
# Another protection to restrict superuser privileges (optional but recommended)
environment:
- CLOUDFLARE_API_TOKEN=${CLOUDFLARE_API_TOKEN}
# Your Cloudflare API token
- DOMAINS=${DOMAINS}
# Your domains (separated by commas)
- PROXIED=true
# Tell Cloudflare to cache webpages and hide your IP (optional)
#networks:
# LAN0:
# external: true
# name: LAN0
# Introduce custom Docker networks to the 'services' in this file. A common use case
# for this is binding one of the 'services' to a specific network interface available at
# Docker's host. This section is required for the 'networks' section of each 'services'.

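The commented-out `networks:` block above sketches how to bind the updater to a specific, externally managed Docker network instead of `network_mode: host`. A minimal sketch, assuming a network named LAN0 that already exists on the Docker host (created outside this compose file):

```yaml
services:
  cloudflare-ddns:
    image: favonia/cloudflare-ddns:latest
    networks:
      - LAN0              # attach to the pre-existing network instead of the host network

networks:
  LAN0:
    external: true        # Compose will not create it; it must already exist
    name: LAN0
```
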
@@ -1,15 +0,0 @@
# InfluxDB credentials
INFLUXDB_PASSWORD=influxdb_secret_password

# Grafana credentials
GF_SECURITY_ADMIN_USER=admin
GF_SECURITY_ADMIN_PASSWORD=admin

# Garmin Connect credentials
GARMINCONNECT_EMAIL=your_garmin_email@example.com
GARMINCONNECT_PASSWORD=your_garmin_password_base64_encoded

# Paths for persistent data
GARMINCONNECT_TOKENS=./garminconnect-tokens
PATH_TO_INFLUXDB_DATA=./influxdb_data
PATH_TO_GRAFANA_DATA=./grafana_data

@@ -4,65 +4,49 @@ services:
image: thisisarpanghosh/garmin-fetch-data:latest
container_name: garmin-fetch-data
depends_on:
influxdb:
condition: service_healthy
- influxdb
volumes:
- ${GARMINCONNECT_TOKENS}:/home/appuser/.garminconnect # (persistant tokens storage - garminconnect-tokens folder must be owned by 1000:1000)
environment:
- INFLUXDB_HOST=influxdb
- INFLUXDB_PORT=8086
- INFLUXDB_USERNAME=influxdb_user
- INFLUXDB_PASSWORD=${INFLUXDB_PASSWORD}
- INFLUXDB_PASSWORD=influxdb_secret_password
- INFLUXDB_DATABASE=GarminStats
- UPDATE_INTERVAL_SECONDS=300
- LOG_LEVEL=INFO
- GARMINCONNECT_EMAIL=${GARMINCONNECT_EMAIL}
- GARMINCONNECT_BASE64_PASSWORD=${GARMINCONNECT_PASSWORD} # (must be base64 encoded)
networks:
- garmin-grafana-net


influxdb:
restart: unless-stopped
container_name: influxdb
hostname: influxdb
image: influxdb:latest
environment:
- INFLUXDB_DB=GarminStats
- INFLUXDB_USER=influxdb_user
- INFLUXDB_USER_PASSWORD=${INFLUXDB_PASSWORD}
- INFLUXDB_USER_PASSWORD=influxdb_secret_password
- INFLUXDB_DATA_INDEX_VERSION=tsi1
ports:
- '8086:8086'
volumes:
- ${PATH_TO_INFLUXDB_DATA}:/var/lib/influxdb
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:8086/ping"]
interval: 10s
timeout: 5s
retries: 5
networks:
- garmin-grafana-net
- influxdb_data:/var/lib/influxdb
image: 'influxdb:1.11'

grafana:
restart: unless-stopped
container_name: grafana
hostname: grafana
image: grafana/grafana:latest
environment:
- GF_SECURITY_ADMIN_USER=${GF_SECURITY_ADMIN_USER}
- GF_SECURITY_ADMIN_PASSWORD=${GF_SECURITY_ADMIN_PASSWORD}
- GF_SECURITY_ADMIN_USER=admin
- GF_SECURITY_ADMIN_PASSWORD=admin
volumes:
- ${PATH_TO_GRAFANA_DATA}:/var/lib/grafana
- grafana_data:/var/lib/grafana
ports:
- '3000:3000'
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:3000/api/health"]
interval: 10s
timeout: 5s
retries: 5
networks:
- garmin-grafana-net
image: 'grafana/grafana:latest'

networks:
garmin-grafana-net:
driver: bridge
volumes:
influxdb_data:
grafana_data:

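Both the removed `.env` and the compose file note that the Garmin password must be supplied base64-encoded (`GARMINCONNECT_BASE64_PASSWORD`). A hedged illustration, assuming plain standard base64 of the UTF-8 password is what the image expects: if the real password were `hunter2`, its encoded form is `aHVudGVyMg==`, and that string is what belongs in the variable:

```env
# hypothetical values, for illustration only
GARMINCONNECT_EMAIL=your_garmin_email@example.com
GARMINCONNECT_PASSWORD=aHVudGVyMg==   # base64 of the plain-text password "hunter2"
```
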
@@ -1,23 +1,12 @@
# An image from abesnier that works as an all-in-one and does not require database initiation.
# I don't know if it has any limitations. For my needs it fits perfectly.

services:
guacamole:
image: abesnier/guacamole:latest
restart: unless-stopped
container_name: guacamole
volumes:
- ${PATH_TO_CONFIG}:/config
ports:
- 8080:8080
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:8080/"]
interval: 30s
timeout: 10s
retries: 3
networks:
- guacamole-net

networks:
guacamole-net:
driver: bridge
# An image from abesnier that works as an all-in-one and does not require database initiation.
# I don't know if it has any limitations. For my needs it fits perfectly.

services:
guacamole:
image: abesnier/guacamole:latest
restart: unless-stopped
container_name: guacamole
volumes:
- ${PATH_TO_CONFIG}:/config
ports:
- 8080:8080

@@ -1,2 +0,0 @@
# Path to the Heimdall config folder
PATH_TO_CONFIG=/home/mbuz/docker/heimdall/config

@@ -1,3 +1,5 @@
version: "2.1"

services:
heimdall:
image: lscr.io/linuxserver/heimdall:latest
@@ -7,19 +9,8 @@ services:
- PGID=1000
- TZ=Europe/Warsaw
volumes:
- ${PATH_TO_CONFIG}:/config
- /home/mbuz/docker/heimdall/config:/config
ports:
- "80:80"
- "443:443"
restart: unless-stopped
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost/"]
interval: 30s
timeout: 10s
retries: 3
networks:
- heimdall-net

networks:
heimdall-net:
driver: bridge
- 80:80
- 443:443
restart: unless-stopped

@@ -1,23 +1,12 @@
services:
homepage:
image: ghcr.io/gethomepage/homepage:latest
container_name: homepage
ports:
- "3001:3000"
volumes:
- ${PATH_TO_CONFIG}:/app/config # Make sure your local config directory exists
- /var/run/docker.sock:/var/run/docker.sock # (optional) For docker integrations
env_file:
- .env
restart: unless-stopped
healthcheck:
test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:3000"]
interval: 30s
timeout: 10s
retries: 3
networks:
- homepage-net

networks:
homepage-net:
driver: bridge
services:
homepage:
image: ghcr.io/gethomepage/homepage:latest
container_name: homepage
ports:
- 3001:3000
volumes:
- ${PATH_TO_CONFIG}:/app/config # Make sure your local config directory exists
- /var/run/docker.sock:/var/run/docker.sock # (optional) For docker integrations
env_file:
- .env
restart: unless-stopped

@@ -30,8 +30,6 @@ services:
restart: unless-stopped
healthcheck:
disable: false
networks:
- immich-net

immich-machine-learning:
container_name: immich_machine_learning
@@ -48,8 +46,6 @@ services:
restart: unless-stopped
healthcheck:
disable: false
networks:
- immich-net

redis:
container_name: immich_redis
@@ -57,8 +53,6 @@ services:
healthcheck:
test: redis-cli ping || exit 1
restart: unless-stopped
networks:
- immich-net

database:
container_name: immich_postgres
@@ -90,12 +84,6 @@ services:
-c shared_buffers=512MB
-c wal_compression=on
restart: unless-stopped
networks:
- immich-net

volumes:
model-cache:

networks:
immich-net:
driver: bridge

@@ -4,15 +4,4 @@ services:
ports:
- '8182:80' # change if needed
restart: unless-stopped
container_name: it-tools
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost/"]
interval: 30s
timeout: 10s
retries: 3
networks:
- it-tools-net

networks:
it-tools-net:
driver: bridge
container_name: it-tools

@@ -19,15 +19,4 @@ services:
PUID: 1000
PGID: 1000
TZ: Europe/Warsaw
BASE_URL: ${YOUR_DOMAIN:-https://mealie.yourdomain.com}
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:9000"]
interval: 30s
timeout: 10s
retries: 3
networks:
- mealie-net

networks:
mealie-net:
driver: bridge
BASE_URL: ${YOUR_DOMAIN:-https://mealie.yourdomain.com}

@@ -1,8 +0,0 @@
# Domain for n8n
DOMAIN=n8n.example.com

# Your timezone
GENERIC_TIMEZONE=Europe/Warsaw

# Directory where n8n will store files
PATH_TO_FILES=/path/to/n8n/files

@@ -1,24 +0,0 @@
services:
n8n:
image: docker.n8n.io/n8nio/n8n
container_name: n8n
restart: unless-stopped
ports:
- 5678:5678
environment:
- N8N_HOST=${DOMAIN}
- N8N_PORT=5678
- N8N_PROTOCOL=https
- NODE_ENV=production
- WEBHOOK_URL=https://${DOMAIN}/
- GENERIC_TIMEZONE=${GENERIC_TIMEZONE}
volumes:
- n8n_data:/home/node/.n8n
- ${PATH_TO_FILES}:/files
healthcheck:
test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://localhost:5678/healthz || exit 1"]
interval: 1m
timeout: 10s
retries: 3
volumes:
n8n_data:

@@ -1,35 +1,31 @@
version: '3.3'
services:
nextcloud:
nextcloud:
image: lscr.io/linuxserver/nextcloud:latest
container_name: nextcloud
env_file:
- .env
- stack.env
environment:
- PUID=1000
- PGID=1000
- PHP_MEMORY_LIMIT=${PHP_MEMORY_LIMIT}
- PHP_UPLOAD_LIMIT=${PHP_UPLOAD_LIMIT}
- TZ=${TZ}

volumes:
- ${CONFIG}:/config
- ${DATA}:/data
ports:
- "5443:443"
- 5443:443
restart: unless-stopped
links:
- nextcloud-mariadb
depends_on:
nextcloud-mariadb:
condition: service_healthy
networks:
- nextcloud
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost/status.php"]
interval: 30s
timeout: 10s
retries: 3

nextcloud-mariadb:
- nextcloud-mariadb

nextcloud-mariadb:
image: lscr.io/linuxserver/mariadb:latest
container_name: nextcloud-mariadb
container_name: nextloud-mariadb
environment:
- PUID=1000
- PGID=1000
@@ -38,20 +34,15 @@ services:
- MYSQL_DATABASE=nextcloud
- MYSQL_USER=nextcloud
- MYSQL_PASSWORD=${MYSQL_PASSWORD}

volumes:
- ${MARIADB}:/config
ports:
- "5306:3306"
- 5306:3306
restart: unless-stopped
networks:
- nextcloud
healthcheck:
test: ["CMD", "mysqladmin", "ping", "-h", "localhost"]
interval: 10s
timeout: 5s
retries: 5


networks:
nextcloud:
default:
name: nextcloud
driver: bridge
driver: bridge

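The network change in this file replaces an explicitly named bridge network (attached to each service through a `networks:` key) with a rename of the compose project's default network. A minimal sketch of the newer style, inferred from the `default:` / `name: nextcloud` lines above; with it, every service in the file joins the network automatically and needs no per-service `networks:` entry:

```yaml
networks:
  default:
    name: nextcloud   # the compose project's default network, renamed
    driver: bridge
```
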
@@ -11,9 +11,4 @@ services:
volumes:
- ${PGADMIN_DATA}:/var/lib/pgadmin
extra_hosts:
- "host.docker.internal:host-gateway"
healthcheck:
test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://localhost/misc/ping || exit 1"]
interval: 1m
timeout: 10s
retries: 3
- "host.docker.internal:host-gateway"

@@ -8,9 +8,4 @@ services:
volumes:
- ${PORTAINER_DATA}:/data
- /var/run/docker.sock:/var/run/docker.sock
restart: unless-stopped
healthcheck:
test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:9000/api/status"]
interval: 1m
timeout: 10s
retries: 3
restart: unless-stopped

@@ -1,4 +1,11 @@
<<<<<<< HEAD
SEMAPHORE_ADMIN=admin
SEMAPHORE_ADMIN_PASSWORD=changeme
SEMAPHORE_ADMIN_NAME=Admin
SEMAPHORE_ADMIN_EMAIL=admin@example.com
SEMAPHORE_ADMIN_EMAIL=admin@example.com
=======
ADMIN_USER=admin
ADMIN_PASS=changeme
ADMIN_NAME=Admin
ADMIN_EMAIL=admin@example.com
>>>>>>> ab90593 (ADD: Semaphore UI initial commit)

@@ -1,33 +1,17 @@
services:
semaphore:
container_name: semaphore-ui
image: semaphoreui/semaphore:latest
restart: unless-stopped
ports:
- "3030:3000"
environment:
SEMAPHORE_DB_DIALECT: sqlite
SEMAPHORE_ADMIN: ${ADMIN_USER}
SEMAPHORE_ADMIN_PASSWORD: ${ADMIN_PASS}
SEMAPHORE_ADMIN_NAME: ${ADMIN_NAME}
SEMAPHORE_ADMIN_EMAIL: ${ADMIN_EMAIL}
volumes:
- semaphore-data:/var/lib/semaphore
- semaphore-config:/etc/semaphore
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:3000/api/health"]
interval: 30s
timeout: 10s
retries: 3
networks:
- semaphore-net

semaphore:
ports:
- 3030:3000
image: semaphoreui/semaphore:v2.16.18
environment:
SEMAPHORE_DB_DIALECT: sqlite
SEMAPHORE_ADMIN: ${ADMIN_USER}
SEMAPHORE_ADMIN_PASSWORD: ${ADMIN_PASS}
SEMAPHORE_ADMIN_NAME: ${ADMIN_NAME}
SEMAPHORE_ADMIN_EMAIL: ${ADMIN_EMAIL}
volumes:
- semaphore-data:/var/lib/semaphore
- semaphore-config:/etc/semaphore
volumes:
semaphore-data:
driver: local
semaphore-config:
driver: local

networks:
semaphore-net:
driver: bridge

@@ -32,11 +32,6 @@ services:
max-size: 10m
ports:
- '9091:9091'
healthcheck:
test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://localhost:9091 && ls /data || exit 1"]
interval: 1m
timeout: 10s
retries: 3

# Not all the countries and servers are supporting p2p, so you need to choose the right server. Here's the hint:
# https://support.nordvpn.com/hc/en-us/articles/20465085067665-NordVPN-proxy-setup-for-BitTorrent

@@ -1,3 +1,4 @@
---
services:
transmission:
image: lscr.io/linuxserver/transmission:latest
@@ -21,8 +22,3 @@ services:
- 51413:51413
- 51413:51413/udp
restart: unless-stopped
healthcheck:
test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://localhost:9091 || exit 1"]
interval: 1m
timeout: 10s
retries: 3

@@ -8,9 +8,4 @@ services:
volumes:
- ${PATH_TO_DATA}:/data
ports:
- 8033:80
healthcheck:
test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://localhost/ || exit 1"]
interval: 1m
timeout: 10s
retries: 3
- 8033:80

@@ -1,6 +0,0 @@
# Timezone for watchtower
TZ=Europe/Warsaw

# A space-separated list of container names for Watchtower to monitor.
# For example: WATCHTOWER_CONTAINERS="nginx-proxy-manager bookstack"
WATCHTOWER_CONTAINERS=""

@@ -1,13 +0,0 @@
services:
watchtower:
image: containrrr/watchtower
container_name: watchtower
volumes:
- /var/run/docker.sock:/var/run/docker.sock
environment:
- WATCHTOWER_CLEANUP=true
- WATCHTOWER_INCLUDE_STOPPED=true
- WATCHTOWER_POLL_INTERVAL=3600
- TZ=${TZ}
command: ${WATCHTOWER_CONTAINERS}
restart: unless-stopped

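The removed Watchtower stack limits monitoring through `command: ${WATCHTOWER_CONTAINERS}`, with the container names coming from the `.env` file. A hedged illustration of the effective result, using the example value given in the removed `.env` comment:

```yaml
services:
  watchtower:
    image: containrrr/watchtower
    # with WATCHTOWER_CONTAINERS="nginx-proxy-manager bookstack" in .env, this expands to:
    command: nginx-proxy-manager bookstack   # only these containers are watched for updates
```
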
@@ -13,9 +13,4 @@ services:
# devices:
# - /dev/dri:/dev/dri #optional
shm_size: "2gb" #optional
restart: unless-stopped
healthcheck:
test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://localhost:3000/ || exit 1"]
interval: 1m
timeout: 10s
retries: 3
restart: unless-stopped
