Compare commits: docker-ori...test (30 Commits)
| SHA1 |
|---|
| dd080fff37 |
| 4915444669 |
| 467a72c5c6 |
| 3854dc32d6 |
| 8ef0d80f02 |
| 9d8d2b702f |
| 33ef444052 |
| f8328ab96c |
| 7524f5666c |
| e117bd0180 |
| e5adc77c74 |
| 7b1f93b654 |
| f234d3846d |
| ad3f7e1d8f |
| 81c22b7c7a |
| 08f669a770 |
| 4b81b5abec |
| e649a0f5de |
| 08c28fc1ef |
| 1f192652c4 |
| 10092050f2 |
| 15c8e50ca3 |
| 8381ff9485 |
| 81d68696cf |
| 0e1ef8f63b |
| 5715ba5c98 |
| 76e58b602f |
| 3fe7e47c3e |
| 527d330e72 |
| d5f483dbc8 |
.gitignore (vendored, Normal file, +1)

@@ -0,0 +1 @@
+.env
@@ -14,11 +14,14 @@ services:
     volumes:
       - ${PATH_TO_CONFIG}:/config
     ports:
-      - 6875:80
+      - "6875:80"
     restart: unless-stopped
     depends_on:
-      - bookstack_database
+      bookstack_database:
+        condition: service_healthy
+    networks:
+      - bookstack-net
 
   bookstack_database:
     image: lscr.io/linuxserver/mariadb
     container_name: bookstack_database
@@ -32,4 +35,15 @@ services:
       - MYSQL_PASSWORD=${DB_USER_PASS}
     volumes:
       - ${PATH_TO_DB}:/config
     restart: unless-stopped
+    healthcheck:
+      test: ["CMD", "mysqladmin", "ping", "-h", "localhost"]
+      interval: 10s
+      timeout: 5s
+      retries: 5
+    networks:
+      - bookstack-net
+
+networks:
+  bookstack-net:
+    driver: bridge
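The BookStack stack above takes its bind-mount paths and database password from environment variables. A minimal, hypothetical `.env` for it could look like this (variable names are taken from the diff; the values are placeholders):

```sh
# hypothetical .env for the BookStack stack — example values only
PATH_TO_CONFIG=/srv/bookstack/config   # app config bind mount
PATH_TO_DB=/srv/bookstack/db           # MariaDB config/data bind mount
DB_USER_PASS=change_me                 # used as MYSQL_PASSWORD above
```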
ddns-cloudflare/docker-compose.yaml (Normal file, +15)

@@ -0,0 +1,15 @@
+services:
+  cloudflare-ddns:
+    image: favonia/cloudflare-ddns:latest
+    container_name: cloudflare-ddns
+    env_file: .env
+    network_mode: host
+    restart: always
+    user: "1000:1000"
+    read_only: true
+    cap_drop: [all]
+    security_opt: [no-new-privileges:true]
+    environment:
+      - CLOUDFLARE_API_TOKEN=${CLOUDFLARE_API_TOKEN}
+      - DOMAINS=${DOMAINS}
+      - PROXIED=true
@@ -1,37 +0,0 @@
-services:
-  cloudflare-ddns:
-    image: favonia/cloudflare-ddns:latest
-    container_name: cloudflare-ddns
-    env_file: .env
-    # Choose the appropriate tag based on your need:
-    # - "latest" for the latest stable version (which could become 2.x.y
-    #   in the future and break things)
-    # - "1" for the latest stable version whose major version is 1
-    # - "1.x.y" to pin the specific version 1.x.y
-    network_mode: host
-    # This bypasses network isolation and makes IPv6 easier (optional; see below)
-    restart: always
-    # Restart the updater after reboot
-    user: "1000:1000"
-    # Run the updater with specific user and group IDs (in that order).
-    # You can change the two numbers based on your need.
-    read_only: true
-    # Make the container filesystem read-only (optional but recommended)
-    cap_drop: [all]
-    # Drop all Linux capabilities (optional but recommended)
-    security_opt: [no-new-privileges:true]
-    # Another protection to restrict superuser privileges (optional but recommended)
-    environment:
-      - CLOUDFLARE_API_TOKEN=${CLOUDFLARE_API_TOKEN}
-        # Your Cloudflare API token
-      - DOMAINS=${DOMAINS}
-        # Your domains (separated by commas)
-      - PROXIED=true
-        # Tell Cloudflare to cache webpages and hide your IP (optional)
-#networks:
-#  LAN0:
-#    external: true
-#    name: LAN0
-# Introduce custom Docker networks to the 'services' in this file. A common use case
-# for this is binding one of the 'services' to a specific network interface available at
-# Docker's host. This section is required for the 'networks' section of each 'services'.
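The new ddns-cloudflare compose file drops the explanatory comments and reads its two required settings from `.env`. A minimal, hypothetical example (variable names from the compose file; values are placeholders):

```sh
# hypothetical ddns-cloudflare/.env
CLOUDFLARE_API_TOKEN=your_cloudflare_api_token   # token with DNS edit access for the zone
DOMAINS=example.com,www.example.com              # comma-separated, per the removed comments
```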
garmin-grafana/.env.example (Normal file, +15)

@@ -0,0 +1,15 @@
+# InfluxDB credentials
+INFLUXDB_PASSWORD=influxdb_secret_password
+
+# Grafana credentials
+GF_SECURITY_ADMIN_USER=admin
+GF_SECURITY_ADMIN_PASSWORD=admin
+
+# Garmin Connect credentials
+GARMINCONNECT_EMAIL=your_garmin_email@example.com
+GARMINCONNECT_PASSWORD=your_garmin_password_base64_encoded
+
+# Paths for persistent data
+GARMINCONNECT_TOKENS=./garminconnect-tokens
+PATH_TO_INFLUXDB_DATA=./influxdb_data
+PATH_TO_GRAFANA_DATA=./grafana_data
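GARMINCONNECT_PASSWORD must hold the base64-encoded Garmin password; the compose file passes it on as GARMINCONNECT_BASE64_PASSWORD. A sketch of producing the value with coreutils:

```sh
# encode the real Garmin Connect password before pasting it into .env
printf '%s' 'my_real_garmin_password' | base64
```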
garmin-grafana/docker-compose.yaml (Normal file, +76)

@@ -0,0 +1,76 @@
+services:
+  garmin-fetch-data:
+    restart: unless-stopped
+    image: thisisarpanghosh/garmin-fetch-data:latest
+    container_name: garmin-fetch-data
+    depends_on:
+      influxdb:
+        condition: service_healthy
+    volumes:
+      - garminconnect_tokens:/home/appuser/.garminconnect # persisted tokens storage (named volume)
+    environment:
+      - INFLUXDB_HOST=influxdb
+      - INFLUXDB_PORT=8086
+      - INFLUXDB_USERNAME=influxdb_user
+      - INFLUXDB_PASSWORD=${INFLUXDB_PASSWORD}
+      - INFLUXDB_DATABASE=GarminStats
+      - UPDATE_INTERVAL_SECONDS=300
+      - LOG_LEVEL=INFO
+      - GARMINCONNECT_EMAIL=${GARMINCONNECT_EMAIL}
+      - GARMINCONNECT_BASE64_PASSWORD=${GARMINCONNECT_PASSWORD} # (must be base64 encoded)
+    networks:
+      - garmin-grafana-net
+
+  influxdb:
+    restart: unless-stopped
+    container_name: influxdb
+    hostname: influxdb
+    image: influxdb:latest
+    environment:
+      - INFLUXDB_DB=GarminStats
+      - INFLUXDB_USER=influxdb_user
+      - INFLUXDB_USER_PASSWORD=${INFLUXDB_PASSWORD}
+      - INFLUXDB_DATA_INDEX_VERSION=tsi1
+    ports:
+      - '8086:8086'
+    volumes:
+      - influxdb_data:/var/lib/influxdb
+    healthcheck:
+      test: ["CMD", "curl", "-f", "http://localhost:8086/ping"]
+      interval: 10s
+      timeout: 5s
+      retries: 5
+    networks:
+      - garmin-grafana-net
+
+  grafana:
+    restart: unless-stopped
+    container_name: grafana
+    hostname: grafana
+    image: grafana/grafana:latest
+    environment:
+      - GF_SECURITY_ADMIN_USER=${GF_SECURITY_ADMIN_USER}
+      - GF_SECURITY_ADMIN_PASSWORD=${GF_SECURITY_ADMIN_PASSWORD}
+    volumes:
+      - grafana_data:/var/lib/grafana
+    ports:
+      - '3000:3000'
+    healthcheck:
+      test: ["CMD", "curl", "-f", "http://localhost:3000/api/health"]
+      interval: 10s
+      timeout: 5s
+      retries: 5
+    networks:
+      - garmin-grafana-net
+
+networks:
+  garmin-grafana-net:
+    driver: bridge
+
+volumes:
+  garminconnect_tokens:
+    name: garminconnect_tokens
+  influxdb_data:
+    name: influxdb_data
+  grafana_data:
+    name: grafana_data
@@ -1,52 +0,0 @@
-services:
-  garmin-fetch-data:
-    restart: unless-stopped
-    image: thisisarpanghosh/garmin-fetch-data:latest
-    container_name: garmin-fetch-data
-    depends_on:
-      - influxdb
-    volumes:
-      - ${GARMINCONNECT_TOKENS}:/home/appuser/.garminconnect # (persistant tokens storage - garminconnect-tokens folder must be owned by 1000:1000)
-    environment:
-      - INFLUXDB_HOST=influxdb
-      - INFLUXDB_PORT=8086
-      - INFLUXDB_USERNAME=influxdb_user
-      - INFLUXDB_PASSWORD=influxdb_secret_password
-      - INFLUXDB_DATABASE=GarminStats
-      - UPDATE_INTERVAL_SECONDS=300
-      - LOG_LEVEL=INFO
-      - GARMINCONNECT_EMAIL=${GARMINCONNECT_EMAIL}
-      - GARMINCONNECT_BASE64_PASSWORD=${GARMINCONNECT_PASSWORD} # (must be base64 encoded)
-
-
-  influxdb:
-    restart: unless-stopped
-    container_name: influxdb
-    hostname: influxdb
-    environment:
-      - INFLUXDB_DB=GarminStats
-      - INFLUXDB_USER=influxdb_user
-      - INFLUXDB_USER_PASSWORD=influxdb_secret_password
-      - INFLUXDB_DATA_INDEX_VERSION=tsi1
-    ports:
-      - '8086:8086'
-    volumes:
-      - influxdb_data:/var/lib/influxdb
-    image: 'influxdb:1.11'
-
-  grafana:
-    restart: unless-stopped
-    container_name: grafana
-    hostname: grafana
-    environment:
-      - GF_SECURITY_ADMIN_USER=admin
-      - GF_SECURITY_ADMIN_PASSWORD=admin
-    volumes:
-      - grafana_data:/var/lib/grafana
-    ports:
-      - '3000:3000'
-    image: 'grafana/grafana:latest'
-
-volumes:
-  influxdb_data:
-  grafana_data:
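The rewritten compose file stores the Garmin tokens in a named volume instead of the old ${GARMINCONNECT_TOKENS} bind mount. If an existing token directory needs to be carried over, a one-off copy along these lines should work (local path assumed; not part of the diff):

```sh
# hypothetical one-time migration of old tokens into the new named volume
docker volume create garminconnect_tokens
docker run --rm \
  -v "$PWD/garminconnect-tokens:/src:ro" \
  -v garminconnect_tokens:/dst \
  alpine sh -c 'cp -a /src/. /dst/'
```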
gitea/.env.example (Normal file, +3)

@@ -0,0 +1,3 @@
+GITEA_INSTANCE_URL=<instance_url>
+GITEA_RUNNER_REGISTRATION_TOKEN=<registration_token>
+GITEA_RUNNER_NAME=<runner_name>
gitea/docker-compose.yaml (Normal file, +0)
@@ -1,12 +1,23 @@
 # An image from abesnier that works as an all-in-one and does not require database initiation.
 # I don't know if it has any limitations. For my needs it fits perfectly.
 
 services:
   guacamole:
     image: abesnier/guacamole:latest
     restart: unless-stopped
     container_name: guacamole
     volumes:
       - ${PATH_TO_CONFIG}:/config
     ports:
       - 8080:8080
+    healthcheck:
+      test: ["CMD", "curl", "-f", "http://localhost:8080/"]
+      interval: 30s
+      timeout: 10s
+      retries: 3
+    networks:
+      - guacamole-net
+
+networks:
+  guacamole-net:
+    driver: bridge
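With a healthcheck like the one added here, the container's health state can be read back from the Docker engine (container name taken from the compose file):

```sh
# current health status as reported by the healthcheck
docker inspect --format '{{.State.Health.Status}}' guacamole
# full health log, including recent probe output
docker inspect --format '{{json .State.Health}}' guacamole
```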
heimdall/.env.example (Normal file, +2)

@@ -0,0 +1,2 @@
+# Path to the Heimdall config folder
+PATH_TO_CONFIG=/home/mbuz/docker/heimdall/config
heimdall/docker-compose.yaml (Normal file, +25)

@@ -0,0 +1,25 @@
+services:
+  heimdall:
+    image: lscr.io/linuxserver/heimdall:latest
+    container_name: heimdall
+    environment:
+      - PUID=1000
+      - PGID=1000
+      - TZ=Europe/Warsaw
+    volumes:
+      - ${PATH_TO_CONFIG}:/config
+    ports:
+      - "80:80"
+      - "443:443"
+    restart: unless-stopped
+    healthcheck:
+      test: ["CMD", "curl", "-f", "http://localhost/"]
+      interval: 30s
+      timeout: 10s
+      retries: 3
+    networks:
+      - heimdall-net
+
+networks:
+  heimdall-net:
+    driver: bridge
@@ -1,16 +0,0 @@
-version: "2.1"
-
-services:
-  heimdall:
-    image: lscr.io/linuxserver/heimdall:latest
-    container_name: heimdall
-    environment:
-      - PUID=1000
-      - PGID=1000
-      - TZ=Europe/Warsaw
-    volumes:
-      - /home/mbuz/docker/heimdall/config:/config
-    ports:
-      - 80:80
-      - 443:443
-    restart: unless-stopped
@@ -1,3 +1,35 @@
-# Authentication parameters for Proxmox API
-HOMEPAGE_VAR_PROXMOX_PAM=${HOMEPAGE_VAR_PROXMOX_PAM}
-HOMEPAGE_VAR_PROXMOX_SECRET=${HOMEPAGE_VAR_PROXMOX_SECRET}
+# Main variables for homepage
+HOMEPAGE_ALLOWED_HOSTS='homepage.example.com,127.0.0.1:3000'
+PATH_TO_CONFIG='/docker/data/homepage'
+
+# Variables for services
+HOMEPAGE_VAR_PROXMOX_URL=https://proxmox.example.com:8006
+HOMEPAGE_VAR_PROXMOX_PAM=root@pam
+HOMEPAGE_VAR_PROXMOX_SECRET='your_proxmox_api_token'
+HOMEPAGE_VAR_PORTAINER_URL=https://portainer.example.com
+HOMEPAGE_VAR_PORTAINER_KEY='your_portainer_api_token'
+HOMEPAGE_VAR_TRUENAS_URL=http://truenas.example.com
+HOMEPAGE_VAR_TRUENAS_KEY='your_truenas_api_token'
+HOMEPAGE_VAR_TRANSMISSION_URL=http://transmission.example.com:9091
+HOMEPAGE_VAR_IMMICH_URL=http://immich.example.com
+HOMEPAGE_VAR_IMMICH_KEY='your_immich_api_token'
+HOMEPAGE_VAR_SEAFILE_URL=https://seafile.example.com/
+HOMEPAGE_VAR_NPM_URL=http://npm.example.com:81
+HOMEPAGE_VAR_NPM_USER=user@example.com
+HOMEPAGE_VAR_NPM_PASS='your_npm_password'
+HOMEPAGE_VAR_ADGUARD_URL=http://adguard.example.com
+HOMEPAGE_VAR_ADGUARD_USER='your_adguard_username'
+HOMEPAGE_VAR_ADGUARD_PASS='your_adguard_password'
+HOMEPAGE_VAR_GUACAMOLE_URL=http://guacamole.example.com
+HOMEPAGE_VAR_WEBTOP_URL=http://webtop.example.com
+HOMEPAGE_VAR_LENOVO_AMT_URL=http://amt.example.com:16992
+HOMEPAGE_VAR_ZABBIX_URL=https://zabbix.example.com
+HOMEPAGE_VAR_ZABBIX_KEY='your_zabbix_api_token'
+HOMEPAGE_VAR_GRAFANA_URL=https://grafana.example.com
+HOMEPAGE_VAR_IT_TOOLS_URL=http://it-tools.example.com
+HOMEPAGE_VAR_VAULTWARDEN_URL=https://vaultwarden.example.com
+HOMEPAGE_VAR_GITEA_URL=http://gitea.example.com:3000
+HOMEPAGE_VAR_GITEA_KEY='your_gitea_api_token'
+HOMEPAGE_VAR_KOMODO_URL='http://komodo.example.com:9120/'
+HOMEPAGE_VAR_KOMODO_KEY='K-your_komodo_api_key'
+HOMEPAGE_VAR_KOMODO_SECRET='S-your_komodo_api_secret'
@@ -1,12 +1,24 @@
 services:
   homepage:
     image: ghcr.io/gethomepage/homepage:latest
     container_name: homepage
     ports:
-      - 3001:3000
-    volumes:
-      - ${PATH_TO_CONFIG}:/app/config # Make sure your local config directory exists
-      - /var/run/docker.sock:/var/run/docker.sock # (optional) For docker integrations
-    env_file:
-      - .env
+      - "3001:3000"
+    env_file:
+      - .env # Make sure this file exists and contains necessary environment variables (chek .env.example)
+    volumes:
+      - ${PATH_TO_CONFIG}:/app/config # Make sure your local config directory exists
+      - /var/run/docker.sock:/var/run/docker.sock # (optional) For docker integrations
     restart: unless-stopped
+    healthcheck:
+      test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://127.0.0.1:3000/api/healthcheck || exit 1"]
+      interval: 10s
+      timeout: 3s
+      retries: 3
+      start_period: 60s
+    networks:
+      - homepage-net
+
+networks:
+  homepage-net:
+    driver: bridge
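If the homepage healthcheck ever reports unhealthy, the exact probe it runs can be replayed by hand inside the container (command and container name taken from the diff):

```sh
# replay the healthcheck command inside the running container
docker exec homepage wget --no-verbose --tries=1 --spider http://127.0.0.1:3000/api/healthcheck && echo healthy
```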
@@ -30,6 +30,8 @@ services:
     restart: unless-stopped
     healthcheck:
       disable: false
+    networks:
+      - immich-net
 
   immich-machine-learning:
     container_name: immich_machine_learning
@@ -46,6 +48,8 @@ services:
     restart: unless-stopped
     healthcheck:
       disable: false
+    networks:
+      - immich-net
 
   redis:
     container_name: immich_redis
@@ -53,6 +57,8 @@ services:
     healthcheck:
       test: redis-cli ping || exit 1
     restart: unless-stopped
+    networks:
+      - immich-net
 
   database:
     container_name: immich_postgres
@@ -84,6 +90,12 @@ services:
       -c shared_buffers=512MB
       -c wal_compression=on
     restart: unless-stopped
+    networks:
+      - immich-net
 
 volumes:
   model-cache:
+
+networks:
+  immich-net:
+    driver: bridge
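These hunks attach every Immich service to a dedicated immich-net bridge network. After the stack is up, membership can be checked from the host:

```sh
# list the containers attached to the new bridge network
docker network inspect immich-net --format '{{range .Containers}}{{.Name}} {{end}}'
```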
it-tools/docker-compose.yaml (Normal file, +18)

@@ -0,0 +1,18 @@
+services:
+  it-tools:
+    image: 'corentinth/it-tools:latest'
+    ports:
+      - '8182:80' # change if needed
+    restart: unless-stopped
+    container_name: it-tools
+    healthcheck:
+      test: ["CMD", "curl", "-f", "http://localhost/"]
+      interval: 30s
+      timeout: 10s
+      retries: 3
+    networks:
+      - it-tools-net
+
+networks:
+  it-tools-net:
+    driver: bridge
@@ -1,7 +0,0 @@
-services:
-  it-tools:
-    image: 'corentinth/it-tools:latest'
-    ports:
-      - '8182:80' # change if needed
-    restart: unless-stopped
-    container_name: it-tools
komodo/.env.example (Normal file, +152)

@@ -0,0 +1,152 @@
+####################################
+# 🦎 KOMODO COMPOSE - VARIABLES 🦎 #
+####################################
+
+## These compose variables can be used with all Komodo deployment options.
+## Pass these variables to the compose up command using `--env-file komodo/compose.env`.
+## Additionally, they are passed to both Komodo Core and Komodo Periphery with `env_file: ./compose.env`,
+## so you can pass any additional environment variables to Core / Periphery directly in this file as well.
+
+## Stick to a specific version, or use `latest`
+COMPOSE_KOMODO_IMAGE_TAG=latest
+## Store dated database backups on the host - https://komo.do/docs/setup/backup
+COMPOSE_KOMODO_BACKUPS_PATH=/etc/komodo/backups
+
+## DB credentials
+KOMODO_DB_USERNAME=admin
+KOMODO_DB_PASSWORD=admin
+
+## Configure a secure passkey to authenticate between Core / Periphery.
+KOMODO_PASSKEY=a_random_passkey
+
+## Set your time zone for schedules
+## https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
+TZ=Etc/UTC
+
+#=-------------------------=#
+#= Komodo Core Environment =#
+#=-------------------------=#
+
+## Full variable list + descriptions are available here:
+## 🦎 https://github.com/moghtech/komodo/blob/main/config/core.config.toml 🦎
+
+## Note. Secret variables also support `${VARIABLE}_FILE` syntax to pass docker compose secrets.
+## Docs: https://docs.docker.com/compose/how-tos/use-secrets/#examples
+
+## Used for Oauth / Webhook url suggestion / Caddy reverse proxy.
+KOMODO_HOST=https://demo.komo.do
+## Displayed in the browser tab.
+KOMODO_TITLE=Komodo
+## Create a server matching this address as the "first server".
+## Use `https://host.docker.internal:8120` when using systemd-managed Periphery.
+KOMODO_FIRST_SERVER=https://periphery:8120
+## Give the first server a custom name.
+KOMODO_FIRST_SERVER_NAME=Local
+## Make all buttons just double-click, rather than the full confirmation dialog.
+KOMODO_DISABLE_CONFIRM_DIALOG=false
+
+## Rate Komodo polls your servers for
+## status / container status / system stats / alerting.
+## Options: 1-sec, 5-sec, 15-sec, 1-min, 5-min, 15-min
+## Default: 15-sec
+KOMODO_MONITORING_INTERVAL="15-sec"
+## Interval at which to poll Resources for any updates / automated actions.
+## Options: 15-min, 1-hr, 2-hr, 6-hr, 12-hr, 1-day
+## Default: 1-hr
+KOMODO_RESOURCE_POLL_INTERVAL="1-hr"
+
+## Used to auth incoming webhooks. Alt: KOMODO_WEBHOOK_SECRET_FILE
+KOMODO_WEBHOOK_SECRET=a_random_secret
+## Used to generate jwt. Alt: KOMODO_JWT_SECRET_FILE
+KOMODO_JWT_SECRET=a_random_jwt_secret
+## Time to live for jwt tokens.
+## Options: 1-hr, 12-hr, 1-day, 3-day, 1-wk, 2-wk
+KOMODO_JWT_TTL="1-day"
+
+## Enable login with username + password.
+KOMODO_LOCAL_AUTH=true
+## Set the initial admin username created upon first launch.
+## Comment out to disable initial user creation,
+## and create first user using signup button.
+KOMODO_INIT_ADMIN_USERNAME=admin
+## Set the initial admin password
+KOMODO_INIT_ADMIN_PASSWORD=changeme
+## Disable new user signups.
+KOMODO_DISABLE_USER_REGISTRATION=false
+## All new logins are auto enabled
+KOMODO_ENABLE_NEW_USERS=false
+## Disable non-admins from creating new resources.
+KOMODO_DISABLE_NON_ADMIN_CREATE=false
+## Allows all users to have Read level access to all resources.
+KOMODO_TRANSPARENT_MODE=false
+
+## Prettier logging with empty lines between logs
+KOMODO_LOGGING_PRETTY=false
+## More human readable logging of startup config (multi-line)
+KOMODO_PRETTY_STARTUP_CONFIG=false
+
+## OIDC Login
+KOMODO_OIDC_ENABLED=false
+## Must reachable from Komodo Core container
+# KOMODO_OIDC_PROVIDER=https://oidc.provider.internal/application/o/komodo
+## Change the host to one reachable be reachable by users (optional if it is the same as above).
+## DO NOT include the `path` part of the URL.
+# KOMODO_OIDC_REDIRECT_HOST=https://oidc.provider.external
+## Your OIDC client id
+# KOMODO_OIDC_CLIENT_ID= # Alt: KOMODO_OIDC_CLIENT_ID_FILE
+## Your OIDC client secret.
+## If your provider supports PKCE flow, this can be ommitted.
+# KOMODO_OIDC_CLIENT_SECRET= # Alt: KOMODO_OIDC_CLIENT_SECRET_FILE
+## Make usernames the full email.
+## Note. This does not work for all OIDC providers.
+# KOMODO_OIDC_USE_FULL_EMAIL=true
+## Add additional trusted audiences for token claims verification.
+## Supports comma separated list, and passing with _FILE (for compose secrets).
+# KOMODO_OIDC_ADDITIONAL_AUDIENCES=abc,123 # Alt: KOMODO_OIDC_ADDITIONAL_AUDIENCES_FILE
+
+## Github Oauth
+KOMODO_GITHUB_OAUTH_ENABLED=false
+# KOMODO_GITHUB_OAUTH_ID= # Alt: KOMODO_GITHUB_OAUTH_ID_FILE
+# KOMODO_GITHUB_OAUTH_SECRET= # Alt: KOMODO_GITHUB_OAUTH_SECRET_FILE
+
+## Google Oauth
+KOMODO_GOOGLE_OAUTH_ENABLED=false
+# KOMODO_GOOGLE_OAUTH_ID= # Alt: KOMODO_GOOGLE_OAUTH_ID_FILE
+# KOMODO_GOOGLE_OAUTH_SECRET= # Alt: KOMODO_GOOGLE_OAUTH_SECRET_FILE
+
+## Aws - Used to launch Builder instances.
+KOMODO_AWS_ACCESS_KEY_ID= # Alt: KOMODO_AWS_ACCESS_KEY_ID_FILE
+KOMODO_AWS_SECRET_ACCESS_KEY= # Alt: KOMODO_AWS_SECRET_ACCESS_KEY_FILE
+
+#=------------------------------=#
+#= Komodo Periphery Environment =#
+#=------------------------------=#
+
+## Full variable list + descriptions are available here:
+## 🦎 https://github.com/moghtech/komodo/blob/main/config/periphery.config.toml 🦎
+
+## Specify the root directory used by Periphery agent.
+PERIPHERY_ROOT_DIRECTORY=/etc/komodo
+
+## Periphery passkeys must include KOMODO_PASSKEY to authenticate.
+PERIPHERY_PASSKEYS=${KOMODO_PASSKEY}
+
+## Specify whether to disable the terminals feature
+## and disallow remote shell access (inside the Periphery container).
+PERIPHERY_DISABLE_TERMINALS=false
+
+## Enable SSL using self signed certificates.
+## Connect to Periphery at https://address:8120.
+PERIPHERY_SSL_ENABLED=true
+
+## If the disk size is overreporting, can use one of these to
+## whitelist / blacklist the disks to filter them, whichever is easier.
+## Accepts comma separated list of paths.
+## Usually whitelisting just /etc/hostname gives correct size.
+PERIPHERY_INCLUDE_DISK_MOUNTS=/etc/hostname
+# PERIPHERY_EXCLUDE_DISK_MOUNTS=/snap,/etc/repos
+
+## Prettier logging with empty lines between logs
+PERIPHERY_LOGGING_PRETTY=false
+## More human readable logging of startup config (multi-line)
+PERIPHERY_PRETTY_STARTUP_CONFIG=false
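The header comments describe passing this file to compose with `--env-file`. In this repository the Komodo compose files load it as `.env` via `env_file`, so usage could look like this (paths assumed):

```sh
# copy the template, fill in real secrets, then start MongoDB + Core + Periphery
cp komodo/.env.example komodo/.env
docker compose -f komodo/docker-compose.yaml --env-file komodo/.env up -d
```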
komodo/agent-compose.yaml (Normal file, +13)

@@ -0,0 +1,13 @@
+services:
+  periphery:
+    image: ghcr.io/moghtech/komodo-periphery:latest
+    container_name: komodo-periphery
+    restart: unless-stopped
+    ports:
+      - "8120:8120"
+    env_file:
+      - .env
+    volumes:
+      - /var/run/docker.sock:/var/run/docker.sock
+      - /proc:/proc
+      - /etc/komodo:/etc/komodo
komodo/docker-compose.yaml (Normal file, +79)

@@ -0,0 +1,79 @@
+################################
+# 🦎 KOMODO COMPOSE - MONGO 🦎 #
+################################
+## This compose file will deploy:
+##   1. MongoDB
+##   2. Komodo Core
+##   3. Komodo Periphery
+services:
+  mongo:
+    image: mongo:4.4.18
+    container_name: komodo-mongo
+    labels:
+      komodo.skip: # Prevent Komodo from stopping with StopAllContainers
+    command: --quiet --wiredTigerCacheSizeGB 0.25
+    restart: unless-stopped
+    # ports:
+    #   - 27017:27017
+    volumes:
+      - mongo-data:/data/db
+      - mongo-config:/data/configdb
+    environment:
+      MONGO_INITDB_ROOT_USERNAME: ${KOMODO_DB_USERNAME}
+      MONGO_INITDB_ROOT_PASSWORD: ${KOMODO_DB_PASSWORD}
+
+  core:
+    image: ghcr.io/moghtech/komodo-core:${COMPOSE_KOMODO_IMAGE_TAG:-latest}
+    container_name: komodo-core
+    labels:
+      komodo.skip: # Prevent Komodo from stopping with StopAllContainers
+    restart: unless-stopped
+    depends_on:
+      - mongo
+    ports:
+      - 9120:9120
+    env_file:
+      - .env
+    environment:
+      KOMODO_DATABASE_ADDRESS: mongo:27017
+      KOMODO_DATABASE_USERNAME: ${KOMODO_DB_USERNAME}
+      KOMODO_DATABASE_PASSWORD: ${KOMODO_DB_PASSWORD}
+    volumes:
+      ## Store dated backups of the database - https://komo.do/docs/setup/backup
+      ## No need to have bind mounts because of the ability to create backups on a separate volume
+      - ${COMPOSE_KOMODO_BACKUPS_PATH}:/backups
+      ## Store sync files on server
+      # - /path/to/syncs:/syncs
+      ## Optionally mount a custom core.config.toml
+      # - /path/to/core.config.toml:/config/config.toml
+    ## Allows for systemd Periphery connection at
+    ## "https://host.docker.internal:8120"
+    # extra_hosts:
+    #   - host.docker.internal:host-gateway
+
+  ## Deploy Periphery container using this block,
+  ## or deploy the Periphery binary with systemd using
+  ## https://github.com/moghtech/komodo/tree/main/scripts
+  periphery:
+    image: ghcr.io/moghtech/komodo-periphery:${COMPOSE_KOMODO_IMAGE_TAG:-latest}
+    container_name: komodo-periphery
+    labels:
+      komodo.skip: # Prevent Komodo from stopping with StopAllContainers
+    restart: unless-stopped
+    env_file:
+      - .env
+    volumes:
+      ## Mount external docker socket
+      - /var/run/docker.sock:/var/run/docker.sock
+      ## Allow Periphery to see processes outside of container
+      - /proc:/proc
+      ## Specify the Periphery agent root directory.
+      ## Must be the same inside and outside the container,
+      ## or docker will get confused. See https://github.com/moghtech/komodo/discussions/180.
+      ## Default: /etc/komodo.
+      - ${PERIPHERY_ROOT_DIRECTORY:-/etc/komodo}:${PERIPHERY_ROOT_DIRECTORY:-/etc/komodo}
+
+volumes:
+  # Mongo
+  mongo-data:
+  mongo-config:
@@ -1,4 +1,4 @@
-# Where your media will be stored
+# Where your media will be stored (if using bind mount, otherwise leave empty)
 DATA_FOLDER=/path/to/data
 
 # URL for your domain for Mealie
@@ -12,11 +12,25 @@ services:
       limits:
         memory: 1000M #
     volumes:
-      - ${DATA_FOLDER}:/app/data/
+      - mealie_data:/app/data/
     environment:
       # Set Backend ENV Variables Here
       ALLOW_SIGNUP: "false"
       PUID: 1000
       PGID: 1000
       TZ: Europe/Warsaw
-      BASE_URL: ${YOUR_DOMAIN:-https://mealie.yourdomain.com}
+      BASE_URL: ${BASE_URL:-https://mealie.yourdomain.com}
+    healthcheck:
+      test: ["CMD-SHELL", "python -c 'import socket; s = socket.socket(); s.connect((\"localhost\", 9000))'"]
+      interval: 30s
+      timeout: 10s
+      retries: 3
+    networks:
+      - mealie-net
+
+volumes:
+  mealie_data:
+
+networks:
+  mealie-net:
+    driver: bridge
n8n/.env.example (Normal file, +12)

@@ -0,0 +1,12 @@
+# Your timezone
+GENERIC_TIMEZONE=Europe/Warsaw
+# Directory where n8n will store files
+PATH_TO_FILES=/path/to/n8n/files
+# Domain for n8n
+DOMAIN_NAME=mbuz.uk
+SUBDOMAIN=automate
+N8N_HOST=automate.mbuz.uk
+N8N_PROTOCOL=https
+# URL to webhook for the webhook triggers
+WEBHOOK_URL=https://automate.mbuz.uk/
+NODE_ENV=production
n8n/docker-compose.yaml (Normal file, +19)

@@ -0,0 +1,19 @@
+services:
+  n8n:
+    image: docker.n8n.io/n8nio/n8n
+    container_name: n8n
+    restart: unless-stopped
+    ports:
+      - 5678:5678
+    env_file:
+      - .env
+    volumes:
+      - n8n_data:/home/node/.n8n
+      - ${PATH_TO_FILES}:/files
+    healthcheck:
+      test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://localhost:5678/healthz || exit 1"]
+      interval: 1m
+      timeout: 10s
+      retries: 3
+volumes:
+  n8n_data:
@@ -1,31 +1,35 @@
-version: '3.3'
 services:
   nextcloud:
     image: lscr.io/linuxserver/nextcloud:latest
     container_name: nextcloud
     env_file:
-      - stack.env
+      - .env
     environment:
       - PUID=1000
       - PGID=1000
      - PHP_MEMORY_LIMIT=${PHP_MEMORY_LIMIT}
       - PHP_UPLOAD_LIMIT=${PHP_UPLOAD_LIMIT}
       - TZ=${TZ}
-
     volumes:
       - ${CONFIG}:/config
       - ${DATA}:/data
     ports:
-      - 5443:443
+      - "5443:443"
     restart: unless-stopped
-    links:
-      - nextcloud-mariadb
     depends_on:
-      - nextcloud-mariadb
+      nextcloud-mariadb:
+        condition: service_healthy
+    networks:
+      - nextcloud
+    healthcheck:
+      test: ["CMD", "curl", "-f", "http://localhost/status.php"]
+      interval: 30s
+      timeout: 10s
+      retries: 3
 
   nextcloud-mariadb:
     image: lscr.io/linuxserver/mariadb:latest
-    container_name: nextloud-mariadb
+    container_name: nextcloud-mariadb
     environment:
       - PUID=1000
       - PGID=1000
@@ -34,15 +38,20 @@ services:
       - MYSQL_DATABASE=nextcloud
       - MYSQL_USER=nextcloud
       - MYSQL_PASSWORD=${MYSQL_PASSWORD}
-
     volumes:
       - ${MARIADB}:/config
     ports:
-      - 5306:3306
+      - "5306:3306"
     restart: unless-stopped
-
+    networks:
+      - nextcloud
+    healthcheck:
+      test: ["CMD", "mysqladmin", "ping", "-h", "localhost"]
+      interval: 10s
+      timeout: 5s
+      retries: 5
 
 networks:
-  default:
+  nextcloud:
     name: nextcloud
     driver: bridge
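The Nextcloud service now reads `.env` instead of `stack.env`. A hypothetical `.env` covering the variables referenced in the diff (values are placeholders):

```sh
# hypothetical nextcloud .env — variable names taken from the compose diff
CONFIG=/srv/nextcloud/config
DATA=/srv/nextcloud/data
MARIADB=/srv/nextcloud/mariadb
MYSQL_PASSWORD=change_me
PHP_MEMORY_LIMIT=512M
PHP_UPLOAD_LIMIT=10G
TZ=Europe/Warsaw
```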
@@ -11,4 +11,9 @@ services:
     volumes:
       - ${PGADMIN_DATA}:/var/lib/pgadmin
     extra_hosts:
       - "host.docker.internal:host-gateway"
+    healthcheck:
+      test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://localhost/misc/ping || exit 1"]
+      interval: 1m
+      timeout: 10s
+      retries: 3
@@ -8,4 +8,9 @@ services:
     volumes:
       - ${PORTAINER_DATA}:/data
       - /var/run/docker.sock:/var/run/docker.sock
     restart: unless-stopped
+    healthcheck:
+      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:9000/api/status"]
+      interval: 1m
+      timeout: 10s
+      retries: 3
semaphore-ui/docker-compose.yaml (Normal file, +29)

@@ -0,0 +1,29 @@
+services:
+  semaphore:
+    container_name: semaphore-ui
+    image: semaphoreui/semaphore:latest
+    restart: unless-stopped
+    ports:
+      - "3030:3000"
+    env_file:
+      - .env
+    volumes:
+      - semaphore-data:/var/lib/semaphore
+      - semaphore-config:/etc/semaphore
+    healthcheck:
+      test: ["CMD", "curl", "-f", "http://localhost:3000/api/health"]
+      interval: 30s
+      timeout: 10s
+      retries: 3
+    networks:
+      - semaphore-net
+
+volumes:
+  semaphore-data:
+    driver: local
+  semaphore-config:
+    driver: local
+
+networks:
+  semaphore-net:
+    driver: bridge
@@ -32,6 +32,11 @@ services:
         max-size: 10m
     ports:
       - '9091:9091'
+    healthcheck:
+      test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://localhost:9091 && ls /data || exit 1"]
+      interval: 1m
+      timeout: 10s
+      retries: 3
 
 # Not all the countries and servers are supporting p2p, so you need to choose the right server. Here's the hint:
 # https://support.nordvpn.com/hc/en-us/articles/20465085067665-NordVPN-proxy-setup-for-BitTorrent
@@ -1,4 +1,3 @@
----
 services:
   transmission:
     image: lscr.io/linuxserver/transmission:latest
@@ -22,3 +21,8 @@ services:
       - 51413:51413
       - 51413:51413/udp
     restart: unless-stopped
+    healthcheck:
+      test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://localhost:9091 || exit 1"]
+      interval: 1m
+      timeout: 10s
+      retries: 3
@@ -8,4 +8,9 @@ services:
     volumes:
       - ${PATH_TO_DATA}:/data
     ports:
       - 8033:80
+    healthcheck:
+      test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://localhost/ || exit 1"]
+      interval: 1m
+      timeout: 10s
+      retries: 3
watchtower/.env.example (Normal file, +6)

@@ -0,0 +1,6 @@
+# Timezone for watchtower
+TZ=Europe/Warsaw
+
+# A space-separated list of container names for Watchtower to monitor.
+# For example: WATCHTOWER_CONTAINERS="nginx-proxy-manager bookstack"
+WATCHTOWER_CONTAINERS=""
watchtower/docker-compose.yaml (Normal file, +13)

@@ -0,0 +1,13 @@
+services:
+  watchtower:
+    image: containrrr/watchtower
+    container_name: watchtower
+    volumes:
+      - /var/run/docker.sock:/var/run/docker.sock
+    environment:
+      - WATCHTOWER_CLEANUP=true
+      - WATCHTOWER_INCLUDE_STOPPED=true
+      - WATCHTOWER_POLL_INTERVAL=3600
+      - TZ=${TZ}
+    command: ${WATCHTOWER_CONTAINERS}
+    restart: unless-stopped
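Since the container list is passed straight through as `command:`, leaving WATCHTOWER_CONTAINERS empty lets Watchtower watch every container, while naming containers restricts it to those. A filled-in `.env` reusing the example from the template:

```sh
# hypothetical watchtower/.env
TZ=Europe/Warsaw
WATCHTOWER_CONTAINERS="nginx-proxy-manager bookstack"
```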
@@ -13,4 +13,9 @@
     # devices:
     #   - /dev/dri:/dev/dri #optional
     shm_size: "2gb" #optional
     restart: unless-stopped
+    healthcheck:
+      test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://localhost:3000/ || exit 1"]
+      interval: 1m
+      timeout: 10s
+      retries: 3