19 Commits

Author SHA1 Message Date
fa1f342c95 FIX: Changed all .yml to .yaml in orded to have the same way 2025-08-28 12:36:51 +02:00
dc94b51647 ADD: Agent compose file 2025-08-28 12:36:51 +02:00
558e494dfd FIX: Incorrect indentation 2025-08-28 12:36:51 +02:00
b47ac029bc FIX: yaml: line 1: did not find expected key 2025-08-28 12:36:51 +02:00
fcbaaab872 CHANGE: Bind to named mounts 2025-08-28 12:36:51 +02:00
1f6ac5d291 FIX: Changed mongo version to 4.4.18 because of AVX support 2025-08-28 12:36:51 +02:00
58f3f59665 FIX: Typo 2025-08-28 12:36:51 +02:00
543272ded8 CHANGE: Added container names 2025-08-28 12:36:51 +02:00
e7ca7182c0 FIX: Replaced second compose.env :D 2025-08-28 12:36:51 +02:00
f49b9fe4c3 FIX: compose.env was replaced with .env 2025-08-28 12:36:51 +02:00
3bc1487b29 ADD: Komodo 2025-08-28 12:36:51 +02:00
2bdbf1a313 FIX: Localhost check replaced with IP and now should work 2025-08-26 19:42:55 +02:00
5af8be33d8 FIX: Fixed env file 2025-08-26 19:30:59 +02:00
3e582e4d0c Merge branch 'test' 2025-08-26 19:26:56 +02:00
5715ba5c98 FIX: replaced environment with the proper .env file 2025-08-26 17:39:00 +02:00
76e58b602f FIX: Actually forgot to save gitignore :D 2025-08-26 17:33:48 +02:00
3fe7e47c3e ADD: gitignore after VM restore 2025-08-26 17:32:40 +02:00
527d330e72 CHANGE: Removed environment from docker compose 2025-08-26 17:31:19 +02:00
d5f483dbc8 TEST: Complex docker-compose rework. Not tested yet 2025-08-25 19:14:43 +02:00
34 changed files with 622 additions and 177 deletions

1
.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
.env

View File

@@ -14,11 +14,14 @@ services:
volumes:
- ${PATH_TO_CONFIG}:/config
ports:
- 6875:80
- "6875:80"
restart: unless-stopped
depends_on:
- bookstack_database
bookstack_database:
condition: service_healthy
networks:
- bookstack-net
bookstack_database:
image: lscr.io/linuxserver/mariadb
container_name: bookstack_database
@@ -32,4 +35,15 @@ services:
- MYSQL_PASSWORD=${DB_USER_PASS}
volumes:
- ${PATH_TO_DB}:/config
restart: unless-stopped
restart: unless-stopped
healthcheck:
test: ["CMD", "mysqladmin", "ping", "-h", "localhost"]
interval: 10s
timeout: 5s
retries: 5
networks:
- bookstack-net
networks:
bookstack-net:
driver: bridge

View File

@@ -0,0 +1,15 @@
services:
cloudflare-ddns:
image: favonia/cloudflare-ddns:latest
container_name: cloudflare-ddns
env_file: .env
network_mode: host
restart: always
user: "1000:1000"
read_only: true
cap_drop: [all]
security_opt: [no-new-privileges:true]
environment:
- CLOUDFLARE_API_TOKEN=${CLOUDFLARE_API_TOKEN}
- DOMAINS=${DOMAINS}
- PROXIED=true

View File

@@ -1,37 +0,0 @@
services:
cloudflare-ddns:
image: favonia/cloudflare-ddns:latest
container_name: cloudflare-ddns
env_file: .env
# Choose the appropriate tag based on your need:
# - "latest" for the latest stable version (which could become 2.x.y
# in the future and break things)
# - "1" for the latest stable version whose major version is 1
# - "1.x.y" to pin the specific version 1.x.y
network_mode: host
# This bypasses network isolation and makes IPv6 easier (optional; see below)
restart: always
# Restart the updater after reboot
user: "1000:1000"
# Run the updater with specific user and group IDs (in that order).
# You can change the two numbers based on your need.
read_only: true
# Make the container filesystem read-only (optional but recommended)
cap_drop: [all]
# Drop all Linux capabilities (optional but recommended)
security_opt: [no-new-privileges:true]
# Another protection to restrict superuser privileges (optional but recommended)
environment:
- CLOUDFLARE_API_TOKEN=${CLOUDFLARE_API_TOKEN}
# Your Cloudflare API token
- DOMAINS=${DOMAINS}
# Your domains (separated by commas)
- PROXIED=true
# Tell Cloudflare to cache webpages and hide your IP (optional)
#networks:
# LAN0:
# external: true
# name: LAN0
# Introduce custom Docker networks to the 'services' in this file. A common use case
# for this is binding one of the 'services' to a specific network interface available at
# Docker's host. This section is required for the 'networks' section of each 'services'.

View File

@@ -0,0 +1,15 @@
# InfluxDB credentials
INFLUXDB_PASSWORD=influxdb_secret_password
# Grafana credentials
GF_SECURITY_ADMIN_USER=admin
GF_SECURITY_ADMIN_PASSWORD=admin
# Garmin Connect credentials
GARMINCONNECT_EMAIL=your_garmin_email@example.com
GARMINCONNECT_PASSWORD=your_garmin_password_base64_encoded
# Paths for persistent data
GARMINCONNECT_TOKENS=./garminconnect-tokens
PATH_TO_INFLUXDB_DATA=./influxdb_data
PATH_TO_GRAFANA_DATA=./grafana_data

View File

@@ -0,0 +1,76 @@
services:
garmin-fetch-data:
restart: unless-stopped
image: thisisarpanghosh/garmin-fetch-data:latest
container_name: garmin-fetch-data
depends_on:
influxdb:
condition: service_healthy
volumes:
- garminconnect_tokens:/home/appuser/.garminconnect # persistent token storage (named volume)
environment:
- INFLUXDB_HOST=influxdb
- INFLUXDB_PORT=8086
- INFLUXDB_USERNAME=influxdb_user
- INFLUXDB_PASSWORD=${INFLUXDB_PASSWORD}
- INFLUXDB_DATABASE=GarminStats
- UPDATE_INTERVAL_SECONDS=300
- LOG_LEVEL=INFO
- GARMINCONNECT_EMAIL=${GARMINCONNECT_EMAIL}
- GARMINCONNECT_BASE64_PASSWORD=${GARMINCONNECT_PASSWORD} # (must be base64 encoded)
networks:
- garmin-grafana-net
influxdb:
restart: unless-stopped
container_name: influxdb
hostname: influxdb
image: influxdb:latest
environment:
- INFLUXDB_DB=GarminStats
- INFLUXDB_USER=influxdb_user
- INFLUXDB_USER_PASSWORD=${INFLUXDB_PASSWORD}
- INFLUXDB_DATA_INDEX_VERSION=tsi1
ports:
- '8086:8086'
volumes:
- influxdb_data:/var/lib/influxdb
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:8086/ping"]
interval: 10s
timeout: 5s
retries: 5
networks:
- garmin-grafana-net
grafana:
restart: unless-stopped
container_name: grafana
hostname: grafana
image: grafana/grafana:latest
environment:
- GF_SECURITY_ADMIN_USER=${GF_SECURITY_ADMIN_USER}
- GF_SECURITY_ADMIN_PASSWORD=${GF_SECURITY_ADMIN_PASSWORD}
volumes:
- grafana_data:/var/lib/grafana
ports:
- '3000:3000'
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:3000/api/health"]
interval: 10s
timeout: 5s
retries: 5
networks:
- garmin-grafana-net
networks:
garmin-grafana-net:
driver: bridge
volumes:
garminconnect_tokens:
name: garminconnect_tokens
influxdb_data:
name: influxdb_data
grafana_data:
name: grafana_data

View File

@@ -1,52 +0,0 @@
services:
garmin-fetch-data:
restart: unless-stopped
image: thisisarpanghosh/garmin-fetch-data:latest
container_name: garmin-fetch-data
depends_on:
- influxdb
volumes:
- ${GARMINCONNECT_TOKENS}:/home/appuser/.garminconnect # (persistant tokens storage - garminconnect-tokens folder must be owned by 1000:1000)
environment:
- INFLUXDB_HOST=influxdb
- INFLUXDB_PORT=8086
- INFLUXDB_USERNAME=influxdb_user
- INFLUXDB_PASSWORD=influxdb_secret_password
- INFLUXDB_DATABASE=GarminStats
- UPDATE_INTERVAL_SECONDS=300
- LOG_LEVEL=INFO
- GARMINCONNECT_EMAIL=${GARMINCONNECT_EMAIL}
- GARMINCONNECT_BASE64_PASSWORD=${GARMINCONNECT_PASSWORD} # (must be base64 encoded)
influxdb:
restart: unless-stopped
container_name: influxdb
hostname: influxdb
environment:
- INFLUXDB_DB=GarminStats
- INFLUXDB_USER=influxdb_user
- INFLUXDB_USER_PASSWORD=influxdb_secret_password
- INFLUXDB_DATA_INDEX_VERSION=tsi1
ports:
- '8086:8086'
volumes:
- influxdb_data:/var/lib/influxdb
image: 'influxdb:1.11'
grafana:
restart: unless-stopped
container_name: grafana
hostname: grafana
environment:
- GF_SECURITY_ADMIN_USER=admin
- GF_SECURITY_ADMIN_PASSWORD=admin
volumes:
- grafana_data:/var/lib/grafana
ports:
- '3000:3000'
image: 'grafana/grafana:latest'
volumes:
influxdb_data:
grafana_data:

View File

@@ -1,12 +1,23 @@
# An image from abesnier that works as an all-in-one and does not require database initiation.
# I don't know if it has any limitations. For my needs it fits perfectly.
services:
guacamole:
image: abesnier/guacamole:latest
restart: unless-stopped
container_name: guacamole
volumes:
- ${PATH_TO_CONFIG}:/config
ports:
- 8080:8080
# An image from abesnier that works as an all-in-one and does not require database initiation.
# I don't know if it has any limitations. For my needs it fits perfectly.
services:
guacamole:
image: abesnier/guacamole:latest
restart: unless-stopped
container_name: guacamole
volumes:
- ${PATH_TO_CONFIG}:/config
ports:
- 8080:8080
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:8080/"]
interval: 30s
timeout: 10s
retries: 3
networks:
- guacamole-net
networks:
guacamole-net:
driver: bridge

2
heimdall/.env.example Normal file
View File

@@ -0,0 +1,2 @@
# Path to the Heimdall config folder
PATH_TO_CONFIG=/home/mbuz/docker/heimdall/config

View File

@@ -0,0 +1,25 @@
services:
heimdall:
image: lscr.io/linuxserver/heimdall:latest
container_name: heimdall
environment:
- PUID=1000
- PGID=1000
- TZ=Europe/Warsaw
volumes:
- ${PATH_TO_CONFIG}:/config
ports:
- "80:80"
- "443:443"
restart: unless-stopped
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost/"]
interval: 30s
timeout: 10s
retries: 3
networks:
- heimdall-net
networks:
heimdall-net:
driver: bridge

View File

@@ -1,16 +0,0 @@
version: "2.1"
services:
heimdall:
image: lscr.io/linuxserver/heimdall:latest
container_name: heimdall
environment:
- PUID=1000
- PGID=1000
- TZ=Europe/Warsaw
volumes:
- /home/mbuz/docker/heimdall/config:/config
ports:
- 80:80
- 443:443
restart: unless-stopped

View File

@@ -1,10 +1,23 @@
services:
homepage:
image: ghcr.io/gethomepage/homepage:latest
container_name: homepage
ports:
- 3001:3000
volumes:
- ${PATH_TO_CONFIG}:/app/config # Make sure your local config directory exists
- /var/run/docker.sock:/var/run/docker.sock # (optional) For docker integrations
restart: unless-stopped
services:
homepage:
image: ghcr.io/gethomepage/homepage:latest
container_name: homepage
ports:
- "3001:3000"
env_file:
- .env # Make sure this file exists and contains necessary environment variables (check .env.example)
volumes:
- ${PATH_TO_CONFIG}:/app/config # Make sure your local config directory exists
- /var/run/docker.sock:/var/run/docker.sock # (optional) For docker integrations
restart: unless-stopped
healthcheck:
test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://127.0.0.1:3000"]
interval: 30s
timeout: 10s
retries: 3
networks:
- homepage-net
networks:
homepage-net:
driver: bridge

View File

@@ -30,6 +30,8 @@ services:
restart: unless-stopped
healthcheck:
disable: false
networks:
- immich-net
immich-machine-learning:
container_name: immich_machine_learning
@@ -46,6 +48,8 @@ services:
restart: unless-stopped
healthcheck:
disable: false
networks:
- immich-net
redis:
container_name: immich_redis
@@ -53,6 +57,8 @@ services:
healthcheck:
test: redis-cli ping || exit 1
restart: unless-stopped
networks:
- immich-net
database:
container_name: immich_postgres
@@ -84,6 +90,12 @@ services:
-c shared_buffers=512MB
-c wal_compression=on
restart: unless-stopped
networks:
- immich-net
volumes:
model-cache:
networks:
immich-net:
driver: bridge

View File

@@ -0,0 +1,18 @@
services:
it-tools:
image: 'corentinth/it-tools:latest'
ports:
- '8182:80' # change if needed
restart: unless-stopped
container_name: it-tools
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost/"]
interval: 30s
timeout: 10s
retries: 3
networks:
- it-tools-net
networks:
it-tools-net:
driver: bridge

View File

@@ -1,7 +0,0 @@
services:
it-tools:
image: 'corentinth/it-tools:latest'
ports:
- '8182:80' # change if needed
restart: unless-stopped
container_name: it-tools

152
komodo/.env.example Normal file
View File

@@ -0,0 +1,152 @@
####################################
# 🦎 KOMODO COMPOSE - VARIABLES 🦎 #
####################################
## These compose variables can be used with all Komodo deployment options.
## Pass these variables to the compose up command using `--env-file komodo/compose.env`.
## Additionally, they are passed to both Komodo Core and Komodo Periphery with `env_file: ./compose.env`,
## so you can pass any additional environment variables to Core / Periphery directly in this file as well.
## Stick to a specific version, or use `latest`
COMPOSE_KOMODO_IMAGE_TAG=latest
## Store dated database backups on the host - https://komo.do/docs/setup/backup
COMPOSE_KOMODO_BACKUPS_PATH=/etc/komodo/backups
## DB credentials
KOMODO_DB_USERNAME=admin
KOMODO_DB_PASSWORD=admin
## Configure a secure passkey to authenticate between Core / Periphery.
KOMODO_PASSKEY=a_random_passkey
## Set your time zone for schedules
## https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
TZ=Etc/UTC
#=-------------------------=#
#= Komodo Core Environment =#
#=-------------------------=#
## Full variable list + descriptions are available here:
## 🦎 https://github.com/moghtech/komodo/blob/main/config/core.config.toml 🦎
## Note. Secret variables also support `${VARIABLE}_FILE` syntax to pass docker compose secrets.
## Docs: https://docs.docker.com/compose/how-tos/use-secrets/#examples
## Used for Oauth / Webhook url suggestion / Caddy reverse proxy.
KOMODO_HOST=https://demo.komo.do
## Displayed in the browser tab.
KOMODO_TITLE=Komodo
## Create a server matching this address as the "first server".
## Use `https://host.docker.internal:8120` when using systemd-managed Periphery.
KOMODO_FIRST_SERVER=https://periphery:8120
## Give the first server a custom name.
KOMODO_FIRST_SERVER_NAME=Local
## Make all buttons just double-click, rather than the full confirmation dialog.
KOMODO_DISABLE_CONFIRM_DIALOG=false
## Rate at which Komodo polls your servers for
## status / container status / system stats / alerting.
## Options: 1-sec, 5-sec, 15-sec, 1-min, 5-min, 15-min
## Default: 15-sec
KOMODO_MONITORING_INTERVAL="15-sec"
## Interval at which to poll Resources for any updates / automated actions.
## Options: 15-min, 1-hr, 2-hr, 6-hr, 12-hr, 1-day
## Default: 1-hr
KOMODO_RESOURCE_POLL_INTERVAL="1-hr"
## Used to auth incoming webhooks. Alt: KOMODO_WEBHOOK_SECRET_FILE
KOMODO_WEBHOOK_SECRET=a_random_secret
## Used to generate jwt. Alt: KOMODO_JWT_SECRET_FILE
KOMODO_JWT_SECRET=a_random_jwt_secret
## Time to live for jwt tokens.
## Options: 1-hr, 12-hr, 1-day, 3-day, 1-wk, 2-wk
KOMODO_JWT_TTL="1-day"
## Enable login with username + password.
KOMODO_LOCAL_AUTH=true
## Set the initial admin username created upon first launch.
## Comment out to disable initial user creation,
## and create first user using signup button.
KOMODO_INIT_ADMIN_USERNAME=admin
## Set the initial admin password
KOMODO_INIT_ADMIN_PASSWORD=changeme
## Disable new user signups.
KOMODO_DISABLE_USER_REGISTRATION=false
## All new logins are auto enabled
KOMODO_ENABLE_NEW_USERS=false
## Disable non-admins from creating new resources.
KOMODO_DISABLE_NON_ADMIN_CREATE=false
## Allows all users to have Read level access to all resources.
KOMODO_TRANSPARENT_MODE=false
## Prettier logging with empty lines between logs
KOMODO_LOGGING_PRETTY=false
## More human readable logging of startup config (multi-line)
KOMODO_PRETTY_STARTUP_CONFIG=false
## OIDC Login
KOMODO_OIDC_ENABLED=false
## Must be reachable from the Komodo Core container
# KOMODO_OIDC_PROVIDER=https://oidc.provider.internal/application/o/komodo
## Change the host to one reachable by users (optional if it is the same as above).
## DO NOT include the `path` part of the URL.
# KOMODO_OIDC_REDIRECT_HOST=https://oidc.provider.external
## Your OIDC client id
# KOMODO_OIDC_CLIENT_ID= # Alt: KOMODO_OIDC_CLIENT_ID_FILE
## Your OIDC client secret.
## If your provider supports PKCE flow, this can be omitted.
# KOMODO_OIDC_CLIENT_SECRET= # Alt: KOMODO_OIDC_CLIENT_SECRET_FILE
## Make usernames the full email.
## Note. This does not work for all OIDC providers.
# KOMODO_OIDC_USE_FULL_EMAIL=true
## Add additional trusted audiences for token claims verification.
## Supports comma separated list, and passing with _FILE (for compose secrets).
# KOMODO_OIDC_ADDITIONAL_AUDIENCES=abc,123 # Alt: KOMODO_OIDC_ADDITIONAL_AUDIENCES_FILE
## Github Oauth
KOMODO_GITHUB_OAUTH_ENABLED=false
# KOMODO_GITHUB_OAUTH_ID= # Alt: KOMODO_GITHUB_OAUTH_ID_FILE
# KOMODO_GITHUB_OAUTH_SECRET= # Alt: KOMODO_GITHUB_OAUTH_SECRET_FILE
## Google Oauth
KOMODO_GOOGLE_OAUTH_ENABLED=false
# KOMODO_GOOGLE_OAUTH_ID= # Alt: KOMODO_GOOGLE_OAUTH_ID_FILE
# KOMODO_GOOGLE_OAUTH_SECRET= # Alt: KOMODO_GOOGLE_OAUTH_SECRET_FILE
## Aws - Used to launch Builder instances.
KOMODO_AWS_ACCESS_KEY_ID= # Alt: KOMODO_AWS_ACCESS_KEY_ID_FILE
KOMODO_AWS_SECRET_ACCESS_KEY= # Alt: KOMODO_AWS_SECRET_ACCESS_KEY_FILE
#=------------------------------=#
#= Komodo Periphery Environment =#
#=------------------------------=#
## Full variable list + descriptions are available here:
## 🦎 https://github.com/moghtech/komodo/blob/main/config/periphery.config.toml 🦎
## Specify the root directory used by Periphery agent.
PERIPHERY_ROOT_DIRECTORY=/etc/komodo
## Periphery passkeys must include KOMODO_PASSKEY to authenticate.
PERIPHERY_PASSKEYS=${KOMODO_PASSKEY}
## Specify whether to disable the terminals feature
## and disallow remote shell access (inside the Periphery container).
PERIPHERY_DISABLE_TERMINALS=false
## Enable SSL using self signed certificates.
## Connect to Periphery at https://address:8120.
PERIPHERY_SSL_ENABLED=true
## If the disk size is overreporting, can use one of these to
## whitelist / blacklist the disks to filter them, whichever is easier.
## Accepts comma separated list of paths.
## Usually whitelisting just /etc/hostname gives correct size.
PERIPHERY_INCLUDE_DISK_MOUNTS=/etc/hostname
# PERIPHERY_EXCLUDE_DISK_MOUNTS=/snap,/etc/repos
## Prettier logging with empty lines between logs
PERIPHERY_LOGGING_PRETTY=false
## More human readable logging of startup config (multi-line)
PERIPHERY_PRETTY_STARTUP_CONFIG=false

13
komodo/agent-compose.yaml Normal file
View File

@@ -0,0 +1,13 @@
services:
periphery:
image: ghcr.io/moghtech/komodo-periphery:latest
container_name: komodo-periphery
restart: unless-stopped
ports:
- "8120:8120"
env_file:
- .env
volumes:
- /var/run/docker.sock:/var/run/docker.sock
- /proc:/proc
- /etc/komodo:/etc/komodo

View File

@@ -0,0 +1,79 @@
################################
# 🦎 KOMODO COMPOSE - MONGO 🦎 #
################################
## This compose file will deploy:
## 1. MongoDB
## 2. Komodo Core
## 3. Komodo Periphery
services:
mongo:
image: mongo:4.4.18
container_name: komodo-mongo
labels:
komodo.skip: # Prevent Komodo from stopping with StopAllContainers
command: --quiet --wiredTigerCacheSizeGB 0.25
restart: unless-stopped
# ports:
# - 27017:27017
volumes:
- mongo-data:/data/db
- mongo-config:/data/configdb
environment:
MONGO_INITDB_ROOT_USERNAME: ${KOMODO_DB_USERNAME}
MONGO_INITDB_ROOT_PASSWORD: ${KOMODO_DB_PASSWORD}
core:
image: ghcr.io/moghtech/komodo-core:${COMPOSE_KOMODO_IMAGE_TAG:-latest}
container_name: komodo-core
labels:
komodo.skip: # Prevent Komodo from stopping with StopAllContainers
restart: unless-stopped
depends_on:
- mongo
ports:
- 9120:9120
env_file:
- .env
environment:
KOMODO_DATABASE_ADDRESS: mongo:27017
KOMODO_DATABASE_USERNAME: ${KOMODO_DB_USERNAME}
KOMODO_DATABASE_PASSWORD: ${KOMODO_DB_PASSWORD}
volumes:
## Store dated backups of the database - https://komo.do/docs/setup/backup
## No need to have bind mounts because of the ability to create backups on a separate volume
- ${COMPOSE_KOMODO_BACKUPS_PATH}:/backups
## Store sync files on server
# - /path/to/syncs:/syncs
## Optionally mount a custom core.config.toml
# - /path/to/core.config.toml:/config/config.toml
## Allows for systemd Periphery connection at
## "https://host.docker.internal:8120"
# extra_hosts:
# - host.docker.internal:host-gateway
## Deploy Periphery container using this block,
## or deploy the Periphery binary with systemd using
## https://github.com/moghtech/komodo/tree/main/scripts
periphery:
image: ghcr.io/moghtech/komodo-periphery:${COMPOSE_KOMODO_IMAGE_TAG:-latest}
container_name: komodo-periphery
labels:
komodo.skip: # Prevent Komodo from stopping with StopAllContainers
restart: unless-stopped
env_file:
- .env
volumes:
## Mount external docker socket
- /var/run/docker.sock:/var/run/docker.sock
## Allow Periphery to see processes outside of container
- /proc:/proc
## Specify the Periphery agent root directory.
## Must be the same inside and outside the container,
## or docker will get confused. See https://github.com/moghtech/komodo/discussions/180.
## Default: /etc/komodo.
- ${PERIPHERY_ROOT_DIRECTORY:-/etc/komodo}:${PERIPHERY_ROOT_DIRECTORY:-/etc/komodo}
volumes:
# Mongo
mongo-data:
mongo-config:

View File

@@ -19,4 +19,15 @@ services:
PUID: 1000
PGID: 1000
TZ: Europe/Warsaw
BASE_URL: ${YOUR_DOMAIN:-https://mealie.yourdomain.com}
BASE_URL: ${YOUR_DOMAIN:-https://mealie.yourdomain.com}
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:9000"]
interval: 30s
timeout: 10s
retries: 3
networks:
- mealie-net
networks:
mealie-net:
driver: bridge

12
n8n/.env.example Normal file
View File

@@ -0,0 +1,12 @@
# Your timezone
GENERIC_TIMEZONE=Europe/Warsaw
# Directory where n8n will store files
PATH_TO_FILES=/path/to/n8n/files
# Domain for n8n
DOMAIN_NAME=mbuz.uk
SUBDOMAIN=automate
N8N_HOST=automate.mbuz.uk
N8N_PROTOCOL=https
# URL to webhook for the webhook triggers
WEBHOOK_URL=https://automate.mbuz.uk/
NODE_ENV=production

19
n8n/docker-compose.yaml Normal file
View File

@@ -0,0 +1,19 @@
services:
n8n:
image: docker.n8n.io/n8nio/n8n
container_name: n8n
restart: unless-stopped
ports:
- 5678:5678
env_file:
- .env
volumes:
- n8n_data:/home/node/.n8n
- ${PATH_TO_FILES}:/files
healthcheck:
test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://localhost:5678/healthz || exit 1"]
interval: 1m
timeout: 10s
retries: 3
volumes:
n8n_data:

View File

@@ -1,31 +1,35 @@
version: '3.3'
services:
nextcloud:
nextcloud:
image: lscr.io/linuxserver/nextcloud:latest
container_name: nextcloud
env_file:
- stack.env
- .env
environment:
- PUID=1000
- PGID=1000
- PHP_MEMORY_LIMIT=${PHP_MEMORY_LIMIT}
- PHP_UPLOAD_LIMIT=${PHP_UPLOAD_LIMIT}
- TZ=${TZ}
volumes:
- ${CONFIG}:/config
- ${DATA}:/data
ports:
- 5443:443
- "5443:443"
restart: unless-stopped
links:
- nextcloud-mariadb
depends_on:
- nextcloud-mariadb
nextcloud-mariadb:
nextcloud-mariadb:
condition: service_healthy
networks:
- nextcloud
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost/status.php"]
interval: 30s
timeout: 10s
retries: 3
nextcloud-mariadb:
image: lscr.io/linuxserver/mariadb:latest
container_name: nextloud-mariadb
container_name: nextcloud-mariadb
environment:
- PUID=1000
- PGID=1000
@@ -34,15 +38,20 @@ services:
- MYSQL_DATABASE=nextcloud
- MYSQL_USER=nextcloud
- MYSQL_PASSWORD=${MYSQL_PASSWORD}
volumes:
- ${MARIADB}:/config
ports:
- 5306:3306
- "5306:3306"
restart: unless-stopped
networks:
- nextcloud
healthcheck:
test: ["CMD", "mysqladmin", "ping", "-h", "localhost"]
interval: 10s
timeout: 5s
retries: 5
networks:
default:
nextcloud:
name: nextcloud
driver: bridge
driver: bridge

View File

@@ -11,4 +11,9 @@ services:
volumes:
- ${PGADMIN_DATA}:/var/lib/pgadmin
extra_hosts:
- "host.docker.internal:host-gateway"
- "host.docker.internal:host-gateway"
healthcheck:
test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://localhost/misc/ping || exit 1"]
interval: 1m
timeout: 10s
retries: 3

View File

@@ -8,4 +8,9 @@ services:
volumes:
- ${PORTAINER_DATA}:/data
- /var/run/docker.sock:/var/run/docker.sock
restart: unless-stopped
restart: unless-stopped
healthcheck:
test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:9000/api/status"]
interval: 1m
timeout: 10s
retries: 3

View File

@@ -0,0 +1,29 @@
services:
semaphore:
container_name: semaphore-ui
image: semaphoreui/semaphore:latest
restart: unless-stopped
ports:
- "3030:3000"
env_file:
- .env
volumes:
- semaphore-data:/var/lib/semaphore
- semaphore-config:/etc/semaphore
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:3000/api/health"]
interval: 30s
timeout: 10s
retries: 3
networks:
- semaphore-net
volumes:
semaphore-data:
driver: local
semaphore-config:
driver: local
networks:
semaphore-net:
driver: bridge

View File

@@ -1,17 +0,0 @@
services:
semaphore:
ports:
- 3030:3000
image: semaphoreui/semaphore:v2.16.18
environment:
SEMAPHORE_DB_DIALECT: sqlite
SEMAPHORE_ADMIN: ${ADMIN_USER}
SEMAPHORE_ADMIN_PASSWORD: ${ADMIN_PASS}
SEMAPHORE_ADMIN_NAME: ${ADMIN_NAME}
SEMAPHORE_ADMIN_EMAIL: ${ADMIN_EMAIL}
volumes:
- semaphore-data:/var/lib/semaphore
- semaphore-config:/etc/semaphore
volumes:
semaphore-data:
semaphore-config:

View File

@@ -32,6 +32,11 @@ services:
max-size: 10m
ports:
- '9091:9091'
healthcheck:
test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://localhost:9091 && ls /data || exit 1"]
interval: 1m
timeout: 10s
retries: 3
# Not all the countries and servers are supporting p2p, so you need to choose the right server. Here's the hint:
# https://support.nordvpn.com/hc/en-us/articles/20465085067665-NordVPN-proxy-setup-for-BitTorrent

View File

@@ -1,4 +1,3 @@
---
services:
transmission:
image: lscr.io/linuxserver/transmission:latest
@@ -22,3 +21,8 @@ services:
- 51413:51413
- 51413:51413/udp
restart: unless-stopped
healthcheck:
test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://localhost:9091 || exit 1"]
interval: 1m
timeout: 10s
retries: 3

View File

@@ -8,4 +8,9 @@ services:
volumes:
- ${PATH_TO_DATA}:/data
ports:
- 8033:80
- 8033:80
healthcheck:
test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://localhost/ || exit 1"]
interval: 1m
timeout: 10s
retries: 3

6
watchtower/.env.example Normal file
View File

@@ -0,0 +1,6 @@
# Timezone for watchtower
TZ=Europe/Warsaw
# A space-separated list of container names for Watchtower to monitor.
# For example: WATCHTOWER_CONTAINERS="nginx-proxy-manager bookstack"
WATCHTOWER_CONTAINERS=""

View File

@@ -0,0 +1,13 @@
services:
watchtower:
image: containrrr/watchtower
container_name: watchtower
volumes:
- /var/run/docker.sock:/var/run/docker.sock
environment:
- WATCHTOWER_CLEANUP=true
- WATCHTOWER_INCLUDE_STOPPED=true
- WATCHTOWER_POLL_INTERVAL=3600
- TZ=${TZ}
command: ${WATCHTOWER_CONTAINERS}
restart: unless-stopped

View File

@@ -13,4 +13,9 @@ services:
# devices:
# - /dev/dri:/dev/dri #optional
shm_size: "2gb" #optional
restart: unless-stopped
restart: unless-stopped
healthcheck:
test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://localhost:3000/ || exit 1"]
interval: 1m
timeout: 10s
retries: 3