Compare commits

61 Commits

SHA1 Message Date
6ca0b7b5d3 TEST: Complex docker-compose rework. Not tested yet 2025-08-25 19:14:43 +02:00
8d8b40f2e6 FIX: Fixed problems with .env file. Added separate ansible host 2025-08-24 23:21:41 +02:00
21109f160e FIX: Fixed incorrect .env file and updated inventory to have a separate Ansible host 2025-08-24 23:09:32 +02:00
cd077517a0 ADD: Script to run via docker on Ansible host 2025-08-24 22:14:48 +02:00
d3a8cc66ad ADD: Script to run via docker on Ansible host 2025-08-24 22:12:53 +02:00
b12b1fa924 Merge branch 'test' 2025-08-24 21:55:10 +02:00
7919919d23 CHANGE: Bind mounts were replaced with named volumes 2025-08-24 21:53:15 +02:00
f590fc777f CHANGE: Removed volumes 2025-08-24 21:27:24 +02:00
742fc0d421 Merge branch 'test' of gitea-repo:mbuz/homelab into test 2025-08-24 21:09:23 +02:00
44104a9a57 ADD: Semaphore UI initial commit 2025-08-24 21:08:16 +02:00
1609167399 CHANGE: removed unneeded image from the Docker/immich directory 2025-08-24 21:05:16 +02:00
24861aaa70 ADD: Semaphore UI initial commit 2025-08-24 21:04:24 +02:00
ab9059374e ADD: Semaphore UI initial commit 2025-08-24 21:03:19 +02:00
8c285e2682 ADD: Semaphore UI initial commit 2025-08-24 21:02:44 +02:00
dd546bc033 CHANGE: Replaced an example file with the official one 2025-08-24 20:42:41 +02:00
8391a77535 change: added more secrets (some of which are used in playbooks) to the example file 2025-08-24 19:56:39 +02:00
2a091a6f21 change: removed unneeded image from the Docker/immich directory 2025-08-24 19:19:26 +02:00
11e1b9b89f fix: removed non-interactive environment to test without it 2025-08-24 18:32:41 +02:00
8c239b0dc9 Fixed sub group (forgot to adjust it) 2025-08-24 18:21:10 +02:00
6ba8ee4d79 redefined grouping to separate ubuntu hosts from proxmox 2025-08-24 18:20:16 +02:00
b6a9e5912f Changes in inventory 2025-08-24 18:14:45 +02:00
a3c6d0c0d6 forgot to add oracle into ubuntu sub group 2025-08-24 18:05:47 +02:00
88f8edcd0d changes in the inventory structure 2025-08-24 18:05:14 +02:00
d904f10434 Changed addresses to be able to run playbooks on the control node 2025-08-24 17:59:02 +02:00
d54ef85c48 Key file adjustment 2025-08-24 17:57:29 +02:00
a2cc1eafb6 Adjusted inventory file with the real IP addresses. I do not see the reason to skip local IPs. 2025-08-24 17:54:46 +02:00
3b68e15bb9 non-interactive apt run 2025-08-24 17:08:48 +02:00
71e8d19574 Autoremove added 2025-08-24 16:54:24 +02:00
8ba2e93e77 Adjusted apt update to provide feedback if an upgrade is needed and wait one hour for cache update. Added localhost entry into the hosts.ini 2025-08-24 16:28:36 +02:00
a3d9c54ba0 Adjusted README 2025-08-24 16:00:49 +02:00
95e8475baf Added "Managed by Ansible" comment 2025-08-24 15:49:33 +02:00
96fd1e13b2 Fixed another typo in the host name 2025-08-24 15:45:46 +02:00
71edcd698c Added agent2 installation 2025-08-24 15:44:51 +02:00
d2c632616a Changes in the connection configuration 2025-08-24 15:29:37 +02:00
47b9b71cd1 Fixed a typo in the host name during the status check 2025-08-24 15:24:20 +02:00
c372a781e5 Fixed some syntax problems in yml 2025-08-24 15:23:28 +02:00
81b03b095c Fixed broken link one more time :D 2025-08-24 15:20:03 +02:00
927ec6cd5a Fixed incorrect file path 2025-08-24 15:18:05 +02:00
1aa1b4f34e Fixed broken link 2025-08-24 15:16:19 +02:00
4cd491619e Changes in Zabbix repository installation 2025-08-24 15:12:44 +02:00
9666089f97 Removed duplicated statement for ssh key file 2025-08-24 15:09:50 +02:00
5c3fcbf58a Added ansible public key 2025-08-24 15:08:01 +02:00
22593070e4 Removed group_vars and now global variables are defined in the inventory file 2025-08-24 15:06:58 +02:00
7cebd1bef6 added global variable for user and key to use 2025-08-24 15:04:38 +02:00
25a9bcf507 Changes in playbook to connect as a root user. 2025-08-24 14:52:14 +02:00
7a5ba5c7c8 added root user to vars 2025-08-24 14:49:00 +02:00
b1ae2d753f Changed vars to use private key. Added some comments 2025-08-24 14:47:12 +02:00
13a9e853f1 Fixed hosts: 2025-08-24 14:41:43 +02:00
b6c0f4f63d typo in host name 2025-08-24 14:33:46 +02:00
4b3f00134a Improvements in the proxy deploy. Now it has two separate files and uses the hostname as the proxy hostname 2025-08-24 14:33:11 +02:00
1197e92bd8 Changed file structure. Added zabbix proxy playbook 2025-08-24 14:30:55 +02:00
7bd28950ca Playbook will also install software-properties-common. Removed hardcoded host name. 2025-08-24 13:27:53 +02:00
7c71697722 Improved readme file 2025-08-24 13:16:14 +02:00
643add64cf Removed mode parameter as it should be automatically handled by the module 2025-08-24 13:11:49 +02:00
7c22b770f0 Changed vars to group vars for test 2025-08-24 13:09:19 +02:00
a8f5092689 added key file into vars 2025-08-24 13:06:10 +02:00
10fddc7d44 fixed quotes typo 2025-08-24 13:00:32 +02:00
5091aaa52c Added example of inventory 2025-08-24 12:57:47 +02:00
62c2842610 Created test playbook to set up LXC with my settings. Added secrets example and variables 2025-08-24 12:53:40 +02:00
3003dd730e Changes in gitignore 2025-08-24 11:48:12 +02:00
45e24d115a Revert "Some changes in Ansible configuration"
This reverts commit ea113ad443.
2025-08-24 11:44:27 +02:00
38 changed files with 614 additions and 153 deletions

.gitignore

@@ -1,3 +1,2 @@
.vscode/
Ansible/secrets.yml
Ansible/inventory/hosts.ini
Ansible/secrets.yml


@@ -1,18 +1,21 @@
# Ansible Playbooks
# Ansible for Homelab
This directory contains Ansible playbooks for automating server configuration and management tasks.
This Ansible setup is designed to automate the configuration and maintenance of servers and applications in the homelab. It includes playbooks for common tasks, inventory management for different environments, and a structured way to handle variables and secrets.
## Playbooks
## Directory Structure
- **apt_upgrade.yml**: This playbook updates all packages on a Debian/Ubuntu server.
- **zabbix_agent_upgrade.yml**: This playbook upgrades the Zabbix agent on a server.
- `inventory/`: Contains the inventory files that define the hosts and groups of hosts managed by Ansible.
- `playbooks/`: Contains the Ansible playbooks for various automation tasks.
- `secrets.yml`: This file is intended to store sensitive data like passwords and API keys. It is recommended to encrypt this file using Ansible Vault.
- `example_secrets.yml`: An example secrets file.
- `vars.yml`: This file can be used to store non-sensitive variables that are used across multiple playbooks.
## Usage
## Getting Started
To use these playbooks, you will need to have Ansible installed on your control machine. You will also need to have an inventory file that defines the hosts you want to manage.
Once you have Ansible and an inventory file set up, you can run a playbook using the following command:
```
ansible-playbook -i <inventory_file> <playbook>.yml
```
1. **Install Ansible:** Make sure you have Ansible installed on your control machine.
2. **Inventory:** Update the `inventory/hosts.ini` file with the IP addresses and connection details for your servers.
3. **Secrets:** Create a `secrets.yml` file based on the `example_secrets.yml` template and encrypt it using Ansible Vault for security.
4. **Run a Playbook:** You can run a playbook using the `ansible-playbook` command. For example:
```bash
ansible-playbook -i inventory/hosts.ini playbooks/apt_upgrade.yml
```
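
For the secrets step above, a minimal Ansible Vault workflow looks like this (the playbook path is illustrative):

```bash
# Encrypt the secrets file in place (prompts for a new vault password)
ansible-vault encrypt secrets.yml

# Edit the encrypted file later without writing plaintext to disk
ansible-vault edit secrets.yml

# Supply the vault password when running a playbook that loads secrets.yml
ansible-playbook -i inventory/hosts.ini playbooks/zabbix_proxy.yml --ask-vault-pass
```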


@@ -1,24 +0,0 @@
- name: Upgrade packages
hosts: vms
become: true
tasks:
- name: Update apt cache
ansible.builtin.apt:
update_cache: true
register: cache_updated
- name: Upgrade all packages
ansible.builtin.apt:
upgrade: "yes"
when: cache_updated.changed or cache_updated.rc == 0
- name: Autoremove unnecessary packages
ansible.builtin.apt:
autoremove: true
when: cache_updated.changed or cache_updated.rc == 0
- name: Autoclean apt cache
ansible.builtin.apt:
autoclean: true
when: cache_updated.changed or cache_updated.rc == 0


@@ -0,0 +1,7 @@
# Copy this into secrets.yml and replace with real values
ansible_password: 'REPLACE_WITH_ROOT_PASSWORD'
# Zabbix proxy parameters for connecting to Zabbix server
zabbix_server_address: 'x.x.x.x'
zabbix_psk_identity: '<zabbix_psk_identity>'
zabbix_proxy_hostname: '<zabbix_proxy_hostname>' # if needed; in the actual playbook it is set to the target's hostname
zabbix_proxy_psk: 'REPLACE_WITH_ZABBIX_PSK'


@@ -0,0 +1,34 @@
[all:vars]
ansible_user = mbuz
ansible_ssh_private_key_file = /home/mbuz/.ssh/id_ed25519
[proxmox]
proxmox_host ansible_host=10.0.0.1
[ubuntu_servers]
raspberry-pi ansible_host=10.0.0.5
oracle-arm ansible_host=130.61.76.209 ansible_user=ubuntu
[docker]
docker-apps ansible_host=10.0.0.101
docker-cloud ansible_host=10.0.0.102
[filestorage]
truenas ansible_host=10.0.0.200
[lxc]
gitea ansible_host=10.0.0.108
zabbix-proxy ansible_host=10.0.0.110
pi-hole ansible_host=10.0.0.104
ansible ansible_host=10.0.0.111
#localhost ansible_connection=local # for testing playbooks on the control node
[pbs]
proxmox-backup ansible_host=10.0.0.201
# This is a group of groups. It includes all Ubuntu based systems.
[ubuntu:children]
docker
ubuntu_servers
lxc
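
With this layout, a quick connectivity check against the `ubuntu` group of groups is an ad-hoc ping; a sketch, assuming it is run from the `Ansible/` directory:

```bash
ansible ubuntu -i inventory/hosts.ini -m ping
```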


@@ -0,0 +1,29 @@
---
- name: Upgrade all apt packages
hosts: ubuntu
become: yes
tasks:
- name: Update apt cache
ansible.builtin.apt:
update_cache: yes
cache_valid_time: 3600
- name: Upgrade all apt packages
ansible.builtin.apt:
upgrade: dist
# environment:
# DEBIAN_FRONTEND: noninteractive
- name: Autoremove unused packages
ansible.builtin.apt:
autoremove: yes
- name: Check if a reboot is required
ansible.builtin.stat:
path: /var/run/reboot-required
register: reboot_required_file
- name: Display reboot message
ansible.builtin.debug:
msg: "A reboot is required to apply the latest updates."
when: reboot_required_file.stat.exists
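
The play only reports that a reboot is pending rather than performing one; a hedged follow-up is an ad-hoc reboot of the flagged host, for example:

```bash
ansible raspberry-pi -i inventory/hosts.ini --become -m ansible.builtin.reboot
```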


@@ -0,0 +1,63 @@
---
- name: Secure and Configure a New LXC Container
hosts: 'lxc' # Hosts or group defined in your inventory
remote_user: root
tasks:
- name: 1. Create user '{{ target_user }}'
ansible.builtin.user:
name: '{{ target_user }}'
shell: /bin/bash
groups: sudo # Add to sudo (for Debian/Ubuntu)
state: present
- name: 1.1. Allow '{{ target_user }}' to use sudo without a password
ansible.builtin.copy:
dest: /etc/sudoers.d/90-{{ target_user }}-nopasswd
content: '{{ target_user }} ALL=(ALL) NOPASSWD: ALL'
mode: '0440'
validate: /usr/sbin/visudo -cf %s
- name: 2. Set up authorized_keys for '{{ target_user }}'
ansible.posix.authorized_key:
user: '{{ target_user }}'
key: "{{ item }}"
state: present
path: /home/{{ target_user }}/.ssh/authorized_keys
loop: "{{ my_public_keys }}"
# ansible.posix.authorized_key will create an .ssh directory with the correct permissions.
- name: 3. Lock password for '{{ target_user }}'
ansible.builtin.user:
name: '{{ target_user }}'
password_lock: yes
- name: 4.0. Install software-properties-common
ansible.builtin.apt:
name: software-properties-common
state: present
update_cache: yes
- name: 4.1. Disallow root login over SSH
ansible.builtin.lineinfile:
path: /etc/ssh/sshd_config
regexp: '^#?PermitRootLogin'
line: 'PermitRootLogin no'
validate: /usr/sbin/sshd -t -f %s
notify: restart sshd
- name: 4.2. Disallow password authentication
ansible.builtin.lineinfile:
path: /etc/ssh/sshd_config
regexp: '^#?PasswordAuthentication'
line: 'PasswordAuthentication no'
validate: /usr/sbin/sshd -t -f %s
notify: restart sshd
handlers:
# This block will only run if at least one task sends a notification.
# This prevents unnecessary service restarts.
- name: 5. Restart sshd server
listen: "restart sshd"
ansible.builtin.service:
name: sshd
state: restarted
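
`target_user` and `my_public_keys` come from `Ansible/vars.yml`, which this play does not load itself, so they need to be passed in; a sketch of a run, assuming the playbook is saved as `playbooks/lxc_setup.yml`:

```bash
ansible-playbook -i inventory/hosts.ini playbooks/lxc_setup.yml -e @vars.yml
```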


@@ -1,16 +1,16 @@
- name: Upgrade zabbix agent
hosts: zagents
become: true
tasks:
- name: Ensure that Zabbix agent is at the latest version
ansible.builtin.apt:
name: zabbix-agent2
state: latest
register: zabbix_agent2_status
- name: Upgrade Zabbix agent if not latest
ansible.builtin.apt:
name: zabbix-agent2
upgrade: yes
- name: Upgrade zabbix agent
hosts: zagents
become: true
tasks:
- name: Ensure that Zabbix agent is at the latest version
ansible.builtin.apt:
name: zabbix-agent2
state: latest
register: zabbix_agent2_status
- name: Upgrade Zabbix agent if not latest
ansible.builtin.apt:
name: zabbix-agent2
upgrade: yes
when: zabbix_agent2_status.changed


@@ -0,0 +1,115 @@
---
- name: Install and Configure Zabbix Proxy and Agent
hosts: zabbix-proxy # Assuming you have a group for zabbix proxy in your inventory
become: yes
vars_files:
- ../secrets.yml
tasks:
- name: Download Zabbix release package
ansible.builtin.get_url:
url: "https://repo.zabbix.com/zabbix/7.4/release/ubuntu/pool/main/z/zabbix-release/zabbix-release_latest_7.4+ubuntu24.04_all.deb"
dest: /tmp/zabbix-release.deb
- name: Install Zabbix release package
ansible.builtin.apt:
deb: /tmp/zabbix-release.deb
- name: Install Zabbix proxy and agent
ansible.builtin.apt:
name:
- zabbix-proxy-sqlite3
- zabbix-agent2
state: present
update_cache: yes
- name: Create Zabbix proxy custom configuration file
ansible.builtin.copy:
dest: /etc/zabbix/zabbix_proxy.d/custom.conf
content: |
## Managed by Ansible - do not edit manually ##
## Changes will be overwritten ##
DBName=/tmp/zabbix_proxy
StartPollers=2
StartPreprocessors=1
StartTrappers=1
StartDiscoverers=1
StartDBSyncers=1
StartAgentPollers=2
EnableRemoteCommands=1
TLSConnect=psk
TLSAccept=psk
notify: restart zabbix-proxy
- name: Create Zabbix proxy connection configuration file
ansible.builtin.copy:
dest: /etc/zabbix/zabbix_proxy.d/connection.conf
content: |
## Managed by Ansible - do not edit manually ##
## Changes will be overwritten ##
Server={{ zabbix_server_address }}:10051
Hostname={{ ansible_facts.hostname }}
TLSPSKFile=/etc/zabbix/{{ ansible_facts.hostname }}.psk
TLSPSKIdentity={{ zabbix_psk_identity }}
notify: restart zabbix-proxy
- name: Create Zabbix proxy PSK file
ansible.builtin.copy:
dest: "/etc/zabbix/{{ ansible_facts.hostname }}.psk"
content: "{{ zabbix_proxy_psk }}"
owner: zabbix
group: zabbix
mode: '0600'
notify: restart zabbix-proxy
- name: Create Zabbix agent custom configuration file
ansible.builtin.copy:
dest: /etc/zabbix/zabbix_agent2.d/custom.conf
content: |
## Managed by Ansible - do not edit manually ##
## Changes will be overwritten ##
Hostname={{ ansible_facts.hostname }}
Server={{ hostvars['zabbix-proxy']['ansible_host'] }},{{ hostvars['raspberry-pi']['ansible_host'] }}
ServerActive={{ hostvars['zabbix-proxy']['ansible_host'] }};{{ hostvars['raspberry-pi']['ansible_host'] }}
notify: restart zabbix-agent2
- name: Create Zabbix agent user parameters file
ansible.builtin.copy:
dest: /etc/zabbix/zabbix_agent2.d/userparams.conf
content: |
## Managed by Ansible - do not edit manually ##
## Changes will be overwritten ##
AllowKey=system.run[*]
notify: restart zabbix-agent2
handlers:
- name: restart zabbix-proxy
ansible.builtin.service:
name: zabbix-proxy
state: restarted
enabled: yes
- name: restart zabbix-agent2
ansible.builtin.service:
name: zabbix-agent2
state: restarted
enabled: yes
- name: Verify Zabbix Services
hosts: zabbix-proxy
become: yes
tasks:
- name: Check if Zabbix services are running
ansible.builtin.service_facts:
- name: Assert that Zabbix proxy is running
ansible.builtin.assert:
that:
- "ansible_facts.services['zabbix-proxy.service'].state == 'running'"
fail_msg: "Zabbix proxy is not running"
success_msg: "Zabbix proxy is running"
- name: Assert that Zabbix agent is running
ansible.builtin.assert:
that:
- "ansible_facts.services['zabbix-agent2.service'].state == 'running'"
fail_msg: "Zabbix agent 2 is not running"
success_msg: "Zabbix agent 2 is running"
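
The PSK expected in `secrets.yml` is a hex string; the Zabbix documentation generates it with OpenSSL:

```bash
# Generate a 256-bit pre-shared key for zabbix_proxy_psk
openssl rand -hex 32
```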

Ansible/vars.yml

@@ -0,0 +1,9 @@
# User which will be created on the LXC containers to replace root
target_user: 'mbuz'
# List of public keys to be added to the target_user's authorized_keys file
my_public_keys:
- "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINSGj0dxaA38QSBVY3DZiPb+qmIuTFxGo0mt4sbmYDa3 mbuz@macbook-pro"
- "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOpvRkew+XpOAt7I/mizQbE/OJP1SO6NVl2/A1ZGzdU3 mbuz@windows-desktop"
- "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIGWMJbHDCB8XCxPGth1229A3W/sPpvJHO9xBvegv4Sx mbuz@macbook-air"
- "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJM2yLXiX45OgzhmKtr822gJaER/Ur/2yhRSiXI2AW+U mbuz@ansible"


@@ -14,11 +14,14 @@ services:
volumes:
- ${PATH_TO_CONFIG}:/config
ports:
- 6875:80
- "6875:80"
restart: unless-stopped
depends_on:
- bookstack_database
bookstack_database:
condition: service_healthy
networks:
- bookstack-net
bookstack_database:
image: lscr.io/linuxserver/mariadb
container_name: bookstack_database
@@ -32,4 +35,15 @@ services:
- MYSQL_PASSWORD=${DB_USER_PASS}
volumes:
- ${PATH_TO_DB}:/config
restart: unless-stopped
restart: unless-stopped
healthcheck:
test: ["CMD", "mysqladmin", "ping", "-h", "localhost"]
interval: 10s
timeout: 5s
retries: 5
networks:
- bookstack-net
networks:
bookstack-net:
driver: bridge
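
Because `depends_on` now waits for `service_healthy`, it can be useful to inspect the reported health state directly, for example:

```bash
docker compose ps   # shows (healthy) / (unhealthy) next to each service
docker inspect --format '{{.State.Health.Status}}' bookstack_database
```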


@@ -3,35 +3,13 @@ services:
image: favonia/cloudflare-ddns:latest
container_name: cloudflare-ddns
env_file: .env
# Choose the appropriate tag based on your need:
# - "latest" for the latest stable version (which could become 2.x.y
# in the future and break things)
# - "1" for the latest stable version whose major version is 1
# - "1.x.y" to pin the specific version 1.x.y
network_mode: host
# This bypasses network isolation and makes IPv6 easier (optional; see below)
restart: always
# Restart the updater after reboot
user: "1000:1000"
# Run the updater with specific user and group IDs (in that order).
# You can change the two numbers based on your need.
read_only: true
# Make the container filesystem read-only (optional but recommended)
cap_drop: [all]
# Drop all Linux capabilities (optional but recommended)
security_opt: [no-new-privileges:true]
# Another protection to restrict superuser privileges (optional but recommended)
environment:
- CLOUDFLARE_API_TOKEN=${CLOUDFLARE_API_TOKEN}
# Your Cloudflare API token
- DOMAINS=${DOMAINS}
# Your domains (separated by commas)
- PROXIED=true
# Tell Cloudflare to cache webpages and hide your IP (optional)
#networks:
# LAN0:
# external: true
# name: LAN0
# Introduce custom Docker networks to the 'services' in this file. A common use case
# for this is binding one of the 'services' to a specific network interface available at
# Docker's host. This section is required for the 'networks' section of each 'services'.


@@ -0,0 +1,15 @@
# InfluxDB credentials
INFLUXDB_PASSWORD=influxdb_secret_password
# Grafana credentials
GF_SECURITY_ADMIN_USER=admin
GF_SECURITY_ADMIN_PASSWORD=admin
# Garmin Connect credentials
GARMINCONNECT_EMAIL=your_garmin_email@example.com
GARMINCONNECT_PASSWORD=your_garmin_password_base64_encoded
# Paths for persistent data
GARMINCONNECT_TOKENS=./garminconnect-tokens
PATH_TO_INFLUXDB_DATA=./influxdb_data
PATH_TO_GRAFANA_DATA=./grafana_data


@@ -4,49 +4,65 @@ services:
image: thisisarpanghosh/garmin-fetch-data:latest
container_name: garmin-fetch-data
depends_on:
- influxdb
influxdb:
condition: service_healthy
volumes:
- ${GARMINCONNECT_TOKENS}:/home/appuser/.garminconnect # (persistent token storage - the garminconnect-tokens folder must be owned by 1000:1000)
environment:
- INFLUXDB_HOST=influxdb
- INFLUXDB_PORT=8086
- INFLUXDB_USERNAME=influxdb_user
- INFLUXDB_PASSWORD=influxdb_secret_password
- INFLUXDB_PASSWORD=${INFLUXDB_PASSWORD}
- INFLUXDB_DATABASE=GarminStats
- UPDATE_INTERVAL_SECONDS=300
- LOG_LEVEL=INFO
- GARMINCONNECT_EMAIL=${GARMINCONNECT_EMAIL}
- GARMINCONNECT_BASE64_PASSWORD=${GARMINCONNECT_PASSWORD} # (must be base64 encoded)
networks:
- garmin-grafana-net
influxdb:
restart: unless-stopped
container_name: influxdb
hostname: influxdb
image: influxdb:latest
environment:
- INFLUXDB_DB=GarminStats
- INFLUXDB_USER=influxdb_user
- INFLUXDB_USER_PASSWORD=influxdb_secret_password
- INFLUXDB_USER_PASSWORD=${INFLUXDB_PASSWORD}
- INFLUXDB_DATA_INDEX_VERSION=tsi1
ports:
- '8086:8086'
volumes:
- influxdb_data:/var/lib/influxdb
image: 'influxdb:1.11'
- ${PATH_TO_INFLUXDB_DATA}:/var/lib/influxdb
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:8086/ping"]
interval: 10s
timeout: 5s
retries: 5
networks:
- garmin-grafana-net
grafana:
restart: unless-stopped
container_name: grafana
hostname: grafana
image: grafana/grafana:latest
environment:
- GF_SECURITY_ADMIN_USER=admin
- GF_SECURITY_ADMIN_PASSWORD=admin
- GF_SECURITY_ADMIN_USER=${GF_SECURITY_ADMIN_USER}
- GF_SECURITY_ADMIN_PASSWORD=${GF_SECURITY_ADMIN_PASSWORD}
volumes:
- grafana_data:/var/lib/grafana
- ${PATH_TO_GRAFANA_DATA}:/var/lib/grafana
ports:
- '3000:3000'
image: 'grafana/grafana:latest'
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:3000/api/health"]
interval: 10s
timeout: 5s
retries: 5
networks:
- garmin-grafana-net
volumes:
influxdb_data:
grafana_data:
networks:
garmin-grafana-net:
driver: bridge


@@ -1,12 +1,23 @@
# An image from abesnier that works as an all-in-one and does not require database initialization.
# I don't know if it has any limitations. For my needs it fits perfectly.
services:
guacamole:
image: abesnier/guacamole:latest
restart: unless-stopped
container_name: guacamole
volumes:
- ${PATH_TO_CONFIG}:/config
ports:
- 8080:8080
# An image from abesnier that works as an all-in-one and does not require database initialization.
# I don't know if it has any limitations. For my needs it fits perfectly.
services:
guacamole:
image: abesnier/guacamole:latest
restart: unless-stopped
container_name: guacamole
volumes:
- ${PATH_TO_CONFIG}:/config
ports:
- 8080:8080
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:8080/"]
interval: 30s
timeout: 10s
retries: 3
networks:
- guacamole-net
networks:
guacamole-net:
driver: bridge


@@ -0,0 +1,2 @@
# Path to the Heimdall config folder
PATH_TO_CONFIG=/home/mbuz/docker/heimdall/config


@@ -1,5 +1,3 @@
version: "2.1"
services:
heimdall:
image: lscr.io/linuxserver/heimdall:latest
@@ -9,8 +7,19 @@ services:
- PGID=1000
- TZ=Europe/Warsaw
volumes:
- /home/mbuz/docker/heimdall/config:/config
- ${PATH_TO_CONFIG}:/config
ports:
- 80:80
- 443:443
restart: unless-stopped
- "80:80"
- "443:443"
restart: unless-stopped
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost/"]
interval: 30s
timeout: 10s
retries: 3
networks:
- heimdall-net
networks:
heimdall-net:
driver: bridge


@@ -1,12 +1,23 @@
services:
homepage:
image: ghcr.io/gethomepage/homepage:latest
container_name: homepage
ports:
- 3001:3000
volumes:
- ${PATH_TO_CONFIG}:/app/config # Make sure your local config directory exists
- /var/run/docker.sock:/var/run/docker.sock # (optional) For docker integrations
env_file:
- .env
restart: unless-stopped
services:
homepage:
image: ghcr.io/gethomepage/homepage:latest
container_name: homepage
ports:
- "3001:3000"
volumes:
- ${PATH_TO_CONFIG}:/app/config # Make sure your local config directory exists
- /var/run/docker.sock:/var/run/docker.sock # (optional) For docker integrations
env_file:
- .env
restart: unless-stopped
healthcheck:
test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:3000"]
interval: 30s
timeout: 10s
retries: 3
networks:
- homepage-net
networks:
homepage-net:
driver: bridge


@@ -1,21 +1,22 @@
# You can find documentation for all the supported env variables at https://immich.app/docs/install/environment-variables
# The location where your uploaded files are stored
UPLOAD_LOCATION=${UPLOAD_LOCATION}
# The location where your database files are stored
DB_DATA_LOCATION=${DB_DATA_LOCATION}
UPLOAD_LOCATION=./library
# The location where your database files are stored. Network shares are not supported for the database
DB_DATA_LOCATION=./postgres
# To set a timezone, uncomment the next line and change Etc/UTC to a TZ identifier from this list: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones#List
# TZ=Etc/UTC
# The Immich version to use. You can pin this to a specific version like "v1.71.0"
IMMICH_VERSION=${IMMICH_VERSION:-release}
IMMICH_VERSION=release
# Connection secret for postgres. You should change it to a random password
# Please use only the characters `A-Za-z0-9`, without special characters or spaces
DB_PASSWORD=${DB_PASSWORD}
DB_PASSWORD=postgres
# The values below this line do not need to be changed
###################################################################################
DB_USERNAME=${DB_USERNAME}
DB_DATABASE_NAME=${DB_DATABASE_NAME}
DB_USERNAME=postgres
DB_DATABASE_NAME=immich


@@ -30,6 +30,8 @@ services:
restart: unless-stopped
healthcheck:
disable: false
networks:
- immich-net
immich-machine-learning:
container_name: immich_machine_learning
@@ -46,6 +48,8 @@ services:
restart: unless-stopped
healthcheck:
disable: false
networks:
- immich-net
redis:
container_name: immich_redis
@@ -53,6 +57,8 @@ services:
healthcheck:
test: redis-cli ping || exit 1
restart: unless-stopped
networks:
- immich-net
database:
container_name: immich_postgres
@@ -84,6 +90,12 @@ services:
-c shared_buffers=512MB
-c wal_compression=on
restart: unless-stopped
networks:
- immich-net
volumes:
model-cache:
networks:
immich-net:
driver: bridge

Binary image file removed (Size: 29 KiB)


@@ -4,4 +4,15 @@ services:
ports:
- '8182:80' # change if needed
restart: unless-stopped
container_name: it-tools
container_name: it-tools
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost/"]
interval: 30s
timeout: 10s
retries: 3
networks:
- it-tools-net
networks:
it-tools-net:
driver: bridge


@@ -19,4 +19,15 @@ services:
PUID: 1000
PGID: 1000
TZ: Europe/Warsaw
BASE_URL: ${YOUR_DOMAIN:-https://mealie.yourdomain.com}
BASE_URL: ${YOUR_DOMAIN:-https://mealie.yourdomain.com}
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:9000"]
interval: 30s
timeout: 10s
retries: 3
networks:
- mealie-net
networks:
mealie-net:
driver: bridge

Docker/n8n/.env.example

@@ -0,0 +1,8 @@
# Domain for n8n
DOMAIN=n8n.example.com
# Your timezone
GENERIC_TIMEZONE=Europe/Warsaw
# Directory where n8n will store files
PATH_TO_FILES=/path/to/n8n/files


@@ -0,0 +1,24 @@
services:
n8n:
image: docker.n8n.io/n8nio/n8n
container_name: n8n
restart: unless-stopped
ports:
- 5678:5678
environment:
- N8N_HOST=${DOMAIN}
- N8N_PORT=5678
- N8N_PROTOCOL=https
- NODE_ENV=production
- WEBHOOK_URL=https://${DOMAIN}/
- GENERIC_TIMEZONE=${GENERIC_TIMEZONE}
volumes:
- n8n_data:/home/node/.n8n
- ${PATH_TO_FILES}:/files
healthcheck:
test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://localhost:5678/healthz || exit 1"]
interval: 1m
timeout: 10s
retries: 3
volumes:
n8n_data:


@@ -1,31 +1,35 @@
version: '3.3'
services:
nextcloud:
nextcloud:
image: lscr.io/linuxserver/nextcloud:latest
container_name: nextcloud
env_file:
- stack.env
- .env
environment:
- PUID=1000
- PGID=1000
- PHP_MEMORY_LIMIT=${PHP_MEMORY_LIMIT}
- PHP_UPLOAD_LIMIT=${PHP_UPLOAD_LIMIT}
- TZ=${TZ}
volumes:
- ${CONFIG}:/config
- ${DATA}:/data
ports:
- 5443:443
- "5443:443"
restart: unless-stopped
links:
- nextcloud-mariadb
depends_on:
- nextcloud-mariadb
nextcloud-mariadb:
nextcloud-mariadb:
condition: service_healthy
networks:
- nextcloud
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost/status.php"]
interval: 30s
timeout: 10s
retries: 3
nextcloud-mariadb:
image: lscr.io/linuxserver/mariadb:latest
container_name: nextloud-mariadb
container_name: nextcloud-mariadb
environment:
- PUID=1000
- PGID=1000
@@ -34,15 +38,20 @@ services:
- MYSQL_DATABASE=nextcloud
- MYSQL_USER=nextcloud
- MYSQL_PASSWORD=${MYSQL_PASSWORD}
volumes:
- ${MARIADB}:/config
ports:
- 5306:3306
- "5306:3306"
restart: unless-stopped
networks:
- nextcloud
healthcheck:
test: ["CMD", "mysqladmin", "ping", "-h", "localhost"]
interval: 10s
timeout: 5s
retries: 5
networks:
default:
nextcloud:
name: nextcloud
driver: bridge
driver: bridge


@@ -11,4 +11,9 @@ services:
volumes:
- ${PGADMIN_DATA}:/var/lib/pgadmin
extra_hosts:
- "host.docker.internal:host-gateway"
- "host.docker.internal:host-gateway"
healthcheck:
test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://localhost/misc/ping || exit 1"]
interval: 1m
timeout: 10s
retries: 3


@@ -8,4 +8,9 @@ services:
volumes:
- ${PORTAINER_DATA}:/data
- /var/run/docker.sock:/var/run/docker.sock
restart: unless-stopped
restart: unless-stopped
healthcheck:
test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:9000/api/status"]
interval: 1m
timeout: 10s
retries: 3


@@ -0,0 +1,4 @@
SEMAPHORE_ADMIN=admin
SEMAPHORE_ADMIN_PASSWORD=changeme
SEMAPHORE_ADMIN_NAME=Admin
SEMAPHORE_ADMIN_EMAIL=admin@example.com


@@ -0,0 +1,33 @@
services:
semaphore:
container_name: semaphore-ui
image: semaphoreui/semaphore:latest
restart: unless-stopped
ports:
- "3030:3000"
environment:
SEMAPHORE_DB_DIALECT: sqlite
SEMAPHORE_ADMIN: ${ADMIN_USER}
SEMAPHORE_ADMIN_PASSWORD: ${ADMIN_PASS}
SEMAPHORE_ADMIN_NAME: ${ADMIN_NAME}
SEMAPHORE_ADMIN_EMAIL: ${ADMIN_EMAIL}
volumes:
- semaphore-data:/var/lib/semaphore
- semaphore-config:/etc/semaphore
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:3000/api/health"]
interval: 30s
timeout: 10s
retries: 3
networks:
- semaphore-net
volumes:
semaphore-data:
driver: local
semaphore-config:
driver: local
networks:
semaphore-net:
driver: bridge


@@ -0,0 +1,9 @@
docker run -d \
--restart unless-stopped \
--name semaphore \
-p 3030:3000 \
--env-file .env \
-e SEMAPHORE_DB_DIALECT=sqlite \
-v semaphore-data:/var/lib/semaphore \
-v semaphore-config:/etc/semaphore \
semaphoreui/semaphore:latest
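
After running the script, a quick sanity check on the container and its named volumes (names as defined above):

```bash
docker logs -f semaphore
docker volume inspect semaphore-data semaphore-config
```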


@@ -32,6 +32,11 @@ services:
max-size: 10m
ports:
- '9091:9091'
healthcheck:
test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://localhost:9091 && ls /data || exit 1"]
interval: 1m
timeout: 10s
retries: 3
# Not all countries and servers support p2p, so you need to choose the right server. Here's a hint:
# https://support.nordvpn.com/hc/en-us/articles/20465085067665-NordVPN-proxy-setup-for-BitTorrent


@@ -1,4 +1,3 @@
---
services:
transmission:
image: lscr.io/linuxserver/transmission:latest
@@ -22,3 +21,8 @@ services:
- 51413:51413
- 51413:51413/udp
restart: unless-stopped
healthcheck:
test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://localhost:9091 || exit 1"]
interval: 1m
timeout: 10s
retries: 3


@@ -8,4 +8,9 @@ services:
volumes:
- ${PATH_TO_DATA}:/data
ports:
- 8033:80
- 8033:80
healthcheck:
test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://localhost/ || exit 1"]
interval: 1m
timeout: 10s
retries: 3


@@ -0,0 +1,6 @@
# Timezone for watchtower
TZ=Europe/Warsaw
# A space-separated list of container names for Watchtower to monitor.
# For example: WATCHTOWER_CONTAINERS="nginx-proxy-manager bookstack"
WATCHTOWER_CONTAINERS=""


@@ -0,0 +1,13 @@
services:
watchtower:
image: containrrr/watchtower
container_name: watchtower
volumes:
- /var/run/docker.sock:/var/run/docker.sock
environment:
- WATCHTOWER_CLEANUP=true
- WATCHTOWER_INCLUDE_STOPPED=true
- WATCHTOWER_POLL_INTERVAL=3600
- TZ=${TZ}
command: ${WATCHTOWER_CONTAINERS}
restart: unless-stopped


@@ -13,4 +13,9 @@ services:
# devices:
# - /dev/dri:/dev/dri #optional
shm_size: "2gb" #optional
restart: unless-stopped
restart: unless-stopped
healthcheck:
test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://localhost:3000/ || exit 1"]
interval: 1m
timeout: 10s
retries: 3