build(repo): structure the repo into postgresql subdirectory with separate template and internal tests
This commit is contained in:
8
postgresql/README.md
Normal file
8
postgresql/README.md
Normal file
@@ -0,0 +1,8 @@
|
||||
# PostgreSQL Partitioning for Zabbix
|
||||
|
||||
This directory contains solutions for partitioning a Zabbix database running on PostgreSQL. Partitioning is essential for large Zabbix environments as it eliminates the need for the built-in Zabbix Housekeeper to aggressively delete old data row-by-row, replacing it with instant DDL operations that drop entire daily or monthly chunks.
|
||||
|
||||
## Implementations
|
||||
|
||||
- **[procedures](procedures/)**: The recommended Declarative (SQL-based) implementation. It uses native PostgreSQL procedures and features like `pg_cron` for entirely self-contained maintenance.
|
||||
- **[script](script/)**: External script-based management solution. (Coming soon)
|
||||
3
postgresql/script/README.md
Normal file
3
postgresql/script/README.md
Normal file
@@ -0,0 +1,3 @@
|
||||
# Script-based Partitioning
|
||||
|
||||
(Coming soon)
|
||||
32
postgresql/template/README.md
Normal file
32
postgresql/template/README.md
Normal file
@@ -0,0 +1,32 @@
|
||||
# Zabbix PostgreSQL Partitioning Monitoring
|
||||
|
||||
This template relies on Zabbix Agent 2 and its PostgreSQL plugin. It allows you to monitor the health of your partitioned PostgreSQL database tables. It uses a single master item to pull all metrics in bulk over a single database connection, dynamically distributing the numbers to Zabbix using Dependent Items.
|
||||
There are three item prototypes:
|
||||
1. Future Partitions Buffer: Number of future partitions to be created
|
||||
2. Total Size Bytes: Total size of the partitioned table in bytes
|
||||
3. Time Since Last Maintenance: Time since the last maintenance script was run
|
||||
They allow you to monitor all the critical metrics, and they also have triggers that will create a problem in case something is wrong with the partitioning.
|
||||
|
||||
### Setup
|
||||
1. Copy the SQL file (`template/partitions.get_all.sql`) into a directory on your Agent machine. E.g., `/etc/zabbix/zabbix_agent2.d/postgresql/`.
|
||||
2. Install the `zabbix-agent2-plugin-postgresql` package.
|
||||
3. Open your plugin configuration file `/etc/zabbix/zabbix_agent2.d/plugins.d/postgresql.conf` and add these lines to establish your custom query module AND a secure named session (e.g., `AWS_RDS`). Adjust the parameters to match your environment. You can use a URI instead of a named session if you want; in that case you will need to modify the item keys to use the correct parameters.
|
||||
```ini
|
||||
# 1. Enable Loadable Custom Queries (Mandatory in Zabbix 7.4+)
|
||||
Plugins.PostgreSQL.CustomQueriesPath=/etc/zabbix/zabbix_agent2.d/postgresql/
|
||||
Plugins.PostgreSQL.CustomQueriesEnabled=true
|
||||
|
||||
# 2. Establish a Secure Backend Session
|
||||
Plugins.PostgreSQL.Sessions.AWS_RDS.Uri=tcp://your-cluster-endpoint.amazonaws.com:5432
|
||||
Plugins.PostgreSQL.Sessions.AWS_RDS.User=zabbix
|
||||
Plugins.PostgreSQL.Sessions.AWS_RDS.Password=<YOUR_ZABBIX_PASSWORD>
|
||||
Plugins.PostgreSQL.Sessions.AWS_RDS.TLSConnect=verify_full
|
||||
Plugins.PostgreSQL.Sessions.AWS_RDS.TLSCAFile=/etc/zabbix/global-bundle.pem
|
||||
```
|
||||
4. Restart your agent to apply the changes:
|
||||
```bash
|
||||
systemctl restart zabbix-agent2
|
||||
```
|
||||
5. Import the `zbx_pg_partitions_monitor_agent2.yaml` template into your Zabbix.
|
||||
6. Link the template to your Host, navigate to its "Macros" tab, and define the needed macros (in this case it's just named session):
|
||||
* `{$PG.CONNSTRING.AGENT2}`: `AWS_RDS`
|
||||
6
postgresql/template/partitions.get_all.sql
Normal file
6
postgresql/template/partitions.get_all.sql
Normal file
@@ -0,0 +1,6 @@
|
||||
SELECT
|
||||
table_name,
|
||||
future_partitions,
|
||||
total_size_bytes,
|
||||
EXTRACT(EPOCH FROM (now() - last_updated)) AS age_seconds
|
||||
FROM partitions.monitoring;
|
||||
105
postgresql/template/zbx_pg_partitions_monitor_agent2.yaml
Normal file
105
postgresql/template/zbx_pg_partitions_monitor_agent2.yaml
Normal file
@@ -0,0 +1,105 @@
|
||||
zabbix_export:
|
||||
version: '7.0'
|
||||
template_groups:
|
||||
- uuid: 748ad4d098d447d492bb935c907f652f
|
||||
name: Templates/Databases
|
||||
templates:
|
||||
- uuid: a1d5f8c3b2e44a7c9d6b1f2e8a3c5b4d
|
||||
template: 'PostgreSQL Partitioning by Zabbix Agent 2'
|
||||
name: 'PostgreSQL Partitioning by Zabbix Agent 2'
|
||||
description: 'Monitors the custom partitions.monitoring view via the native Zabbix Agent 2 PostgreSQL plugin. Using a single master to minimize the DB connections and load.'
|
||||
vendor:
|
||||
name: Zabbix Support
|
||||
version: 7.0-0
|
||||
groups:
|
||||
- name: Templates/Databases
|
||||
items:
|
||||
- uuid: b8c7d6e5f4a34b2c8d2e3f4a5b6c7d8e
|
||||
name: 'PostgreSQL: Get Partitioning Data'
|
||||
key: 'pgsql.custom.query["{$PG.CONNSTRING.AGENT2}",,,"{$PG.DBNAME}","partitions.get_all"]'
|
||||
history: '0'
|
||||
value_type: TEXT
|
||||
description: 'Master item that queries all partition statistics in a single bulk JSON sequence.'
|
||||
tags:
|
||||
- tag: component
|
||||
value: raw
|
||||
discovery_rules:
|
||||
- uuid: b7c2a5d8f1e44b9c8a3f6d2e1c5b4a7d
|
||||
name: 'Partitioned Tables Discovery'
|
||||
type: DEPENDENT
|
||||
key: db.partitions.discovery.dependent
|
||||
item_prototypes:
|
||||
- uuid: f1a2b3c4d5e64f7a9b8c7d6e5f4a3b2c
|
||||
name: '{#TABLE_NAME}: Time Since Last Maintenance'
|
||||
type: DEPENDENT
|
||||
key: 'db.partitions.age["{#TABLE_NAME}"]'
|
||||
units: s
|
||||
preprocessing:
|
||||
- type: JSONPATH
|
||||
parameters:
|
||||
- '$.[?(@.table_name == "{#TABLE_NAME}")].age_seconds.first()'
|
||||
master_item:
|
||||
key: 'pgsql.custom.query["{$PG.CONNSTRING.AGENT2}",,,"{$PG.DBNAME}","partitions.get_all"]'
|
||||
tags:
|
||||
- tag: metric
|
||||
value: age
|
||||
- tag: table
|
||||
value: '{#TABLE_NAME}'
|
||||
trigger_prototypes:
|
||||
- uuid: a9b8c7d6e5f44a3b8c1d2e3f4a5b6c7d
|
||||
expression: 'last(/PostgreSQL Partitioning by Zabbix Agent 2/db.partitions.age["{#TABLE_NAME}"])>{$PARTITIONS.AGE}'
|
||||
name: 'Table {#TABLE_NAME}: Maintenance script has not run successfully for over {$PARTITIONS.AGE}'
|
||||
priority: WARNING
|
||||
- uuid: c4b9e2a5f1d84c7a9f3b6d1e5a2c8b4d
|
||||
name: '{#TABLE_NAME}: Future Partitions Buffer'
|
||||
type: DEPENDENT
|
||||
key: 'db.partitions.future["{#TABLE_NAME}"]'
|
||||
preprocessing:
|
||||
- type: JSONPATH
|
||||
parameters:
|
||||
- '$.[?(@.table_name == "{#TABLE_NAME}")].future_partitions.first()'
|
||||
master_item:
|
||||
key: 'pgsql.custom.query["{$PG.CONNSTRING.AGENT2}",,,"{$PG.DBNAME}","partitions.get_all"]'
|
||||
tags:
|
||||
- tag: metric
|
||||
value: partitions
|
||||
- tag: table
|
||||
value: '{#TABLE_NAME}'
|
||||
trigger_prototypes:
|
||||
- uuid: d6e3a5c8b2f14d9e8a7b6c5d4e3f2a1b
|
||||
expression: 'last(/PostgreSQL Partitioning by Zabbix Agent 2/db.partitions.future["{#TABLE_NAME}"])<{$PARTITIONS.LOW}'
|
||||
name: 'Table {#TABLE_NAME}: Future partitions buffer is critically low (< {$PARTITIONS.LOW})'
|
||||
priority: HIGH
|
||||
- uuid: e8f2a1b3c4d54e6f9a8b7c6d5e4f3a2b
|
||||
name: '{#TABLE_NAME}: Total Size Bytes'
|
||||
type: DEPENDENT
|
||||
key: 'db.partitions.size["{#TABLE_NAME}"]'
|
||||
units: B
|
||||
preprocessing:
|
||||
- type: JSONPATH
|
||||
parameters:
|
||||
- '$.[?(@.table_name == "{#TABLE_NAME}")].total_size_bytes.first()'
|
||||
master_item:
|
||||
key: 'pgsql.custom.query["{$PG.CONNSTRING.AGENT2}",,,"{$PG.DBNAME}","partitions.get_all"]'
|
||||
tags:
|
||||
- tag: metric
|
||||
value: size
|
||||
- tag: table
|
||||
value: '{#TABLE_NAME}'
|
||||
master_item:
|
||||
key: 'pgsql.custom.query["{$PG.CONNSTRING.AGENT2}",,,"{$PG.DBNAME}","partitions.get_all"]'
|
||||
lld_macro_paths:
|
||||
- lld_macro: '{#TABLE_NAME}'
|
||||
path: $.table_name
|
||||
macros:
|
||||
- macro: '{$PARTITIONS.AGE}'
|
||||
value: 24h
|
||||
description: 'Maximum allowed time since the partition maintenance last ran successfully; the age trigger fires when this threshold is exceeded'
|
||||
- macro: '{$PARTITIONS.LOW}'
|
||||
value: '2'
|
||||
description: 'The minimum number of partitions that must exist in the future'
|
||||
- macro: '{$PG.CONNSTRING.AGENT2}'
|
||||
value: AWS_RDS
|
||||
description: 'Session name or URI of the PostgreSQL instance'
|
||||
- macro: '{$PG.DBNAME}'
|
||||
value: zabbix
|
||||
63
postgresql/tests/ARCHITECTURE.md
Normal file
63
postgresql/tests/ARCHITECTURE.md
Normal file
@@ -0,0 +1,63 @@
|
||||
# Zabbix PostgreSQL Partitioning Architecture
|
||||
|
||||
This document provides a brief technical overview of the components, logic, and dynamic querying mechanisms that power the PostgreSQL partitioning solution for Zabbix.
|
||||
|
||||
## Schema-Agnostic Design
|
||||
|
||||
A core architectural principle of this solution is its **schema-agnostic design**. It does not assume that your Zabbix database is installed in the default `public` schema.
|
||||
|
||||
When the procedures need to create, drop, or manipulate a partitioned table (e.g., `history`), they do not hardcode the schema. Instead, they dynamically query PostgreSQL's internal system catalogs (`pg_class` and `pg_namespace`) to locate exactly which schema the target table belongs to:
|
||||
|
||||
```sql
|
||||
SELECT n.nspname INTO v_schema
|
||||
FROM pg_class c
|
||||
JOIN pg_namespace n ON n.oid = c.relnamespace
|
||||
WHERE c.relname = v_table;
|
||||
```
|
||||
|
||||
This ensures that the partitioning scripts will work flawlessly, even in custom Zabbix deployments where tables are housed in alternative schemas.
|
||||
|
||||
## File Structure & Queries
|
||||
|
||||
The solution is divided into a series of SQL scripts that must be executed sequentially to set up the environment.
|
||||
|
||||
### 1. `00_schema_create.sql`
|
||||
* **Purpose:** Initializes the foundation for the partitioning system.
|
||||
* **Actions:**
|
||||
* Creates the isolated `partitions` schema to keep everything separate from Zabbix's own structure.
|
||||
* Creates the `partitions.config` table (which stores retention policies).
|
||||
* Creates the `partitions.version` table for tracking the installed version.
|
||||
|
||||
### 2. `01_auditlog_prep.sql`
|
||||
* **Purpose:** Prepares the Zabbix `auditlog` table for partitioning.
|
||||
* **Actions:**
|
||||
* PostgreSQL range partitioning requires the partition key (in this case, `clock`) to be part of the Primary Key.
|
||||
* This script dynamically locates the existing Primary Key (usually just `auditid`) and alters it to a composite key `(auditid, clock)`.
|
||||
|
||||
### 3. `01_maintenance.sql`
|
||||
* **Purpose:** Contains the core PL/pgSQL procedural logic that manages the lifecycle of the partitions.
|
||||
* **Key Functions/Procedures:**
|
||||
* `partition_exists()`: Queries `pg_class` to verify if a specific child partition exists.
|
||||
* `create_partition()`: Executes the DDL `CREATE TABLE ... PARTITION OF ... FOR VALUES FROM (x) TO (y)` to generate a new time-bound chunk.
|
||||
* `drop_old_partitions()`: Iterates over existing child partitions (using `pg_inherits`) and calculates their age based on their suffix. Drops those older than the defined `keep_history` policy.
|
||||
* `maintain_table()`: The orchestrator for a single table. It calculates the necessary UTC timestamps, calls `create_partition()` to build the future buffer, calls `create_partition()` recursively backward to cover the retention period, and finally calls `drop_old_partitions()`.
|
||||
* `run_maintenance()`: The global loop that iterates through `partitions.config` and triggers `maintain_table()` for every configured Zabbix table.
|
||||
|
||||
### 4. `02_enable_partitioning.sql`
|
||||
* **Purpose:** The migration script that actually executes the partition conversion on the live database.
|
||||
* **Actions:**
|
||||
* It takes the original Zabbix table (e.g., `history`) and renames it to `history_old` (`ALTER TABLE ... RENAME TO ...`).
|
||||
* It immediately creates a new partitioned table with the original name, inheriting the exact structure of the old table (`CREATE TABLE ... (LIKE ... INCLUDING ALL) PARTITION BY RANGE (clock)`).
|
||||
* It triggers the first maintenance run so new incoming data has immediate partitions to land in.
|
||||
|
||||
### 5. `03_monitoring_view.sql`
|
||||
* **Purpose:** Provides an easy-to-read observability layer.
|
||||
* **Actions:**
|
||||
* Creates the `partitions.monitoring` view by joining `pg_class`, `pg_inherits`, `pg_tablespace`, and `pg_size_pretty`.
|
||||
* This view aggregates the total size of each partitioned family and calculates how many "future partitions" exist as a safety buffer.
|
||||
|
||||
## Automated Scheduling (`pg_cron`)
|
||||
|
||||
While `systemd` timers or standard `cron` can be used to trigger the maintenance, the recommended approach (especially for AWS RDS/Aurora deployments) is using the `pg_cron` database extension.
|
||||
|
||||
`pg_cron` allows you to schedule the `CALL partitions.run_maintenance();` procedure directly within PostgreSQL, ensuring the database autonomously manages its own housekeeping without requiring external OS-level access or triggers.
|
||||
90
postgresql/tests/QUICKSTART.md
Normal file
90
postgresql/tests/QUICKSTART.md
Normal file
@@ -0,0 +1,90 @@
|
||||
# Quickstart (PostgreSQL Partitioning Test)
|
||||
|
||||
## Start Environment
|
||||
> **Note**: If `docker` commands fail with permission errors, run `newgrp docker` or ensure your user is in the `docker` group (`sudo usermod -aG docker $USER`) and log out/in.
|
||||
|
||||
```bash
|
||||
cd postgresql/tests/docker
|
||||
sudo ./run_test_env.sh --pg 16 --zabbix 7.0
|
||||
# Options: --pg <16|17|18> --zabbix <7.0|7.4>
|
||||
```
|
||||
|
||||
## Verify
|
||||
```bash
|
||||
# Check status
|
||||
docker ps
|
||||
|
||||
# SQL Shell
|
||||
docker exec -it zabbix-db-test psql -U zabbix -d zabbix
|
||||
# Password: zabbix
|
||||
```
|
||||
|
||||
## Reset
|
||||
```bash
|
||||
docker compose down -v
|
||||
```
|
||||
|
||||
## Partitioning
|
||||
See [ARCHITECTURE.md](ARCHITECTURE.md) for details on the implemented declarative partitioning.
|
||||
|
||||
## AWS RDS / External Database Testing
|
||||
|
||||
You can run these partitioning tests against a real AWS RDS (or any external PostgreSQL instance).
|
||||
|
||||
### 1. Configure Credentials
|
||||
First, create a `db_credentials` file in the `postgresql/` directory. (This file is ignored by Git to keep your passwords safe).
|
||||
Example `postgresql/db_credentials`:
|
||||
```bash
|
||||
# Admin credentials
|
||||
export DB_HOST="your-rds-endpoint.rds.amazonaws.com"
|
||||
export DB_PORT="5432"
|
||||
export DB_NAME="postgres"
|
||||
export DB_USER="postgres"
|
||||
export DB_PASSWORD="your_admin_password"
|
||||
|
||||
# SSL Configuration
|
||||
export DB_SSL_MODE="verify-full"
|
||||
export DB_PEM_URL="https://truststore.pki.rds.amazonaws.com/global/global-bundle.pem"
|
||||
export DB_SSL_ROOT_CERT="./global-bundle.pem"
|
||||
|
||||
# Zabbix credentials to be created
|
||||
export ZBX_DB_NAME="zabbix"
|
||||
export ZBX_DB_USER="zabbix"
|
||||
export ZBX_DB_PASSWORD="zabbix_password"
|
||||
```
|
||||
|
||||
### 2. Automated Testing
|
||||
You can run the same automated deployment script, but instruct it to deploy directly to your RDS instance instead of a local Docker container:
|
||||
|
||||
```bash
|
||||
cd postgresql/tests/docker
|
||||
./run_test_env.sh --pg 16 --zabbix 7.0 --rds
|
||||
```
|
||||
|
||||
If you want to completely clean up the RDS database and start fresh (terminating existing connections and dropping all data), use the `--rds-drop` flag. You will be prompted to type `yes` to safely confirm the deletion:
|
||||
```bash
|
||||
./run_test_env.sh --pg 16 --zabbix 7.0 --rds-drop
|
||||
```
|
||||
|
||||
### 3. Manual Setup & Zabbix Integration
|
||||
If you want to prepare the real database for your Production Zabbix Server manually, you can just run the initialization script directly:
|
||||
|
||||
```bash
|
||||
cd postgresql
|
||||
./setup_rds.sh
|
||||
# To drop an existing database and start fresh, use:
|
||||
# ./setup_rds.sh --drop
|
||||
```
|
||||
|
||||
The script will automatically connect as the `postgres` user, conditionally download the SSL certificates if needed, and set up the `zabbix` user and database.
|
||||
Upon success, the script will output the exact block you need to copy into your `zabbix_server.conf`, e.g.:
|
||||
|
||||
```ini
|
||||
DBHost=your-rds-endpoint.rds.amazonaws.com
|
||||
DBName=zabbix
|
||||
DBUser=zabbix
|
||||
DBPassword=zabbix_password
|
||||
DBPort=5432
|
||||
DBTLSConnect=verify_full
|
||||
DBTLSCAFile=/full/path/to/global-bundle.pem
|
||||
```
|
||||
@@ -67,17 +67,17 @@ if [[ -f "$SQL_DIR/schema.sql" ]]; then
|
||||
cp "$SQL_DIR/schema.sql" ./init_scripts/01_00_schema.sql
|
||||
|
||||
# 1.1 Partitioning Infrastructure
|
||||
if [[ -f "../procedures/00_schema_create.sql" ]]; then
|
||||
cp "../procedures/00_schema_create.sql" ./init_scripts/01_10_schema_create.sql
|
||||
if [[ -f "../../procedures/00_schema_create.sql" ]]; then
|
||||
cp "../../procedures/00_schema_create.sql" ./init_scripts/01_10_schema_create.sql
|
||||
fi
|
||||
if [[ -f "../procedures/01_maintenance.sql" ]]; then
|
||||
cp "../procedures/01_maintenance.sql" ./init_scripts/01_30_maintenance.sql
|
||||
if [[ -f "../../procedures/01_maintenance.sql" ]]; then
|
||||
cp "../../procedures/01_maintenance.sql" ./init_scripts/01_30_maintenance.sql
|
||||
fi
|
||||
if [[ -f "../procedures/02_enable_partitioning.sql" ]]; then
|
||||
cp "../procedures/02_enable_partitioning.sql" ./init_scripts/01_40_enable.sql
|
||||
if [[ -f "../../procedures/02_enable_partitioning.sql" ]]; then
|
||||
cp "../../procedures/02_enable_partitioning.sql" ./init_scripts/01_40_enable.sql
|
||||
fi
|
||||
if [[ -f "../procedures/03_monitoring_view.sql" ]]; then
|
||||
cp "../procedures/03_monitoring_view.sql" ./init_scripts/01_50_monitoring.sql
|
||||
if [[ -f "../../procedures/03_monitoring_view.sql" ]]; then
|
||||
cp "../../procedures/03_monitoring_view.sql" ./init_scripts/01_50_monitoring.sql
|
||||
fi
|
||||
else
|
||||
echo -e "${RED}Error: schema.sql not found in $SQL_DIR${NC}"
|
||||
Reference in New Issue
Block a user