8 Commits

12 changed files with 127 additions and 398 deletions

13
.gitignore vendored
View File

@@ -1,2 +1,11 @@
db_credentials # Docker environment
global-bundle.pem docker/
z_gen_history_data.sql
# Local docs
QUICKSTART.md
init_extra_users.sql
ARCHITECTURE.md
# Schemas
sql-scripts*/

View File

@@ -1,8 +0,0 @@
# PostgreSQL Partitioning for Zabbix
This directory contains solutions for partitioning a Zabbix database running on PostgreSQL. Partitioning is essential for large Zabbix environments as it eliminates the need for the built-in Zabbix Housekeeper to aggressively delete old data row-by-row, replacing it with instant DDL operations that drop entire daily or monthly chunks.
## Implementations
- **[procedures](procedures/)**: The recommended Declarative (SQL-based) implementation. It uses native PostgreSQL procedures and features like `pg_cron` for entirely self-contained maintenance.
- **[script](script/)**: External script-based management solution. (Coming soon)

View File

@@ -8,7 +8,7 @@ CREATE SCHEMA IF NOT EXISTS partitions;
-- Configuration table to store partitioning settings per table -- Configuration table to store partitioning settings per table
CREATE TABLE IF NOT EXISTS partitions.config ( CREATE TABLE IF NOT EXISTS partitions.config (
table_name text NOT NULL, table_name text NOT NULL,
period text NOT NULL, period text NOT NULL CHECK (period IN ('day', 'week', 'month', 'year')),
keep_history interval NOT NULL, keep_history interval NOT NULL,
future_partitions integer NOT NULL DEFAULT 5, future_partitions integer NOT NULL DEFAULT 5,
last_updated timestamp WITH TIME ZONE DEFAULT (now() AT TIME ZONE 'UTC'), last_updated timestamp WITH TIME ZONE DEFAULT (now() AT TIME ZONE 'UTC'),
@@ -22,7 +22,8 @@ CREATE TABLE IF NOT EXISTS partitions.version (
description text description text
); );
INSERT INTO partitions.version (version, description) VALUES ('7-1', 'Zabbix 7.4 and 7.0 compatible version') -- Set initial version
INSERT INTO partitions.version (version, description) VALUES ('1.0', 'Initial release')
ON CONFLICT (version) DO NOTHING; ON CONFLICT (version) DO NOTHING;
-- Default configuration for Zabbix tables (adjust as needed) -- Default configuration for Zabbix tables (adjust as needed)
@@ -32,7 +33,8 @@ INSERT INTO partitions.config (table_name, period, keep_history) VALUES
('history_uint', 'day', '30 days'), ('history_uint', 'day', '30 days'),
('history_str', 'day', '30 days'), ('history_str', 'day', '30 days'),
('history_log', 'day', '30 days'), ('history_log', 'day', '30 days'),
('history_text', 'day', '30 days') ('history_text', 'day', '30 days'),
('history_json', 'day', '30 days')
ON CONFLICT (table_name) DO NOTHING; ON CONFLICT (table_name) DO NOTHING;
-- Trends tables: Monthly partitions, keep 12 months -- Trends tables: Monthly partitions, keep 12 months

View File

@@ -0,0 +1,26 @@
-- ============================================================================
-- Modifies the 'auditlog' table Primary Key to include 'clock'.
-- This is REQUIRED for range partitioning by 'clock'.
-- Idempotent: if 'clock' is already part of the Primary Key, nothing is done,
-- so the script can safely be re-run (e.g. on a repeated installation).
-- ============================================================================
DO $$
DECLARE
    v_clock_in_pk boolean;
BEGIN
    -- Check if the PK exists at all.
    -- Original Zabbix PK is on (auditid) and is named 'auditlog_pkey'.
    IF EXISTS (
        SELECT 1 FROM pg_constraint
        WHERE conname = 'auditlog_pkey'
          AND conrelid = 'auditlog'::regclass
    ) THEN
        -- Verify whether 'clock' is already one of the PK columns.
        -- conkey holds the attnums of the constrained columns; join them
        -- against pg_attribute to resolve the column names.
        SELECT EXISTS (
            SELECT 1
            FROM pg_constraint con
            CROSS JOIN LATERAL unnest(con.conkey) AS k(attnum)
            JOIN pg_attribute a
              ON a.attrelid = con.conrelid
             AND a.attnum = k.attnum
            WHERE con.conname = 'auditlog_pkey'
              AND con.conrelid = 'auditlog'::regclass
              AND a.attname = 'clock'
        ) INTO v_clock_in_pk;

        IF v_clock_in_pk THEN
            RAISE NOTICE 'Primary Key on auditlog already includes clock. Nothing to do.';
        ELSE
            RAISE NOTICE 'Dropping existing Primary Key on auditlog...';
            ALTER TABLE auditlog DROP CONSTRAINT auditlog_pkey;
            RAISE NOTICE 'Creating new Primary Key on auditlog (auditid, clock)...';
            ALTER TABLE auditlog ADD PRIMARY KEY (auditid, clock);
        END IF;
    ELSE
        RAISE NOTICE 'Constraint auditlog_pkey not found. Skipping or already modified.';
    END IF;
END $$;

View File

@@ -37,14 +37,11 @@ BEGIN
IF NOT FOUND THEN IF NOT FOUND THEN
RAISE EXCEPTION 'Parent table % not found', p_parent_table; RAISE EXCEPTION 'Parent table % not found', p_parent_table;
END IF; END IF;
-- (No changes needed for time here as passed params are already UTC-adjusted in caller)
v_start_ts := extract(epoch from p_start_time)::bigint; v_start_ts := extract(epoch from p_start_time)::bigint;
v_end_ts := extract(epoch from p_end_time)::bigint; v_end_ts := extract(epoch from p_end_time)::bigint;
IF p_period = 'month' THEN IF p_period = 'month' THEN
v_suffix := to_char(p_start_time, 'YYYYMM'); v_suffix := to_char(p_start_time, 'YYYYMM');
ELSIF p_period LIKE '%hour%' THEN
v_suffix := to_char(p_start_time, 'YYYYMMDDHH24');
ELSE ELSE
v_suffix := to_char(p_start_time, 'YYYYMMDD'); v_suffix := to_char(p_start_time, 'YYYYMMDD');
END IF; END IF;
@@ -93,47 +90,26 @@ BEGIN
BEGIN BEGIN
IF length(v_suffix) = 6 THEN -- YYYYMM IF length(v_suffix) = 6 THEN -- YYYYMM
v_partition_date := to_timestamp(v_suffix || '01', 'YYYYMMDD') AT TIME ZONE 'UTC'; v_partition_date := to_timestamp(v_suffix || '01', 'YYYYMMDD') AT TIME ZONE 'UTC';
ELSIF length(v_suffix) = 8 THEN -- YYYYMMDD -- For monthly, we check if the END of the month is older than retention?
v_partition_date := to_timestamp(v_suffix, 'YYYYMMDD') AT TIME ZONE 'UTC'; -- Or just strict retention.
ELSIF length(v_suffix) = 10 THEN -- YYYYMMDDHH -- To be safe, adding 1 month to check vs cutoff.
v_partition_date := to_timestamp(v_suffix, 'YYYYMMDDHH24') AT TIME ZONE 'UTC';
ELSE
CONTINUE; -- Ignore non-matching suffix lengths
END IF;
EXCEPTION WHEN OTHERS THEN
-- Safely ignore parsing errors for oddly named partitions
CONTINUE;
END;
-- Now check retention and execute DROP TABLE (so dropping errors are correctly raised!)
IF length(v_suffix) = 6 THEN -- YYYYMM
IF extract(epoch from (v_partition_date + '1 month'::interval)) < v_cutoff_ts THEN IF extract(epoch from (v_partition_date + '1 month'::interval)) < v_cutoff_ts THEN
RAISE NOTICE 'Dropping old partition %', v_partition.partition_name; RAISE NOTICE 'Dropping old partition %', v_partition.partition_name;
EXECUTE format('DROP TABLE %I.%I', v_partition.partition_schema, v_partition.partition_name); EXECUTE format('DROP TABLE %I.%I', v_partition.partition_schema, v_partition.partition_name);
COMMIT; -- Release lock immediately COMMIT; -- Release lock immediately
END IF; END IF;
ELSIF length(v_suffix) = 8 THEN -- YYYYMMDD ELSIF length(v_suffix) = 8 THEN -- YYYYMMDD
-- If period is weekly, the partition spans an entire week. Otherwise, it spans one day. v_partition_date := to_timestamp(v_suffix, 'YYYYMMDD') AT TIME ZONE 'UTC';
IF p_period = 'week' THEN
IF extract(epoch from (v_partition_date + '1 week'::interval)) < v_cutoff_ts THEN
RAISE NOTICE 'Dropping old partition %', v_partition.partition_name;
EXECUTE format('DROP TABLE %I.%I', v_partition.partition_schema, v_partition.partition_name);
COMMIT; -- Release lock immediately
END IF;
ELSE
IF extract(epoch from (v_partition_date + '1 day'::interval)) < v_cutoff_ts THEN IF extract(epoch from (v_partition_date + '1 day'::interval)) < v_cutoff_ts THEN
RAISE NOTICE 'Dropping old partition %', v_partition.partition_name; RAISE NOTICE 'Dropping old partition %', v_partition.partition_name;
EXECUTE format('DROP TABLE %I.%I', v_partition.partition_schema, v_partition.partition_name); EXECUTE format('DROP TABLE %I.%I', v_partition.partition_schema, v_partition.partition_name);
COMMIT; -- Release lock immediately COMMIT; -- Release lock immediately
END IF; END IF;
END IF; END IF;
ELSIF length(v_suffix) = 10 THEN -- YYYYMMDDHH EXCEPTION WHEN OTHERS THEN
IF extract(epoch from (v_partition_date + p_period::interval)) < v_cutoff_ts THEN -- Ignore parsing errors for non-standard partitions
RAISE NOTICE 'Dropping old partition %', v_partition.partition_name; NULL;
EXECUTE format('DROP TABLE %I.%I', v_partition.partition_schema, v_partition.partition_name); END;
COMMIT; -- Release lock immediately
END IF;
END IF;
END LOOP; END LOOP;
END; END;
$$; $$;
@@ -168,14 +144,8 @@ BEGIN
v_start_time := date_trunc('month', now() AT TIME ZONE 'UTC'); v_start_time := date_trunc('month', now() AT TIME ZONE 'UTC');
-- Approximate 30 days per month (2592000 seconds) -- Approximate 30 days per month (2592000 seconds)
v_past_iterations := ceil(extract(epoch from p_keep_history) / 2592000)::integer; v_past_iterations := ceil(extract(epoch from p_keep_history) / 2592000)::integer;
ELSIF p_period LIKE '%hour%' THEN
v_period_interval := p_period::interval;
v_start_time := date_trunc('hour', now() AT TIME ZONE 'UTC');
v_past_iterations := ceil(extract(epoch from p_keep_history) / extract(epoch from v_period_interval))::integer;
ELSE ELSE
RAISE EXCEPTION 'Unsupported partitioning period: %', p_period; RETURN;
END IF; END IF;
-- 1. Create Future Partitions (Current + Buffer) -- 1. Create Future Partitions (Current + Buffer)

View File

@@ -1,5 +1,5 @@
-- ============================================================================ -- ============================================================================
-- Converts standard Zabbix tables to Partitioned tables. -- Converts Zabbix tables to Partitioned tables.
-- WARNING: This renames existing tables to *_old. -- WARNING: This renames existing tables to *_old.
-- ============================================================================ -- ============================================================================
@@ -22,24 +22,15 @@ BEGIN
WHERE c.relname = v_table; WHERE c.relname = v_table;
-- Check if table exists and is NOT already partitioned
IF EXISTS (SELECT 1 FROM pg_class WHERE relname = v_table AND relkind = 'r') THEN IF EXISTS (SELECT 1 FROM pg_class WHERE relname = v_table AND relkind = 'r') THEN
RAISE NOTICE 'Converting table % to partitioned table...', v_table; RAISE NOTICE 'Converting table % to partitioned table...', v_table;
-- 1. Rename existing table -- 1. Rename existing table
EXECUTE format('ALTER TABLE %I.%I RENAME TO %I', v_schema, v_table, v_old_table); EXECUTE format('ALTER TABLE %I.%I RENAME TO %I', v_schema, v_table, v_old_table);
-- 2. Create new partitioned table (handling auditlog PK uniquely) -- 2. Create new partitioned table (copying structure)
IF v_table = 'auditlog' THEN
EXECUTE format('CREATE TABLE %I.%I (LIKE %I.%I INCLUDING DEFAULTS INCLUDING COMMENTS) PARTITION BY RANGE (clock)', v_schema, v_table, v_schema, v_old_table);
EXECUTE format('ALTER TABLE %I.%I ADD PRIMARY KEY (auditid, clock)', v_schema, v_table);
EXECUTE format('CREATE INDEX IF NOT EXISTS auditlog_p_1 ON %I.%I (userid, clock)', v_schema, v_table);
EXECUTE format('CREATE INDEX IF NOT EXISTS auditlog_p_2 ON %I.%I (clock)', v_schema, v_table);
EXECUTE format('CREATE INDEX IF NOT EXISTS auditlog_p_3 ON %I.%I (resourcetype, resourceid)', v_schema, v_table);
EXECUTE format('CREATE INDEX IF NOT EXISTS auditlog_p_4 ON %I.%I (recordsetid)', v_schema, v_table);
EXECUTE format('CREATE INDEX IF NOT EXISTS auditlog_p_5 ON %I.%I (ip)', v_schema, v_table);
ELSE
EXECUTE format('CREATE TABLE %I.%I (LIKE %I.%I INCLUDING ALL) PARTITION BY RANGE (clock)', v_schema, v_table, v_schema, v_old_table); EXECUTE format('CREATE TABLE %I.%I (LIKE %I.%I INCLUDING ALL) PARTITION BY RANGE (clock)', v_schema, v_table, v_schema, v_old_table);
END IF;
-- 3. Create initial partitions -- 3. Create initial partitions
RAISE NOTICE 'Creating initial partitions for %...', v_table; RAISE NOTICE 'Creating initial partitions for %...', v_table;

View File

@@ -2,8 +2,7 @@
-- Creates a view to monitor partition status and sizes. -- Creates a view to monitor partition status and sizes.
-- ============================================================================ -- ============================================================================
DROP VIEW IF EXISTS partitions.monitoring; CREATE OR REPLACE VIEW partitions.monitoring AS
CREATE VIEW partitions.monitoring AS
SELECT SELECT
parent.relname AS parent_table, parent.relname AS parent_table,
c.table_name, c.table_name,
@@ -12,15 +11,10 @@ SELECT
count(child.relname) AS partition_count, count(child.relname) AS partition_count,
count(child.relname) FILTER ( count(child.relname) FILTER (
WHERE WHERE
(c.period = 'day' AND child.relname > (parent.relname || '_p' || to_char(now() AT TIME ZONE 'UTC', 'YYYYMMDD'))) (c.period = 'day' AND child.relname > (parent.relname || '_p' || to_char(now(), 'YYYYMMDD')))
OR OR
(c.period = 'month' AND child.relname > (parent.relname || '_p' || to_char(now() AT TIME ZONE 'UTC', 'YYYYMM'))) (c.period = 'month' AND child.relname > (parent.relname || '_p' || to_char(now(), 'YYYYMM')))
OR
(c.period = 'week' AND child.relname > (parent.relname || '_p' || to_char(date_trunc('week', now() AT TIME ZONE 'UTC'), 'YYYYMMDD')))
OR
(c.period LIKE '%hour%' AND child.relname > (parent.relname || '_p' || to_char(now() AT TIME ZONE 'UTC', 'YYYYMMDDHH24')))
) AS future_partitions, ) AS future_partitions,
sum(pg_total_relation_size(child.oid)) AS total_size_bytes,
pg_size_pretty(sum(pg_total_relation_size(child.oid))) AS total_size, pg_size_pretty(sum(pg_total_relation_size(child.oid))) AS total_size,
min(child.relname) AS oldest_partition, min(child.relname) AS oldest_partition,
max(child.relname) AS newest_partition, max(child.relname) AS newest_partition,

View File

@@ -1,27 +1,12 @@
# PostgreSQL Partitioning for Zabbix # PostgreSQL Partitioning for Zabbix
This is the declarative partitioning implementation for Zabbix `history*`, `trends*`, and `auditlog` tables on PostgreSQL. This solution is intended to replace standard Zabbix housekeeping for the configured tables. Partitioning is very useful for large environments because it completely eliminates the housekeeper from the process. Instead of huge DELETE queries on several million rows, fast DDL queries (ALTER TABLE) are executed, which drop an entire partition. This is the declarative (PostgreSQL procedures based) partitioning implementation for Zabbix `history`, `trends`, and `auditlog` tables on PostgreSQL. This solution is intended to replace standard Zabbix housekeeping for the configured tables. Partitioning is very useful for large environments because it completely eliminates the housekeeper from the process. Instead of huge DELETE queries on several million rows, fast DDL queries (ALTER TABLE) are executed, which drop an entire partition.
> [!WARNING] > [!WARNING]
> **High-Load Environments**:
> 1. **Data Visibility**: After enabling partitioning, old data remains in `*_old` tables and is **NOT visible** in Zabbix. You must migrate data manually if needed. > 1. **Data Visibility**: After enabling partitioning, old data remains in `*_old` tables and is **NOT visible** in Zabbix. You must migrate data manually if needed.
> 2. **Disable Housekeeping**: You **MUST** disable Zabbix Housekeeper for History and Trends in *Administration -> Housekeeping*. > 2. **Disable Housekeeping**: You **MUST** disable Zabbix Housekeeper for History and Trends in *Administration -> Housekeeping*. Failure to do so will cause massive `DELETE` loads.
## Table of Contents
- [Architecture](#architecture)
- [Components](#components)
- [Installation](#installation)
- [Configuration](#configuration)
- [Modifying Retention](#modifying-retention)
- [Maintenance](#maintenance)
- [Scheduling Maintenance](#scheduling-maintenance)
- [Monitoring & Permissions](#monitoring--permissions)
- [Versioning](#versioning)
- [Least Privilege Access (`zbxpart_monitor`)](#least-privilege-access-zbxpart_monitor)
- [Implementation Details](#implementation-details)
- [`auditlog` Table](#auditlog-table)
- [Converting Existing Tables](#converting-existing-tables)
- [Upgrades](#upgrades)
## Architecture ## Architecture
@@ -37,26 +22,11 @@ All procedures, information, statistics and configuration are stored in the `par
## Installation ## Installation
The installation is performed by executing the SQL procedures in the following order: The installation is performed by executing the SQL procedures in the following order:
1. Initialize schema (`00_schema_create.sql`). 1. Initialize schema (`00_partitions_init.sql`).
2. Install maintenance procedures (`01_maintenance.sql`). 2. Auditlog PK adjustment (`01_auditlog_prep.sql`).
3. Enable partitioning on tables (`02_enable_partitioning.sql`). 3. Install maintenance procedures (`02_maintenance.sql`).
4. Install monitoring views (`03_monitoring_view.sql`). 4. Enable partitioning on tables (`03_enable_partitioning.sql`).
5. Install monitoring views (`04_monitoring_view.sql`).
**Command Example:**
You can deploy these scripts manually against your Zabbix database using `psql`. Navigate to the `procedures/` directory and run:
```bash
# Connect as the zabbix database user
export PGPASSWORD="your_zabbix_password"
DB_HOST="localhost" # Or your DB endpoint
DB_NAME="zabbix"
DB_USER="zbxpart_admin"
for script in 00_schema_create.sql 01_maintenance.sql 02_enable_partitioning.sql 03_monitoring_view.sql; do
echo "Applying $script..."
psql -h $DB_HOST -U $DB_USER -d $DB_NAME -f "$script"
done
```
## Configuration ## Configuration
@@ -91,123 +61,82 @@ This procedure should be scheduled to run periodically (e.g., daily via `pg_cron
```sql ```sql
CALL partitions.run_maintenance(); CALL partitions.run_maintenance();
``` ```
### Scheduling Maintenance ### Automatic Maintenance
To ensure partitions are created in advance and old data is cleaned up, the maintenance procedure should be scheduled to run automatically. To ensure partitions are created in advance and old data is cleaned up, the maintenance procedure should be scheduled to run automatically.
It is recommended to run the maintenance **twice a day** and not in round hours because of the way housekeeper works (e.g., at 05:30 and 23:30). It is recommended to run the maintenance **twice a day** (e.g., at 05:30 and 23:30).
* **Primary Run**: Creates new future partitions and drops old ones. * **Primary Run**: Creates new future partitions and drops old ones.
* **Secondary Run**: Acts as a safety check. Since the procedure is idempotent (safe to run multiple times), a second run ensures everything is consistent if the first run failed or was interrupted. * **Secondary Run**: Acts as a safety check. Since the procedure is idempotent (safe to run multiple times), a second run ensures everything is consistent if the first run failed or was interrupted.
You can schedule this using one of the following methods: There are three ways to schedule this, depending on your environment:
#### Option 1: `pg_cron` (Recommended) #### Option 1: `pg_cron` (If you use RDS/Aurora)
`pg_cron` is a cron-based job scheduler that runs directly inside the database as an extension. It is very useful for cloud based databases like AWS RDS, Aurora, Azure, GCP, because it handles the authentication/connections securely for you automatically and its available as a managed extension. You do **not** need to install OS packages or configure anything. Simply modify the RDS Parameter Group to include `shared_preload_libraries = 'pg_cron'` and `cron.database_name = 'zabbix'`, reboot the instance, and execute `CREATE EXTENSION pg_cron;`. If you are running on managed PostgreSQL (like AWS Aurora) or prefer to keep scheduling inside the database, `pg_cron` is the way to go.
**Setup `pg_cron` (Self-Hosted):** 1. Ensure `pg_cron` is installed and loaded in `postgresql.conf` (`shared_preload_libraries = 'pg_cron'`).
1. Install the package via your OS package manager (e.g., `postgresql-15-cron` on Debian/Ubuntu, or `pg_cron_15` on RHEL/CentOS). 2. Run the following to schedule the maintenance:
2. Configure it modifying `postgresql.conf`: ```sql
```ini CREATE EXTENSION IF NOT EXISTS pg_cron;
shared_preload_libraries = 'pg_cron' SELECT cron.schedule('zabbix_maintenance', '30 5,23 * * *', 'CALL partitions.run_maintenance();');
cron.database_name = 'zabbix'
```
3. Restart PostgreSQL:
```bash
systemctl restart postgresql
```
4. Connect to your `zabbix` database as a superuser and create the extension:
```sql
CREATE EXTENSION pg_cron;
```
5. Schedule the job to run:
```sql
SELECT cron.schedule('zabbix_partition_maintenance', '30 5,23 * * *', 'CALL partitions.run_maintenance();');
```
**⚠️ Troubleshooting `pg_cron` Connection Errors:**
If your cron jobs fail to execute and you see `FATAL: password authentication failed` in your PostgreSQL logs, it is because `pg_cron` attempts to connect via TCP (`localhost`) by default, which usually requires a password.
**Solution A: Use Local Unix Sockets (Easier)**
Edit your `postgresql.conf` to force `pg_cron` to use the local Unix socket (which uses passwordless `peer` authentication):
```ini
cron.host = '/var/run/postgresql' # Or '/tmp', depending on your OS
``` ```
*(Restart PostgreSQL after making this change).* *Where:*
* `'zabbix_maintenance'` - The name of the job (must be unique).
* `'30 5,23 * * *'` - The standard cron schedule (runs at 05:30 and 23:30 daily).
* `'CALL partitions.run_maintenance();'` - The SQL command to execute.
**Solution B: Provide a Password (`.pgpass`)**
If you *must* connect via TCP with a specific database user and password, the `pg_cron` background worker needs a way to authenticate. You provide this by creating a `.pgpass` file for the OS `postgres` user.
1. Switch to the OS database user:
```bash
sudo su - postgres
```
2. Create or append your database credentials to `~/.pgpass` using the format `hostname:port:database:username:password`:
```bash
echo "localhost:5432:zabbix:zabbix:my_secure_password" >> ~/.pgpass
```
3. Set strict permissions (PostgreSQL will ignore the file if permissions are too loose):
```bash
chmod 0600 ~/.pgpass
```
**Managing `pg_cron` Jobs:** #### Option 2: `systemd` Timers
If you need to verify or manage your scheduled jobs (run as superuser): For standard Linux VM deployments, `systemd` timers are modern, prevent overlapping runs, and provide excellent logging.
- To **list all active schedules**: `SELECT * FROM cron.job;`
- To **view execution logs/history**: `SELECT * FROM cron.job_run_details;`
- To **remove/unschedule** the job: `SELECT cron.unschedule('zabbix_partition_maintenance');`
#### Option 2: Systemd Timers 1. Create a service file (`/etc/systemd/system/zabbix-partitioning.service`):
Systemd timers provide better logging and error handling properties than standard cron. ```ini
[Unit]
Description=Zabbix PostgreSQL Partition Maintenance
1. Create a service file **`/etc/systemd/system/zabbix-partitions.service`**: [Service]
```ini Type=oneshot
[Unit] User=zabbix
Description=Zabbix PostgreSQL Partition Maintenance # Ensure .pgpass is configured for the zabbix user so it doesn't prompt for a password
After=network.target postgresql.service ExecStart=/usr/bin/psql -U zabbix -d zabbix -c "CALL partitions.run_maintenance();"
```
[Service] 2. Create a timer file (`/etc/systemd/system/zabbix-partitioning.timer`):
Type=oneshot ```ini
User=postgres [Unit]
ExecStart=/usr/bin/psql -d zabbix -c "CALL partitions.run_maintenance();" Description=Zabbix Partitioning twice a day
```
2. Create a timer file **`/etc/systemd/system/zabbix-partitions.timer`**: [Timer]
```ini OnCalendar=*-*-* 05,23:30:00
[Unit] Persistent=true
Description=Run Zabbix Partition Maintenance Twice Daily
[Timer] [Install]
OnCalendar=*-*-* 05:30:00 WantedBy=timers.target
OnCalendar=*-*-* 23:30:00 ```
Persistent=true
[Install]
WantedBy=timers.target
```
3. Enable and start the timer: 3. Enable and start the timer:
```bash ```bash
systemctl daemon-reload systemctl daemon-reload
systemctl enable --now zabbix-partitions.timer systemctl enable --now zabbix-partitioning.timer
``` ```
#### Option 3: System Cron (`crontab`) #### Option 3: Standard Cron
Standard system cron is a simple fallback. This is the legacy, simple method for standard VMs and containerized environments.
**Example Crontab Entry (`crontab -e`):** **Example Crontab Entry (`crontab -e`):**
```bash ```bash
# Run Zabbix partition maintenance twice daily (5:30 AM and 5:30 PM) # Run Zabbix partition maintenance twice daily (5:30 AM and 11:30 PM)
30 5,23 * * * psql -U zabbix -d zabbix -c "CALL partitions.run_maintenance();" >> /var/log/zabbix_maintenance.log 2>&1 30 5,23 * * * psql -U zabbix -d zabbix -c "CALL partitions.run_maintenance();" >> /var/log/zabbix_maintenance.log 2>&1
``` ```
**Docker Environment:** **Docker Environment:**
If running in Docker, you can execute it via the host's cron by targeting the container: If running in Docker, you can execute it via the container's host:
```bash ```bash
30 5,23 * * * docker exec zabbix-db-test psql -U zabbix -d zabbix -c "CALL partitions.run_maintenance();" 30 5,23 * * * docker exec zabbix-db psql -U zabbix -d zabbix -c "CALL partitions.run_maintenance();"
``` ```
## Monitoring & Permissions ## Monitoring & Permissions
System state can be monitored via the `partitions.monitoring` view. It includes the information about number of future partitions and the time since the last maintenance run. Plus it includes the total size of the partitioned table in bytes. System state can be monitored via the `partitions.monitoring` view. It includes a `future_partitions` column which counts how many partitions exist *after* the current period. This is useful for alerting (e.g., trigger if `future_partitions < 2`).
```sql ```sql
SELECT * FROM partitions.monitoring; SELECT * FROM partitions.monitoring;
@@ -219,29 +148,25 @@ To check the installed version of the partitioning solution:
SELECT * FROM partitions.version ORDER BY installed_at DESC LIMIT 1; SELECT * FROM partitions.version ORDER BY installed_at DESC LIMIT 1;
``` ```
### Least Privilege Access (`zbxpart_monitor`) ### Least Privilege Access (`zbx_monitor`)
For monitoring purposes, it is highly recommended to create a dedicated user with read-only access to the monitoring view instead of using the `zbxpart_admin` owner account. For monitoring purposes, it is recommended to create a dedicated user with read-only access to the monitoring view.
```sql ```sql
CREATE USER zbxpart_monitor WITH PASSWORD 'secure_password'; CREATE USER zbx_monitor WITH PASSWORD 'secure_password';
GRANT USAGE ON SCHEMA partitions TO zbxpart_monitor; GRANT USAGE ON SCHEMA partitions TO zbx_monitor;
GRANT SELECT ON partitions.monitoring TO zbxpart_monitor; GRANT SELECT ON partitions.monitoring TO zbx_monitor;
``` ```
> [!WARNING]
> Because `03_monitoring_view.sql` uses a `DROP VIEW` command to apply updates, re-running the script will destroy all previously assigned `GRANT` permissions. If you ever update the view script, you **must** manually re-run the `GRANT SELECT` command above to restore access for the `zbxpart_monitor` user!
## Implementation Details ## Implementation Details
### `auditlog` Table ### `auditlog` Table
The standard Zabbix `auditlog` table has a primary key on `(auditid)`. Partitioning by `clock` requires the partition key to be part of the primary key. The standard `auditlog` table Primary Key is `(auditid)`. Partitioning by `clock` requires the partition key to be part of the Primary Key. The initialization script modifies the PK to `(auditid, clock)`.
To prevent placing a heavy, blocking lock on an `auditlog` table to alter its primary key, the enablement script (`02_enable_partitioning.sql`) detects it and handles it exactly like the history tables: it automatically renames the live, existing table to `auditlog_old`, and instantly creates a brand new, empty partitioned `auditlog` table pre-configured with the required `(auditid, clock)` composite primary key.
### Converting Existing Tables ### Converting Existing Tables
The enablement script guarantees practically zero downtime by automatically renaming the existing tables to `table_name_old` and creating new partitioned tables matching the exact schema. The enablement script renames the existing table to `table_name_old` and creates a new partitioned table with the same structure.
* **Note**: Data from the old tables is NOT automatically migrated to minimize downtime. * **Note**: Data from the old table is NOT automatically migrated to minimize downtime.
* New data flows into the new partitioned tables immediately. * New data flows into the new partitioned table immediately.
* Old data remains accessible in `table_name_old` for manual lookup or migration if required. * Old data remains accessible in `table_name_old` for manual query or migration if required.
## Upgrades ## Upgrades

View File

@@ -1,3 +0,0 @@
# Script-based Partitioning
(Coming soon)

View File

@@ -1,32 +0,0 @@
# Zabbix PostgreSQL Partitioning Monitoring
This template relies on Zabbix Agent 2 and its PostgreSQL plugin. It allows you to monitor the health of your partitioned PostgreSQL database tables. It uses a single master item to pull all metrics in bulk over a single database connection, dynamically distributing the numbers to Zabbix using Dependent Items.
There are three item prototypes:
1. Future Partitions Buffer: Number of future partitions to be created
2. Total Size Bytes: Total size of the partitioned table in bytes
3. Time Since Last Maintenance: Time since the last maintenance script was run
They allow monitoring of all the critical metrics, and they also have triggers, which will create a problem in case something is wrong with the partitioning.
### Setup
1. Copy the SQL file (`template/partitions.get_all.sql`) into a directory on your Agent machine. E.g., `/etc/zabbix/zabbix_agent2.d/postgresql/`.
2. Install zabbix-agent2-plugin-postgresql package.
3. Open your Plugin configuration file `/etc/zabbix/zabbix_agent2.d/plugins.d/postgresql.conf` and add these lines to establish your custom query module AND a secure named session (e.g., `AWS_RDS`). Adjust the parameters to match your environment. You can use a URI instead of a named session if you want; in that case you will need to modify the item keys to use the correct parameters.
```ini
# 1. Enable Loadable Custom Queries (Mandatory in Zabbix 7.4+)
Plugins.PostgreSQL.CustomQueriesPath=/etc/zabbix/zabbix_agent2.d/postgresql/
Plugins.PostgreSQL.CustomQueriesEnabled=true
# 2. Establish a Secure Backend Session
Plugins.PostgreSQL.Sessions.AWS_RDS.Uri=tcp://your-cluster-endpoint.amazonaws.com:5432
Plugins.PostgreSQL.Sessions.AWS_RDS.User=zabbix
Plugins.PostgreSQL.Sessions.AWS_RDS.Password=<YOUR_ZABBIX_PASSWORD>
Plugins.PostgreSQL.Sessions.AWS_RDS.TLSConnect=verify_full
Plugins.PostgreSQL.Sessions.AWS_RDS.TLSCAFile=/etc/zabbix/global-bundle.pem
```
4. Restart your agent to apply the changes:
```bash
systemctl restart zabbix-agent2
```
5. Import the `zbx_pg_partitions_monitor_agent2.yaml` template into your Zabbix.
6. Link the template to your Host, navigate to its "Macros" tab, and define the needed macros (in this case it's just named session):
* `{$PG.CONNSTRING.AGENT2}`: `AWS_RDS`

View File

@@ -1,8 +0,0 @@
-- Custom query "partitions.get_all" for the Zabbix Agent 2 PostgreSQL plugin
-- (deployed via Plugins.PostgreSQL.CustomQueriesPath, see the README).
-- Returns one row per configured table from the partitions.monitoring view;
-- the agent serializes the result set to JSON for the dependent item
-- prototypes (age_seconds, future_partitions, total_size_bytes, ...).
SELECT
    table_name,
    period,
    -- interval cast to text so it survives the JSON transport unambiguously
    keep_history::text AS keep_history,
    future_partitions,
    total_size_bytes,
    -- seconds since the maintenance procedure last touched this table's row
    EXTRACT(EPOCH FROM (now() - last_updated)) AS age_seconds
FROM partitions.monitoring;

View File

@@ -1,137 +0,0 @@
# ============================================================================
# Zabbix 7.0 template export: "PostgreSQL Partitioning by Zabbix Agent 2".
# A single master item pulls all metrics from partitions.monitoring via the
# Agent 2 PostgreSQL plugin custom query "partitions.get_all"; dependent item
# prototypes split the JSON per discovered table ({#TABLE_NAME}).
# NOTE(review): indentation restored — the captured copy had all leading
# whitespace stripped, which made the document invalid YAML.
# ============================================================================
zabbix_export:
  version: '7.0'
  template_groups:
    - uuid: 748ad4d098d447d492bb935c907f652f
      name: Templates/Databases
  templates:
    - uuid: a1d5f8c3b2e44a7c9d6b1f2e8a3c5b4d
      template: 'PostgreSQL Partitioning by Zabbix Agent 2'
      name: 'PostgreSQL Partitioning by Zabbix Agent 2'
      description: 'Monitors the custom partitions.monitoring view via the native Zabbix Agent 2 PostgreSQL plugin. Using a single master to minimize the DB connections and load.'
      vendor:
        name: Zabbix Support
        version: 7.0-0
      groups:
        - name: Templates/Databases
      items:
        # Master item: one DB round-trip returning all rows as a JSON text blob.
        - uuid: b8c7d6e5f4a34b2c8d2e3f4a5b6c7d8e
          name: 'PostgreSQL: Get Partitioning Data'
          key: 'pgsql.custom.query["{$PG.CONNSTRING.AGENT2}",,,"{$PG.DBNAME}","partitions.get_all"]'
          history: '0'
          value_type: TEXT
          description: 'Master item that queries all partition statistics in a single bulk JSON sequence.'
          tags:
            - tag: component
              value: raw
      discovery_rules:
        - uuid: b7c2a5d8f1e44b9c8a3f6d2e1c5b4a7d
          name: 'Partitioned Tables Discovery'
          type: DEPENDENT
          key: db.partitions.discovery.dependent
          item_prototypes:
            # Seconds since the maintenance procedure last updated this table.
            - uuid: f1a2b3c4d5e64f7a9b8c7d6e5f4a3b2c
              name: '{#TABLE_NAME}: Time Since Last Maintenance'
              type: DEPENDENT
              key: 'db.partitions.age["{#TABLE_NAME}"]'
              units: s
              preprocessing:
                - type: JSONPATH
                  parameters:
                    - '$.[?(@.table_name == "{#TABLE_NAME}")].age_seconds.first()'
              master_item:
                key: 'pgsql.custom.query["{$PG.CONNSTRING.AGENT2}",,,"{$PG.DBNAME}","partitions.get_all"]'
              tags:
                - tag: metric
                  value: age
                - tag: table
                  value: '{#TABLE_NAME}'
              trigger_prototypes:
                - uuid: a9b8c7d6e5f44a3b8c1d2e3f4a5b6c7d
                  expression: 'last(/PostgreSQL Partitioning by Zabbix Agent 2/db.partitions.age["{#TABLE_NAME}"])>{$PARTITIONS.AGE}'
                  name: 'Table {#TABLE_NAME}: Maintenance script has not run successfully in over 48 hours'
                  priority: WARNING
            # Number of partitions that exist ahead of the current period.
            - uuid: c4b9e2a5f1d84c7a9f3b6d1e5a2c8b4d
              name: '{#TABLE_NAME}: Future Partitions Buffer'
              type: DEPENDENT
              key: 'db.partitions.future["{#TABLE_NAME}"]'
              preprocessing:
                - type: JSONPATH
                  parameters:
                    - '$.[?(@.table_name == "{#TABLE_NAME}")].future_partitions.first()'
              master_item:
                key: 'pgsql.custom.query["{$PG.CONNSTRING.AGENT2}",,,"{$PG.DBNAME}","partitions.get_all"]'
              tags:
                - tag: metric
                  value: partitions
                - tag: table
                  value: '{#TABLE_NAME}'
              trigger_prototypes:
                - uuid: d6e3a5c8b2f14d9e8a7b6c5d4e3f2a1b
                  expression: 'last(/PostgreSQL Partitioning by Zabbix Agent 2/db.partitions.future["{#TABLE_NAME}"])<{$PARTITIONS.LOW}'
                  name: 'Table {#TABLE_NAME}: Future partitions buffer is critically low (< 2)'
                  priority: HIGH
            # Total on-disk size of the partitioned table (sum of children).
            - uuid: e8f2a1b3c4d54e6f9a8b7c6d5e4f3a2b
              name: '{#TABLE_NAME}: Total Size Bytes'
              type: DEPENDENT
              key: 'db.partitions.size["{#TABLE_NAME}"]'
              units: B
              preprocessing:
                - type: JSONPATH
                  parameters:
                    - '$.[?(@.table_name == "{#TABLE_NAME}")].total_size_bytes.first()'
              master_item:
                key: 'pgsql.custom.query["{$PG.CONNSTRING.AGENT2}",,,"{$PG.DBNAME}","partitions.get_all"]'
              tags:
                - tag: metric
                  value: size
                - tag: table
                  value: '{#TABLE_NAME}'
            # Configured partitioning period, stored as text.
            - uuid: ffa2b3c4d5e64f7a9b8c7d6e5f4a1001
              name: '{#TABLE_NAME}: Configured Partition Period'
              type: DEPENDENT
              key: 'db.partitions.period["{#TABLE_NAME}"]'
              value_type: CHAR
              preprocessing:
                - type: JSONPATH
                  parameters:
                    - '$.[?(@.table_name == "{#TABLE_NAME}")].period.first()'
              master_item:
                key: 'pgsql.custom.query["{$PG.CONNSTRING.AGENT2}",,,"{$PG.DBNAME}","partitions.get_all"]'
              tags:
                - tag: metric
                  value: config
                - tag: table
                  value: '{#TABLE_NAME}'
            # Configured retention interval, stored as text.
            - uuid: ffa2b3c4d5e64f7a9b8c7d6e5f4a1002
              name: '{#TABLE_NAME}: Configured Retention (Keep History)'
              type: DEPENDENT
              key: 'db.partitions.retention["{#TABLE_NAME}"]'
              value_type: CHAR
              preprocessing:
                - type: JSONPATH
                  parameters:
                    - '$.[?(@.table_name == "{#TABLE_NAME}")].keep_history.first()'
              master_item:
                key: 'pgsql.custom.query["{$PG.CONNSTRING.AGENT2}",,,"{$PG.DBNAME}","partitions.get_all"]'
              tags:
                - tag: metric
                  value: config
                - tag: table
                  value: '{#TABLE_NAME}'
          master_item:
            key: 'pgsql.custom.query["{$PG.CONNSTRING.AGENT2}",,,"{$PG.DBNAME}","partitions.get_all"]'
          lld_macro_paths:
            - lld_macro: '{#TABLE_NAME}'
              path: $.table_name
      macros:
        - macro: '{$PARTITIONS.AGE}'
          value: 24h
          description: 'The maximum period during which no new partitions may be created'
        - macro: '{$PARTITIONS.LOW}'
          value: '2'
          description: 'The minimum number of partitions that must exist in the future'
        - macro: '{$PG.CONNSTRING.AGENT2}'
          value: AWS_RDS
          description: 'Session name or URI of the PostgreSQL instance'
        - macro: '{$PG.DBNAME}'
          value: zabbix