Compare commits

..

22 Commits

Author SHA1 Message Date
Maksym Buz f38e9677e5 Enhance partitioning logic and update Zabbix config template 2026-04-02 17:02:16 +00:00
Maksym Buz a28a789454 docs(architecture): remove deprecated auditlog prep script step and correct numbering 2026-03-30 21:22:16 +00:00
Maksym Buz 3b4f0c9c75 build(repo): structure the repo into postgresql subdirectory with separate template and internal tests 2026-03-30 21:18:01 +00:00
Maksym Buz 7339bf5db0 chore(template): update template compatibility version to 7.0 2026-03-30 21:01:28 +00:00
Maksym Buz 39e37286f2 docs(procedures): refine zabbix monitor user instructions and pg_cron management section 2026-03-30 20:59:06 +00:00
Maksym Buz db2bc25a84 docs: finalize architecture docs, agent 2 template and procedures privileges 2026-03-30 20:53:04 +00:00
Maksym Buz 32a587172e docs: move pg_cron job management instructions to generic maintenance section 2026-03-30 20:46:55 +00:00
Maksym Buz 505933e880 feat(template): remove ODBC template, move agent2 SQL, and track template directory 2026-03-30 19:55:12 +00:00
Maksym Buz 119b25f1a0 feat(versioning): update versioning scheme to use zabbix major releases (7-1) 2026-03-30 19:51:56 +00:00
Maksym Buz 9d77fac4a4 refactor(procedures): rename init script and revert monitoring view 2026-03-30 19:40:32 +00:00
Maksym Buz 888f61a2c8 docs: add table of contents to procedures readme and update gitignore 2026-03-30 19:13:58 +00:00
Maksym Buz 944b463b75 feat(monitoring): expose raw total_size_bytes for native Zabbix graphing 2026-03-26 19:48:41 +00:00
Maksym Buz 59170a77e6 fix(partitioning): add exact Zabbix auditlog indexes with renamed suffix to prevent IF NOT EXISTS collision 2026-03-26 19:35:53 +00:00
Maksym Buz 2b7a69ba11 Refactor auditlog preparation, rename procedures sequentially, and update test suite 2026-03-26 15:57:35 +00:00
Maksym Buz 14f38efafd feat: Add support for AWS RDS deployment and enhance maintenance scheduling documentation with pg_cron and Systemd Timer options. 2026-03-20 17:56:09 +00:00
Maksym Buz 487f95020d style: Remove redundant script and description prefixes from SQL file comment headers. 2026-02-20 21:58:06 +00:00
Maksym Buz ea3e89effa feat: Enhanced partitioning procedures with schema awareness. 2026-02-20 21:46:27 +00:00
Maksym Buz 9d1b84225c docs: Change admonition type from critical to warning in README.md 2026-02-20 18:57:45 +00:00
Maksym Buz c77eb8e4af docs: Correct housekeeping path in the critical warning section of the README. 2026-02-20 17:56:25 +00:00
Maksym Buz 91eb4e17b8 fix: correct syntax errors and refactor interval parsing 2026-02-19 23:20:09 +00:00
Maksym Buz d7b8c7c9c3 change: Added test scripts for Docker initialization 2026-02-19 22:09:54 +00:00
Maksym Buz c4420bc1ad refactor: COMMITs added to release locks immediately. UTC usage. Testing env for test branch. 2026-02-19 21:53:51 +00:00
37 changed files with 1177639 additions and 399 deletions

View File

@@ -22,7 +22,7 @@ CREATE TABLE IF NOT EXISTS partitions.version (
 description text
 );
-INSERT INTO partitions.version (version, description) VALUES ('7-2', 'Added housekeeper task interceptor trigger to drop tasks for partitioned tables')
+INSERT INTO partitions.version (version, description) VALUES ('7-1', 'Zabbix 7.4 and 7.0 compatible version')
 ON CONFLICT (version) DO NOTHING;
 -- Default configuration for Zabbix tables (adjust as needed)
@@ -32,16 +32,9 @@ INSERT INTO partitions.config (table_name, period, keep_history) VALUES
 ('history_uint', 'day', '30 days'),
 ('history_str', 'day', '30 days'),
 ('history_log', 'day', '30 days'),
-('history_text', 'day', '30 days'),
-('history_bin', 'day', '30 days')
+('history_text', 'day', '30 days')
 ON CONFLICT (table_name) DO NOTHING;
--- Zabbix 8.0+ only: Uncomment the following lines if running Zabbix 8.0 or later
--- INSERT INTO partitions.config (table_name, period, keep_history) VALUES
--- ('history_json', 'day', '30 days')
--- ON CONFLICT (table_name) DO NOTHING;
 -- Trends tables: Monthly partitions, keep 12 months
 INSERT INTO partitions.config (table_name, period, keep_history) VALUES
 ('trends', 'month', '12 months'),

View File

@@ -2,15 +2,14 @@
 -- Core functions for Zabbix partitioning (Create, Drop, Maintain).
 -- ============================================================================
--- Function to check if a partition exists in a specific schema
-CREATE OR REPLACE FUNCTION partitions.partition_exists(p_partition_name text, p_schema text)
+-- Function to check if a partition exists
+CREATE OR REPLACE FUNCTION partitions.partition_exists(p_partition_name text)
 RETURNS boolean AS $$
 BEGIN
 RETURN EXISTS (
 SELECT 1 FROM pg_class c
 JOIN pg_namespace n ON n.oid = c.relnamespace
 WHERE c.relname = p_partition_name
-AND n.nspname = p_schema
 );
 END;
 $$ LANGUAGE plpgsql;
@@ -33,7 +32,7 @@ BEGIN
 SELECT n.nspname INTO v_parent_schema
 FROM pg_class c
 JOIN pg_namespace n ON n.oid = c.relnamespace
-WHERE c.relname = p_parent_table AND pg_table_is_visible(c.oid);
+WHERE c.relname = p_parent_table;
 IF NOT FOUND THEN
 RAISE EXCEPTION 'Parent table % not found', p_parent_table;
@@ -52,20 +51,11 @@ BEGIN
 v_partition_name := p_parent_table || '_p' || v_suffix;
-IF NOT partitions.partition_exists(v_partition_name, v_parent_schema) THEN
-BEGIN
+IF NOT partitions.partition_exists(v_partition_name) THEN
 EXECUTE format(
 'CREATE TABLE %I.%I PARTITION OF %I.%I FOR VALUES FROM (%s) TO (%s)',
 v_parent_schema, v_partition_name, v_parent_schema, p_parent_table, v_start_ts, v_end_ts
 );
-EXCEPTION
-WHEN invalid_object_definition THEN
--- Ignore overlap errors (e.g., when transitioning from daily to hourly partitioning)
-RAISE NOTICE 'Partition % overlaps with an existing partition. Skipping.', v_partition_name;
-WHEN duplicate_table THEN
--- Ignore race condition: another process created the partition concurrently
-RAISE NOTICE 'Partition % already exists (concurrent creation). Skipping.', v_partition_name;
-END;
 END IF;
 END;
 $$;
@@ -94,7 +84,7 @@ BEGIN
 JOIN pg_class parent ON pg_inherits.inhparent = parent.oid
 JOIN pg_class child ON pg_inherits.inhrelid = child.oid
 JOIN pg_namespace n ON child.relnamespace = n.oid
-WHERE parent.relname = p_parent_table AND pg_table_is_visible(parent.oid)
+WHERE parent.relname = p_parent_table
 LOOP
 -- Parse partition suffix to determine age
 -- Format: parent_pYYYYMM or parent_pYYYYMMDD
@@ -102,11 +92,11 @@ BEGIN
 BEGIN
 IF length(v_suffix) = 6 THEN -- YYYYMM
-v_partition_date := timezone('UTC', to_timestamp(v_suffix || '01', 'YYYYMMDD')::timestamp without time zone);
+v_partition_date := to_timestamp(v_suffix || '01', 'YYYYMMDD') AT TIME ZONE 'UTC';
 ELSIF length(v_suffix) = 8 THEN -- YYYYMMDD
-v_partition_date := timezone('UTC', to_timestamp(v_suffix, 'YYYYMMDD')::timestamp without time zone);
+v_partition_date := to_timestamp(v_suffix, 'YYYYMMDD') AT TIME ZONE 'UTC';
 ELSIF length(v_suffix) = 10 THEN -- YYYYMMDDHH
-v_partition_date := timezone('UTC', to_timestamp(v_suffix, 'YYYYMMDDHH24')::timestamp without time zone);
+v_partition_date := to_timestamp(v_suffix, 'YYYYMMDDHH24') AT TIME ZONE 'UTC';
 ELSE
 CONTINUE; -- Ignore non-matching suffix lengths
 END IF;
@@ -163,25 +153,25 @@ DECLARE
 BEGIN
 IF p_period = 'day' THEN
 v_period_interval := '1 day'::interval;
-v_start_time := date_trunc('day', now(), 'UTC');
+v_start_time := date_trunc('day', now() AT TIME ZONE 'UTC');
 -- Calculate how many past days cover the retention period (86400 seconds = 1 day)
 v_past_iterations := ceil(extract(epoch from p_keep_history) / 86400)::integer;
 ELSIF p_period = 'week' THEN
 v_period_interval := '1 week'::interval;
-v_start_time := date_trunc('week', now(), 'UTC');
+v_start_time := date_trunc('week', now() AT TIME ZONE 'UTC');
 -- 604800 seconds = 1 week
 v_past_iterations := ceil(extract(epoch from p_keep_history) / 604800)::integer;
 ELSIF p_period = 'month' THEN
 v_period_interval := '1 month'::interval;
-v_start_time := date_trunc('month', now(), 'UTC');
+v_start_time := date_trunc('month', now() AT TIME ZONE 'UTC');
 -- Approximate 30 days per month (2592000 seconds)
 v_past_iterations := ceil(extract(epoch from p_keep_history) / 2592000)::integer;
 ELSIF p_period LIKE '%hour%' THEN
 v_period_interval := p_period::interval;
-v_start_time := to_timestamp(floor(extract(epoch from now()) / extract(epoch from v_period_interval)) * extract(epoch from v_period_interval));
+v_start_time := date_trunc('hour', now() AT TIME ZONE 'UTC');
 v_past_iterations := ceil(extract(epoch from p_keep_history) / extract(epoch from v_period_interval))::integer;
 ELSE
@@ -231,14 +221,3 @@ BEGIN
 END LOOP;
 END;
 $$;
--- Trigger function to silently discard housekeeper tasks for partitioned tables
-CREATE OR REPLACE FUNCTION partitions.housekeeper_insert_trigger()
-RETURNS TRIGGER AS $$
-BEGIN
-IF EXISTS (SELECT 1 FROM partitions.config WHERE table_name = NEW.tablename) THEN
-RETURN NULL;
-END IF;
-RETURN NEW;
-END;
-$$ LANGUAGE plpgsql;

View File

@@ -19,10 +19,10 @@ BEGIN
 SELECT n.nspname INTO v_schema
 FROM pg_class c
 JOIN pg_namespace n ON n.oid = c.relnamespace
-WHERE c.relname = v_table AND pg_table_is_visible(c.oid);
-IF EXISTS (SELECT 1 FROM pg_class WHERE relname = v_table AND relkind = 'r' AND pg_table_is_visible(oid)) THEN
+WHERE c.relname = v_table;
+IF EXISTS (SELECT 1 FROM pg_class WHERE relname = v_table AND relkind = 'r') THEN
 RAISE NOTICE 'Converting table % to partitioned table...', v_table;
 -- 1. Rename existing table
@@ -48,37 +48,12 @@ BEGIN
 -- Optional: Migrate existing data
 -- EXECUTE format('INSERT INTO %I.%I SELECT * FROM %I.%I', v_schema, v_table, v_schema, v_old_table);
-ELSIF EXISTS (SELECT 1 FROM pg_class WHERE relname = v_table AND relkind = 'p' AND pg_table_is_visible(oid)) THEN
+ELSIF EXISTS (SELECT 1 FROM pg_class WHERE relname = v_table AND relkind = 'p') THEN
 RAISE NOTICE 'Table % is already partitioned. Skipping conversion.', v_table;
--- Just run maintenance for this specific table to ensure partitions exist
-CALL partitions.maintain_table(v_table, v_row.period, v_row.keep_history, v_row.future_partitions);
+-- Just run maintenance to ensure partitions exist
+CALL partitions.run_maintenance();
 ELSE
 RAISE WARNING 'Table % not found!', v_table;
 END IF;
 END LOOP;
--- Attach trigger to housekeeper table to silently discard tasks for partitioned tables.
--- Dynamically determine the schema of the housekeeper table to support custom schemas.
-SELECT n.nspname INTO v_schema
-FROM pg_class c
-JOIN pg_namespace n ON n.oid = c.relnamespace
-WHERE c.relname = 'housekeeper' AND pg_table_is_visible(c.oid);
-IF v_schema IS NOT NULL THEN
-EXECUTE format('DROP TRIGGER IF EXISTS housekeeper_filter ON %I.housekeeper', v_schema);
-EXECUTE format('CREATE TRIGGER housekeeper_filter BEFORE INSERT ON %I.housekeeper FOR EACH ROW EXECUTE FUNCTION partitions.housekeeper_insert_trigger()', v_schema);
-RAISE NOTICE 'Housekeeper intercept trigger installed on %.housekeeper', v_schema;
-ELSE
-RAISE WARNING 'housekeeper table not found — trigger NOT installed!';
-END IF;
 END $$;
--- ==========================================================================
--- IMPORTANT: If the Zabbix Server connects with a non-superuser (e.g., 'zabbix'),
--- that user MUST have access to the partitions schema for the housekeeper trigger
--- to work. Without these GRANTs, every INSERT into housekeeper will FAIL.
--- Uncomment and adjust the username below:
--- ==========================================================================
--- GRANT USAGE ON SCHEMA partitions TO zabbix;
--- GRANT SELECT ON partitions.config TO zabbix;

View File

@@ -2,13 +2,13 @@
 -- Creates a view to monitor partition status and sizes.
 -- ============================================================================
-CREATE OR REPLACE VIEW partitions.monitoring AS
+DROP VIEW IF EXISTS partitions.monitoring;
+CREATE VIEW partitions.monitoring AS
 SELECT
 parent.relname AS parent_table,
 c.table_name,
 c.period,
 c.keep_history,
-c.future_partitions AS configured_future_partitions,
 count(child.relname) AS partition_count,
 count(child.relname) FILTER (
 WHERE
@@ -19,15 +19,15 @@ SELECT
 (c.period = 'week' AND child.relname > (parent.relname || '_p' || to_char(date_trunc('week', now() AT TIME ZONE 'UTC'), 'YYYYMMDD')))
 OR
 (c.period LIKE '%hour%' AND child.relname > (parent.relname || '_p' || to_char(now() AT TIME ZONE 'UTC', 'YYYYMMDDHH24')))
-) AS actual_future_partitions,
+) AS future_partitions,
 sum(pg_total_relation_size(child.oid)) AS total_size_bytes,
 pg_size_pretty(sum(pg_total_relation_size(child.oid))) AS total_size,
 min(child.relname) AS oldest_partition,
 max(child.relname) AS newest_partition,
 c.last_updated
 FROM partitions.config c
-JOIN pg_class parent ON parent.relname = c.table_name AND pg_table_is_visible(parent.oid)
+JOIN pg_class parent ON parent.relname = c.table_name
 LEFT JOIN pg_inherits ON pg_inherits.inhparent = parent.oid
 LEFT JOIN pg_class child ON pg_inherits.inhrelid = child.oid
 WHERE parent.relkind = 'p' -- Only partitioned tables
-GROUP BY parent.relname, c.table_name, c.period, c.keep_history, c.future_partitions, c.last_updated;
+GROUP BY parent.relname, c.table_name, c.period, c.keep_history, c.last_updated;

View File

@@ -1,79 +0,0 @@
-- ============================================================================
-- Reverts Zabbix partitioned tables back to standard non-partitioned tables.
-- Existing partitioned tables will be renamed to *_part (data is preserved).
-- ============================================================================
DO $$
DECLARE
v_row record;
v_table text;
v_part_table text;
v_schema text;
BEGIN
FOR v_row IN SELECT * FROM partitions.config LOOP
v_table := v_row.table_name;
v_part_table := v_table || '_part';
-- Determine schema of the partitioned table
SELECT n.nspname INTO v_schema
FROM pg_class c
JOIN pg_namespace n ON n.oid = c.relnamespace
WHERE c.relname = v_table AND c.relkind = 'p' AND pg_table_is_visible(c.oid);
IF v_schema IS NOT NULL THEN
RAISE NOTICE 'Reverting partitioned table %...', v_table;
-- 1. Rename existing partitioned table to *_part
EXECUTE format('ALTER TABLE %I.%I RENAME TO %I', v_schema, v_table, v_part_table);
-- 2. Create standard (unpartitioned) replacement table based on the structure
IF v_table = 'auditlog' THEN
-- For auditlog, we need to try and restore the original single-column PK (auditid) if possible
EXECUTE format('CREATE TABLE %I.%I (LIKE %I.%I INCLUDING DEFAULTS INCLUDING COMMENTS)', v_schema, v_table, v_schema, v_part_table);
BEGIN
EXECUTE format('ALTER TABLE %I.%I ADD PRIMARY KEY (auditid)', v_schema, v_table);
EXCEPTION WHEN others THEN
RAISE WARNING 'Failed to create primary key on auditlog, might already exist or duplicates present.';
END;
EXECUTE format('CREATE INDEX IF NOT EXISTS auditlog_1 ON %I.%I (userid, clock)', v_schema, v_table);
EXECUTE format('CREATE INDEX IF NOT EXISTS auditlog_2 ON %I.%I (clock)', v_schema, v_table);
EXECUTE format('CREATE INDEX IF NOT EXISTS auditlog_3 ON %I.%I (resourcetype, resourceid)', v_schema, v_table);
EXECUTE format('CREATE INDEX IF NOT EXISTS auditlog_4 ON %I.%I (recordsetid)', v_schema, v_table);
EXECUTE format('CREATE INDEX IF NOT EXISTS auditlog_5 ON %I.%I (ip)', v_schema, v_table);
ELSE
-- For others, copy everything including indexes
EXECUTE format('CREATE TABLE %I.%I (LIKE %I.%I INCLUDING ALL)', v_schema, v_table, v_schema, v_part_table);
END IF;
RAISE NOTICE 'SUCCESS: % reverted to default. Partitioned data stored in % (You can DROP TABLE % CASCADE; later).', v_table, v_part_table, v_part_table;
ELSIF EXISTS (SELECT 1 FROM pg_class WHERE relname = v_table AND relkind = 'r' AND pg_table_is_visible(oid)) THEN
RAISE NOTICE 'Table % is already a regular table. Skipping.', v_table;
ELSE
RAISE WARNING 'Partitioned table % not found!', v_table;
END IF;
END LOOP;
-- Drop the housekeeper intercept trigger (dynamically determine schema for custom schema support)
SELECT n.nspname INTO v_schema
FROM pg_class c
JOIN pg_namespace n ON n.oid = c.relnamespace
WHERE c.relname = 'housekeeper' AND pg_table_is_visible(c.oid);
IF v_schema IS NOT NULL THEN
EXECUTE format('DROP TRIGGER IF EXISTS housekeeper_filter ON %I.housekeeper', v_schema);
RAISE NOTICE 'Housekeeper intercept trigger removed from %.housekeeper', v_schema;
ELSE
RAISE WARNING 'housekeeper table not found — trigger removal skipped.';
END IF;
RAISE NOTICE '================================================================================';
RAISE NOTICE 'Undo complete. Partitioned tables have been renamed to *_part.';
RAISE NOTICE 'If you want to migrate your history back, you must do it manually:';
RAISE NOTICE ' INSERT INTO history SELECT * FROM history_part;';
RAISE NOTICE 'Once done, or if you do not need the data, drop the partitioned tables:';
RAISE NOTICE ' DROP TABLE history_part CASCADE;';
RAISE NOTICE 'After that, you can safely remove the partitions infrastructure:';
RAISE NOTICE ' DROP SCHEMA partitions CASCADE;';
RAISE NOTICE '================================================================================';
END $$;

View File

@@ -1,161 +0,0 @@
# Zabbix Partitioning Deployment Manual
This guide provides a step-by-step process for deploying the PostgreSQL partitioning solution for Zabbix.
**🚨 DANGER: CRITICAL WARNING 🚨**
**BEFORE YOU PROCEED, YOU ABSOLUTELY MUST TAKE A FULL BACKUP OF YOUR ZABBIX DATABASE.**
**DO NOT SKIP THIS STEP. Schema modifications are dangerous. If something goes wrong and you do not have a backup, your historical data will be lost permanently, and we take ZERO responsibility.**
---
## Step 1: Preparation & Safety
Because database migrations can take time (especially on large tables), **never** run these scripts directly in a standard SSH session that might disconnect.
1. Open a safe terminal session using `tmux` or `screen`:
```bash
tmux new -s zabbix_partitioning
# OR
screen -S zabbix_partitioning
```
2. Disable the Zabbix Housekeeper for History and Trends:
- Go to your Zabbix Web UI -> **Administration** -> **Housekeeping**.
- **Uncheck** "Enable internal housekeeping" for **History and Trends**.
- Click **Update**.
3. Stop your Zabbix Server to ensure no new data is being written during the schema migration:
```bash
sudo systemctl stop zabbix-server
```
---
## Step 2: Database Connection & Schema Selection
Connect to your PostgreSQL server as an administrator (e.g., `postgres` or the database owner).
```bash
psql -U postgres -h localhost
```
Once inside `psql`, connect to your Zabbix database (usually named `zabbix`):
```sql
\c zabbix
```
> [!IMPORTANT]
> **Custom Schemas:** By default, Zabbix installs into the `public` schema. If you installed Zabbix into a custom schema (e.g., `zabbix_schema`), you **must** set your `search_path` now before running the scripts, otherwise they will fail to find your tables:
> ```sql
> SET search_path TO zabbix_schema, public;
> ```
---
## Step 3: Execute Installation Scripts
Run the scripts in the following exact order. You can use the `\i` command in `psql` if you are in the `procedures` directory, or specify the full path.
**1. Create the partitioning schema and config tables:**
> [!NOTE]
> **Zabbix 8.0+ Users:** Zabbix 8.0 introduced a new `history_json` table. Before running the script below, open `00_schema_create.sql` in a text editor and uncomment the lines specifically marked for Zabbix 8.0 at the end of the history tables block.
```sql
\i 00_schema_create.sql
```
**2. Install the maintenance logic and functions:**
```sql
\i 01_maintenance.sql
```
**3. Enable Partitioning (MIGRATION STEP):**
*This step renames your existing large tables to `_old` and instantly creates new partitioned tables. This might take a few moments.*
```sql
\i 02_enable_partitioning.sql
```
**4. Create the Monitoring View:**
```sql
\i 03_monitoring_view.sql
```
---
## Step 4: Schedule Automated Maintenance
Partitioning requires a recurring job that creates upcoming partitions ahead of time and drops partitions that have aged out of the retention window.
If you are using **AWS RDS** or a managed database with `pg_cron` enabled, run this inside `psql`:
```sql
CREATE EXTENSION IF NOT EXISTS pg_cron;
SELECT cron.schedule('zabbix_partition_maintenance', '30 5,23 * * *', 'CALL partitions.run_maintenance();');
```
*(If you are self-hosting and don't have `pg_cron`, please refer to the `README.md` for instructions on setting up standard OS `cron` or systemd timers.)*
---
## Step 5: Start Zabbix Server
Now that the database is fully partitioned, you can safely start Zabbix Server again:
```bash
sudo systemctl start zabbix-server
```
*(Note: Your old history data remains in tables like `history_old`. It is no longer visible in the UI. If you need it, you must manually insert it into the new tables. See `README.md` for more details.)*
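If you do need that data back in the UI, the backfill is a plain insert per table. A sketch (this can run for hours on large tables, so keep it inside your `tmux` session):
```sql
-- Backfill old rows into the new partitioned table (repeat per table).
INSERT INTO history SELECT * FROM history_old;
```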
---
## Step 6: Configure Zabbix Agent Monitoring
To ensure your partitions don't run out, you must monitor them. We use Zabbix Agent 2 for this.
1. On your database server (where Zabbix Agent 2 is installed), create the SQL query file by copying and pasting the entire block below into your terminal:
```bash
cat << 'EOF' | sudo tee /etc/zabbix/zabbix_agent2.d/partitions.get_all.sql > /dev/null
SELECT
table_name,
period,
keep_history::text AS keep_history,
configured_future_partitions,
actual_future_partitions,
total_size_bytes,
EXTRACT(EPOCH FROM (now() - last_updated)) AS age_seconds
FROM partitions.monitoring;
EOF
```
2. Configure the PostgreSQL Plugin by editing `/etc/zabbix/zabbix_agent2.d/plugins.d/postgresql.conf`. Ensure you have defined a session (e.g., `MY_DB`) and enabled custom queries:
```ini
Plugins.PostgreSQL.CustomQueriesPath=/etc/zabbix/zabbix_agent2.d/
Plugins.PostgreSQL.CustomQueriesEnabled=true
# Example Session (replace with your actual credentials)
Plugins.PostgreSQL.Sessions.MY_DB.Uri=tcp://localhost:5432
Plugins.PostgreSQL.Sessions.MY_DB.User=zbx_monitor
Plugins.PostgreSQL.Sessions.MY_DB.Password=your_password
```
3. Restart the Zabbix Agent 2:
```bash
sudo systemctl restart zabbix-agent2
```
---
## Step 7: Import Template in Zabbix
1. Log into your Zabbix Web UI.
2. Go to **Data collection** -> **Templates** and click **Import**.
3. Upload the `template/zbx_pg_partitions_monitor_agent2.yaml` file from this repository.
4. Go to your Database Host in Zabbix, and link the newly imported template: `PostgreSQL Partitioning by Zabbix Agent 2`.
5. On the Host configuration, go to the **Macros** tab.
6. You will see a macro named `{$PG.CONNSTRING.AGENT2}` with the value `<replace_me>`.
7. Change `<replace_me>` to the name of the session you configured in Step 6 (e.g., `MY_DB`).
8. Click **Update**.
**Congratulations!** Your Zabbix database is now fully partitioned, optimized, and monitored.

View File

@@ -21,8 +21,6 @@ This is the declarative partitioning implementation for Zabbix `history*`, `tren
 - [Implementation Details](#implementation-details)
 - [`auditlog` Table](#auditlog-table)
 - [Converting Existing Tables](#converting-existing-tables)
-- [PostgreSQL Tuning](#postgresql-tuning)
-- [Uninstall / Reverting](#uninstall--reverting)
 - [Upgrades](#upgrades)
 ## Architecture
@@ -38,10 +36,27 @@ All procedures, information, statistics and configuration are stored in the `par
 ## Installation
-> [!IMPORTANT]
-> **Please refer to the [MANUAL.md](MANUAL.md) for the complete, step-by-step, foolproof installation instructions.**
-> The manual contains critical safety procedures, backup warnings, and copy-pasteable commands for a safe deployment.
+The installation is performed by executing the SQL procedures in the following order:
+1. Initialize schema (`00_schema_create.sql`).
+2. Install maintenance procedures (`01_maintenance.sql`).
+3. Enable partitioning on tables (`02_enable_partitioning.sql`).
+4. Install monitoring views (`03_monitoring_view.sql`).
+**Command Example:**
+You can deploy these scripts manually against your Zabbix database using `psql`. Navigate to the `procedures/` directory and run:
+# Connect as the zabbix database user
+export PGPASSWORD="your_zabbix_password"
+DB_HOST="localhost" # Or your DB endpoint
+DB_NAME="zabbix"
+DB_USER="zbxpart_admin"
+for script in 00_schema_create.sql 01_maintenance.sql 02_enable_partitioning.sql 03_monitoring_view.sql; do
+echo "Applying $script..."
+psql -h $DB_HOST -U $DB_USER -d $DB_NAME -f "$script"
+done
 ## Configuration
@@ -198,24 +213,6 @@ System state can be monitored via the `partitions.monitoring` view. It includes
 SELECT * FROM partitions.monitoring;
 ```
-### Zabbix Agent Integration
-To monitor the state of the partitions directly from Zabbix, you need to provide the Zabbix Agent with the SQL query used to fetch this data. You can automatically generate the required `partitions.get_all.sql` file on your agent using this one-liner:
-```bash
-cat << 'EOF' | sudo tee /etc/zabbix/zabbix_agent2.d/partitions.get_all.sql > /dev/null
-SELECT
-table_name,
-period,
-keep_history::text AS keep_history,
-configured_future_partitions,
-actual_future_partitions,
-total_size_bytes,
-EXTRACT(EPOCH FROM (now() - last_updated)) AS age_seconds
-FROM partitions.monitoring;
-EOF
-```
-*(Make sure to adjust the destination path according to your Zabbix Agent template directory)*
 ### Versioning
 To check the installed version of the partitioning solution:
 ```sql
@@ -246,59 +243,9 @@ The enablement script guarantees practically zero downtime by automatically rena
 * New data flows into the new partitioned tables immediately.
 * Old data remains accessible in `table_name_old` for manual lookup or migration if required.
-### Housekeeper Interceptor
-Even when Zabbix Housekeeping is disabled in the UI for History and Trends, the Zabbix Server daemon may still generate and insert tasks into the `housekeeper` table (e.g., when an item or trigger is deleted, it schedules the deletion of its historical data). Without intervention, this results in the `housekeeper` table bloating massively over time, leading to slow sequential scans and `autovacuum` overhead.
-To prevent this, this extension installs a `BEFORE INSERT` trigger on the `housekeeper` table.
-* When Zabbix attempts to insert a housekeeper task, the trigger intercepts it and checks if the target table is managed in `partitions.config`.
-* If the table is partitioned (like `history`), the trigger **silently discards the insert** (`RETURN NULL`), preventing disk I/O and table bloat entirely.
-* If the table is not partitioned (like `events` or `sessions`), the task is allowed to be recorded and is cleaned up naturally by Zabbix.
-## PostgreSQL Tuning
-Before or immediately after enabling partitioning, you should tune your `postgresql.conf`. The standard configuration is not optimized for partitioned tables and might cause performance degradation or out-of-memory errors.
-| Parameter | Recommended | Description |
-|-----------|-------------|-------------|
-| `max_locks_per_transaction`| `512` (or higher) | **Requires DB Restart.** Default is `64`, which is far too low. PostgreSQL acquires locks per partition. With many partitioned tables (e.g., history x 30 days), operations like `pg_dump`, `VACUUM`, or queries crossing multiple boundaries will fail with *"out of shared memory"*. |
-| `jit` | `off` | **Highly Recommended.** JIT adds overhead to query planning. With many partitions, JIT can drastically increase CPU usage as PostgreSQL attempts to optimize simple queries across dozens of partitions. |
-**Default parameters to verify:**
-The following are usually set correctly by default, but you should verify them just in case:
-* `enable_partition_pruning = on` : **Critical.** Ensures PostgreSQL only queries the necessary partitions instead of scanning everything.
-* `enable_partitionwise_join = off` : Zabbix does not do massive joins on history tables; enabling this only wastes planner CPU time.
-* `enable_partitionwise_aggregate = off` : Zabbix doesn't perform complex DB-side `GROUP BY` aggregations on history. Leave it disabled.
-## Uninstall / Reverting
-If you wish to stop using partitioning and revert back to standard, unpartitioned tables without data loss, carefully follow these steps.
-> [!CAUTION]
-> Reverting partitioning replaces your partitioned tables with standard empty tables. If you need to retain data from the partitioned period, you must manually migrate it before dropping the partition sets. **Always stop Zabbix Server before proceeding.**
-1. **Stop Zabbix Server** to prevent new data from being inserted during the transition.
-2. **Execute Undo Script:** Run the `04_undo_partitioning.sql` script to recreate non-partitioned tables matching your original Zabbix schema. This script will rename your current partitioned tables to `*_part` (`history_part`, `trends_part`, etc.) and automatically create native, clean tables (`history`, `trends`) in their place.
-```bash
-psql -h $DB_HOST -U zbxpart_admin -d zabbix -f 04_undo_partitioning.sql
-```
-3. **Data Migration (Optional):** If you want to keep the metrics collected during the partitioned period, you must manually insert them into the newly created regular tables. This step can take hours depending on table sizes.
-```sql
-INSERT INTO history SELECT * FROM history_part;
-INSERT INTO trends SELECT * FROM trends_part;
--- Repeat for all tables you wish to restore
-```
-4. **Cleanup:** Once you have migrated the data you need (or if you don't need it at all), you can drop the heavy partitioned tables and remove the partitioning extensions completely.
-```sql
-DROP TABLE history_part CASCADE;
-DROP TABLE history_uint_part CASCADE;
--- Repeat for all *_part tables ...
--- To drop the automatic maintenance infrastructure:
-DROP SCHEMA partitions CASCADE;
-```
-5. **Start Zabbix Server & Re-enable Housekeeper:** Once the tables are replaced, you can start the server. *Don't forget to re-enable Housekeeping for History and Trends in the Zabbix UI!*
 ## Upgrades
-When upgrading Zabbix:
 1. **Backup**: Ensure a full database backup exists.
 2. **Compatibility**: Zabbix upgrade scripts may attempt to `ALTER` tables. PostgreSQL supports `ALTER TABLE` on partitioned tables for adding columns, which propagates to partitions.
 3. **Failure Scenarios**: If an upgrade script fails due to partitioning, the table may need to be temporarily reverted or the partition structure manually adjusted.

View File

@@ -2,8 +2,7 @@ SELECT
 table_name,
 period,
 keep_history::text AS keep_history,
-configured_future_partitions,
-actual_future_partitions,
+future_partitions,
 total_size_bytes,
 EXTRACT(EPOCH FROM (now() - last_updated)) AS age_seconds
 FROM partitions.monitoring;

View File

@@ -131,7 +131,7 @@ zabbix_export:
 value: '2'
 description: 'The minimum number of partitions that must exist in the future'
 - macro: '{$PG.CONNSTRING.AGENT2}'
-value: '<replace_me>'
+value: AWS_RDS
 description: 'Session name or URI of the PostgreSQL instance'
 - macro: '{$PG.DBNAME}'
 value: zabbix

View File

@@ -0,0 +1,58 @@
# Zabbix PostgreSQL Partitioning Architecture
This document provides a brief technical overview of the components, logic, and dynamic querying mechanisms that power the PostgreSQL partitioning solution for Zabbix.
## Schema-Agnostic Design
A core architectural principle of this solution is its **schema-agnostic design**. It does not assume that your Zabbix database is installed in the default `public` schema.
When the procedures need to create, drop, or manipulate a partitioned table (e.g., `history`), they do not hardcode the schema. Instead, they dynamically query PostgreSQL's internal system catalogs (`pg_class` and `pg_namespace`) to locate exactly which schema the target table belongs to:
```sql
SELECT n.nspname INTO v_schema
FROM pg_class c
JOIN pg_namespace n ON n.oid = c.relnamespace
WHERE c.relname = v_table;
```
This ensures the partitioning scripts work correctly even in custom Zabbix deployments where the tables are housed in alternative schemas.
## File Structure & Queries
The solution is divided into a series of SQL scripts that must be executed sequentially to set up the environment.
### 1. `00_schema_create.sql`
* **Purpose:** Initializes the foundation for the partitioning system.
* **Actions:**
* Creates the isolated `partitions` schema to keep everything separate from Zabbix's own structure.
* Creates the `partitions.config` table, which stores the retention policies (tunable after installation, as sketched below).
* Creates the `partitions.version` table for tracking the installed version.
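Because the policies are ordinary rows, retention can be tuned after installation with plain SQL; a minimal sketch (the 90-day value is illustrative, not a recommendation):
```sql
-- Extend numeric history retention; picked up on the next run_maintenance() call.
UPDATE partitions.config
SET keep_history = '90 days'::interval
WHERE table_name IN ('history', 'history_uint');
```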
### 2. `01_maintenance.sql`
* **Purpose:** Contains the core PL/pgSQL procedural logic that manages the lifecycle of the partitions.
* **Key Functions/Procedures:**
* `partition_exists()`: Queries `pg_class` to verify whether a specific child partition exists.
* `create_partition()`: Executes the DDL `CREATE TABLE ... PARTITION OF ... FOR VALUES FROM (x) TO (y)` to generate a new time-bound chunk.
* `drop_old_partitions()`: Iterates over existing child partitions (using `pg_inherits`) and calculates their age from their suffix. Drops those older than the defined `keep_history` policy.
* `maintain_table()`: The orchestrator for a single table. It calculates the necessary UTC timestamps, calls `create_partition()` forward to build the future buffer and backward to cover the retention period, and finally calls `drop_old_partitions()`.
* `run_maintenance()`: The global loop that iterates through `partitions.config` and triggers `maintain_table()` for every configured Zabbix table. Both entry points can be invoked manually, as sketched below.
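For example, once the procedures are installed, a manual run looks like this (the parameter values are illustrative):
```sql
-- Maintain a single table: daily partitions, 30 days retained, 5 future partitions.
CALL partitions.maintain_table('history', 'day', '30 days'::interval, 5);

-- Or maintain everything registered in partitions.config.
CALL partitions.run_maintenance();
```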
### 3. `02_enable_partitioning.sql`
* **Purpose:** The migration script that actually executes the partition conversion on the live database.
* **Actions:**
* It dynamically locates the existing Primary Key on the active `auditlog` table (usually just `auditid`) and alters it to a composite key `(auditid, clock)` so it supports range partitioning.
* It takes the original Zabbix table (e.g., `history`) and renames it to `history_old` (`ALTER TABLE ... RENAME TO ...`).
* It immediately creates a new partitioned table with the original name, inheriting the exact structure of the old table (`CREATE TABLE ... (LIKE ... INCLUDING ALL) PARTITION BY RANGE (clock)`).
* It triggers the first maintenance run so new incoming data has immediate partitions to land in.
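Condensed to its essence, the conversion for a history-style table looks roughly like this (a simplified sketch; the real script resolves the schema dynamically and handles the `auditlog` primary key separately):
```sql
-- Keep the original data aside and swap in an empty partitioned shell.
ALTER TABLE history RENAME TO history_old;
CREATE TABLE history (LIKE history_old INCLUDING ALL) PARTITION BY RANGE (clock);

-- Build the initial partition set so incoming data has somewhere to land.
CALL partitions.maintain_table('history', 'day', '30 days'::interval, 5);
```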
### 4. `03_monitoring_view.sql`
* **Purpose:** Provides an easy-to-read observability layer.
* **Actions:**
* Creates the `partitions.monitoring` view by joining `partitions.config` with `pg_class` and `pg_inherits`, using `pg_total_relation_size()` and `pg_size_pretty()` to report sizes.
* This view aggregates the total size of each partitioned family and calculates how many "future partitions" exist as a safety buffer.
## Automated Scheduling (`pg_cron`)
While `systemd` timers or standard `cron` can be used to trigger the maintenance, the recommended approach (especially for AWS RDS/Aurora deployments) is using the `pg_cron` database extension.
`pg_cron` allows you to schedule the `CALL partitions.run_maintenance();` procedure directly within PostgreSQL, ensuring the database autonomously manages its own housekeeping without requiring external OS-level access or triggers.
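Registering the schedule is a single statement; the job name and the twice-daily timing here are illustrative:
```sql
-- pg_cron must be loaded via shared_preload_libraries (or enabled in the RDS parameter group).
CREATE EXTENSION IF NOT EXISTS pg_cron;

-- Run partition maintenance at 05:30 and 23:30 every day.
SELECT cron.schedule('zabbix_partition_maintenance', '30 5,23 * * *',
                     'CALL partitions.run_maintenance();');
```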

View File

@@ -0,0 +1,90 @@
# Quickstart (PostgreSQL Partitioning Test)
## Start Environment
> **Note**: If `docker` commands fail with permission errors, run `newgrp docker` or ensure your user is in the `docker` group (`sudo usermod -aG docker $USER`) and log out/in.
```bash
cd postgresql/docker
sudo ./run_test_env.sh --pg 16 --zabbix 7.0
# Options: --pg <16|17|18> --zabbix <7.0|7.4>
```
## Verify
```bash
# Check status
docker ps
# SQL Shell
docker exec -it zabbix-db-test psql -U zabbix -d zabbix
# Password: zabbix
```
## Reset
```bash
docker compose down -v
```
## Partitioning
See [ARCHITECTURE.md](../ARCHITECTURE.md) for details on the implemented declarative partitioning.
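Once the environment is up, a quick way to confirm the partition sets exist is to query the monitoring view from the SQL shell (assuming the init scripts installed the `partitions` schema):
```sql
SELECT parent_table, partition_count, future_partitions, total_size
FROM partitions.monitoring;
```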
## AWS RDS / External Database Testing
You can run these partitioning tests against a real AWS RDS (or any external PostgreSQL instance).
### 1. Configure Credentials
First, create a `db_credentials` file in the `postgresql/` directory. (This file is ignored by Git to keep your passwords safe).
Example `postgresql/db_credentials`:
```bash
# Admin credentials
export DB_HOST="your-rds-endpoint.rds.amazonaws.com"
export DB_PORT="5432"
export DB_NAME="postgres"
export DB_USER="postgres"
export DB_PASSWORD="your_admin_password"
# SSL Configuration
export DB_SSL_MODE="verify-full"
export DB_PEM_URL="https://truststore.pki.rds.amazonaws.com/global/global-bundle.pem"
export DB_SSL_ROOT_CERT="./global-bundle.pem"
# Zabbix credentials to be created
export ZBX_DB_NAME="zabbix"
export ZBX_DB_USER="zabbix"
export ZBX_DB_PASSWORD="zabbix_password"
```
### 2. Automated Testing
You can run the same automated deployment script, but instruct it to deploy directly to your RDS instance instead of a local Docker container:
```bash
cd postgresql/docker
./run_test_env.sh --pg 16 --zabbix 7.0 --rds
```
If you want to completely clean up the RDS database and start fresh (terminating existing connections and dropping all data), use the `--rds-drop` flag. You will be prompted to type `yes` to confirm the deletion:
```bash
./run_test_env.sh --pg 16 --zabbix 7.0 --rds-drop
```
### 3. Manual Setup & Zabbix Integration
If you want to prepare the real database for your Production Zabbix Server manually, you can just run the initialization script directly:
```bash
cd postgresql
./setup_rds.sh
# To drop an existing database and start fresh, use:
# ./setup_rds.sh --drop
```
The script will automatically connect as the `postgres` user, download the SSL certificate bundle if needed, and set up the `zabbix` user and database.
Upon success, the script will output the exact block you need to copy into your `zabbix_server.conf`, e.g.:
```ini
DBHost=your-rds-endpoint.rds.amazonaws.com
DBName=zabbix
DBUser=zabbix
DBPassword=zabbix_password
DBPort=5432
DBTLSConnect=verify_full
DBTLSCAFile=/full/path/to/global-bundle.pem
```

View File

@@ -0,0 +1,20 @@
services:
postgres:
image: postgres:${PG_VERSION}
container_name: zabbix-db-test
environment:
POSTGRES_PASSWORD: zabbix
POSTGRES_USER: zabbix
POSTGRES_DB: zabbix
PGDATA: /var/lib/postgresql/data/pgdata
ports:
- "5432:5432"
volumes:
- ./init_scripts:/docker-entrypoint-initdb.d
tmpfs:
- /var/lib/postgresql/data
healthcheck:
test: ["CMD-SHELL", "pg_isready -U zabbix"]
interval: 5s
timeout: 5s
retries: 5

View File

@@ -0,0 +1,5 @@
-- Create additional user for partitioning tasks
CREATE USER zbx_part WITH PASSWORD 'zbx_part';
GRANT CONNECT ON DATABASE zabbix TO zbx_part;
-- Grant usage on public schema (standard for PG 15+)
GRANT USAGE ON SCHEMA public TO zbx_part;

File diff suppressed because it is too large

View File

@@ -0,0 +1,48 @@
-- ============================================================================
-- Creates the 'partitions' schema and configuration table.
-- Defines the structure for managing Zabbix partitioning.
-- ============================================================================
CREATE SCHEMA IF NOT EXISTS partitions;
-- Configuration table to store partitioning settings per table
CREATE TABLE IF NOT EXISTS partitions.config (
table_name text NOT NULL,
period text NOT NULL CHECK (period IN ('day', 'week', 'month', 'year')),
keep_history interval NOT NULL,
future_partitions integer NOT NULL DEFAULT 5,
last_updated timestamp WITH TIME ZONE DEFAULT (now() AT TIME ZONE 'UTC'),
PRIMARY KEY (table_name)
);
-- Table to track installed version of the partitioning solution
CREATE TABLE IF NOT EXISTS partitions.version (
version text PRIMARY KEY,
installed_at timestamp with time zone DEFAULT (now() AT TIME ZONE 'UTC'),
description text
);
-- Set initial version
INSERT INTO partitions.version (version, description) VALUES ('1.0', 'Initial release')
ON CONFLICT (version) DO NOTHING;
-- Default configuration for Zabbix tables (adjust as needed)
-- History tables: Daily partitions, keep 30 days
INSERT INTO partitions.config (table_name, period, keep_history) VALUES
('history', 'day', '30 days'),
('history_uint', 'day', '30 days'),
('history_str', 'day', '30 days'),
('history_log', 'day', '30 days'),
('history_text', 'day', '30 days')
ON CONFLICT (table_name) DO NOTHING;
-- Trends tables: Monthly partitions, keep 12 months
INSERT INTO partitions.config (table_name, period, keep_history) VALUES
('trends', 'month', '12 months'),
('trends_uint', 'month', '12 months')
ON CONFLICT (table_name) DO NOTHING;
-- Auditlog: Monthly partitions, keep 12 months
INSERT INTO partitions.config (table_name, period, keep_history) VALUES
('auditlog', 'month', '12 months')
ON CONFLICT (table_name) DO NOTHING;

View File

@@ -0,0 +1,194 @@
-- ============================================================================
-- Core functions for Zabbix partitioning (Create, Drop, Maintain).
-- ============================================================================
-- Function to check if a partition exists
CREATE OR REPLACE FUNCTION partitions.partition_exists(p_partition_name text)
RETURNS boolean AS $$
BEGIN
RETURN EXISTS (
SELECT 1 FROM pg_class c
JOIN pg_namespace n ON n.oid = c.relnamespace
WHERE c.relname = p_partition_name
);
END;
$$ LANGUAGE plpgsql;
-- Function to create a partition
CREATE OR REPLACE PROCEDURE partitions.create_partition(
p_parent_table text,
p_start_time timestamp with time zone,
p_end_time timestamp with time zone,
p_period text
) LANGUAGE plpgsql AS $$
DECLARE
v_partition_name text;
v_start_ts bigint;
v_end_ts bigint;
v_suffix text;
v_parent_schema text;
BEGIN
-- Determine the schema of the parent table
SELECT n.nspname INTO v_parent_schema
FROM pg_class c
JOIN pg_namespace n ON n.oid = c.relnamespace
WHERE c.relname = p_parent_table;
IF NOT FOUND THEN
RAISE EXCEPTION 'Parent table % not found', p_parent_table;
END IF;
-- Timestamps arrive already UTC-adjusted from the caller; no further conversion is needed here.
v_start_ts := extract(epoch from p_start_time)::bigint;
v_end_ts := extract(epoch from p_end_time)::bigint;
IF p_period = 'month' THEN
v_suffix := to_char(p_start_time, 'YYYYMM');
ELSE
v_suffix := to_char(p_start_time, 'YYYYMMDD');
END IF;
v_partition_name := p_parent_table || '_p' || v_suffix;
IF NOT partitions.partition_exists(v_partition_name) THEN
EXECUTE format(
'CREATE TABLE %I.%I PARTITION OF %I.%I FOR VALUES FROM (%s) TO (%s)',
v_parent_schema, v_partition_name, v_parent_schema, p_parent_table, v_start_ts, v_end_ts
);
END IF;
END;
$$;
-- Function to drop old partitions
CREATE OR REPLACE PROCEDURE partitions.drop_old_partitions(
p_parent_table text,
p_retention interval,
p_period text
) LANGUAGE plpgsql AS $$
DECLARE
v_cutoff_ts bigint;
v_partition record;
v_partition_date timestamp with time zone;
v_suffix text;
v_partition_schema text;
BEGIN
-- Calculate cutoff timestamp
v_cutoff_ts := extract(epoch from (now() - p_retention))::bigint;
FOR v_partition IN
SELECT
child.relname AS partition_name,
n.nspname AS partition_schema
FROM pg_inherits
JOIN pg_class parent ON pg_inherits.inhparent = parent.oid
JOIN pg_class child ON pg_inherits.inhrelid = child.oid
JOIN pg_namespace n ON child.relnamespace = n.oid
WHERE parent.relname = p_parent_table
LOOP
-- Parse partition suffix to determine age
-- Format: parent_pYYYYMM or parent_pYYYYMMDD
v_suffix := substring(v_partition.partition_name from length(p_parent_table) + 3);
BEGIN
IF length(v_suffix) = 6 THEN -- YYYYMM
v_partition_date := to_timestamp(v_suffix || '01', 'YYYYMMDD') AT TIME ZONE 'UTC';
-- For monthly partitions, compare the END of the month (suffix + 1 month)
-- against the cutoff, so a partition is only dropped once it lies
-- entirely outside the retention window.
IF extract(epoch from (v_partition_date + '1 month'::interval)) < v_cutoff_ts THEN
RAISE NOTICE 'Dropping old partition %', v_partition.partition_name;
EXECUTE format('DROP TABLE %I.%I', v_partition.partition_schema, v_partition.partition_name);
COMMIT; -- Release lock immediately
END IF;
ELSIF length(v_suffix) = 8 THEN -- YYYYMMDD
v_partition_date := to_timestamp(v_suffix, 'YYYYMMDD') AT TIME ZONE 'UTC';
IF extract(epoch from (v_partition_date + '1 day'::interval)) < v_cutoff_ts THEN
RAISE NOTICE 'Dropping old partition %', v_partition.partition_name;
EXECUTE format('DROP TABLE %I.%I', v_partition.partition_schema, v_partition.partition_name);
COMMIT; -- Release lock immediately
END IF;
END IF;
EXCEPTION WHEN OTHERS THEN
-- Ignore parsing errors for non-standard partitions
NULL;
END;
END LOOP;
END;
$$;
-- MAIN Procedure to maintain a single table
CREATE OR REPLACE PROCEDURE partitions.maintain_table(
p_table_name text,
p_period text,
p_keep_history interval,
p_future_partitions integer DEFAULT 5
) LANGUAGE plpgsql AS $$
DECLARE
v_start_time timestamp with time zone;
v_period_interval interval;
i integer;
v_past_iterations integer;
BEGIN
IF p_period = 'day' THEN
v_period_interval := '1 day'::interval;
v_start_time := date_trunc('day', now() AT TIME ZONE 'UTC');
-- Calculate how many past days cover the retention period (86400 seconds = 1 day)
v_past_iterations := ceil(extract(epoch from p_keep_history) / 86400)::integer;
ELSIF p_period = 'week' THEN
v_period_interval := '1 week'::interval;
v_start_time := date_trunc('week', now() AT TIME ZONE 'UTC');
-- 604800 seconds = 1 week
v_past_iterations := ceil(extract(epoch from p_keep_history) / 604800)::integer;
ELSIF p_period = 'month' THEN
v_period_interval := '1 month'::interval;
v_start_time := date_trunc('month', now() AT TIME ZONE 'UTC');
-- Approximate 30 days per month (2592000 seconds)
v_past_iterations := ceil(extract(epoch from p_keep_history) / 2592000)::integer;
ELSE
RETURN;
END IF;
-- 1. Create Future Partitions (Current + Buffer)
FOR i IN 0..p_future_partitions LOOP
CALL partitions.create_partition(
p_table_name,
v_start_time + (i * v_period_interval),
v_start_time + ((i + 1) * v_period_interval),
p_period
);
COMMIT; -- Release lock immediately
END LOOP;
-- 2. Create Past Partitions (Covering retention period)
IF v_past_iterations > 0 THEN
FOR i IN 1..v_past_iterations LOOP
CALL partitions.create_partition(
p_table_name,
v_start_time - (i * v_period_interval),
v_start_time - ((i - 1) * v_period_interval),
p_period
);
COMMIT; -- Release lock immediately
END LOOP;
END IF;
-- 3. Drop Old Partitions
CALL partitions.drop_old_partitions(p_table_name, p_keep_history, p_period);
-- 4. Update Metadata
UPDATE partitions.config SET last_updated = now() WHERE table_name = p_table_name;
END;
$$;
-- Global Maintenance Procedure
CREATE OR REPLACE PROCEDURE partitions.run_maintenance()
LANGUAGE plpgsql AS $$
DECLARE
v_row record;
BEGIN
FOR v_row IN SELECT * FROM partitions.config LOOP
CALL partitions.maintain_table(v_row.table_name, v_row.period, v_row.keep_history, v_row.future_partitions);
END LOOP;
END;
$$;

View File

@@ -0,0 +1,56 @@
-- ============================================================================
-- Converts standard Zabbix tables to Partitioned tables.
-- WARNING: This renames existing tables to *_old.
-- ============================================================================
DO $$
DECLARE
v_row record;
v_table text;
v_old_table text;
v_pk_sql text;
v_schema text;
BEGIN
FOR v_row IN SELECT * FROM partitions.config LOOP
v_table := v_row.table_name;
v_old_table := v_table || '_old';
-- Determine schema
SELECT n.nspname INTO v_schema
FROM pg_class c
JOIN pg_namespace n ON n.oid = c.relnamespace
WHERE c.relname = v_table;
IF EXISTS (SELECT 1 FROM pg_class WHERE relname = v_table AND relkind = 'r') THEN
RAISE NOTICE 'Converting table % to partitioned table...', v_table;
-- 1. Rename existing table
EXECUTE format('ALTER TABLE %I.%I RENAME TO %I', v_schema, v_table, v_old_table);
-- 2. Create new partitioned table (handling auditlog PK uniquely)
IF v_table = 'auditlog' THEN
EXECUTE format('CREATE TABLE %I.%I (LIKE %I.%I INCLUDING DEFAULTS INCLUDING COMMENTS) PARTITION BY RANGE (clock)', v_schema, v_table, v_schema, v_old_table);
EXECUTE format('ALTER TABLE %I.%I ADD PRIMARY KEY (auditid, clock)', v_schema, v_table);
EXECUTE format('CREATE INDEX IF NOT EXISTS auditlog_1 ON %I.%I (userid, clock)', v_schema, v_table);
EXECUTE format('CREATE INDEX IF NOT EXISTS auditlog_2 ON %I.%I (clock)', v_schema, v_table);
ELSE
EXECUTE format('CREATE TABLE %I.%I (LIKE %I.%I INCLUDING ALL) PARTITION BY RANGE (clock)', v_schema, v_table, v_schema, v_old_table);
END IF;
-- 3. Create initial partitions
RAISE NOTICE 'Creating initial partitions for %...', v_table;
CALL partitions.maintain_table(v_table, v_row.period, v_row.keep_history, v_row.future_partitions);
-- Optional: Migrate existing data
-- EXECUTE format('INSERT INTO %I.%I SELECT * FROM %I.%I', v_schema, v_table, v_schema, v_old_table);
ELSIF EXISTS (SELECT 1 FROM pg_class WHERE relname = v_table AND relkind = 'p') THEN
RAISE NOTICE 'Table % is already partitioned. Skipping conversion.', v_table;
-- Just run maintenance to ensure partitions exist
CALL partitions.run_maintenance();
ELSE
RAISE WARNING 'Table % not found!', v_table;
END IF;
END LOOP;
END $$;

View File

@@ -0,0 +1,27 @@
-- ============================================================================
-- Creates a view to monitor partition status and sizes.
-- ============================================================================
CREATE OR REPLACE VIEW partitions.monitoring AS
SELECT
parent.relname AS parent_table,
c.table_name,
c.period,
c.keep_history,
count(child.relname) AS partition_count,
count(child.relname) FILTER (
WHERE
(c.period = 'day' AND child.relname > (parent.relname || '_p' || to_char(now(), 'YYYYMMDD')))
OR
(c.period = 'month' AND child.relname > (parent.relname || '_p' || to_char(now(), 'YYYYMM')))
) AS future_partitions,
pg_size_pretty(sum(pg_total_relation_size(child.oid))) AS total_size,
min(child.relname) AS oldest_partition,
max(child.relname) AS newest_partition,
c.last_updated
FROM partitions.config c
JOIN pg_class parent ON parent.relname = c.table_name
LEFT JOIN pg_inherits ON pg_inherits.inhparent = parent.oid
LEFT JOIN pg_class child ON pg_inherits.inhrelid = child.oid
WHERE parent.relkind = 'p' -- Only partitioned tables
GROUP BY parent.relname, c.table_name, c.period, c.keep_history, c.last_updated;
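-- Usage sketch (illustrative threshold): flag tables whose future buffer is
-- running low. Uncomment to run after the view is created:
-- SELECT table_name, future_partitions, newest_partition
-- FROM partitions.monitoring
-- WHERE future_partitions < 2;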

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large

View File

@@ -0,0 +1,91 @@
-- ============================================================================
-- SCRIPT: z_gen_history_data.sql
-- DESCRIPTION: Generates mock data for Zabbix history and trends tables.
-- Creates a dummy host and items if they don't exist.
-- ============================================================================
DO $$
DECLARE
v_hostid bigint := 900001;
v_groupid bigint := 900001;
v_interfaceid bigint := 900001;
v_itemid_start bigint := 900001;
v_start_time integer := extract(epoch from (now() - interval '7 days'))::integer;
v_end_time integer := extract(epoch from now())::integer;
i integer;
BEGIN
-- 1. CREATE DUMMY STRUCTURES
-- Host Group
INSERT INTO hstgrp (groupid, name, uuid, type)
VALUES (v_groupid, 'Partition Test Group', 'df77189c49034553999973d8e0500001', 0)
ON CONFLICT DO NOTHING;
-- Host
INSERT INTO hosts (hostid, host, name, status, uuid)
VALUES (v_hostid, 'partition-test-host', 'Partition Test Host', 0, 'df77189c49034553999973d8e0500002')
ON CONFLICT DO NOTHING;
-- Interface
INSERT INTO interface (interfaceid, hostid, main, type, useip, ip, dns, port)
VALUES (v_interfaceid, v_hostid, 1, 1, 1, '127.0.0.1', '', '10050')
ON CONFLICT DO NOTHING;
-- 2. CREATE DUMMY ITEMS AND GENERATE HISTORY
-- Item 1: Numeric Float (HISTORY)
INSERT INTO items (itemid, hostid, interfaceid, name, key_, type, value_type, delay, uuid)
VALUES (v_itemid_start + 1, v_hostid, v_interfaceid, 'Test Float Item', 'test.float', 0, 0, '1m', 'df77189c49034553999973d8e0500003');
INSERT INTO history (itemid, clock, value, ns)
SELECT
v_itemid_start + 1,
ts,
random() * 100,
0
FROM generate_series(v_start_time, v_end_time, 60) AS ts;
INSERT INTO trends (itemid, clock, num, value_min, value_avg, value_max)
SELECT
v_itemid_start + 1,
(ts / 3600) * 3600, -- Hourly truncation
60,
0,
50,
100
FROM generate_series(v_start_time, v_end_time, 3600) AS ts;
-- Item 2: Numeric Unsigned (HISTORY_UINT)
INSERT INTO items (itemid, hostid, interfaceid, name, key_, type, value_type, delay, uuid)
VALUES (v_itemid_start + 2, v_hostid, v_interfaceid, 'Test Uint Item', 'test.uint', 0, 3, '1m', 'df77189c49034553999973d8e0500004')
ON CONFLICT DO NOTHING;
INSERT INTO history_uint (itemid, clock, value, ns)
SELECT
v_itemid_start + 2,
ts,
(random() * 1000)::integer,
0
FROM generate_series(v_start_time, v_end_time, 60) AS ts;
INSERT INTO trends_uint (itemid, clock, num, value_min, value_avg, value_max)
SELECT
v_itemid_start + 2,
(ts / 3600) * 3600,
60,
0,
500,
1000
FROM generate_series(v_start_time, v_end_time, 3600) AS ts;
-- Item 3: Character (HISTORY_STR)
INSERT INTO items (itemid, hostid, interfaceid, name, key_, type, value_type, delay, uuid)
VALUES (v_itemid_start + 3, v_hostid, v_interfaceid, 'Test Str Item', 'test.str', 0, 1, '1m', 'df77189c49034553999973d8e0500005')
ON CONFLICT DO NOTHING;
INSERT INTO history_str (itemid, clock, value, ns)
SELECT
v_itemid_start + 3,
ts,
'test_value_' || ts,
0
FROM generate_series(v_start_time, v_end_time, 300) AS ts; -- Every 5 mins
END $$;
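
-- Quick verification sketch (item IDs follow v_itemid_start above): the
-- minute-resolution items should hold roughly 7 * 24 * 60 = 10080 rows each,
-- the 5-minute string item about 2016.
SELECT 'history' AS tbl, count(*) FROM history WHERE itemid = 900002
UNION ALL
SELECT 'history_uint', count(*) FROM history_uint WHERE itemid = 900003
UNION ALL
SELECT 'history_str', count(*) FROM history_str WHERE itemid = 900004;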

View File

@@ -0,0 +1,164 @@
#!/bin/bash
# Default values
PG_VERSION=""
ZABBIX_VERSION=""
# Color codes
GREEN='\033[0;32m'
RED='\033[0;31m'
NC='\033[0m' # No Color
usage() {
echo "Usage: $0 --pg <16|17|18> --zabbix <7.0|7.4> [--rds] [--rds-drop]"
echo "Example: $0 --pg 16 --zabbix 7.0 [--rds-drop]"
exit 1
}
# Parse arguments
USE_RDS=false
DROP_RDS=false
while [[ "$#" -gt 0 ]]; do
case $1 in
--pg) PG_VERSION="$2"; shift ;;
--zabbix) ZABBIX_VERSION="$2"; shift ;;
--rds) USE_RDS=true ;;
--rds-drop) USE_RDS=true; DROP_RDS=true ;;
*) echo "Unknown parameter: $1"; usage ;;
esac
shift
done
if [[ -z "$PG_VERSION" || -z "$ZABBIX_VERSION" ]]; then
echo -e "${RED}Error: detailed arguments required.${NC}"
usage
fi
# Map Zabbix version to sql-scripts folder
if [[ "$ZABBIX_VERSION" == "7.0" ]]; then
SQL_DIR="../sql-scripts-70"
elif [[ "$ZABBIX_VERSION" == "7.4" ]]; then
SQL_DIR="../sql-scripts-74"
else
echo -e "${RED}Error: Unsupported Zabbix version. Use 7.0 or 7.4.${NC}"
exit 1
fi
echo -e "${GREEN}Preparing environment for PostgreSQL $PG_VERSION and Zabbix $ZABBIX_VERSION...${NC}"
# Cleanup previous run
echo "Cleaning up containers and volumes..."
docker compose down -v > /dev/null 2>&1
rm -rf init_scripts
mkdir -p init_scripts
# Symlink SQL scripts
echo "Setting up initialization scripts from $SQL_DIR..."
# 0. Extra Users
if [[ -f "../init_extra_users.sql" ]]; then
cp "../init_extra_users.sql" ./init_scripts/00_init_extra_users.sql
echo "Copied extra user init script."
fi
# 1. Schema
if [[ -f "$SQL_DIR/schema.sql" ]]; then
# Use 01_00 to ensure it comes before 01_10
cp "$SQL_DIR/schema.sql" ./init_scripts/01_00_schema.sql
# 1.1 Partitioning Infrastructure
if [[ -f "../../procedures/00_schema_create.sql" ]]; then
cp "../../procedures/00_schema_create.sql" ./init_scripts/01_10_schema_create.sql
fi
if [[ -f "../../procedures/01_maintenance.sql" ]]; then
cp "../../procedures/01_maintenance.sql" ./init_scripts/01_30_maintenance.sql
fi
if [[ -f "../../procedures/02_enable_partitioning.sql" ]]; then
cp "../../procedures/02_enable_partitioning.sql" ./init_scripts/01_40_enable.sql
fi
if [[ -f "../../procedures/03_monitoring_view.sql" ]]; then
cp "../../procedures/03_monitoring_view.sql" ./init_scripts/01_50_monitoring.sql
fi
else
echo -e "${RED}Error: schema.sql not found in $SQL_DIR${NC}"
exit 1
fi
# 2. Images
if [[ -f "$SQL_DIR/images.sql" ]]; then
cp "$SQL_DIR/images.sql" ./init_scripts/02_images.sql
else
echo -e "${RED}Error: images.sql not found in $SQL_DIR${NC}"
exit 1
fi
# 3. Data
if [[ -f "$SQL_DIR/data.sql" ]]; then
cp "$SQL_DIR/data.sql" ./init_scripts/03_data.sql
else
echo -e "${RED}Error: data.sql not found in $SQL_DIR${NC}"
exit 1
fi
# 4. Mock History Data
if [[ -f "../z_gen_history_data.sql" ]]; then
cp "../z_gen_history_data.sql" ./init_scripts/04_gen_data.sql
echo "Copied mock data generator."
else
echo -e "${RED}Warning: z_gen_history_data.sql not found!${NC}"
fi
# Note: the 7.0 and 7.4 source dumps may name or organize files slightly
# differently; the copy steps above assume the source layout provided here.
# Export variable for Docker Compose
export PG_VERSION
if [ "$USE_RDS" = "true" ]; then
echo -e "${GREEN}Deploying directly to RDS environment...${NC}"
if [ ! -f "../db_credentials" ]; then
echo -e "${RED}Error: ../db_credentials file not found. Please create it first.${NC}"
exit 1
fi
# Initialize RDS (create/drop user and db)
if [ "$DROP_RDS" = "true" ]; then
echo "Initializing Zabbix RDS user and database (with DROP requested)..."
bash ../setup_rds.sh --drop
else
echo "Initializing Zabbix RDS user and database..."
bash ../setup_rds.sh
fi
source ../db_credentials
export PGPASSWORD="$ZBX_DB_PASSWORD"
echo "Applying scripts from init_scripts/ to RDS..."
for sql_file in ./init_scripts/*.sql; do  # glob expansion is already sorted
echo "Executing $sql_file..."
psql "host=$DB_HOST port=$DB_PORT dbname=$ZBX_DB_NAME user=$ZBX_DB_USER sslmode=$DB_SSL_MODE sslrootcert=../$DB_SSL_ROOT_CERT" -f "$sql_file" -v ON_ERROR_STOP=1
done
echo -e "${GREEN}RDS Environment ready.${NC}"
echo "Connect: psql \"host=$DB_HOST port=$DB_PORT dbname=$ZBX_DB_NAME user=$ZBX_DB_USER sslmode=$DB_SSL_MODE sslrootcert=../$DB_SSL_ROOT_CERT\""
else
# Run Docker Compose
echo -e "${GREEN}Starting PostgreSQL container...${NC}"
docker compose up -d
echo -e "${GREEN}Waiting for database to be ready...${NC}"
# Simple wait loop
for i in {1..30}; do
if docker exec zabbix-db-test pg_isready -U zabbix > /dev/null 2>&1; then
echo -e "${GREEN}Database is ready!${NC}"
break
fi
echo -n "."
sleep 1
done
# Check if data generation finished
echo "To follow initialization logs, run: docker logs -f zabbix-db-test"
echo -e "${GREEN}Environment ready.${NC}"
echo "Connect: psql -h localhost -p 5432 -U zabbix -d zabbix"
fi
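
# Example invocations (a sketch; substitute this script's actual filename):
#   ./prepare_env.sh --pg 17 --zabbix 7.4             # local Docker environment
#   ./prepare_env.sh --pg 16 --zabbix 7.0 --rds       # deploy to RDS, keep existing DB
#   ./prepare_env.sh --pg 16 --zabbix 7.0 --rds-drop  # deploy to RDS, drop and recreate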

View File

@@ -0,0 +1,5 @@
-- Create additional user for partitioning tasks
CREATE USER zbx_part WITH PASSWORD 'zbx_part';
GRANT CONNECT ON DATABASE zabbix TO zbx_part;
-- Grant usage on public schema (standard for PG 15+)
GRANT USAGE ON SCHEMA public TO zbx_part;
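-- Optional sanity check (a sketch): confirm the role exists and can connect.
SELECT rolname, has_database_privilege('zbx_part', 'zabbix', 'CONNECT') AS can_connect
FROM pg_roles
WHERE rolname = 'zbx_part';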

postgresql/tests/setup_rds.sh (Executable file, 101 lines)
View File

@@ -0,0 +1,101 @@
#!/bin/bash
set -e
# Change directory to script's location
cd "$(dirname "$0")"
DROP_DB=false
while [[ "$#" -gt 0 ]]; do
case $1 in
--drop) DROP_DB=true ;;
esac
shift
done
# Source credentials from db_credentials file
if [ -f "./db_credentials" ]; then
echo "Loading credentials from db_credentials..."
source ./db_credentials
else
echo "Error: db_credentials file not found in $(pwd)"
exit 1
fi
# 1. Download the AWS RDS SSL root certificate if it is not already present
if [ -n "$DB_PEM_URL" ] && [ ! -f "$DB_SSL_ROOT_CERT" ]; then
echo "Downloading SSL root certificate from AWS..."
wget -qO "$DB_SSL_ROOT_CERT" "$DB_PEM_URL"
fi
# Ensure PEM has right permissions if it exists
if [ -f "$DB_SSL_ROOT_CERT" ]; then
chmod 600 "$DB_SSL_ROOT_CERT"
fi
# 2. Login as the RDS admin user (postgres) to create the zabbix user/database
echo "Connecting to PostgreSQL to create Zabbix user and database..."
export PGPASSWORD="$DB_PASSWORD"
# Create the zabbix user if it doesn't already exist
psql "host=$DB_HOST port=$DB_PORT dbname=$DB_NAME user=$DB_USER sslmode=$DB_SSL_MODE sslrootcert=$DB_SSL_ROOT_CERT" -v ON_ERROR_STOP=1 <<EOF
DO \$\$
BEGIN
IF NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = '$ZBX_DB_USER') THEN
CREATE ROLE $ZBX_DB_USER WITH LOGIN PASSWORD '$ZBX_DB_PASSWORD';
END IF;
END
\$\$;
EOF
echo "User '$ZBX_DB_USER' verified/created."
# Create the zabbix database if it doesn't already exist
DB_EXISTS=$(psql "host=$DB_HOST port=$DB_PORT dbname=$DB_NAME user=$DB_USER sslmode=$DB_SSL_MODE sslrootcert=$DB_SSL_ROOT_CERT" -t -c "SELECT 1 FROM pg_database WHERE datname='$ZBX_DB_NAME'" | tr -d '[:space:]')
if [ "$DROP_DB" = "true" ] && [ "$DB_EXISTS" = "1" ]; then
echo -e "\n========================================"
echo -e " WARNING! "
echo -e "========================================"
echo -e "You requested to completely DROP and RE-INITIATE the database '$ZBX_DB_NAME'."
echo -e "This will delete ALL data. Are you sure you want to proceed?"
read -p "Type 'yes' to proceed: " confirm_drop
if [ "$confirm_drop" != "yes" ]; then
echo "Database drop cancelled. Exiting."
exit 1
fi
echo "Terminating active connections and dropping database..."
psql "host=$DB_HOST port=$DB_PORT dbname=$DB_NAME user=$DB_USER sslmode=$DB_SSL_MODE sslrootcert=$DB_SSL_ROOT_CERT" -c "SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE datname = '$ZBX_DB_NAME' AND pid <> pg_backend_pid();"
psql "host=$DB_HOST port=$DB_PORT dbname=$DB_NAME user=$DB_USER sslmode=$DB_SSL_MODE sslrootcert=$DB_SSL_ROOT_CERT" -c "DROP DATABASE $ZBX_DB_NAME;"
DB_EXISTS=""
fi
if [ "$DB_EXISTS" != "1" ]; then
echo "Database '$ZBX_DB_NAME' does not exist. Creating..."
psql "host=$DB_HOST port=$DB_PORT dbname=$DB_NAME user=$DB_USER sslmode=$DB_SSL_MODE sslrootcert=$DB_SSL_ROOT_CERT" -c "CREATE DATABASE $ZBX_DB_NAME OWNER $ZBX_DB_USER;"
else
echo "Database '$ZBX_DB_NAME' already exists."
fi
# Grant necessary permissions
psql "host=$DB_HOST port=$DB_PORT dbname=$DB_NAME user=$DB_USER sslmode=$DB_SSL_MODE sslrootcert=$DB_SSL_ROOT_CERT" -c "GRANT ALL PRIVILEGES ON DATABASE $ZBX_DB_NAME TO $ZBX_DB_USER;"
echo ""
echo "================================================================================"
echo "✅ Initialization Successful!"
echo "================================================================================"
echo "You can now use these settings in your Zabbix server configuration:"
echo "--------------------------------------------------------------------------------"
echo "DBHost=$DB_HOST"
echo "DBName=$ZBX_DB_NAME"
echo "DBUser=$ZBX_DB_USER"
echo "DBPassword=$ZBX_DB_PASSWORD"
echo "DBPort=$DB_PORT"
echo "DBTLSConnect=verify_full"
echo "DBTLSCAFile=$(realpath $DB_SSL_ROOT_CERT)"
echo "================================================================================"
echo ""
echo "To connect manually for testing directly to the Zabbix DB:"
echo "export PGPASSWORD=\"$ZBX_DB_PASSWORD\""
echo "psql \"host=$DB_HOST port=$DB_PORT dbname=$ZBX_DB_NAME user=$ZBX_DB_USER sslmode=$DB_SSL_MODE sslrootcert=$DB_SSL_ROOT_CERT\""
echo ""

File diff suppressed because it is too large

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,49 @@
ALTER TABLE history RENAME TO history_old;
CREATE TABLE history (
itemid bigint NOT NULL,
clock integer DEFAULT '0' NOT NULL,
value DOUBLE PRECISION DEFAULT '0.0000' NOT NULL,
ns integer DEFAULT '0' NOT NULL,
PRIMARY KEY (itemid,clock,ns)
);
ALTER TABLE history_uint RENAME TO history_uint_old;
CREATE TABLE history_uint (
itemid bigint NOT NULL,
clock integer DEFAULT '0' NOT NULL,
value numeric(20) DEFAULT '0' NOT NULL,
ns integer DEFAULT '0' NOT NULL,
PRIMARY KEY (itemid,clock,ns)
);
ALTER TABLE history_str RENAME TO history_str_old;
CREATE TABLE history_str (
itemid bigint NOT NULL,
clock integer DEFAULT '0' NOT NULL,
value varchar(255) DEFAULT '' NOT NULL,
ns integer DEFAULT '0' NOT NULL,
PRIMARY KEY (itemid,clock,ns)
);
ALTER TABLE history_log RENAME TO history_log_old;
CREATE TABLE history_log (
itemid bigint NOT NULL,
clock integer DEFAULT '0' NOT NULL,
timestamp integer DEFAULT '0' NOT NULL,
source varchar(64) DEFAULT '' NOT NULL,
severity integer DEFAULT '0' NOT NULL,
value text DEFAULT '' NOT NULL,
logeventid integer DEFAULT '0' NOT NULL,
ns integer DEFAULT '0' NOT NULL,
PRIMARY KEY (itemid,clock,ns)
);
ALTER TABLE history_text RENAME TO history_text_old;
CREATE TABLE history_text (
itemid bigint NOT NULL,
clock integer DEFAULT '0' NOT NULL,
value text DEFAULT '' NOT NULL,
ns integer DEFAULT '0' NOT NULL,
PRIMARY KEY (itemid,clock,ns)
);
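
-- Once data migration from the *_old tables has been verified, they can be
-- dropped to reclaim space (a sketch; irreversible, run only when the old
-- data is no longer needed):
-- DROP TABLE IF EXISTS history_old, history_uint_old, history_str_old,
--                      history_log_old, history_text_old;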

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,49 @@
ALTER TABLE history RENAME TO history_old;
CREATE TABLE history (
itemid bigint NOT NULL,
clock integer DEFAULT '0' NOT NULL,
value DOUBLE PRECISION DEFAULT '0.0000' NOT NULL,
ns integer DEFAULT '0' NOT NULL,
PRIMARY KEY (itemid,clock,ns)
);
ALTER TABLE history_uint RENAME TO history_uint_old;
CREATE TABLE history_uint (
itemid bigint NOT NULL,
clock integer DEFAULT '0' NOT NULL,
value numeric(20) DEFAULT '0' NOT NULL,
ns integer DEFAULT '0' NOT NULL,
PRIMARY KEY (itemid,clock,ns)
);
ALTER TABLE history_str RENAME TO history_str_old;
CREATE TABLE history_str (
itemid bigint NOT NULL,
clock integer DEFAULT '0' NOT NULL,
value varchar(255) DEFAULT '' NOT NULL,
ns integer DEFAULT '0' NOT NULL,
PRIMARY KEY (itemid,clock,ns)
);
ALTER TABLE history_log RENAME TO history_log_old;
CREATE TABLE history_log (
itemid bigint NOT NULL,
clock integer DEFAULT '0' NOT NULL,
timestamp integer DEFAULT '0' NOT NULL,
source varchar(64) DEFAULT '' NOT NULL,
severity integer DEFAULT '0' NOT NULL,
value text DEFAULT '' NOT NULL,
logeventid integer DEFAULT '0' NOT NULL,
ns integer DEFAULT '0' NOT NULL,
PRIMARY KEY (itemid,clock,ns)
);
ALTER TABLE history_text RENAME TO history_text_old;
CREATE TABLE history_text (
itemid bigint NOT NULL,
clock integer DEFAULT '0' NOT NULL,
value text DEFAULT '' NOT NULL,
ns integer DEFAULT '0' NOT NULL,
PRIMARY KEY (itemid,clock,ns)
);

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,49 @@
ALTER TABLE history RENAME TO history_old;
CREATE TABLE history (
itemid bigint NOT NULL,
clock integer DEFAULT '0' NOT NULL,
value DOUBLE PRECISION DEFAULT '0.0000' NOT NULL,
ns integer DEFAULT '0' NOT NULL,
PRIMARY KEY (itemid,clock,ns)
);
ALTER TABLE history_uint RENAME TO history_uint_old;
CREATE TABLE history_uint (
itemid bigint NOT NULL,
clock integer DEFAULT '0' NOT NULL,
value numeric(20) DEFAULT '0' NOT NULL,
ns integer DEFAULT '0' NOT NULL,
PRIMARY KEY (itemid,clock,ns)
);
ALTER TABLE history_str RENAME TO history_str_old;
CREATE TABLE history_str (
itemid bigint NOT NULL,
clock integer DEFAULT '0' NOT NULL,
value varchar(255) DEFAULT '' NOT NULL,
ns integer DEFAULT '0' NOT NULL,
PRIMARY KEY (itemid,clock,ns)
);
ALTER TABLE history_log RENAME TO history_log_old;
CREATE TABLE history_log (
itemid bigint NOT NULL,
clock integer DEFAULT '0' NOT NULL,
timestamp integer DEFAULT '0' NOT NULL,
source varchar(64) DEFAULT '' NOT NULL,
severity integer DEFAULT '0' NOT NULL,
value text DEFAULT '' NOT NULL,
logeventid integer DEFAULT '0' NOT NULL,
ns integer DEFAULT '0' NOT NULL,
PRIMARY KEY (itemid,clock,ns)
);
ALTER TABLE history_text RENAME TO history_text_old;
CREATE TABLE history_text (
itemid bigint NOT NULL,
clock integer DEFAULT '0' NOT NULL,
value text DEFAULT '' NOT NULL,
ns integer DEFAULT '0' NOT NULL,
PRIMARY KEY (itemid,clock,ns)
);

File diff suppressed because it is too large

View File

@@ -0,0 +1,91 @@
-- ============================================================================
-- SCRIPT: z_gen_history_data.sql
-- DESCRIPTION: Generates mock data for Zabbix history and trends tables.
-- Creates a dummy host and items if they don't exist.
-- ============================================================================
DO $$
DECLARE
v_hostid bigint := 900001;
v_groupid bigint := 900001;
v_interfaceid bigint := 900001;
v_itemid_start bigint := 900001;
v_start_time integer := extract(epoch from (now() - interval '7 days'))::integer;
v_end_time integer := extract(epoch from now())::integer;
BEGIN
-- 1. CREATE DUMMY STRUCTURES
-- Host Group
INSERT INTO hstgrp (groupid, name, uuid, type)
VALUES (v_groupid, 'Partition Test Group', 'df77189c49034553999973d8e0500001', 0)
ON CONFLICT DO NOTHING;
-- Host
INSERT INTO hosts (hostid, host, name, status, uuid)
VALUES (v_hostid, 'partition-test-host', 'Partition Test Host', 0, 'df77189c49034553999973d8e0500002')
ON CONFLICT DO NOTHING;
-- Interface
INSERT INTO interface (interfaceid, hostid, main, type, useip, ip, dns, port)
VALUES (v_interfaceid, v_hostid, 1, 1, 1, '127.0.0.1', '', '10050')
ON CONFLICT DO NOTHING;
-- 2. CREATE DUMMY ITEMS AND GENERATE HISTORY
-- Item 1: Numeric Float (HISTORY)
INSERT INTO items (itemid, hostid, interfaceid, name, key_, type, value_type, delay, uuid)
VALUES (v_itemid_start + 1, v_hostid, v_interfaceid, 'Test Float Item', 'test.float', 0, 0, '1m', 'df77189c49034553999973d8e0500003')
ON CONFLICT DO NOTHING;
INSERT INTO history (itemid, clock, value, ns)
SELECT
v_itemid_start + 1,
ts,
random() * 100,
0
FROM generate_series(v_start_time, v_end_time, 60) AS ts;
INSERT INTO trends (itemid, clock, num, value_min, value_avg, value_max)
SELECT
v_itemid_start + 1,
(ts / 3600) * 3600, -- Hourly truncation
60,
0,
50,
100
FROM generate_series(v_start_time, v_end_time, 3600) AS ts;
-- Item 2: Numeric Unsigned (HISTORY_UINT)
INSERT INTO items (itemid, hostid, interfaceid, name, key_, type, value_type, delay, uuid)
VALUES (v_itemid_start + 2, v_hostid, v_interfaceid, 'Test Uint Item', 'test.uint', 0, 3, '1m', 'df77189c49034553999973d8e0500004')
ON CONFLICT DO NOTHING;
INSERT INTO history_uint (itemid, clock, value, ns)
SELECT
v_itemid_start + 2,
ts,
(random() * 1000)::integer,
0
FROM generate_series(v_start_time, v_end_time, 60) AS ts;
INSERT INTO trends_uint (itemid, clock, num, value_min, value_avg, value_max)
SELECT
v_itemid_start + 2,
(ts / 3600) * 3600,
60,
0,
500,
1000
FROM generate_series(v_start_time, v_end_time, 3600) AS ts;
-- Item 3: Character (HISTORY_STR)
INSERT INTO items (itemid, hostid, interfaceid, name, key_, type, value_type, delay, uuid)
VALUES (v_itemid_start + 3, v_hostid, v_interfaceid, 'Test Str Item', 'test.str', 0, 1, '1m', 'df77189c49034553999973d8e0500005')
ON CONFLICT DO NOTHING;
INSERT INTO history_str (itemid, clock, value, ns)
SELECT
v_itemid_start + 3,
ts,
'test_value_' || ts,
0
FROM generate_series(v_start_time, v_end_time, 300) AS ts; -- Every 5 mins
END $$;