refactor: COMMITs added to release locks immediately. UTC usage. Testing env for test branch.

This commit is contained in:
Maksym Buz
2026-02-19 21:53:51 +00:00
parent 8565c99310
commit c4420bc1ad
29 changed files with 864923 additions and 13 deletions

10
.gitignore vendored
View File

@@ -1,10 +1,10 @@
# Docker environment
docker/
z_gen_history_data.sql
# docker/
# z_gen_history_data.sql
# Local docs
QUICKSTART.md
init_extra_users.sql
# QUICKSTART.md
# init_extra_users.sql
# Schemas
sql-scripts*/
# sql-scripts*/

52
QUICKSTART.md Normal file
View File

@@ -0,0 +1,52 @@
# Quickstart (PostgreSQL Partitioning Test)
## Start Environment
> **Note**: If `docker` commands fail with permission errors, run `newgrp docker` or ensure your user is in the `docker` group (`sudo usermod -aG docker $USER`) and log out/in.
```bash
cd postgresql/docker
sudo ./run_test_env.sh --pg 16 --zabbix 7.0
# Options: --pg <16|17|18> --zabbix <7.0|7.4>
```
## Verify
```bash
# Check status
docker ps
# SQL Shell
docker exec -it zabbix-db-test psql -U zabbix -d zabbix
# Password: zabbix
```
## Reset
```bash
docker compose down -v
```
## Partitioning
See [PARTITIONING.md](../PARTITIONING.md) for details on the implemented declarative partitioning.
## 🐳 Docker Deployment (Production)
The `run_test_env.sh` script automatically populates `init_scripts` for the test environment. To deploy this in your own Docker setup:
1. **Mount Scripts**: Map the SQL procedures to `/docker-entrypoint-initdb.d/` in your PostgreSQL container.
2. **Order Matters**: Scripts execute alphabetically. Ensure they run **after** the Zabbix schema import.
**Example `docker-compose.yml` snippet:**
```yaml
services:
postgres-server:
image: postgres:16
volumes:
# Mount Zabbix Schema first (e.g., as 01_schema.sql)
- ./zabbix_schema.sql:/docker-entrypoint-initdb.d/01_schema.sql
# Mount Partitioning Procedures (Prefix to run AFTER schema)
- ../postgresql/procedures/00_partitions_init.sql:/docker-entrypoint-initdb.d/02_00_part_init.sql
- ../postgresql/procedures/01_auditlog_prep.sql:/docker-entrypoint-initdb.d/02_01_audit_prep.sql
- ../postgresql/procedures/02_maintenance.sql:/docker-entrypoint-initdb.d/02_02_maintenance.sql
- ../postgresql/procedures/03_enable_partitioning.sql:/docker-entrypoint-initdb.d/02_03_enable.sql
- ../postgresql/procedures/04_monitoring_view.sql:/docker-entrypoint-initdb.d/02_04_monitor.sql
```
The container will automatically execute these scripts on first startup, partitioning the tables.

View File

@@ -0,0 +1,20 @@
# Disposable PostgreSQL test environment for Zabbix partitioning tests.
services:
postgres:
# PG_VERSION is exported by run_test_env.sh (expected: 16/17/18)
image: postgres:${PG_VERSION}
container_name: zabbix-db-test
environment:
# Test-only credentials; never reuse outside this sandbox
POSTGRES_PASSWORD: zabbix
POSTGRES_USER: zabbix
POSTGRES_DB: zabbix
PGDATA: /var/lib/postgresql/data/pgdata
ports:
- "5432:5432"
volumes:
# SQL files in init_scripts run alphabetically on FIRST startup only
- ./init_scripts:/docker-entrypoint-initdb.d
tmpfs:
# Keep the data directory in RAM: fast, and wiped when the container goes away
- /var/lib/postgresql/data
healthcheck:
test: ["CMD-SHELL", "pg_isready -U zabbix"]
interval: 5s
timeout: 5s
retries: 5

View File

@@ -0,0 +1,5 @@
-- Create additional user for partitioning tasks.
-- NOTE(review): plaintext password is acceptable only because this runs in a
-- throwaway test environment — confirm it is never used in production.
CREATE USER zbx_part WITH PASSWORD 'zbx_part';
GRANT CONNECT ON DATABASE zabbix TO zbx_part;
-- Grant usage on public schema (standard for PG 15+)
GRANT USAGE ON SCHEMA public TO zbx_part;

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,49 @@
-- ============================================================================
-- SCRIPT: 00_partitions_init.sql
-- DESCRIPTION: Creates the 'partitions' schema and configuration table.
-- Defines the structure for managing Zabbix partitioning.
-- ============================================================================
CREATE SCHEMA IF NOT EXISTS partitions;
-- Configuration table to store partitioning settings per table
CREATE TABLE IF NOT EXISTS partitions.config (
table_name text NOT NULL,
period text NOT NULL CHECK (period IN ('day', 'week', 'month', 'year')),
keep_history interval NOT NULL,
future_partitions integer NOT NULL DEFAULT 5,
-- timestamptz is stored normalized to UTC internally, so plain now() is the
-- correct UTC-safe default. "now() AT TIME ZONE 'UTC'" would yield a
-- timezone-less timestamp that gets re-interpreted in the SESSION time zone
-- on insert, silently shifting the value whenever the session is not UTC.
last_updated timestamp WITH TIME ZONE DEFAULT now(),
PRIMARY KEY (table_name)
);
-- Table to track installed version of the partitioning solution
CREATE TABLE IF NOT EXISTS partitions.version (
version text PRIMARY KEY,
-- Same reasoning as partitions.config.last_updated: now() is already UTC-safe.
installed_at timestamp with time zone DEFAULT now(),
description text
);
-- Set initial version
INSERT INTO partitions.version (version, description) VALUES ('1.0', 'Initial release')
ON CONFLICT (version) DO NOTHING;
-- Default configuration for Zabbix tables (adjust as needed)
-- History tables: Daily partitions, keep 30 days
INSERT INTO partitions.config (table_name, period, keep_history) VALUES
('history', 'day', '30 days'),
('history_uint', 'day', '30 days'),
('history_str', 'day', '30 days'),
('history_log', 'day', '30 days'),
('history_text', 'day', '30 days')
ON CONFLICT (table_name) DO NOTHING;
-- Trends tables: Monthly partitions, keep 12 months
INSERT INTO partitions.config (table_name, period, keep_history) VALUES
('trends', 'month', '12 months'),
('trends_uint', 'month', '12 months')
ON CONFLICT (table_name) DO NOTHING;
-- Auditlog: Monthly partitions, keep 12 months
INSERT INTO partitions.config (table_name, period, keep_history) VALUES
('auditlog', 'month', '12 months')
ON CONFLICT (table_name) DO NOTHING;

View File

@@ -0,0 +1,27 @@
-- ============================================================================
-- SCRIPT: 01_auditlog_prep.sql
-- DESCRIPTION: Modifies the 'auditlog' table Primary Key to include 'clock'.
-- This is REQUIRED for range partitioning by 'clock'.
-- ============================================================================
DO $$
BEGIN
-- Bail out gracefully when the table is absent: a bare 'auditlog'::regclass
-- cast would raise an error and abort the whole init script.
IF to_regclass('public.auditlog') IS NULL THEN
RAISE NOTICE 'Table auditlog not found. Skipping PK modification.';
RETURN;
END IF;
-- Rebuild the PK only when it exists AND does not already contain 'clock';
-- this makes the script idempotent across repeated runs.
IF EXISTS (
SELECT 1 FROM pg_constraint c
WHERE c.conname = 'auditlog_pkey'
AND c.conrelid = 'public.auditlog'::regclass
AND NOT EXISTS (
SELECT 1 FROM pg_attribute a
WHERE a.attrelid = c.conrelid
AND a.attname = 'clock'
AND a.attnum = ANY (c.conkey)
)
) THEN
RAISE NOTICE 'Dropping existing Primary Key on auditlog...';
ALTER TABLE auditlog DROP CONSTRAINT auditlog_pkey;
RAISE NOTICE 'Creating new Primary Key on auditlog (auditid, clock)...';
ALTER TABLE auditlog ADD PRIMARY KEY (auditid, clock);
ELSE
RAISE NOTICE 'Constraint auditlog_pkey not found or already includes clock. Skipping.';
END IF;
END $$;

View File

@@ -0,0 +1,189 @@
-- ============================================================================
-- SCRIPT: 02_maintenance.sql
-- DESCRIPTION: Core functions for Zabbix partitioning (Create, Drop, Maintain).
-- ============================================================================
-- Returns true when a relation with the given name exists in the 'public'
-- schema (used to make partition creation idempotent).
CREATE OR REPLACE FUNCTION partitions.partition_exists(p_partition_name text)
RETURNS boolean AS $$
DECLARE
v_found boolean;
BEGIN
-- Restricting the namespace avoids false positives from identically named
-- relations living in other schemas.
SELECT EXISTS (
SELECT 1
FROM pg_class c
WHERE c.relname = p_partition_name
AND c.relnamespace = 'public'::regnamespace
) INTO v_found;
RETURN v_found;
END;
$$ LANGUAGE plpgsql;
-- Creates one range partition of p_parent_table covering
-- [p_start_time, p_end_time) if it does not exist yet.
CREATE OR REPLACE PROCEDURE partitions.create_partition(
p_parent_table text,
p_start_time timestamp with time zone,
p_end_time timestamp with time zone,
p_period text
) LANGUAGE plpgsql AS $$
DECLARE
v_name_suffix text;
v_part_name text;
v_epoch_from bigint;
v_epoch_to bigint;
BEGIN
-- Zabbix stores 'clock' as epoch seconds, so the range bounds are epochs.
-- (Callers are responsible for passing UTC-aligned boundaries.)
v_epoch_from := extract(epoch from p_start_time)::bigint;
v_epoch_to := extract(epoch from p_end_time)::bigint;
-- Monthly partitions use a YYYYMM suffix; all other periods use YYYYMMDD.
v_name_suffix := CASE
WHEN p_period = 'month' THEN to_char(p_start_time, 'YYYYMM')
ELSE to_char(p_start_time, 'YYYYMMDD')
END;
v_part_name := p_parent_table || '_p' || v_name_suffix;
-- Idempotent: nothing to do when the partition is already present.
IF partitions.partition_exists(v_part_name) THEN
RETURN;
END IF;
EXECUTE format(
'CREATE TABLE public.%I PARTITION OF public.%I FOR VALUES FROM (%s) TO (%s)',
v_part_name, p_parent_table, v_epoch_from, v_epoch_to
);
END;
$$;
-- Drops partitions of p_parent_table whose covered time range ended more than
-- p_retention ago. Partition age is derived from the name suffix
-- (parent_pYYYYMM or parent_pYYYYMMDD); unparseable names are skipped.
CREATE OR REPLACE PROCEDURE partitions.drop_old_partitions(
p_parent_table text,
p_retention interval,
p_period text
) LANGUAGE plpgsql AS $$
DECLARE
v_cutoff_ts bigint;
v_partition record;
v_partition_end timestamp with time zone;
v_suffix text;
BEGIN
-- Calculate cutoff timestamp (epoch seconds, matching the 'clock' column)
v_cutoff_ts := extract(epoch from (now() - p_retention))::bigint;
FOR v_partition IN
SELECT
child.relname AS partition_name
FROM pg_inherits
JOIN pg_class parent ON pg_inherits.inhparent = parent.oid
JOIN pg_class child ON pg_inherits.inhrelid = child.oid
WHERE parent.relname = p_parent_table
LOOP
-- Suffix starts after '<parent>_p'
v_suffix := substring(v_partition.partition_name from length(p_parent_table) + 3);
v_partition_end := NULL;
-- Parse inside a protected block. IMPORTANT: the DROP/COMMIT must live
-- OUTSIDE this block — PL/pgSQL forbids transaction control inside a
-- block with an EXCEPTION clause ("cannot commit while a subtransaction
-- is active"), which is why the previous revision failed at runtime.
BEGIN
IF length(v_suffix) = 6 THEN -- YYYYMM
-- Interpret the suffix as a UTC month boundary; the partition is
-- droppable only once its END (start + 1 month) is past retention.
v_partition_end := (to_date(v_suffix || '01', 'YYYYMMDD')::timestamp AT TIME ZONE 'UTC') + interval '1 month';
ELSIF length(v_suffix) = 8 THEN -- YYYYMMDD
v_partition_end := (to_date(v_suffix, 'YYYYMMDD')::timestamp AT TIME ZONE 'UTC') + interval '1 day';
END IF;
EXCEPTION WHEN OTHERS THEN
-- Non-standard partition name: leave v_partition_end NULL and skip it.
v_partition_end := NULL;
END;
IF v_partition_end IS NOT NULL
AND extract(epoch from v_partition_end) < v_cutoff_ts THEN
RAISE NOTICE 'Dropping old partition %', v_partition.partition_name;
EXECUTE format('DROP TABLE public.%I', v_partition.partition_name);
COMMIT; -- Release locks immediately; legal here (no exception handler in scope)
END IF;
END LOOP;
END;
$$;
-- MAIN Procedure to maintain a single table: creates future and past
-- partitions around "now" and drops partitions older than p_keep_history.
CREATE OR REPLACE PROCEDURE partitions.maintain_table(
p_table_name text,
p_period text,
p_keep_history interval,
p_future_partitions integer DEFAULT 5
) LANGUAGE plpgsql AS $$
DECLARE
v_start_time timestamp with time zone;
v_period_interval interval;
i integer;
v_past_iterations integer;
BEGIN
-- date_trunc() over "now() AT TIME ZONE 'UTC'" produces a timezone-less UTC
-- timestamp; the trailing AT TIME ZONE 'UTC' converts it back to timestamptz
-- as a true UTC boundary. Without it the plain assignment would re-interpret
-- the naive value in the SESSION time zone and shift the boundary.
IF p_period = 'day' THEN
v_period_interval := '1 day'::interval;
v_start_time := date_trunc('day', now() AT TIME ZONE 'UTC') AT TIME ZONE 'UTC';
-- Number of past daily partitions needed to cover the retention period
v_past_iterations := extract(day from p_keep_history)::integer;
-- Defensive default when the interval has no day component
IF v_past_iterations IS NULL THEN v_past_iterations := 7; END IF;
ELSIF p_period = 'week' THEN
v_period_interval := '1 week'::interval;
v_start_time := date_trunc('week', now() AT TIME ZONE 'UTC') AT TIME ZONE 'UTC';
v_past_iterations := (extract(day from p_keep_history) / 7)::integer;
ELSIF p_period = 'month' THEN
v_period_interval := '1 month'::interval;
v_start_time := date_trunc('month', now() AT TIME ZONE 'UTC') AT TIME ZONE 'UTC';
-- Approximate months from the interval's year/month fields...
v_past_iterations := (extract(year from p_keep_history) * 12 + extract(month from p_keep_history))::integer;
-- ...falling back to a day-based estimate for intervals like '365 days'
IF v_past_iterations = 0 THEN
v_past_iterations := (extract(day from p_keep_history) / 30)::integer;
END IF;
ELSE
RETURN; -- unsupported period: nothing to maintain
END IF;
-- 1. Create Future Partitions (Current + Buffer)
-- (The previous revision had a stray duplicate END LOOP here — a hard
-- syntax error that prevented the whole script from loading.)
FOR i IN 0..p_future_partitions LOOP
CALL partitions.create_partition(
p_table_name,
v_start_time + (i * v_period_interval),
v_start_time + ((i + 1) * v_period_interval),
p_period
);
COMMIT; -- Release DDL locks immediately
END LOOP;
-- 2. Create Past Partitions (Covering retention period)
IF v_past_iterations > 0 THEN
FOR i IN 1..v_past_iterations LOOP
CALL partitions.create_partition(
p_table_name,
v_start_time - (i * v_period_interval),
v_start_time - ((i - 1) * v_period_interval),
p_period
);
COMMIT; -- Release DDL locks immediately
END LOOP;
END IF;
-- 3. Drop Old Partitions
CALL partitions.drop_old_partitions(p_table_name, p_keep_history, p_period);
-- 4. Update Metadata
UPDATE partitions.config SET last_updated = now() WHERE table_name = p_table_name;
END;
$$;
-- Global maintenance entry point: walks the configuration table and runs
-- partition maintenance for every registered parent table.
CREATE OR REPLACE PROCEDURE partitions.run_maintenance()
LANGUAGE plpgsql AS $$
DECLARE
cfg partitions.config%ROWTYPE;
BEGIN
FOR cfg IN SELECT * FROM partitions.config LOOP
CALL partitions.maintain_table(cfg.table_name, cfg.period, cfg.keep_history, cfg.future_partitions);
END LOOP;
END;
$$;

View File

@@ -0,0 +1,43 @@
-- ============================================================================
-- SCRIPT: 03_enable_partitioning.sql
-- DESCRIPTION: Converts standard Zabbix tables to Partitioned tables.
-- WARNING: This renames existing tables to *_old.
-- ============================================================================
DO $$
DECLARE
v_row record;
v_table text;
v_old_table text;
-- NOTE(review): v_pk_sql is declared but never used — candidate for removal.
v_pk_sql text;
BEGIN
FOR v_row IN SELECT * FROM partitions.config LOOP
v_table := v_row.table_name;
v_old_table := v_table || '_old';
-- Check if table exists and is NOT already partitioned
-- (relkind 'r' = ordinary table, 'p' = partitioned table).
-- NOTE(review): pg_class is queried without a namespace filter, so an
-- identically named table in another schema could match — confirm all
-- target tables live in 'public'.
IF EXISTS (SELECT 1 FROM pg_class WHERE relname = v_table AND relkind = 'r') THEN
RAISE NOTICE 'Converting table % to partitioned table...', v_table;
-- 1. Rename existing table. Its data stays in *_old and is no longer
-- visible to Zabbix unless migrated manually (see commented INSERT below).
EXECUTE format('ALTER TABLE public.%I RENAME TO %I', v_table, v_old_table);
-- 2. Create new partitioned table (copying structure)
EXECUTE format('CREATE TABLE public.%I (LIKE public.%I INCLUDING ALL) PARTITION BY RANGE (clock)', v_table, v_old_table);
-- 3. Create initial partitions
RAISE NOTICE 'Creating initial partitions for %...', v_table;
CALL partitions.maintain_table(v_table, v_row.period, v_row.keep_history, v_row.future_partitions);
-- Optional: Migrate existing data
-- EXECUTE format('INSERT INTO public.%I SELECT * FROM public.%I', v_table, v_old_table);
ELSIF EXISTS (SELECT 1 FROM pg_class WHERE relname = v_table AND relkind = 'p') THEN
RAISE NOTICE 'Table % is already partitioned. Skipping conversion.', v_table;
-- Just run maintenance to ensure partitions exist
-- NOTE(review): run_maintenance() re-processes EVERY configured table and
-- is invoked once per already-partitioned table inside this loop; a single
-- maintain_table(v_table, ...) call would likely suffice — confirm intent.
CALL partitions.run_maintenance();
ELSE
RAISE WARNING 'Table % not found!', v_table;
END IF;
END LOOP;
END $$;

View File

@@ -0,0 +1,28 @@
-- ============================================================================
-- SCRIPT: 04_monitoring_view.sql
-- DESCRIPTION: Creates a view to monitor partition status and sizes.
-- ============================================================================
-- One row per configured parent table, aggregating its child partitions.
CREATE OR REPLACE VIEW partitions.monitoring AS
SELECT
parent.relname AS parent_table,
c.table_name,
c.period,
c.keep_history,
count(child.relname) AS partition_count,
-- "Future" partitions are detected lexically: the zero-padded YYYYMM(DD)
-- suffixes sort chronologically, so any name greater than today's generated
-- name lies in the future. to_char(now(), ...) uses the session time zone —
-- NOTE(review): presumably sessions run in UTC to match partition naming;
-- confirm, otherwise counts may be off by one near midnight.
count(child.relname) FILTER (
WHERE
(c.period = 'day' AND child.relname > (parent.relname || '_p' || to_char(now(), 'YYYYMMDD')))
OR
(c.period = 'month' AND child.relname > (parent.relname || '_p' || to_char(now(), 'YYYYMM')))
) AS future_partitions,
pg_size_pretty(sum(pg_total_relation_size(child.oid))) AS total_size,
-- min/max also rely on the lexically sortable naming scheme
min(child.relname) AS oldest_partition,
max(child.relname) AS newest_partition,
c.last_updated
FROM partitions.config c
JOIN pg_class parent ON parent.relname = c.table_name
-- LEFT JOINs keep configured tables visible even before any partition exists
LEFT JOIN pg_inherits ON pg_inherits.inhparent = parent.oid
LEFT JOIN pg_class child ON pg_inherits.inhrelid = child.oid
WHERE parent.relkind = 'p' -- Only partitioned tables
GROUP BY parent.relname, c.table_name, c.period, c.keep_history, c.last_updated;

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,91 @@
-- ============================================================================
-- SCRIPT: z_gen_history_data.sql
-- DESCRIPTION: Generates mock data for Zabbix history and trends tables.
-- Creates a dummy host and items if they don't exist.
-- ============================================================================
DO $$
DECLARE
-- Fixed high IDs (900001+) chosen to stay clear of Zabbix-assigned IDs
v_hostid bigint := 900001;
v_groupid bigint := 900001;
v_interfaceid bigint := 900001;
v_itemid_start bigint := 900001;
-- Generate 7 days of samples ending now; 'clock' is epoch seconds in Zabbix
v_start_time integer := extract(epoch from (now() - interval '7 days'))::integer;
v_end_time integer := extract(epoch from now())::integer;
-- NOTE(review): i is declared but never used — candidate for removal.
i integer;
BEGIN
-- 1. CREATE DUMMY STRUCTURES
-- Host Group
INSERT INTO hstgrp (groupid, name, uuid, type)
VALUES (v_groupid, 'Partition Test Group', 'df77189c49034553999973d8e0500001', 0)
ON CONFLICT DO NOTHING;
-- Host
INSERT INTO hosts (hostid, host, name, status, uuid)
VALUES (v_hostid, 'partition-test-host', 'Partition Test Host', 0, 'df77189c49034553999973d8e0500002')
ON CONFLICT DO NOTHING;
-- Interface
INSERT INTO interface (interfaceid, hostid, main, type, useip, ip, dns, port)
VALUES (v_interfaceid, v_hostid, 1, 1, 1, '127.0.0.1', '', '10050')
ON CONFLICT DO NOTHING;
-- 2. CREATE DUMMY ITEMS AND GENERATE HISTORY
-- NOTE(review): the item INSERTs below lack ON CONFLICT, so re-running this
-- script on an initialized database fails on duplicate itemid — confirm the
-- script is intended to run exactly once at container init.
-- Item 1: Numeric Float (HISTORY)
INSERT INTO items (itemid, hostid, interfaceid, name, key_, type, value_type, delay, uuid)
VALUES (v_itemid_start + 1, v_hostid, v_interfaceid, 'Test Float Item', 'test.float', 0, 0, '1m', 'df77189c49034553999973d8e0500003');
-- One random float sample per minute for the whole window
INSERT INTO history (itemid, clock, value, ns)
SELECT
v_itemid_start + 1,
ts,
random() * 100,
0
FROM generate_series(v_start_time, v_end_time, 60) AS ts;
-- Hourly trend rows with fixed min/avg/max placeholders
INSERT INTO trends (itemid, clock, num, value_min, value_avg, value_max)
SELECT
v_itemid_start + 1,
(ts / 3600) * 3600, -- Hourly truncation
60,
0,
50,
100
FROM generate_series(v_start_time, v_end_time, 3600) AS ts;
-- Item 2: Numeric Unsigned (HISTORY_UINT)
INSERT INTO items (itemid, hostid, interfaceid, name, key_, type, value_type, delay, uuid)
VALUES (v_itemid_start + 2, v_hostid, v_interfaceid, 'Test Uint Item', 'test.uint', 0, 3, '1m', 'df77189c49034553999973d8e0500004');
-- One random unsigned sample per minute
INSERT INTO history_uint (itemid, clock, value, ns)
SELECT
v_itemid_start + 2,
ts,
(random() * 1000)::integer,
0
FROM generate_series(v_start_time, v_end_time, 60) AS ts;
INSERT INTO trends_uint (itemid, clock, num, value_min, value_avg, value_max)
SELECT
v_itemid_start + 2,
(ts / 3600) * 3600,
60,
0,
500,
1000
FROM generate_series(v_start_time, v_end_time, 3600) AS ts;
-- Item 3: Character (HISTORY_STR)
INSERT INTO items (itemid, hostid, interfaceid, name, key_, type, value_type, delay, uuid)
VALUES (v_itemid_start + 3, v_hostid, v_interfaceid, 'Test Str Item', 'test.str', 0, 1, '1m', 'df77189c49034553999973d8e0500005');
INSERT INTO history_str (itemid, clock, value, ns)
SELECT
v_itemid_start + 3,
ts,
'test_value_' || ts,
0
FROM generate_series(v_start_time, v_end_time, 300) AS ts; -- Every 5 mins
END $$;

134
postgresql/docker/run_test_env.sh Executable file
View File

@@ -0,0 +1,134 @@
#!/bin/bash
# ============================================================================
# run_test_env.sh
# Spins up a disposable PostgreSQL container pre-loaded with the Zabbix
# schema, the partitioning procedures and mock history data.
#
# Usage: ./run_test_env.sh --pg <16|17|18> --zabbix <7.0|7.4>
# ============================================================================

# Default values
PG_VERSION=""
ZABBIX_VERSION=""

# Color codes
GREEN='\033[0;32m'
RED='\033[0;31m'
NC='\033[0m' # No Color

usage() {
    echo "Usage: $0 --pg <16|17|18> --zabbix <7.0|7.4>"
    echo "Example: $0 --pg 16 --zabbix 7.0"
    exit 1
}

# Parse arguments
while [[ "$#" -gt 0 ]]; do
    case $1 in
        --pg) PG_VERSION="$2"; shift ;;
        --zabbix) ZABBIX_VERSION="$2"; shift ;;
        *) echo "Unknown parameter: $1"; usage ;;
    esac
    shift
done

if [[ -z "$PG_VERSION" || -z "$ZABBIX_VERSION" ]]; then
    echo -e "${RED}Error: both --pg and --zabbix arguments are required.${NC}"
    usage
fi

# Validate the PostgreSQL version against what usage() advertises
# (previously any value was accepted and only failed later inside compose).
case "$PG_VERSION" in
    16|17|18) ;;
    *) echo -e "${RED}Error: Unsupported PostgreSQL version '${PG_VERSION}'. Use 16, 17 or 18.${NC}"; exit 1 ;;
esac

# Map Zabbix version to sql-scripts folder
if [[ "$ZABBIX_VERSION" == "7.0" ]]; then
    SQL_DIR="../sql-scripts-70"
elif [[ "$ZABBIX_VERSION" == "7.4" ]]; then
    SQL_DIR="../sql-scripts-74"
else
    echo -e "${RED}Error: Unsupported Zabbix version. Use 7.0 or 7.4.${NC}"
    exit 1
fi

echo -e "${GREEN}Preparing environment for PostgreSQL $PG_VERSION and Zabbix $ZABBIX_VERSION...${NC}"

# Cleanup previous run (containers, volumes, generated init scripts)
echo "Cleaning up containers and volumes..."
docker compose down -v > /dev/null 2>&1
rm -rf init_scripts
mkdir -p init_scripts

# Copy SQL scripts into init_scripts; numeric prefixes control execution order
# because docker-entrypoint-initdb.d runs files alphabetically.
echo "Setting up initialization scripts from $SQL_DIR..."

# 0. Extra Users
if [[ -f "../init_extra_users.sql" ]]; then
    cp "../init_extra_users.sql" ./init_scripts/00_init_extra_users.sql
    echo "Copied extra user init script."
fi

# 1. Schema (01_00) followed by the partitioning infrastructure (01_10..01_50),
# which must run AFTER the schema import.
if [[ -f "$SQL_DIR/schema.sql" ]]; then
    cp "$SQL_DIR/schema.sql" ./init_scripts/01_00_schema.sql
    if [[ -f "../procedures/00_partitions_init.sql" ]]; then
        cp "../procedures/00_partitions_init.sql" ./init_scripts/01_10_partitions_init.sql
    fi
    if [[ -f "../procedures/01_auditlog_prep.sql" ]]; then
        cp "../procedures/01_auditlog_prep.sql" ./init_scripts/01_20_auditlog_prep.sql
    fi
    if [[ -f "../procedures/02_maintenance.sql" ]]; then
        cp "../procedures/02_maintenance.sql" ./init_scripts/01_30_maintenance.sql
    fi
    if [[ -f "../procedures/03_enable_partitioning.sql" ]]; then
        cp "../procedures/03_enable_partitioning.sql" ./init_scripts/01_40_enable.sql
    fi
    if [[ -f "../procedures/04_monitoring_view.sql" ]]; then
        cp "../procedures/04_monitoring_view.sql" ./init_scripts/01_50_monitoring.sql
    fi
else
    echo -e "${RED}Error: schema.sql not found in $SQL_DIR${NC}"
    exit 1
fi

# 2. Images
if [[ -f "$SQL_DIR/images.sql" ]]; then
    cp "$SQL_DIR/images.sql" ./init_scripts/02_images.sql
else
    echo -e "${RED}Error: images.sql not found in $SQL_DIR${NC}"
    exit 1
fi

# 3. Data
if [[ -f "$SQL_DIR/data.sql" ]]; then
    cp "$SQL_DIR/data.sql" ./init_scripts/03_data.sql
else
    echo -e "${RED}Error: data.sql not found in $SQL_DIR${NC}"
    exit 1
fi

# 4. Mock History Data (optional: the environment still works without it)
if [[ -f "../z_gen_history_data.sql" ]]; then
    cp "../z_gen_history_data.sql" ./init_scripts/04_gen_data.sql
    echo "Copied mock data generator."
else
    echo -e "${RED}Warning: z_gen_history_data.sql not found!${NC}"
fi

# Export variable for Docker Compose (referenced as ${PG_VERSION} in the compose file)
export PG_VERSION

# Run Docker Compose
echo -e "${GREEN}Starting PostgreSQL container...${NC}"
docker compose up -d

echo -e "${GREEN}Waiting for database to be ready...${NC}"
# Poll for up to 30 seconds. Previously the loop fell through silently and the
# script reported "Environment ready" even when the database never came up.
READY=0
for i in {1..30}; do
    if docker exec zabbix-db-test pg_isready -U zabbix > /dev/null 2>&1; then
        READY=1
        echo -e "${GREEN}Database is ready!${NC}"
        break
    fi
    echo -n "."
    sleep 1
done
if [[ "$READY" -ne 1 ]]; then
    echo ""
    echo -e "${RED}Database did not become ready within 30s. Inspect: docker logs zabbix-db-test${NC}"
    exit 1
fi

# Initialization scripts may still be running after the port opens.
echo "To follow initialization logs, run: docker logs -f zabbix-db-test"
echo -e "${GREEN}Environment ready.${NC}"
echo "Connect: psql -h localhost -p 5432 -U zabbix -d zabbix"

View File

@@ -0,0 +1,5 @@
-- Create additional user for partitioning tasks.
-- NOTE(review): plaintext password is acceptable only because this runs in a
-- throwaway test environment — confirm it is never used in production.
CREATE USER zbx_part WITH PASSWORD 'zbx_part';
GRANT CONNECT ON DATABASE zabbix TO zbx_part;
-- Grant usage on public schema (standard for PG 15+)
GRANT USAGE ON SCHEMA public TO zbx_part;

View File

@@ -12,14 +12,14 @@ CREATE TABLE IF NOT EXISTS partitions.config (
period text NOT NULL CHECK (period IN ('day', 'week', 'month', 'year')),
keep_history interval NOT NULL,
future_partitions integer NOT NULL DEFAULT 5,
last_updated timestamp WITH TIME ZONE DEFAULT now(),
last_updated timestamp WITH TIME ZONE DEFAULT (now() AT TIME ZONE 'UTC'),
PRIMARY KEY (table_name)
);
-- Table to track installed version of the partitioning solution
CREATE TABLE IF NOT EXISTS partitions.version (
version text PRIMARY KEY,
installed_at timestamp with time zone DEFAULT now(),
installed_at timestamp with time zone DEFAULT (now() AT TIME ZONE 'UTC'),
description text
);

View File

@@ -29,6 +29,7 @@ DECLARE
v_end_ts bigint;
v_suffix text;
BEGIN
-- (No changes needed for time here as passed params are already UTC-adjusted in caller)
v_start_ts := extract(epoch from p_start_time)::bigint;
v_end_ts := extract(epoch from p_end_time)::bigint;
@@ -78,19 +79,21 @@ BEGIN
BEGIN
IF length(v_suffix) = 6 THEN -- YYYYMM
v_partition_date := to_timestamp(v_suffix || '01', 'YYYYMMDD');
v_partition_date := to_timestamp(v_suffix || '01', 'YYYYMMDD') AT TIME ZONE 'UTC';
-- For monthly, we check if the END of the month is older than retention?
-- Or just strict retention.
-- To be safe, adding 1 month to check vs cutoff.
IF extract(epoch from (v_partition_date + '1 month'::interval)) < v_cutoff_ts THEN
RAISE NOTICE 'Dropping old partition %', v_partition.partition_name;
EXECUTE format('DROP TABLE public.%I', v_partition.partition_name);
COMMIT; -- Release lock immediately
END IF;
ELSIF length(v_suffix) = 8 THEN -- YYYYMMDD
v_partition_date := to_timestamp(v_suffix, 'YYYYMMDD');
v_partition_date := to_timestamp(v_suffix, 'YYYYMMDD') AT TIME ZONE 'UTC';
IF extract(epoch from (v_partition_date + '1 day'::interval)) < v_cutoff_ts THEN
RAISE NOTICE 'Dropping old partition %', v_partition.partition_name;
EXECUTE format('DROP TABLE public.%I', v_partition.partition_name);
COMMIT; -- Release lock immediately
END IF;
END IF;
EXCEPTION WHEN OTHERS THEN
@@ -116,7 +119,7 @@ DECLARE
BEGIN
IF p_period = 'day' THEN
v_period_interval := '1 day'::interval;
v_start_time := date_trunc('day', now());
v_start_time := date_trunc('day', now() AT TIME ZONE 'UTC');
-- Calculate how many past days cover the retention period
v_past_iterations := extract(day from p_keep_history)::integer;
-- Safety cap or ensure minimum? default 7 if null?
@@ -124,12 +127,12 @@ BEGIN
ELSIF p_period = 'week' THEN
v_period_interval := '1 week'::interval;
v_start_time := date_trunc('week', now());
v_start_time := date_trunc('week', now() AT TIME ZONE 'UTC');
v_past_iterations := (extract(day from p_keep_history) / 7)::integer;
ELSIF p_period = 'month' THEN
v_period_interval := '1 month'::interval;
v_start_time := date_trunc('month', now());
v_start_time := date_trunc('month', now() AT TIME ZONE 'UTC');
-- Approximate months
v_past_iterations := (extract(year from p_keep_history) * 12 + extract(month from p_keep_history))::integer;
-- Fallback if interval is just days (e.g. '365 days')
@@ -148,6 +151,8 @@ BEGIN
v_start_time + ((i + 1) * v_period_interval),
p_period
);
COMMIT; -- Release lock immediately
END LOOP;
END LOOP;
-- 2. Create Past Partitions (Covering retention period)
@@ -159,6 +164,7 @@ BEGIN
v_start_time - ((i - 1) * v_period_interval),
p_period
);
COMMIT; -- Release lock immediately
END LOOP;
END IF;

View File

@@ -2,7 +2,13 @@
This is the declarative (PostgreSQL procedures based) partitioning implementation for Zabbix `history`, `trends`, and `auditlog` tables on PostgreSQL. This solution is intended to replace standard Zabbix housekeeping for the configured tables. Partitioning is very useful for large environments because it completely eliminates the housekeeper from the process. Instead of huge DELETE queries on several million rows, fast DDL queries (ALTER TABLE) are executed, which drop an entire partition.
> [!CRITICAL]
> **High-Load Environments**:
> 1. **Data Visibility**: After enabling partitioning, old data remains in `*_old` tables and is **NOT visible** in Zabbix. You must migrate data manually if needed.
> 2. **Disable Housekeeping**: You **MUST** disable Zabbix Housekeeper for History and Trends in *Administration -> General -> Housekeeping*. Failure to do so will cause massive `DELETE` loads.

## Architecture
The solution uses PostgreSQL native declarative partitioning (`PARTITION BY RANGE`).
All procedures, information, statistics and configuration are stored in the `partitions` schema to maintain full separation from Zabbix schema.

File diff suppressed because it is too large Load Diff

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,49 @@
-- ============================================================================
-- history_upgrade_prepare.sql
-- Renames each Zabbix history table to *_old and recreates it empty with the
-- same column layout. NOTE(review): presumably the recreated tables are later
-- converted to partitioned tables and old data migrated — confirm against the
-- upgrade workflow; data in *_old is not visible to Zabbix until migrated.
-- ============================================================================
ALTER TABLE history RENAME TO history_old;
CREATE TABLE history (
itemid bigint NOT NULL,
clock integer DEFAULT '0' NOT NULL,
value DOUBLE PRECISION DEFAULT '0.0000' NOT NULL,
ns integer DEFAULT '0' NOT NULL,
PRIMARY KEY (itemid,clock,ns)
);
ALTER TABLE history_uint RENAME TO history_uint_old;
CREATE TABLE history_uint (
itemid bigint NOT NULL,
clock integer DEFAULT '0' NOT NULL,
value numeric(20) DEFAULT '0' NOT NULL,
ns integer DEFAULT '0' NOT NULL,
PRIMARY KEY (itemid,clock,ns)
);
ALTER TABLE history_str RENAME TO history_str_old;
CREATE TABLE history_str (
itemid bigint NOT NULL,
clock integer DEFAULT '0' NOT NULL,
value varchar(255) DEFAULT '' NOT NULL,
ns integer DEFAULT '0' NOT NULL,
PRIMARY KEY (itemid,clock,ns)
);
ALTER TABLE history_log RENAME TO history_log_old;
CREATE TABLE history_log (
itemid bigint NOT NULL,
clock integer DEFAULT '0' NOT NULL,
timestamp integer DEFAULT '0' NOT NULL,
source varchar(64) DEFAULT '' NOT NULL,
severity integer DEFAULT '0' NOT NULL,
value text DEFAULT '' NOT NULL,
logeventid integer DEFAULT '0' NOT NULL,
ns integer DEFAULT '0' NOT NULL,
PRIMARY KEY (itemid,clock,ns)
);
ALTER TABLE history_text RENAME TO history_text_old;
CREATE TABLE history_text (
itemid bigint NOT NULL,
clock integer DEFAULT '0' NOT NULL,
value text DEFAULT '' NOT NULL,
ns integer DEFAULT '0' NOT NULL,
PRIMARY KEY (itemid,clock,ns)
);

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,16 @@
# Automake fragment: builds the PostgreSQL history-upgrade patch file.
PATCHES = \
history_upgrade_prepare.sql
# Only (re)generate the patch when the dbschema conditional is enabled
if DBSCHEMA
all: $(PATCHES)
# The patch is emitted by the generator script for the 'postgresql' target
history_upgrade_prepare.sql: $(top_srcdir)/create/bin/gen_history_upgrade.pl
$(top_srcdir)/create/bin/gen_history_upgrade.pl postgresql > $@
clean:
rm -f history_upgrade_prepare.sql
endif
# Ship the generated patches in the distribution tarball
EXTRA_DIST = $(PATCHES)
.PHONY: all clean

View File

@@ -0,0 +1,567 @@
# Makefile.in generated by automake 1.17 from Makefile.am.
# @configure_input@
# Copyright (C) 1994-2024 Free Software Foundation, Inc.
# This Makefile.in is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE.
@SET_MAKE@
VPATH = @srcdir@
am__is_gnu_make = { \
if test -z '$(MAKELEVEL)'; then \
false; \
elif test -n '$(MAKE_HOST)'; then \
true; \
elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
true; \
else \
false; \
fi; \
}
am__make_running_with_option = \
case $${target_option-} in \
?) ;; \
*) echo "am__make_running_with_option: internal error: invalid" \
"target option '$${target_option-}' specified" >&2; \
exit 1;; \
esac; \
has_opt=no; \
sane_makeflags=$$MAKEFLAGS; \
if $(am__is_gnu_make); then \
sane_makeflags=$$MFLAGS; \
else \
case $$MAKEFLAGS in \
*\\[\ \ ]*) \
bs=\\; \
sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
| sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
esac; \
fi; \
skip_next=no; \
strip_trailopt () \
{ \
flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
}; \
for flg in $$sane_makeflags; do \
test $$skip_next = yes && { skip_next=no; continue; }; \
case $$flg in \
*=*|--*) continue;; \
-*I) strip_trailopt 'I'; skip_next=yes;; \
-*I?*) strip_trailopt 'I';; \
-*O) strip_trailopt 'O'; skip_next=yes;; \
-*O?*) strip_trailopt 'O';; \
-*l) strip_trailopt 'l'; skip_next=yes;; \
-*l?*) strip_trailopt 'l';; \
-[dEDm]) skip_next=yes;; \
-[JT]) skip_next=yes;; \
esac; \
case $$flg in \
*$$target_option*) has_opt=yes; break;; \
esac; \
done; \
test $$has_opt = yes
am__make_dryrun = (target_option=n; $(am__make_running_with_option))
am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
am__rm_f = rm -f $(am__rm_f_notfound)
am__rm_rf = rm -rf $(am__rm_f_notfound)
pkgdatadir = $(datadir)/@PACKAGE@
pkgincludedir = $(includedir)/@PACKAGE@
pkglibdir = $(libdir)/@PACKAGE@
pkglibexecdir = $(libexecdir)/@PACKAGE@
am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
install_sh_DATA = $(install_sh) -c -m 644
install_sh_PROGRAM = $(install_sh) -c
install_sh_SCRIPT = $(install_sh) -c
INSTALL_HEADER = $(INSTALL_DATA)
transform = $(program_transform_name)
NORMAL_INSTALL = :
PRE_INSTALL = :
POST_INSTALL = :
NORMAL_UNINSTALL = :
PRE_UNINSTALL = :
POST_UNINSTALL = :
build_triplet = @build@
host_triplet = @host@
subdir = database/postgresql/option-patches
ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
am__aclocal_m4_deps = $(top_srcdir)/m4/ares.m4 \
$(top_srcdir)/m4/ax_lib_mysql.m4 \
$(top_srcdir)/m4/ax_lib_postgresql.m4 \
$(top_srcdir)/m4/ax_lib_sqlite3.m4 \
$(top_srcdir)/m4/check_enum.m4 $(top_srcdir)/m4/iconv.m4 \
$(top_srcdir)/m4/ldap.m4 $(top_srcdir)/m4/libcurl.m4 \
$(top_srcdir)/m4/libevent.m4 $(top_srcdir)/m4/libgnutls.m4 \
$(top_srcdir)/m4/libmodbus.m4 $(top_srcdir)/m4/libopenssl.m4 \
$(top_srcdir)/m4/libssh.m4 $(top_srcdir)/m4/libssh2.m4 \
$(top_srcdir)/m4/libunixodbc.m4 $(top_srcdir)/m4/libxml2.m4 \
$(top_srcdir)/m4/netsnmp.m4 $(top_srcdir)/m4/openipmi.m4 \
$(top_srcdir)/m4/pcre2.m4 $(top_srcdir)/m4/pthread.m4 \
$(top_srcdir)/m4/resolv.m4 $(top_srcdir)/m4/times.m4 \
$(top_srcdir)/m4/zlib.m4 $(top_srcdir)/configure.ac
am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
$(ACLOCAL_M4)
DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON)
mkinstalldirs = $(install_sh) -d
CONFIG_HEADER = $(top_builddir)/include/common/config.h
CONFIG_CLEAN_FILES =
CONFIG_CLEAN_VPATH_FILES =
AM_V_P = $(am__v_P_@AM_V@)
am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
am__v_P_0 = false
am__v_P_1 = :
AM_V_GEN = $(am__v_GEN_@AM_V@)
am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
am__v_GEN_0 = @echo " GEN " $@;
am__v_GEN_1 =
AM_V_at = $(am__v_at_@AM_V@)
am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
am__v_at_0 = @
am__v_at_1 =
SOURCES =
DIST_SOURCES =
am__can_run_installinfo = \
case $$AM_UPDATE_INFO_DIR in \
n|no|NO) false;; \
*) (install-info --version) >/dev/null 2>&1;; \
esac
am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
am__DIST_COMMON = $(srcdir)/Makefile.in
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
AGENT2_CONFIG_DIR = @AGENT2_CONFIG_DIR@
AGENT2_CONFIG_FILE = @AGENT2_CONFIG_FILE@
AGENT2_LDFLAGS = @AGENT2_LDFLAGS@
AGENT2_LIBS = @AGENT2_LIBS@
AGENT2_PLUGIN_CONFIG_DIR = @AGENT2_PLUGIN_CONFIG_DIR@
AGENT_CONFIG_FILE = @AGENT_CONFIG_FILE@
AGENT_LDFLAGS = @AGENT_LDFLAGS@
AGENT_LIBS = @AGENT_LIBS@
ALERT_SCRIPTS_PATH = @ALERT_SCRIPTS_PATH@
AMTAR = @AMTAR@
AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
ARCH = @ARCH@
ARES_CFLAGS = @ARES_CFLAGS@
ARES_LDFLAGS = @ARES_LDFLAGS@
ARES_LIBS = @ARES_LIBS@
AUTOCONF = @AUTOCONF@
AUTOHEADER = @AUTOHEADER@
AUTOMAKE = @AUTOMAKE@
AWK = @AWK@
CC = @CC@
CCDEPMODE = @CCDEPMODE@
CFLAGS = @CFLAGS@
CGO_CFLAGS = @CGO_CFLAGS@
CGO_LDFLAGS = @CGO_LDFLAGS@
CPPFLAGS = @CPPFLAGS@
CSCOPE = @CSCOPE@
CTAGS = @CTAGS@
CURL_SSL_CERT_LOCATION = @CURL_SSL_CERT_LOCATION@
CURL_SSL_KEY_LOCATION = @CURL_SSL_KEY_LOCATION@
CXX = @CXX@
CXXDEPMODE = @CXXDEPMODE@
CXXFLAGS = @CXXFLAGS@
CYGPATH_W = @CYGPATH_W@
DB_CFLAGS = @DB_CFLAGS@
DB_LDFLAGS = @DB_LDFLAGS@
DB_LIBS = @DB_LIBS@
DEFAULT_INCLUDES = @DEFAULT_INCLUDES@
DEFS = @DEFS@
DEPDIR = @DEPDIR@
ECHO_C = @ECHO_C@
ECHO_N = @ECHO_N@
ECHO_T = @ECHO_T@
EGREP = @EGREP@
ETAGS = @ETAGS@
EXEEXT = @EXEEXT@
EXTERNAL_SCRIPTS_PATH = @EXTERNAL_SCRIPTS_PATH@
GNUTLS_CFLAGS = @GNUTLS_CFLAGS@
GNUTLS_LDFLAGS = @GNUTLS_LDFLAGS@
GNUTLS_LIBS = @GNUTLS_LIBS@
GO = @GO@
GREP = @GREP@
ICONV_CFLAGS = @ICONV_CFLAGS@
ICONV_LDFLAGS = @ICONV_LDFLAGS@
ICONV_LIBS = @ICONV_LIBS@
INSTALL = @INSTALL@
INSTALL_DATA = @INSTALL_DATA@
INSTALL_PROGRAM = @INSTALL_PROGRAM@
INSTALL_SCRIPT = @INSTALL_SCRIPT@
INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
JAR = @JAR@
JAVAC = @JAVAC@
LDAP_CPPFLAGS = @LDAP_CPPFLAGS@
LDAP_LDFLAGS = @LDAP_LDFLAGS@
LDAP_LIBS = @LDAP_LIBS@
LDFLAGS = @LDFLAGS@
LIBCURL_CFLAGS = @LIBCURL_CFLAGS@
LIBCURL_LDFLAGS = @LIBCURL_LDFLAGS@
LIBCURL_LIBS = @LIBCURL_LIBS@
LIBEVENT_CFLAGS = @LIBEVENT_CFLAGS@
LIBEVENT_LDFLAGS = @LIBEVENT_LDFLAGS@
LIBEVENT_LIBS = @LIBEVENT_LIBS@
LIBMODBUS_CFLAGS = @LIBMODBUS_CFLAGS@
LIBMODBUS_LDFLAGS = @LIBMODBUS_LDFLAGS@
LIBMODBUS_LIBS = @LIBMODBUS_LIBS@
LIBOBJS = @LIBOBJS@
LIBPCRE2_CFLAGS = @LIBPCRE2_CFLAGS@
LIBPCRE2_LDFLAGS = @LIBPCRE2_LDFLAGS@
LIBPCRE2_LIBS = @LIBPCRE2_LIBS@
LIBPTHREAD_CFLAGS = @LIBPTHREAD_CFLAGS@
LIBPTHREAD_LDFLAGS = @LIBPTHREAD_LDFLAGS@
LIBPTHREAD_LIBS = @LIBPTHREAD_LIBS@
LIBS = @LIBS@
LIBXML2_CFLAGS = @LIBXML2_CFLAGS@
LIBXML2_LDFLAGS = @LIBXML2_LDFLAGS@
LIBXML2_LIBS = @LIBXML2_LIBS@
LIBXML2_VERSION = @LIBXML2_VERSION@
LOAD_MODULE_PATH = @LOAD_MODULE_PATH@
LTLIBOBJS = @LTLIBOBJS@
MAKEINFO = @MAKEINFO@
MKDIR_P = @MKDIR_P@
MYSQL_CFLAGS = @MYSQL_CFLAGS@
MYSQL_CONFIG = @MYSQL_CONFIG@
MYSQL_LDFLAGS = @MYSQL_LDFLAGS@
MYSQL_LIBS = @MYSQL_LIBS@
MYSQL_VERSION = @MYSQL_VERSION@
OBJEXT = @OBJEXT@
ODBC_CONFIG = @ODBC_CONFIG@
OPENIPMI_CFLAGS = @OPENIPMI_CFLAGS@
OPENIPMI_LDFLAGS = @OPENIPMI_LDFLAGS@
OPENIPMI_LIBS = @OPENIPMI_LIBS@
OPENSSL_CFLAGS = @OPENSSL_CFLAGS@
OPENSSL_LDFLAGS = @OPENSSL_LDFLAGS@
OPENSSL_LIBS = @OPENSSL_LIBS@
PACKAGE = @PACKAGE@
PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
PACKAGE_NAME = @PACKAGE_NAME@
PACKAGE_STRING = @PACKAGE_STRING@
PACKAGE_TARNAME = @PACKAGE_TARNAME@
PACKAGE_URL = @PACKAGE_URL@
PACKAGE_VERSION = @PACKAGE_VERSION@
PATH_SEPARATOR = @PATH_SEPARATOR@
PG_CONFIG = @PG_CONFIG@
PKG_CONFIG = @PKG_CONFIG@
PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
POSTGRESQL_CFLAGS = @POSTGRESQL_CFLAGS@
POSTGRESQL_LDFLAGS = @POSTGRESQL_LDFLAGS@
POSTGRESQL_LIBS = @POSTGRESQL_LIBS@
POSTGRESQL_VERSION = @POSTGRESQL_VERSION@
POW_LIB = @POW_LIB@
PROXY_CONFIG_FILE = @PROXY_CONFIG_FILE@
PROXY_LDFLAGS = @PROXY_LDFLAGS@
PROXY_LIBS = @PROXY_LIBS@
RANLIB = @RANLIB@
RESOLV_LIBS = @RESOLV_LIBS@
SENDER_LDFLAGS = @SENDER_LDFLAGS@
SENDER_LIBS = @SENDER_LIBS@
SERVER_CONFIG_FILE = @SERVER_CONFIG_FILE@
SERVER_LDFLAGS = @SERVER_LDFLAGS@
SERVER_LIBS = @SERVER_LIBS@
SET_MAKE = @SET_MAKE@
SHELL = @SHELL@
SNMP_CFLAGS = @SNMP_CFLAGS@
SNMP_LDFLAGS = @SNMP_LDFLAGS@
SNMP_LIBS = @SNMP_LIBS@
SQLITE3_CPPFLAGS = @SQLITE3_CPPFLAGS@
SQLITE3_LDFLAGS = @SQLITE3_LDFLAGS@
SQLITE3_LIBS = @SQLITE3_LIBS@
SQLITE3_VERSION = @SQLITE3_VERSION@
SSH2_CFLAGS = @SSH2_CFLAGS@
SSH2_LDFLAGS = @SSH2_LDFLAGS@
SSH2_LIBS = @SSH2_LIBS@
SSH_CFLAGS = @SSH_CFLAGS@
SSH_LDFLAGS = @SSH_LDFLAGS@
SSH_LIBS = @SSH_LIBS@
STRIP = @STRIP@
TLS_CFLAGS = @TLS_CFLAGS@
TLS_LDFLAGS = @TLS_LDFLAGS@
TLS_LIBS = @TLS_LIBS@
UNIXODBC_CFLAGS = @UNIXODBC_CFLAGS@
UNIXODBC_LDFLAGS = @UNIXODBC_LDFLAGS@
UNIXODBC_LIBS = @UNIXODBC_LIBS@
VERSION = @VERSION@
WEBSERVICE_CONFIG_FILE = @WEBSERVICE_CONFIG_FILE@
ZBXGET_LDFLAGS = @ZBXGET_LDFLAGS@
ZBXGET_LIBS = @ZBXGET_LIBS@
ZBXJS_LDFLAGS = @ZBXJS_LDFLAGS@
ZBXJS_LIBS = @ZBXJS_LIBS@
ZLIB_CFLAGS = @ZLIB_CFLAGS@
ZLIB_LDFLAGS = @ZLIB_LDFLAGS@
ZLIB_LIBS = @ZLIB_LIBS@
_libcurl_config = @_libcurl_config@
_libnetsnmp_config = @_libnetsnmp_config@
_libnetsnmp_config_bin = @_libnetsnmp_config_bin@
_libnetsnmp_config_root = @_libnetsnmp_config_root@
abs_builddir = @abs_builddir@
abs_srcdir = @abs_srcdir@
abs_top_builddir = @abs_top_builddir@
abs_top_srcdir = @abs_top_srcdir@
ac_ct_CC = @ac_ct_CC@
ac_ct_CXX = @ac_ct_CXX@
am__include = @am__include@
am__leading_dot = @am__leading_dot@
am__quote = @am__quote@
am__rm_f_notfound = @am__rm_f_notfound@
am__tar = @am__tar@
am__untar = @am__untar@
am__xargs_n = @am__xargs_n@
bindir = @bindir@
build = @build@
build_alias = @build_alias@
build_cpu = @build_cpu@
build_os = @build_os@
build_vendor = @build_vendor@
builddir = @builddir@
datadir = @datadir@
datarootdir = @datarootdir@
docdir = @docdir@
dvidir = @dvidir@
exec_prefix = @exec_prefix@
host = @host@
host_alias = @host_alias@
host_cpu = @host_cpu@
host_os = @host_os@
host_vendor = @host_vendor@
htmldir = @htmldir@
includedir = @includedir@
infodir = @infodir@
install_sh = @install_sh@
libdir = @libdir@
libexecdir = @libexecdir@
localedir = @localedir@
localstatedir = @localstatedir@
mandir = @mandir@
mkdir_p = @mkdir_p@
oldincludedir = @oldincludedir@
pdfdir = @pdfdir@
prefix = @prefix@
program_transform_name = @program_transform_name@
psdir = @psdir@
runstatedir = @runstatedir@
sbindir = @sbindir@
sharedstatedir = @sharedstatedir@
srcdir = @srcdir@
sysconfdir = @sysconfdir@
target_alias = @target_alias@
top_build_prefix = @top_build_prefix@
top_builddir = @top_builddir@
top_srcdir = @top_srcdir@
PATCHES = \
history_upgrade_prepare.sql
EXTRA_DIST = $(PATCHES)
all: all-am
.SUFFIXES:
$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps)
@for dep in $?; do \
case '$(am__configure_deps)' in \
*$$dep*) \
( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
&& { if test -f $@; then exit 0; else break; fi; }; \
exit 1;; \
esac; \
done; \
echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu database/postgresql/option-patches/Makefile'; \
$(am__cd) $(top_srcdir) && \
$(AUTOMAKE) --gnu database/postgresql/option-patches/Makefile
Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
@case '$?' in \
*config.status*) \
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
*) \
echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \
cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \
esac;
$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
$(top_srcdir)/configure: $(am__configure_deps)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
$(ACLOCAL_M4): $(am__aclocal_m4_deps)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
$(am__aclocal_m4_deps):
tags TAGS:
ctags CTAGS:
cscope cscopelist:
distdir: $(BUILT_SOURCES)
$(MAKE) $(AM_MAKEFLAGS) distdir-am
distdir-am: $(DISTFILES)
@srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
list='$(DISTFILES)'; \
dist_files=`for file in $$list; do echo $$file; done | \
sed -e "s|^$$srcdirstrip/||;t" \
-e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
case $$dist_files in \
*/*) $(MKDIR_P) `echo "$$dist_files" | \
sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
sort -u` ;; \
esac; \
for file in $$dist_files; do \
if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
if test -d $$d/$$file; then \
dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
if test -d "$(distdir)/$$file"; then \
find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
fi; \
if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
fi; \
cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
else \
test -f "$(distdir)/$$file" \
|| cp -p $$d/$$file "$(distdir)/$$file" \
|| exit 1; \
fi; \
done
check-am: all-am
check: check-am
all-am: Makefile
installdirs:
install: install-am
install-exec: install-exec-am
install-data: install-data-am
uninstall: uninstall-am
install-am: all-am
@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
installcheck: installcheck-am
install-strip:
if test -z '$(STRIP)'; then \
$(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
install; \
else \
$(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
"INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
fi
mostlyclean-generic:
clean-generic:
distclean-generic:
-$(am__rm_f) $(CONFIG_CLEAN_FILES)
-test . = "$(srcdir)" || $(am__rm_f) $(CONFIG_CLEAN_VPATH_FILES)
maintainer-clean-generic:
@echo "This command is intended for maintainers to use"
@echo "it deletes files that may require special tools to rebuild."
@DBSCHEMA_FALSE@clean: clean-am
clean-am: clean-generic mostlyclean-am
distclean: distclean-am
-rm -f Makefile
distclean-am: clean-am distclean-generic
dvi: dvi-am
dvi-am:
html: html-am
html-am:
info: info-am
info-am:
install-data-am:
install-dvi: install-dvi-am
install-dvi-am:
install-exec-am:
install-html: install-html-am
install-html-am:
install-info: install-info-am
install-info-am:
install-man:
install-pdf: install-pdf-am
install-pdf-am:
install-ps: install-ps-am
install-ps-am:
installcheck-am:
maintainer-clean: maintainer-clean-am
-rm -f Makefile
maintainer-clean-am: distclean-am maintainer-clean-generic
mostlyclean: mostlyclean-am
mostlyclean-am: mostlyclean-generic
pdf: pdf-am
pdf-am:
ps: ps-am
ps-am:
uninstall-am:
.MAKE: install-am install-strip
.PHONY: all all-am check check-am clean clean-generic cscopelist-am \
ctags-am distclean distclean-generic distdir dvi dvi-am html \
html-am info info-am install install-am install-data \
install-data-am install-dvi install-dvi-am install-exec \
install-exec-am install-html install-html-am install-info \
install-info-am install-man install-pdf install-pdf-am \
install-ps install-ps-am install-strip installcheck \
installcheck-am installdirs maintainer-clean \
maintainer-clean-generic mostlyclean mostlyclean-generic pdf \
pdf-am ps ps-am tags-am uninstall uninstall-am
.PRECIOUS: Makefile
# Rules carried over from Makefile.am.  automake prefixes each line with the
# @DBSCHEMA_TRUE@ substitution: config.status expands it to "" when the
# DBSCHEMA conditional is enabled, or to "#" (commenting the rule out) when
# it is not.  NOTE(review): this file is generated by automake -- do not
# hand-edit; change Makefile.am and rerun automake instead.
@DBSCHEMA_TRUE@all: $(PATCHES)
@DBSCHEMA_TRUE@history_upgrade_prepare.sql: $(top_srcdir)/create/bin/gen_history_upgrade.pl
@DBSCHEMA_TRUE@	$(top_srcdir)/create/bin/gen_history_upgrade.pl postgresql > $@
@DBSCHEMA_TRUE@clean:
@DBSCHEMA_TRUE@	rm -f history_upgrade_prepare.sql
.PHONY: all clean
# Tell versions [3.59,3.63) of GNU make to not export all variables.
# Otherwise a system limit (for SysV at least) may be exceeded.
.NOEXPORT:
# Tell GNU make to disable its built-in pattern rules.
%:: %,v
%:: RCS/%,v
%:: RCS/%
%:: s.%
%:: SCCS/s.%

View File

@@ -0,0 +1,49 @@
-- history_upgrade_prepare.sql (generated): prepare the history tables for
-- declarative partitioning.  Each table is moved aside under a *_old name and
-- an empty table with the original name and identical layout is created in
-- its place; the *_old copy keeps existing rows available for backfill and
-- verification after the switch.
-- NOTE(review): the renames are not idempotent -- running this twice fails on
-- the first ALTER.  Intended as a one-shot upgrade step.

-- Numeric (float) history
ALTER TABLE history RENAME TO history_old;
CREATE TABLE history (
itemid bigint NOT NULL,
clock integer DEFAULT '0' NOT NULL,
value DOUBLE PRECISION DEFAULT '0.0000' NOT NULL,
ns integer DEFAULT '0' NOT NULL,
PRIMARY KEY (itemid,clock,ns)
);
-- Numeric (unsigned) history; numeric(20) holds the full uint64 range
ALTER TABLE history_uint RENAME TO history_uint_old;
CREATE TABLE history_uint (
itemid bigint NOT NULL,
clock integer DEFAULT '0' NOT NULL,
value numeric(20) DEFAULT '0' NOT NULL,
ns integer DEFAULT '0' NOT NULL,
PRIMARY KEY (itemid,clock,ns)
);
-- Character (varchar) history
ALTER TABLE history_str RENAME TO history_str_old;
CREATE TABLE history_str (
itemid bigint NOT NULL,
clock integer DEFAULT '0' NOT NULL,
value varchar(255) DEFAULT '' NOT NULL,
ns integer DEFAULT '0' NOT NULL,
PRIMARY KEY (itemid,clock,ns)
);
-- Log history (carries log-specific metadata: timestamp, source, severity,
-- logeventid)
ALTER TABLE history_log RENAME TO history_log_old;
CREATE TABLE history_log (
itemid bigint NOT NULL,
clock integer DEFAULT '0' NOT NULL,
timestamp integer DEFAULT '0' NOT NULL,
source varchar(64) DEFAULT '' NOT NULL,
severity integer DEFAULT '0' NOT NULL,
value text DEFAULT '' NOT NULL,
logeventid integer DEFAULT '0' NOT NULL,
ns integer DEFAULT '0' NOT NULL,
PRIMARY KEY (itemid,clock,ns)
);
-- Text history (unbounded value)
ALTER TABLE history_text RENAME TO history_text_old;
CREATE TABLE history_text (
itemid bigint NOT NULL,
clock integer DEFAULT '0' NOT NULL,
value text DEFAULT '' NOT NULL,
ns integer DEFAULT '0' NOT NULL,
PRIMARY KEY (itemid,clock,ns)
);

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,91 @@
-- ============================================================================
-- SCRIPT: z_gen_history_data.sql
-- DESCRIPTION: Generates mock data for Zabbix history and trends tables.
-- Creates a dummy host and items if they don't exist.
-- ============================================================================
DO $$
-- Generate mock Zabbix data for partitioning tests: one dummy host group,
-- host, interface and three items, plus 7 days of history/trends samples.
-- Every INSERT uses ON CONFLICT DO NOTHING so the script is idempotent and
-- can be re-run safely (a second run is a no-op for rows that already exist).
DECLARE
    v_hostid       bigint := 900001;
    v_groupid      bigint := 900001;
    v_interfaceid  bigint := 900001;
    v_itemid_start bigint := 900001;
    -- Sample window: the last 7 days, expressed as Unix epoch seconds --
    -- the representation Zabbix stores in the "clock" columns.
    v_start_time   integer := extract(epoch from (now() - interval '7 days'))::integer;
    v_end_time     integer := extract(epoch from now())::integer;
BEGIN
    -- 1. CREATE DUMMY STRUCTURES

    -- Host Group
    INSERT INTO hstgrp (groupid, name, uuid, type)
    VALUES (v_groupid, 'Partition Test Group', 'df77189c49034553999973d8e0500001', 0)
    ON CONFLICT DO NOTHING;

    -- Host
    INSERT INTO hosts (hostid, host, name, status, uuid)
    VALUES (v_hostid, 'partition-test-host', 'Partition Test Host', 0, 'df77189c49034553999973d8e0500002')
    ON CONFLICT DO NOTHING;

    -- Agent interface on localhost
    INSERT INTO interface (interfaceid, hostid, main, type, useip, ip, dns, port)
    VALUES (v_interfaceid, v_hostid, 1, 1, 1, '127.0.0.1', '', '10050')
    ON CONFLICT DO NOTHING;

    -- 2. CREATE DUMMY ITEMS AND GENERATE HISTORY

    -- Item 1: Numeric Float (HISTORY) -- one random sample per minute.
    INSERT INTO items (itemid, hostid, interfaceid, name, key_, type, value_type, delay, uuid)
    VALUES (v_itemid_start + 1, v_hostid, v_interfaceid, 'Test Float Item', 'test.float', 0, 0, '1m', 'df77189c49034553999973d8e0500003')
    ON CONFLICT DO NOTHING;

    INSERT INTO history (itemid, clock, value, ns)
    SELECT
        v_itemid_start + 1,
        ts,
        random() * 100,
        0
    FROM generate_series(v_start_time, v_end_time, 60) AS ts
    ON CONFLICT DO NOTHING;

    -- Fixed min/avg/max per hourly bucket; (ts / 3600) * 3600 truncates the
    -- epoch to the hour, matching Zabbix trend bucketing.
    INSERT INTO trends (itemid, clock, num, value_min, value_avg, value_max)
    SELECT
        v_itemid_start + 1,
        (ts / 3600) * 3600,
        60,
        0,
        50,
        100
    FROM generate_series(v_start_time, v_end_time, 3600) AS ts
    ON CONFLICT DO NOTHING;

    -- Item 2: Numeric Unsigned (HISTORY_UINT) -- one random sample per minute.
    INSERT INTO items (itemid, hostid, interfaceid, name, key_, type, value_type, delay, uuid)
    VALUES (v_itemid_start + 2, v_hostid, v_interfaceid, 'Test Uint Item', 'test.uint', 0, 3, '1m', 'df77189c49034553999973d8e0500004')
    ON CONFLICT DO NOTHING;

    INSERT INTO history_uint (itemid, clock, value, ns)
    SELECT
        v_itemid_start + 2,
        ts,
        (random() * 1000)::integer,
        0
    FROM generate_series(v_start_time, v_end_time, 60) AS ts
    ON CONFLICT DO NOTHING;

    INSERT INTO trends_uint (itemid, clock, num, value_min, value_avg, value_max)
    SELECT
        v_itemid_start + 2,
        (ts / 3600) * 3600,
        60,
        0,
        500,
        1000
    FROM generate_series(v_start_time, v_end_time, 3600) AS ts
    ON CONFLICT DO NOTHING;

    -- Item 3: Character (HISTORY_STR) -- one sample every 5 minutes.
    INSERT INTO items (itemid, hostid, interfaceid, name, key_, type, value_type, delay, uuid)
    VALUES (v_itemid_start + 3, v_hostid, v_interfaceid, 'Test Str Item', 'test.str', 0, 1, '1m', 'df77189c49034553999973d8e0500005')
    ON CONFLICT DO NOTHING;

    INSERT INTO history_str (itemid, clock, value, ns)
    SELECT
        v_itemid_start + 3,
        ts,
        'test_value_' || ts,
        0
    FROM generate_series(v_start_time, v_end_time, 300) AS ts
    ON CONFLICT DO NOTHING;
END $$;