From 03e353431df7a41404ccee76d9b7c6fc0105bf9e Mon Sep 17 00:00:00 2001
From: chad
Date: Wed, 7 May 2025 18:30:46 -0500
Subject: [PATCH 1/3] feat: use partman and s3 for migrations

---
 docker-compose.yaml                           |  50 +++-
 ofelia.ini                                    |   4 +
 prometheus.yml                                |  13 +-
 scripts/db-maintenance/.env.example           |   6 +
 scripts/db-maintenance/archive-partitions.sql | 252 ++++++++++++++++++
 scripts/db-maintenance/init-partitioning.sql  |  93 +++++++
 scripts/db-maintenance/restore-from-s3.sql    |  97 +++++++
 scripts/db-maintenance/run-maintenance.sh     |  16 ++
 8 files changed, 525 insertions(+), 6 deletions(-)
 create mode 100644 ofelia.ini
 create mode 100644 scripts/db-maintenance/.env.example
 create mode 100644 scripts/db-maintenance/archive-partitions.sql
 create mode 100644 scripts/db-maintenance/init-partitioning.sql
 create mode 100644 scripts/db-maintenance/restore-from-s3.sql
 create mode 100755 scripts/db-maintenance/run-maintenance.sh

diff --git a/docker-compose.yaml b/docker-compose.yaml
index c989f6de..81e66ca1 100644
--- a/docker-compose.yaml
+++ b/docker-compose.yaml
@@ -90,6 +90,7 @@ services:
       - POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
     volumes:
       - postgres-data:/var/lib/postgresql/data
+      - ./scripts/db-maintenance:/docker-entrypoint-initdb.d
     ports:
       - "${POSTGRES_PORT:-5432}:5432"
     env_file: .env
@@ -100,6 +101,12 @@ services:
       interval: 10s
       timeout: 5s
      retries: 5
+    # install pg_partman before handing control back to the stock entrypoint
+    command: >
+      bash -c "
+        apt-get update &&
+        apt-get install -y postgresql-13-partman &&
+        docker-entrypoint.sh postgres
+      "
 
   prometheus:
     image: prom/prometheus:v3.1.0
@@ -204,6 +211,41 @@ services:
       tempo:
         condition: service_healthy
         required: true
+  postgres-maintenance:
+    image: postgres:13
+    platform: ${PLATFORM:-}
+    depends_on:
+      postgres-db:
+        condition: service_healthy
+    volumes:
+      - ./db-maintenance:/scripts
+      - postgres-data:/var/lib/postgresql/data:ro
+      - ./s3-archives:/s3-archives
+    environment:
+      - POSTGRES_DB=${POSTGRES_DB}
+      - POSTGRES_USER=${POSTGRES_USER}
+      - POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
+      - PGHOST=postgres-db
+      - PGPORT=5432
+    env_file: .env
+    networks:
+      - atoma-network
+    # run once at start-up, then stay alive so ofelia's job-exec can re-run it monthly
+    command: /bin/bash -c "sleep 30 && /scripts/run-maintenance.sh && tail -f /dev/null"
+    profiles:
+      - maintenance
+
+  postgres-cron:
+    image: mcuadros/ofelia:latest
+    volumes:
+      - /var/run/docker.sock:/var/run/docker.sock:ro
+      - ./ofelia.ini:/etc/ofelia/config.ini:ro
+    # ofelia's --docker mode reads jobs from container labels; the jobs here
+    # live in ofelia.ini, so run the daemon against that config file instead
+    command: daemon --config=/etc/ofelia/config.ini
+    networks:
+      - atoma-network
+    restart: always
+    depends_on:
+      - postgres-db
 
   atoma-node:
     <<: *atoma-node
@@ -303,7 +345,7 @@ services:
       - CUDA_VISIBLE_DEVICES=1
     ipc: host
     command: ${VLLM_ENGINE_ARGS}
- 
+
   vllm2:
     <<: *inference-service-cuda
     container_name: chat-completions2
@@ -368,7 +410,7 @@ services:
     <<: *inference-service-cuda
     container_name: chat-completions5
     profiles: [chat_completions_vllm]
-    image: vllm/vllm-openai:v0.8.1 
+    image: vllm/vllm-openai:v0.8.1
     environment:
       # Backend for attention computation
       # Available options:
@@ -397,7 +439,7 @@ services:
       #   - "XFORMERS": use XFormers
      #   - "ROCM_FLASH": use ROCmFlashAttention
       #   - "FLASHINFER": use flashinfer (recommended for fp8 quantized models)
-      - VLLM_ATTENTION_BACKEND=FLASH_ATTN 
+      - VLLM_ATTENTION_BACKEND=FLASH_ATTN
       - VLLM_FLASH_ATTN_VERSION=3
       - VLLM_USE_V1=1
       - CUDA_VISIBLE_DEVICES=6
@@ -443,7 +485,7 @@ services:
       - CUDA_VISIBLE_DEVICES=8
     ipc: host
     command: ${VLLM_ENGINE_ARGS}
- 
+
   vllm-cpu:
     <<: *inference-service-cpu
     container_name: chat-completions

diff --git a/ofelia.ini b/ofelia.ini
new file mode 100644
index 00000000..0b3d7c7d
--- /dev/null
+++ b/ofelia.ini
@@ -0,0 +1,4 @@
+# ofelia requires a job type; job-exec runs a command inside an already
+# running container, so postgres-maintenance is kept alive in docker-compose
+[job-exec "archive-postgres-monthly"]
+schedule = @monthly
+container = atoma-network_postgres-maintenance_1
+command = /scripts/run-maintenance.sh
diff --git a/prometheus.yml b/prometheus.yml
index 396c3d8e..fbfd22c2 100644
--- a/prometheus.yml
+++ b/prometheus.yml
@@ -26,13 +26,13 @@ scrape_configs:
   - job_name: "vllm3"
     metrics_path: "/metrics"
     static_configs:
-      - targets: ["vllm3:8000"] 
+      - targets: ["vllm3:8000"]
 
   - job_name: "vllm4"
     metrics_path: "/metrics"
     static_configs:
       - targets: ["vllm4:8000"]
- 
+
   - job_name: "vllm5"
     metrics_path: "/metrics"
     static_configs:
@@ -61,3 +61,12 @@ scrape_configs:
       - action: labelmap
         regex: ^(atoma|libp2p)_(.+)$
         replacement: $1_$2
+  # NOTE: Postgres does not serve /metrics on 5432 itself; this job needs an
+  # exporter (e.g. postgres_exporter) in front of the database to return data
+  - job_name: "postgres_archival"
+    metrics_path: "/metrics"
+    static_configs:
+      - targets: ["postgres-db:5432"]
+    relabel_configs:
+      - source_labels: [__address__]
+        target_label: instance
+        regex: "(.*):.*"
+        replacement: "$1"

diff --git a/scripts/db-maintenance/.env.example b/scripts/db-maintenance/.env.example
new file mode 100644
index 00000000..cf6a9b5c
--- /dev/null
+++ b/scripts/db-maintenance/.env.example
@@ -0,0 +1,6 @@
+# AWS S3 Configuration for Database Archiving
+AWS_ACCESS_KEY_ID=your_access_key
+AWS_SECRET_KEY=your_secret_key
+AWS_REGION=us-east-1
+S3_BUCKET=your-bucket-name
+S3_PREFIX=atoma-db-archives
\ No newline at end of file

diff --git a/scripts/db-maintenance/archive-partitions.sql b/scripts/db-maintenance/archive-partitions.sql
new file mode 100644
index 00000000..26529247
--- /dev/null
+++ b/scripts/db-maintenance/archive-partitions.sql
@@ -0,0 +1,252 @@
+-- Create a logging table for archive operations
+CREATE TABLE IF NOT EXISTS archive_log (
+    id SERIAL PRIMARY KEY,
+    operation VARCHAR(50),
+    table_name TEXT,
+    s3_path TEXT,
+    rows_affected BIGINT,
+    status VARCHAR(50),
+    error_message TEXT,
+    executed_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
+);
+
+-- Configure AWS credentials
+-- Note: In production, use IAM roles instead of embedding credentials
+DO $$
+BEGIN
+    -- Custom settings must be namespaced; populate the archive.* settings for
+    -- the session before running this script, e.g.
+    --   psql -c "SET archive.aws_access_key_id = '...'" -f archive-partitions.sql
+    EXECUTE 'ALTER DATABASE ' || current_database()
+        || ' SET aws_s3.aws_access_key_id TO '
+        || quote_literal(coalesce(current_setting('archive.aws_access_key_id', true), ''));
+
+    EXECUTE 'ALTER DATABASE ' || current_database()
+        || ' SET aws_s3.aws_secret_access_key TO '
+        || quote_literal(coalesce(current_setting('archive.aws_secret_key', true), ''));
+
+    EXECUTE 'ALTER DATABASE ' || current_database()
+        || ' SET aws_s3.aws_region TO '
+        || quote_literal(coalesce(current_setting('archive.aws_region', true), 'us-east-1'));
+END $$;
+
+-- Function to archive a partition to S3
+CREATE OR REPLACE FUNCTION archive_partition_to_s3(
+    partition_table_name TEXT,
+    s3_bucket TEXT,
+    s3_prefix TEXT
+) RETURNS BIGINT AS $$
+DECLARE
+    s3_path TEXT;
+    rows_affected BIGINT;
+    archive_ts TEXT; -- current_time is a reserved word, so use a distinct name
+BEGIN
+    -- Generate a timestamp for the archive
+    archive_ts := to_char(now(), 'YYYY_MM_DD_HH24_MI_SS');
+
+    -- Set the complete S3 path
+    s3_path := s3_prefix || '/' || partition_table_name || '/' || archive_ts || '.csv';
+
+    -- Count rows in the partition
+    EXECUTE 'SELECT COUNT(*) FROM ' || quote_ident(partition_table_name) INTO rows_affected;
+
+    -- Log the operation start
+    INSERT INTO archive_log (operation, table_name, s3_path, rows_affected, status)
+    VALUES ('ARCHIVE_START', partition_table_name, s3_path, rows_affected, 'IN_PROGRESS');
+
+    -- Export the data to S3 (the aws_s3 extension exposes query_export_to_s3;
+    -- there is no table_export_to_s3)
+    PERFORM aws_s3.query_export_to_s3(
+        'SELECT * FROM ' || quote_ident(partition_table_name),
+        aws_commons.create_s3_uri(
+            s3_bucket,
+            s3_path,
+            current_setting('aws_s3.aws_region', true)
+        ),
+        options := 'FORMAT CSV, HEADER true'
+    );
+
+    -- Log successful completion
+    INSERT INTO archive_log (operation, table_name, s3_path, rows_affected, status)
+    VALUES ('ARCHIVE_COMPLETE', partition_table_name, s3_path, rows_affected, 'SUCCESS');
+
+    -- Create a local backup; COPY TO requires a literal path, so build the
+    -- statement dynamically
+    EXECUTE format(
+        'COPY (SELECT * FROM %I) TO %L WITH (FORMAT CSV, HEADER true)',
+        partition_table_name,
+        '/s3-archives/' || partition_table_name || '_' || archive_ts || '.csv'
+    );
+
+    RETURN rows_affected;
+
+EXCEPTION
+    WHEN OTHERS THEN
+        -- Log error
+        INSERT INTO archive_log (operation, table_name, s3_path, rows_affected, status, error_message)
+        VALUES ('ARCHIVE_ERROR', partition_table_name, s3_path, rows_affected, 'ERROR', SQLERRM);
+        RAISE;
+END;
+$$ LANGUAGE plpgsql;
+
+-- Function to find and archive old data
+CREATE OR REPLACE FUNCTION archive_old_data(
+    months_to_keep INT,
+    s3_bucket TEXT,
+    s3_prefix TEXT
+) RETURNS TABLE (table_name TEXT, rows_archived BIGINT) AS $$
+DECLARE
+    archive_date BIGINT;
+    rec RECORD;
+BEGIN
+    -- Calculate the cutoff epoch (assuming epoch is in milliseconds)
+    archive_date := extract(
+        epoch FROM (CURRENT_DATE - (months_to_keep || ' months')::interval)
+    ) * 1000;
+
+    -- Archive old tasks (assumes one partition table per task_small_id)
+    FOR rec IN
+        SELECT 'tasks' AS table_name, task_small_id
+        FROM tasks
+        WHERE deprecated_at_epoch < archive_date
+          AND is_deprecated = true
+    LOOP
+        table_name := rec.table_name;
+        rows_archived := archive_partition_to_s3('tasks_' || rec.task_small_id, s3_bucket, s3_prefix);
+        RETURN NEXT;
+    END LOOP;
+
+    -- Archive old stacks
+    FOR rec IN
+        SELECT 'stacks' AS table_name, s.stack_small_id
+        FROM stacks s
+        JOIN tasks t ON s.task_small_id = t.task_small_id
+        WHERE t.deprecated_at_epoch < archive_date
+          AND t.is_deprecated = true
+    LOOP
+        table_name := rec.table_name;
+        rows_archived := archive_partition_to_s3('stacks_' || rec.stack_small_id, s3_bucket, s3_prefix);
+        RETURN NEXT;
+    END LOOP;
+
+    -- Archive related settlement tickets
+    FOR rec IN
+        SELECT 'stack_settlement_tickets' AS table_name, sst.stack_small_id
+        FROM stack_settlement_tickets sst
+        JOIN stacks s ON sst.stack_small_id = s.stack_small_id
+        JOIN tasks t ON s.task_small_id = t.task_small_id
+        WHERE t.deprecated_at_epoch < archive_date
+          AND t.is_deprecated = true
+    LOOP
+        table_name := rec.table_name;
+        rows_archived := archive_partition_to_s3('stack_settlement_tickets_' || rec.stack_small_id, s3_bucket, s3_prefix);
+        RETURN NEXT;
+    END LOOP;
+
+    -- Archive related disputes
+    FOR rec IN
+        SELECT 'stack_attestation_disputes' AS table_name, sad.stack_small_id
+        FROM stack_attestation_disputes sad
+        JOIN stacks s ON sad.stack_small_id = s.stack_small_id
+        JOIN tasks t ON s.task_small_id = t.task_small_id
+        WHERE t.deprecated_at_epoch < archive_date
+          AND t.is_deprecated = true
+    LOOP
+        table_name := rec.table_name;
+        rows_archived := archive_partition_to_s3('stack_attestation_disputes_' || rec.stack_small_id, s3_bucket, s3_prefix);
+        RETURN NEXT;
+    END LOOP;
+
+    RETURN;
+END;
+$$ LANGUAGE plpgsql;
+
+-- Run the archive function
+SELECT *
+FROM archive_old_data(
+    24,
+    -- Keep 24 months of data in PostgreSQL
+    'your-s3-bucket-name',
+    -- Replace with your S3 bucket
+    'database-archives' -- S3 prefix
+);
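+
+-- Sketch for inspecting recent archive runs afterwards; archive_log is the
+-- table defined at the top of this script:
+-- SELECT operation, table_name, s3_path, rows_affected, status, executed_at
+-- FROM archive_log
+-- ORDER BY executed_at DESC
+-- LIMIT 20;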
diff --git a/scripts/db-maintenance/init-partitioning.sql b/scripts/db-maintenance/init-partitioning.sql
new file mode 100644
index 00000000..a94e8cb0
--- /dev/null
+++ b/scripts/db-maintenance/init-partitioning.sql
@@ -0,0 +1,93 @@
+-- Create the partman schema if it doesn't exist
+CREATE SCHEMA IF NOT EXISTS partman;
+
+-- Check if the users table needs to be converted to a partitioned table
+DO $$
+BEGIN
+    -- Check if users table exists but is not partitioned
+    IF EXISTS (
+        SELECT 1 FROM pg_tables
+        WHERE tablename = 'users' AND schemaname = 'public'
+    )
+    AND NOT EXISTS (
+        SELECT 1
+        FROM pg_partitioned_table pt
+        JOIN pg_class c ON pt.partrelid = c.oid
+        JOIN pg_namespace n ON c.relnamespace = n.oid
+        WHERE n.nspname = 'public' AND c.relname = 'users'
+    ) THEN
+        -- Rename the existing table
+        EXECUTE 'ALTER TABLE users RENAME TO users_original';
+
+        -- Create a new partitioned table
+        EXECUTE '
+            CREATE TABLE users (
+                id SERIAL,
+                -- Copy your schema from the original table
+                -- You may need to adjust this according to your actual schema
+                -- Use: \d users_original in psql to see the schema
+                created_at TIMESTAMP NOT NULL
+                -- Add all other columns here
+            ) PARTITION BY RANGE (created_at)';
+
+        -- Register the table with pg_partman (4.x uses native partitioning;
+        -- retention is configured in part_config below, not in create_parent)
+        PERFORM partman.create_parent(
+            'public.users',
+            'created_at',
+            'native',
+            'monthly',
+            p_template_table => NULL,
+            p_start_partition => date_trunc('month', CURRENT_DATE - interval '3 years')::text
+        );
+
+        UPDATE partman.part_config
+        SET retention = '36 months',
+            retention_keep_table = true
+        WHERE parent_table = 'public.users';
+
+        -- Copy data from the original table to the partitioned table
+        EXECUTE 'INSERT INTO users SELECT * FROM users_original';
+
+        -- Optional: drop the original table after verifying data
+        -- EXECUTE 'DROP TABLE users_original';
+        RAISE NOTICE 'Converted users table to partitioned table';
+
+    ELSIF NOT EXISTS (
+        SELECT 1 FROM pg_tables
+        WHERE tablename = 'users' AND schemaname = 'public'
+    ) THEN
+        -- Create a new partitioned table if it doesn't exist
+        EXECUTE '
+            CREATE TABLE users (
+                id SERIAL,
+                -- Define your schema
+                created_at TIMESTAMP NOT NULL
+                -- Add all other columns here
+            ) PARTITION BY RANGE (created_at)';
+
+        PERFORM partman.create_parent(
+            'public.users',
+            'created_at',
+            'native',
+            'monthly',
+            p_template_table => NULL,
+            p_start_partition => date_trunc('month', CURRENT_DATE - interval '3 years')::text
+        );
+
+        UPDATE partman.part_config
+        SET retention = '36 months',
+            retention_keep_table = true
+        WHERE parent_table = 'public.users';
+
+        RAISE NOTICE 'Created new partitioned users table';
+    END IF;
+END $$;
+
+-- Set up the maintenance job for pg_partman
+SELECT partman.run_maintenance(p_analyze := true);
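+
+-- Sketch for verifying what pg_partman manages after this script runs:
+-- SELECT parent_table, partition_interval, retention FROM partman.part_config;
+-- SELECT partman.show_partitions('public.users');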
diff --git a/scripts/db-maintenance/restore-from-s3.sql b/scripts/db-maintenance/restore-from-s3.sql
new file mode 100644
index 00000000..b9e3c01d
--- /dev/null
+++ b/scripts/db-maintenance/restore-from-s3.sql
@@ -0,0 +1,97 @@
+-- Function to restore data from S3
+CREATE OR REPLACE FUNCTION restore_from_s3(
+    target_table TEXT,
+    s3_bucket TEXT,
+    s3_path TEXT
+) RETURNS BIGINT AS $$
+DECLARE
+    temp_table TEXT;
+    row_count BIGINT;
+BEGIN
+    -- Create a unique temp table name
+    temp_table := 'temp_import_' || to_char(now(), 'YYYYMMDD_HH24MISS');
+
+    -- Create temp table with same structure as target
+    EXECUTE 'CREATE TEMP TABLE ' || temp_table || ' (LIKE ' || quote_ident(target_table) || ')';
+
+    -- Log the operation start
+    INSERT INTO archive_log (operation, table_name, s3_path, status)
+    VALUES ('RESTORE_START', target_table, s3_path, 'IN_PROGRESS');
+
+    -- Import data from S3
+    PERFORM aws_s3.table_import_from_s3(
+        temp_table,
+        '', -- Use all columns
+        '(FORMAT csv, HEADER true)',
+        aws_commons.create_s3_uri(
+            s3_bucket,
+            s3_path,
+            current_setting('aws_s3.aws_region', true)
+        )
+    );
+
+    -- Count rows imported
+    EXECUTE 'SELECT COUNT(*) FROM ' || temp_table INTO row_count;
+
+    -- Insert into target table
+    EXECUTE 'INSERT INTO ' || quote_ident(target_table) || ' SELECT * FROM ' || temp_table;
+
+    -- Drop temp table
+    EXECUTE 'DROP TABLE ' || temp_table;
+
+    -- Log successful completion
+    INSERT INTO archive_log (operation, table_name, s3_path, rows_affected, status)
+    VALUES ('RESTORE_COMPLETE', target_table, s3_path, row_count, 'SUCCESS');
+
+    RETURN row_count;
+
+EXCEPTION
+    WHEN OTHERS THEN
+        -- Log error
+        INSERT INTO archive_log (operation, table_name, s3_path, status, error_message)
+        VALUES ('RESTORE_ERROR', target_table, s3_path, 'ERROR', SQLERRM);
+
+        -- Clean up
+        EXECUTE 'DROP TABLE IF EXISTS ' || temp_table;
+
+        -- Rethrow the exception
+        RAISE;
+END;
+$$ LANGUAGE plpgsql;
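+
+-- Example invocation (sketch; the table name, bucket, and object key are
+-- placeholders following the layout written by archive_partition_to_s3):
+-- SELECT restore_from_s3(
+--     'tasks_42',
+--     'your-bucket-name',
+--     'database-archives/tasks_42/2025_05_07_18_30_46.csv'
+-- );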
diff --git a/scripts/db-maintenance/run-maintenance.sh b/scripts/db-maintenance/run-maintenance.sh
new file mode 100755
index 00000000..1cec9649
--- /dev/null
+++ b/scripts/db-maintenance/run-maintenance.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+set -e
+
+echo "Starting database maintenance operations..."
+
+# psql reads PGHOST/PGPORT from the environment; supply user and password too
+export PGUSER="$POSTGRES_USER"
+export PGPASSWORD="$POSTGRES_PASSWORD"
+
+# Install required extensions.
+# NOTE: the aws_s3 extension ships with RDS/Aurora; a self-hosted Postgres
+# needs a compatible extension (e.g. the open-source postgres-aws-s3) first.
+psql -v ON_ERROR_STOP=1 -d "$POSTGRES_DB" -c "CREATE SCHEMA IF NOT EXISTS partman;"
+psql -v ON_ERROR_STOP=1 -d "$POSTGRES_DB" -c "CREATE EXTENSION IF NOT EXISTS pg_partman WITH SCHEMA partman;"
+psql -v ON_ERROR_STOP=1 -d "$POSTGRES_DB" -c "CREATE EXTENSION IF NOT EXISTS aws_s3 CASCADE;"
+
+# Initialize the partitioning schema
+psql -v ON_ERROR_STOP=1 -d "$POSTGRES_DB" -f /scripts/init-partitioning.sql
+
+# Run the archival process
+psql -v ON_ERROR_STOP=1 -d "$POSTGRES_DB" -f /scripts/archive-partitions.sql
+
+echo "Database maintenance completed successfully."
\ No newline at end of file

From e54e971237f4183c6f5c7c63de65d452b3578bb0 Mon Sep 17 00:00:00 2001
From: Chad Nehemiah
Date: Wed, 7 May 2025 18:45:38 -0500
Subject: [PATCH 2/3] Update docker-compose.yaml

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
---
 docker-compose.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docker-compose.yaml b/docker-compose.yaml
index 81e66ca1..9c2427e8 100644
--- a/docker-compose.yaml
+++ b/docker-compose.yaml
@@ -218,7 +218,7 @@ services:
       postgres-db:
         condition: service_healthy
     volumes:
-      - ./db-maintenance:/scripts
+      - ./scripts/db-maintenance:/scripts
       - postgres-data:/var/lib/postgresql/data:ro
       - ./s3-archives:/s3-archives
     environment:

From 29d50f19ebdc7cc5f65aef99f9eabac30c7d7b28 Mon Sep 17 00:00:00 2001
From: chad
Date: Wed, 7 May 2025 18:51:55 -0500
Subject: [PATCH 3/3] chore: archive data after 1 month

---
 scripts/db-maintenance/archive-partitions.sql |   4 +-
 scripts/db-maintenance/init-partitioning.sql  | 127 ++++++++++--------
 2 files changed, 76 insertions(+), 55 deletions(-)

diff --git a/scripts/db-maintenance/archive-partitions.sql b/scripts/db-maintenance/archive-partitions.sql
index 26529247..fcf553c4 100644
--- a/scripts/db-maintenance/archive-partitions.sql
+++ b/scripts/db-maintenance/archive-partitions.sql
@@ -244,8 +244,8 @@
 -- Run the archive function
 SELECT *
 FROM archive_old_data(
-    24,
-    -- Keep 24 months of data in PostgreSQL
+    1,
+    -- Keep 1 month of data in PostgreSQL
     'your-s3-bucket-name',
     -- Replace with your S3 bucket
     'database-archives' -- S3 prefix
 );
diff --git a/scripts/db-maintenance/init-partitioning.sql b/scripts/db-maintenance/init-partitioning.sql
index a94e8cb0..bcbafa08 100644
--- a/scripts/db-maintenance/init-partitioning.sql
+++ b/scripts/db-maintenance/init-partitioning.sql
@@ -1,93 +1,127 @@
 -- Create the partman schema if it doesn't exist
 CREATE SCHEMA IF NOT EXISTS partman;
 
+-- Create the partman extension if it doesn't exist (after the schema, so the
+-- extension can be installed into it)
+CREATE EXTENSION IF NOT EXISTS pg_partman WITH SCHEMA partman;
+
--- Check if the users table needs to be converted to a partitioned table
-DO $$
-BEGIN
-    -- Check if users table exists but is not partitioned
+-- Function to set up partitioning for a table
+CREATE OR REPLACE FUNCTION setup_table_partitioning(
+    table_name TEXT,
+    partition_column TEXT,
+    retention_months INT
+) RETURNS void AS $$
+DECLARE
+    original_table TEXT;
+    partition_start TEXT;
+BEGIN
+    -- Check if table exists but is not partitioned
     IF EXISTS (
         SELECT 1 FROM pg_tables
-        WHERE tablename = 'users' AND schemaname = 'public'
+        WHERE tablename = table_name AND schemaname = 'public'
     )
     AND NOT EXISTS (
         SELECT 1
         FROM pg_partitioned_table pt
         JOIN pg_class c ON pt.partrelid = c.oid
         JOIN pg_namespace n ON c.relnamespace = n.oid
-        WHERE n.nspname = 'public' AND c.relname = 'users'
+        WHERE n.nspname = 'public' AND c.relname = table_name
     ) THEN
         -- Rename the existing table
-        EXECUTE 'ALTER TABLE users RENAME TO users_original';
+        original_table := table_name || '_original';
+        EXECUTE 'ALTER TABLE ' || quote_ident(table_name) || ' RENAME TO ' || quote_ident(original_table);
 
-        -- Create a new partitioned table
-        EXECUTE '
-            CREATE TABLE users (
-                id SERIAL,
-                -- Copy your schema from the original table
-                -- You may need to adjust this according to your actual schema
-                -- Use: \d users_original in psql to see the schema
-                created_at TIMESTAMP NOT NULL
-                -- Add all other columns here
-            ) PARTITION BY RANGE (created_at)';
+        -- Create a new partitioned table with the same structure
+        EXECUTE 'CREATE TABLE ' || quote_ident(table_name)
+            || ' (LIKE ' || quote_ident(original_table) || ')'
+            || ' PARTITION BY RANGE (' || quote_ident(partition_column) || ')';
 
-        -- Register the table with pg_partman (4.x uses native partitioning;
-        -- retention is configured in part_config below, not in create_parent)
+        -- Register with pg_partman (4.x: native partitioning; an epoch-based
+        -- column such as tasks.deprecated_at_epoch would additionally need
+        -- p_epoch => 'milliseconds')
+        partition_start := date_trunc(
+            'month',
+            CURRENT_DATE - (retention_months || ' months')::interval
+        )::text;
+
         PERFORM partman.create_parent(
-            'public.users',
-            'created_at',
+            'public.' || table_name,
+            partition_column,
             'native',
             'monthly',
-            p_template_table => NULL,
-            p_start_partition => date_trunc('month', CURRENT_DATE - interval '3 years')::text
+            p_premake => 3,
+            p_start_partition => partition_start,
+            p_automatic_maintenance => 'on'
         );
 
+        -- Retention lives in part_config in pg_partman 4.x
         UPDATE partman.part_config
-        SET retention = '36 months',
+        SET retention = retention_months || ' months',
             retention_keep_table = true
-        WHERE parent_table = 'public.users';
+        WHERE parent_table = 'public.' || table_name;
 
         -- Copy data from the original table to the partitioned table
-        EXECUTE 'INSERT INTO users SELECT * FROM users_original';
+        EXECUTE 'INSERT INTO ' || quote_ident(table_name) || ' SELECT * FROM ' || quote_ident(original_table);
 
-        -- Optional: drop the original table after verifying data
-        -- EXECUTE 'DROP TABLE users_original';
-        RAISE NOTICE 'Converted users table to partitioned table';
-
-    ELSIF NOT EXISTS (
-        SELECT 1 FROM pg_tables
-        WHERE tablename = 'users' AND schemaname = 'public'
-    ) THEN
-        -- Create a new partitioned table if it doesn't exist
-        EXECUTE '
-            CREATE TABLE users (
-                id SERIAL,
-                -- Define your schema
-                created_at TIMESTAMP NOT NULL
-                -- Add all other columns here
-            ) PARTITION BY RANGE (created_at)';
-
-        PERFORM partman.create_parent(
-            'public.users',
-            'created_at',
-            'native',
-            'monthly',
-            p_template_table => NULL,
-            p_start_partition => date_trunc('month', CURRENT_DATE - interval '3 years')::text
-        );
-
-        UPDATE partman.part_config
-        SET retention = '36 months',
-            retention_keep_table = true
-        WHERE parent_table = 'public.users';
-
-        RAISE NOTICE 'Created new partitioned users table';
+        RAISE NOTICE 'Converted % table to partitioned table', table_name;
     END IF;
-END $$;
+END;
+$$ LANGUAGE plpgsql;
+
+-- Set up partitioning for all relevant tables
+DO $$
+BEGIN
+    -- Set up partitioning for tasks table
+    PERFORM setup_table_partitioning('tasks', 'deprecated_at_epoch', 1);
+
+    -- Set up partitioning for stacks table
+    PERFORM setup_table_partitioning('stacks', 'created_at', 1);
+
+    -- Set up partitioning for stack_settlement_tickets table
+    PERFORM setup_table_partitioning('stack_settlement_tickets', 'created_at', 1);
+
+    -- Set up partitioning for stack_attestation_disputes table
+    PERFORM setup_table_partitioning('stack_attestation_disputes', 'created_at', 1);
+END $$;
 
 -- Set up the maintenance job for pg_partman
 SELECT partman.run_maintenance(p_analyze := true);
+
+-- Configure automatic maintenance for all partitioned tables
+UPDATE partman.part_config
+SET automatic_maintenance = 'on'
+WHERE parent_table IN (
+    'public.tasks',
+    'public.stacks',
+    'public.stack_settlement_tickets',
+    'public.stack_attestation_disputes'
+);
+
+-- Maintenance wrapper to be called by cron (renamed so it does not clobber
+-- pg_partman's own run_maintenance_proc procedure)
+CREATE OR REPLACE FUNCTION public.run_partman_maintenance() RETURNS void
+LANGUAGE plpgsql AS $$
+BEGIN
+    PERFORM partman.run_maintenance(p_analyze := true);
+END;
+$$;
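+
+-- Usage sketch (assumes the compose services added in this series):
+--   docker compose --profile maintenance run --rm postgres-maintenance  # one-off run
+-- Inside psql, partition maintenance can also be triggered manually:
+--   SELECT public.run_partman_maintenance();
+-- The ofelia job in ofelia.ini re-runs run-maintenance.sh monthly.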