From a6c20f5b500ec99cf8d82a2e5e6a2f1b904f0d4f Mon Sep 17 00:00:00 2001 From: Shahid Ullah Date: Wed, 19 Feb 2025 03:30:47 +0500 Subject: [PATCH 1/3] [PG-961] automated bash script for replication, expected files and updated sql --- .../expected/verify_incremental_data.out | 163 ++++++++++ .../backup/expected/verify_sample_data.out | 279 ++++++++++++++++++ ci_scripts/backup/replication_script.sh | 190 ++++++++++++ ci_scripts/backup/sql/incremental_data.sql | 3 +- 4 files changed, 634 insertions(+), 1 deletion(-) create mode 100644 ci_scripts/backup/expected/verify_incremental_data.out create mode 100644 ci_scripts/backup/expected/verify_sample_data.out create mode 100755 ci_scripts/backup/replication_script.sh diff --git a/ci_scripts/backup/expected/verify_incremental_data.out b/ci_scripts/backup/expected/verify_incremental_data.out new file mode 100644 index 0000000000000..08f741f320244 --- /dev/null +++ b/ci_scripts/backup/expected/verify_incremental_data.out @@ -0,0 +1,163 @@ +-- =============================================== +-- 1. Verify TDE Tables Exist +-- =============================================== +SELECT table_name +FROM information_schema.tables +WHERE table_schema = 'public' +AND table_name IN ('tde_table', 'tde_child', 'audit_log', 'part_table', 'part1') +ORDER BY table_name; + table_name +------------ + audit_log + part1 + part_table + tde_child + tde_table +(5 rows) + +-- =============================================== +-- 2. 
Verify Columns of Tables +-- =============================================== +SELECT column_name, data_type, table_name +FROM information_schema.columns +WHERE table_name IN ('tde_table', 'tde_child', 'audit_log', 'part_table', 'part1') +ORDER BY table_name, ordinal_position; + column_name | data_type | table_name +-------------+-----------+------------ + id | integer | audit_log + table_name | text | audit_log + operation | text | audit_log + id | integer | part1 + data | text | part1 + id | integer | part_table + data | text | part_table + id | integer | tde_child + parent_id | integer | tde_child + id | integer | tde_table + name | text | tde_table +(11 rows) + +-- =============================================== +-- 3. Verify Constraints Exist +-- =============================================== +SELECT conname, conrelid::regclass, contype +FROM pg_constraint +WHERE connamespace = 'public'::regnamespace +AND conrelid::regclass::text IN ('tde_table', 'tde_child') +ORDER BY conrelid; + conname | conrelid | contype +--------------------------+-----------+--------- + tde_table_pkey | tde_table | p + unique_name | tde_table | u + check_name_length | tde_table | c + tde_child_pkey | tde_child | p + tde_child_parent_id_fkey | tde_child | f +(5 rows) + +-- =============================================== +-- 4. Verify Index Exists +-- =============================================== +SELECT indexname, tablename +FROM pg_indexes +WHERE schemaname = 'public' AND tablename = 'tde_table'; + indexname | tablename +----------------+----------- + tde_table_pkey | tde_table + idx_tde_name | tde_table + unique_name | tde_table +(3 rows) + +-- =============================================== +-- 5. 
Verify Functions Exist +-- =============================================== +SELECT proname, prorettype::regtype +FROM pg_proc +JOIN pg_namespace ON pg_proc.pronamespace = pg_namespace.oid +WHERE nspname = 'public' +AND proname = 'get_tde_data'; + proname | prorettype +--------------+------------ + get_tde_data | record +(1 row) + +-- =============================================== +-- 6. Verify Function Output +-- =============================================== +SELECT * FROM get_tde_data(); + id | name +----+------- + 1 | Alice + 2 | Bob + 3 | khan + 4 | Bob2 +(4 rows) + +-- =============================================== +-- 7. Verify Partitioning +-- =============================================== +SELECT inhrelid::regclass AS partition_name, inhparent::regclass AS parent_table +FROM pg_inherits +WHERE inhparent::regclass::text = 'part_table' +ORDER BY inhparent; + partition_name | parent_table +----------------+-------------- + part1 | part_table +(1 row) + +-- =============================================== +-- 8. Verify Triggers Exist +-- =============================================== +SELECT tgname, relname +FROM pg_trigger +JOIN pg_class ON pg_trigger.tgrelid = pg_class.oid +WHERE NOT tgisinternal AND relname = 'tde_table'; + tgname | relname +-----------+----------- + tde_audit | tde_table +(1 row) + +-- =============================================== +-- 9. 
Verify Data Integrity +-- =============================================== +-- Check data counts +SELECT 'tde_table' AS table_name, COUNT(*) FROM tde_table +UNION ALL +SELECT 'tde_child', COUNT(*) FROM tde_child +UNION ALL +SELECT 'audit_log', COUNT(*) FROM audit_log +UNION ALL +SELECT 'part_table', COUNT(*) FROM part_table; + table_name | count +------------+------- + tde_table | 4 + tde_child | 1 + audit_log | 2 + part_table | 1 +(4 rows) + +-- Ensure tde_child references valid parent_id +SELECT tde_child.id, tde_child.parent_id +FROM tde_child +LEFT JOIN tde_table ON tde_child.parent_id = tde_table.id +WHERE tde_table.id IS NULL; + id | parent_id +----+----------- +(0 rows) + +-- =============================================== +-- 10. Verify tables are encrypted +-- =============================================== +-- Verify all tables exist and are encrypted +SELECT tablename, pg_tde_is_encrypted(tablename::TEXT) AS is_encrypted +FROM pg_tables +WHERE schemaname = 'public' +AND tablename IN ('tde_table', 'tde_child', 'part1','part_table') +ORDER BY tablename; + tablename | is_encrypted +------------+-------------- + part1 | t + part_table | f + tde_child | t + tde_table | t +(4 rows) + diff --git a/ci_scripts/backup/expected/verify_sample_data.out b/ci_scripts/backup/expected/verify_sample_data.out new file mode 100644 index 0000000000000..4a8984ca46492 --- /dev/null +++ b/ci_scripts/backup/expected/verify_sample_data.out @@ -0,0 +1,279 @@ +-- Set datestyle for consistency +SET datestyle TO 'iso, dmy'; +SET +-- ===================================================== +-- 1. Verify Tables Exist +-- ===================================================== +SELECT table_name +FROM information_schema.tables +WHERE table_schema = 'public' +AND table_name IN ('dept', 'emp', 'jobhist'); + table_name +------------ + dept + emp + jobhist +(3 rows) + +-- ===================================================== +-- 2. 
Verify Views Exist +-- ===================================================== +SELECT table_name +FROM information_schema.views +WHERE table_schema = 'public' +AND table_name = 'salesemp'; + table_name +------------ + salesemp +(1 row) + +-- ===================================================== +-- 3. Verify Columns of Tables +-- ===================================================== +-- Dept Table +SELECT column_name, data_type +FROM information_schema.columns +WHERE table_name = 'dept' +ORDER BY ordinal_position; + column_name | data_type +-------------+------------------- + deptno | numeric + dname | character varying + loc | character varying +(3 rows) + +-- Emp Table +SELECT column_name, data_type +FROM information_schema.columns +WHERE table_name = 'emp' +ORDER BY ordinal_position; + column_name | data_type +-------------+------------------- + empno | numeric + ename | character varying + job | character varying + mgr | numeric + hiredate | date + sal | numeric + comm | numeric + deptno | numeric +(8 rows) + +-- Jobhist Table +SELECT column_name, data_type +FROM information_schema.columns +WHERE table_name = 'jobhist' +ORDER BY ordinal_position; + column_name | data_type +-------------+----------------------------- + empno | numeric + startdate | timestamp without time zone + enddate | timestamp without time zone + job | character varying + sal | numeric + comm | numeric + deptno | numeric + chgdesc | character varying +(8 rows) + +-- ===================================================== +-- 4. Verify Sequences Exist +-- ===================================================== +SELECT relname +FROM pg_class +WHERE relkind = 'S' +AND relname = 'next_empno'; + relname +------------ + next_empno +(1 row) + +-- ===================================================== +-- 5. 
Verify Functions Exist +-- ===================================================== +SELECT proname, prorettype::regtype +FROM pg_proc +JOIN pg_namespace ON pg_proc.pronamespace = pg_namespace.oid +WHERE nspname = 'public' +AND proname IN ('list_emp', 'select_emp', 'emp_query', 'emp_query_caller', + 'emp_comp', 'new_empno', 'hire_clerk', 'hire_salesman'); + proname | prorettype +------------------+------------ + list_emp | void + select_emp | void + emp_query | record + emp_query_caller | void + emp_comp | numeric + new_empno | integer + hire_clerk | numeric + hire_salesman | numeric +(8 rows) + +-- ===================================================== +-- 6. Verify Data in Tables +-- ===================================================== +-- Count rows in each table +SELECT 'dept' AS table_name, COUNT(*) FROM dept +UNION ALL +SELECT 'emp', COUNT(*) FROM emp +UNION ALL +SELECT 'jobhist', COUNT(*) FROM jobhist; + table_name | count +------------+------- + dept | 4 + emp | 14 + jobhist | 17 +(3 rows) + +-- Check if `emp` employees belong to valid `dept` +SELECT emp.empno, emp.ename, emp.deptno, dept.deptno +FROM emp +LEFT JOIN dept ON emp.deptno = dept.deptno +WHERE dept.deptno IS NULL; + empno | ename | deptno | deptno +-------+-------+--------+-------- +(0 rows) + +-- Check if `jobhist` records have valid `empno` +SELECT jobhist.empno, jobhist.job, jobhist.sal +FROM jobhist +LEFT JOIN emp ON jobhist.empno = emp.empno +WHERE emp.empno IS NULL; + empno | job | sal +-------+-----+----- +(0 rows) + +-- ===================================================== +-- 7. 
Verify Expected Data in Tables +-- ===================================================== +-- Sample Data from `dept` +SELECT * FROM dept LIMIT 5; + deptno | dname | loc +--------+------------+---------- + 10 | ACCOUNTING | NEW YORK + 20 | RESEARCH | DALLAS + 30 | SALES | CHICAGO + 40 | OPERATIONS | BOSTON +(4 rows) + +-- Sample Data from `emp` +SELECT * FROM emp ORDER BY empno LIMIT 5; + empno | ename | job | mgr | hiredate | sal | comm | deptno +-------+--------+----------+------+------------+---------+---------+-------- + 7369 | SMITH | CLERK | 7902 | 1980-12-17 | 800.00 | | 20 + 7499 | ALLEN | SALESMAN | 7698 | 1981-02-20 | 1600.00 | 300.00 | 30 + 7521 | WARD | SALESMAN | 7698 | 1981-02-22 | 1250.00 | 500.00 | 30 + 7566 | JONES | MANAGER | 7839 | 1981-04-02 | 2975.00 | | 20 + 7654 | MARTIN | SALESMAN | 7698 | 1981-09-28 | 1250.00 | 1400.00 | 30 +(5 rows) + +-- Sample Data from `jobhist` +SELECT * FROM jobhist ORDER BY empno LIMIT 5; + empno | startdate | enddate | job | sal | comm | deptno | chgdesc +-------+---------------------+---------+----------+---------+---------+--------+---------- + 7369 | 1980-12-17 00:00:00 | | CLERK | 800.00 | | 20 | New Hire + 7499 | 1981-02-20 00:00:00 | | SALESMAN | 1600.00 | 300.00 | 30 | New Hire + 7521 | 1981-02-22 00:00:00 | | SALESMAN | 1250.00 | 500.00 | 30 | New Hire + 7566 | 1981-04-02 00:00:00 | | MANAGER | 2975.00 | | 20 | New Hire + 7654 | 1981-09-28 00:00:00 | | SALESMAN | 1250.00 | 1400.00 | 30 | New Hire +(5 rows) + +SELECT * FROM salesemp; + empno | ename | hiredate | sal | comm +-------+--------+------------+---------+--------- + 7499 | ALLEN | 1981-02-20 | 1600.00 | 300.00 + 7521 | WARD | 1981-02-22 | 1250.00 | 500.00 + 7654 | MARTIN | 1981-09-28 | 1250.00 | 1400.00 + 7844 | TURNER | 1981-09-08 | 1500.00 | 0.00 +(4 rows) + +-- Validate if department names follow expected values +SELECT deptno, dname FROM dept +WHERE dname NOT IN ('HR', 'Finance', 'Sales', 'IT', 'Admin'); + deptno | dname +--------+------------ + 
10 | ACCOUNTING + 20 | RESEARCH + 30 | SALES + 40 | OPERATIONS +(4 rows) + +-- Validate if `emp` salaries are within expected range +SELECT empno, ename, job, sal +FROM emp +WHERE sal < 3000 OR sal > 20000; + empno | ename | job | sal +-------+--------+----------+--------- + 7369 | SMITH | CLERK | 800.00 + 7499 | ALLEN | SALESMAN | 1600.00 + 7521 | WARD | SALESMAN | 1250.00 + 7566 | JONES | MANAGER | 2975.00 + 7654 | MARTIN | SALESMAN | 1250.00 + 7698 | BLAKE | MANAGER | 2850.00 + 7782 | CLARK | MANAGER | 2450.00 + 7844 | TURNER | SALESMAN | 1500.00 + 7876 | ADAMS | CLERK | 1100.00 + 7900 | JAMES | CLERK | 950.00 + 7934 | MILLER | CLERK | 1300.00 +(11 rows) + +-- Check if any employees were hired before 2000 (if expected) +SELECT empno, ename, hiredate FROM emp +WHERE hiredate < '2000-01-01'; + empno | ename | hiredate +-------+--------+------------ + 7369 | SMITH | 1980-12-17 + 7499 | ALLEN | 1981-02-20 + 7521 | WARD | 1981-02-22 + 7566 | JONES | 1981-04-02 + 7654 | MARTIN | 1981-09-28 + 7698 | BLAKE | 1981-05-01 + 7782 | CLARK | 1981-06-09 + 7788 | SCOTT | 1987-04-19 + 7839 | KING | 1981-11-17 + 7844 | TURNER | 1981-09-08 + 7876 | ADAMS | 1987-05-23 + 7900 | JAMES | 1981-12-03 + 7902 | FORD | 1981-12-03 + 7934 | MILLER | 1982-01-23 +(14 rows) + +-- Verify sequence correctness (Check latest employee number) +-- SELECT last_value FROM next_empno; +-- Verify if function `new_empno()` returns next expected value +-- SELECT new_empno(); +-- ===================================================== +-- 8. 
Verify Referential Integrity +-- ===================================================== +-- Ensure all employees in `jobhist` exist in `emp` +SELECT jobhist.empno FROM jobhist +LEFT JOIN emp ON jobhist.empno = emp.empno +WHERE emp.empno IS NULL; + empno +------- +(0 rows) + +-- Ensure `emp.deptno` exists in `dept` +SELECT emp.empno, emp.deptno FROM emp +LEFT JOIN dept ON emp.deptno = dept.deptno +WHERE dept.deptno IS NULL; + empno | deptno +-------+-------- +(0 rows) + +-- =============================================== +-- 9. Verify tables are encrypted +-- =============================================== +-- Verify all tables exist and are encrypted +SELECT tablename, pg_tde_is_encrypted(tablename::TEXT) AS is_encrypted +FROM pg_tables +WHERE schemaname = 'public' +AND tablename IN ('dept', 'emp', 'jobhist') +ORDER BY tablename; + tablename | is_encrypted +-----------+-------------- + dept | t + emp | t + jobhist | t +(3 rows) + diff --git a/ci_scripts/backup/replication_script.sh b/ci_scripts/backup/replication_script.sh new file mode 100755 index 0000000000000..ebaabbad50ded --- /dev/null +++ b/ci_scripts/backup/replication_script.sh @@ -0,0 +1,190 @@ +#!/bin/bash +# This script is used to set up replication setup with TDE enabled +export TDE_MODE=1 + +SCRIPT_DIR="$(cd -- "$(dirname "$0")" >/dev/null 2>&1; pwd -P)" +INSTALL_DIR="$SCRIPT_DIR/../../pginst" +export PATH=$INSTALL_DIR/bin:$PATH + +# Environment variables for PGDATA and archive directories +MASTER_DATA=$INSTALL_DIR/primary +STANDBY1_DATA=$INSTALL_DIR/standby1 +STANDBY2_DATA=$INSTALL_DIR/standby2 +ARCHIVE_DIR=$INSTALL_DIR/archive +SQL_DIR=$SCRIPT_DIR/backup/sql +MASTER_PORT=55433 +STANDBY1_PORT=55434 +STANDBY2_PORT=55435 +DB_NAME=tde_db +EXPECTED_DIR=$SCRIPT_DIR/backup/expected +ACTUAL_DIR=$SCRIPT_DIR/actual +LOGFILE=$INSTALL_DIR/replication.log + +# Create directories for expected, actual, and archive files +mkdir -p $EXPECTED_DIR $ACTUAL_DIR $ARCHIVE_DIR + +# Function to log messages +log_message() { 
+ echo "$(date '+%Y-%m-%d %H:%M:%S') - $1" |tee -a "$LOGFILE" +} + +# Function to run SQL files and capture results +run_sql() { + local sql_file=$1 + local db_name="${2:-$DB_NAME}" + local port="${3:-$MASTER_PORT}" + local out_dir="${4:-$ACTUAL_DIR}" + local file_name=$(basename "$sql_file" .sql) + + if [ ! -d "$out_dir" ]; then + mkdir -p "$out_dir" + fi + + if [ -f "$out_dir/$file_name.out" ]; then + rm -fr "$out_dir/$file_name.out" + fi + psql -d $db_name -p $port -e -a -f "$SQL_DIR/$sql_file" > "$out_dir/$file_name.out" 2>&1 +} + +# Function to verify expected vs actual output +verify_output() { + local sql_file=$1 + local actualdir=$2 + local file_name=$(basename "$sql_file" .sql) + local expected_file="$EXPECTED_DIR/$file_name.out" + local actual_file="$actualdir/$file_name.out" + local diff_file="$actualdir/$file_name.diff" + + if [ -f $diff_file ]; then + rm -fr $diff_file + fi + + if diff -q "$expected_file" "$actual_file" > /dev/null; then + log_message "$sql_file matches expected output. ✅" + else + log_message "$sql_file output mismatch. ❌" + diff "$expected_file" "$actual_file" > $diff_file + log_message "See diff file. $diff_file " + fi +} + +#====================================================== +configure_primary_server(){ + # Create the primary server + source $SCRIPT_DIR/configure-tde-server.sh $MASTER_DATA $MASTER_PORT + + # Need to verify it with different configurations + # Like wal_buffers, wal_writer_delay, wal_writer_flush_after, etc. 
+ # Basic configuration of PostgreSQL + cat >> $MASTER_DATA/postgresql.conf <> $MASTER_DATA/pg_hba.conf < /dev/null +} + +#===================================================== +configure_standby() { + local standby_data=$1 + local standby_port=$2 + local standby_log=$standby_data/standby.log + + # Make sure $standby_data is empty + if [ -d "$standby_data" ]; then + if pg_ctl -D "$standby_data" status -o "-p $standby_port" >/dev/null; then + pg_ctl -D "$standby_data" stop -o "-p $standby_port" + fi + rm -rf "$standby_data" + fi + + log_message "Creating pg_basebackup $standby_data..." + pg_basebackup -D $standby_data -U replication -p $MASTER_PORT -Xs -R -P + + # Update the postgresql.conf file with the port $standby_port + log_message "Updating $standby_data/postgresql.conf" + cat >> $standby_data/postgresql.conf < /dev/null + + # Verify the data on the standby1 + run_sql verify_incremental_data.sql $DB_NAME $STANDBY1_PORT "${ACTUAL_DIR}/standby1" + log_message "Verifying incremental data on master and standby1" + verify_output verify_incremental_data.sql "${ACTUAL_DIR}/standby1" + + # Verify the data on the standby2 + run_sql verify_incremental_data.sql $DB_NAME $STANDBY2_PORT "${ACTUAL_DIR}/standby2" + log_message "Verifying incremental data on master and standby2" + verify_output verify_incremental_data.sql "${ACTUAL_DIR}/standby2" +} + +promote_standby() { + local standby_data=$1 + local standby_log=$standby_data/standby.log + pg_ctl -D $standby_data promote -l $standby_log +} + +pg_rewind() { + pg_rewind --target-pgdata=$MASTER_DATA --source-server="port=$STANDBY1_PORT user=postgres dbname=$DB_NAME" > $ACTUAL_DIR/pg_rewind.log 2>&1 +} +#====================================================== +# Main Script Execution +main() { + echo "=== Starting replication Test Automation ===" + configure_primary_server + configure_standby $STANDBY1_DATA $STANDBY1_PORT + configure_standby $STANDBY2_DATA $STANDBY2_PORT + data_verification + #promote_standby + #pg_rewind + 
echo "=== replication Test Automation Completed! === 🚀" +} + +# Run Main Function +main + diff --git a/ci_scripts/backup/sql/incremental_data.sql b/ci_scripts/backup/sql/incremental_data.sql index 9f1e6775d5f58..6b583d1731ba0 100644 --- a/ci_scripts/backup/sql/incremental_data.sql +++ b/ci_scripts/backup/sql/incremental_data.sql @@ -19,7 +19,7 @@ SELECT tablename FROM pg_tables WHERE schemaname = 'public' ORDER BY tablename; -- Adding constraints ALTER TABLE tde_table ADD CONSTRAINT unique_name UNIQUE(name); -ALTER TABLE tde_table ADD CONSTRAINT check_name_length CHECK (LENGTH(name) > 3); +ALTER TABLE tde_table ADD CONSTRAINT check_name_length CHECK (LENGTH(name) > 2); SELECT conname, conrelid::regclass, contype FROM pg_constraint WHERE connamespace = 'public'::regnamespace ORDER BY conrelid; @@ -68,6 +68,7 @@ CREATE TRIGGER tde_audit AFTER INSERT OR UPDATE OR DELETE ON tde_table FOR EACH ROW EXECUTE FUNCTION audit_tde_changes(); +INSERT INTO tde_table (name) VALUES ('khan'), ('Bob2'); SELECT tgname, relname FROM pg_trigger JOIN pg_class ON pg_trigger.tgrelid = pg_class.oid WHERE NOT tgisinternal ORDER BY relname; -- Check WAL logs for plaintext leaks From 7ccbebfcc6a8f4de2135709a05a38b2b30430ce6 Mon Sep 17 00:00:00 2001 From: Shahid Ullah Date: Wed, 19 Feb 2025 19:37:04 +0500 Subject: [PATCH 2/3] [PG-961] Add testcase to verify ecrypted data at rest on both master and replica nodes. 
--- ci_scripts/backup/replication_script.sh | 38 ++++++++++++++++++++++++- 1 file changed, 37 insertions(+), 1 deletion(-) diff --git a/ci_scripts/backup/replication_script.sh b/ci_scripts/backup/replication_script.sh index ebaabbad50ded..7b2595085b219 100755 --- a/ci_scripts/backup/replication_script.sh +++ b/ci_scripts/backup/replication_script.sh @@ -19,6 +19,8 @@ DB_NAME=tde_db EXPECTED_DIR=$SCRIPT_DIR/backup/expected ACTUAL_DIR=$SCRIPT_DIR/actual LOGFILE=$INSTALL_DIR/replication.log +TABLE_NAME="emp" +SEARCHED_TEXT="SMITH" # Create directories for expected, actual, and archive files mkdir -p $EXPECTED_DIR $ACTUAL_DIR $ARCHIVE_DIR @@ -64,7 +66,34 @@ verify_output() { else log_message "$sql_file output mismatch. ❌" diff "$expected_file" "$actual_file" > $diff_file - log_message "See diff file. $diff_file " + log_message "See diff file. $diff_file " + fi +} + +# Verify Data Encryption at Rest +verify_encrypted_data_at_rest() { + local table_name="${1:-$TABLE_NAME}" + local search_text="${2:-$SEARCHED_TEXT}" + local pg_port="${3:-$MASTER_PORT}" + local db_name="${4:-$DB_NAME}" + # Get Data File Path + pg_relation_filepath=$(psql -p $pg_port -d "$db_name" -t -c "SELECT pg_relation_filepath('$table_name');" | xargs) + data_dir_path=$( psql -p $pg_port -d "$db_name" -t -c "SHOW data_directory" | xargs) + file_name="$data_dir_path/$pg_relation_filepath" + + log_message "Verifying data encryption at rest for table: $table_name in database: $db_name on port: $pg_port" + log_message "Data file path: $file_name" + + # Extract first 10 lines of raw data + raw_data=$(sudo hexdump -C "$file_name" | head -n 10 || true) + log_message "$raw_data" + + readable_text=$(sudo strings "$file_name" | grep "$search_text" || true) + # Check if there is readable text in the data file + if [[ -n "$readable_text" ]]; then + log_message "Readable text detected! Data appears UNENCRYPTED.❌ " + else + log_message "Test Passed: Data appears to be encrypted! 
✅ " fi } @@ -163,6 +192,12 @@ data_verification() { verify_output verify_incremental_data.sql "${ACTUAL_DIR}/standby2" } +verify_data_ondisk(){ + verify_encrypted_data_at_rest $TABLE_NAME $SEARCHED_TEXT $MASTER_PORT $DB_NAME + verify_encrypted_data_at_rest $TABLE_NAME $SEARCHED_TEXT $STANDBY1_PORT $DB_NAME + verify_encrypted_data_at_rest $TABLE_NAME $SEARCHED_TEXT $STANDBY2_PORT $DB_NAME +} + promote_standby() { local standby_data=$1 local standby_log=$standby_data/standby.log @@ -180,6 +215,7 @@ main() { configure_standby $STANDBY1_DATA $STANDBY1_PORT configure_standby $STANDBY2_DATA $STANDBY2_PORT data_verification + verify_data_ondisk #promote_standby #pg_rewind echo "=== replication Test Automation Completed! === 🚀" From 541ca24ac42cdf873a0aceb87b6d7085582f6d5c Mon Sep 17 00:00:00 2001 From: Shahid Ullah Date: Fri, 21 Feb 2025 02:44:24 +0500 Subject: [PATCH 3/3] [PG-961] Extended replication test coverage by adding some more scenarios --- .../backup/expected/verify_pgbench_data.out | 56 ++ ci_scripts/backup/replication_script.sh | 483 +++++++++++++----- ci_scripts/backup/sql/verify_pgbench_data.sql | 29 ++ ci_scripts/configure-tde-server.sh | 6 +- 4 files changed, 439 insertions(+), 135 deletions(-) create mode 100644 ci_scripts/backup/expected/verify_pgbench_data.out create mode 100644 ci_scripts/backup/sql/verify_pgbench_data.sql diff --git a/ci_scripts/backup/expected/verify_pgbench_data.out b/ci_scripts/backup/expected/verify_pgbench_data.out new file mode 100644 index 0000000000000..45d70d624fb33 --- /dev/null +++ b/ci_scripts/backup/expected/verify_pgbench_data.out @@ -0,0 +1,56 @@ +-- Verify pgbench accounts data is replicated correctly +SELECT COUNT(*) AS total_accounts FROM pgbench_accounts; + total_accounts +---------------- + 1000000 +(1 row) + +-- Check if pgbench history table matches expected transactions +SELECT COUNT(*) AS total_history FROM pgbench_history; + total_history +--------------- + 57091 +(1 row) + +-- Check last transaction 
timestamps to ensure no delay +SELECT MAX(tid) AS last_transaction FROM pgbench_history; + last_transaction +------------------ + 100 +(1 row) + +-- Check data integrity by comparing last 5 accounts +SELECT aid, bid FROM pgbench_accounts ORDER BY aid DESC LIMIT 5; + aid | bid +---------+----- + 1000000 | 10 + 999999 | 10 + 999998 | 10 + 999997 | 10 + 999996 | 10 +(5 rows) + +-- Check last account ID and balance for consistency +SELECT MAX(aid) AS last_account_id FROM pgbench_accounts; + last_account_id +----------------- + 1000000 +(1 row) + +-- Detect if the query is running on a Replica (Standby) +SELECT pg_is_in_recovery() AS is_replica; + is_replica +------------ + t +(1 row) + +-- Only check WAL replay status on standby nodes +--DO $$ +--BEGIN +-- IF pg_is_in_recovery() THEN +-- RAISE NOTICE 'Checking WAL replay status...'; +-- PERFORM pg_last_wal_replay_lsn(), pg_last_wal_receive_lsn(); +-- ELSE +-- RAISE NOTICE 'Skipping WAL check on Master.'; +-- END IF; +--END $$; diff --git a/ci_scripts/backup/replication_script.sh b/ci_scripts/backup/replication_script.sh index 7b2595085b219..50238c0cf70b8 100755 --- a/ci_scripts/backup/replication_script.sh +++ b/ci_scripts/backup/replication_script.sh @@ -1,226 +1,445 @@ #!/bin/bash -# This script is used to set up replication setup with TDE enabled +# This script is used to set up replication with TDE enabled export TDE_MODE=1 +# Paths and Configurations SCRIPT_DIR="$(cd -- "$(dirname "$0")" >/dev/null 2>&1; pwd -P)" -INSTALL_DIR="$SCRIPT_DIR/../../pginst" +INSTALL_DIR="$SCRIPT_DIR/../../pginst/17" export PATH=$INSTALL_DIR/bin:$PATH -# Environment variables for PGDATA and archive directories +# PostgreSQL Data Directories MASTER_DATA=$INSTALL_DIR/primary STANDBY1_DATA=$INSTALL_DIR/standby1 STANDBY2_DATA=$INSTALL_DIR/standby2 ARCHIVE_DIR=$INSTALL_DIR/archive SQL_DIR=$SCRIPT_DIR/backup/sql +EXPECTED_DIR=$SCRIPT_DIR/backup/expected +ACTUAL_DIR=$SCRIPT_DIR/actual + +# PostgreSQL Configuration MASTER_PORT=55433 
STANDBY1_PORT=55434 STANDBY2_PORT=55435 DB_NAME=tde_db -EXPECTED_DIR=$SCRIPT_DIR/backup/expected -ACTUAL_DIR=$SCRIPT_DIR/actual -LOGFILE=$INSTALL_DIR/replication.log TABLE_NAME="emp" SEARCHED_TEXT="SMITH" -# Create directories for expected, actual, and archive files +# pgbench Configuration +SCALE=50 # ~5 million rows +DURATION=300 # 5 minutes test +CLIENTS=16 # Moderate concurrent load +THREADS=4 # Suitable for 4+ core machines + +# Log File +LOGFILE=$INSTALL_DIR/replication_test.log + +# PASS/FAIL Counters +TESTS_PASSED=0 +TESTS_FAILED=0 + +# Ensure necessary directories exist mkdir -p $EXPECTED_DIR $ACTUAL_DIR $ARCHIVE_DIR -# Function to log messages +# Logging Function log_message() { echo "$(date '+%Y-%m-%d %H:%M:%S') - $1" |tee -a "$LOGFILE" } -# Function to run SQL files and capture results +# Function to run SQL files and capture results with PASS/FAIL reporting run_sql() { local sql_file=$1 local db_name="${2:-$DB_NAME}" local port="${3:-$MASTER_PORT}" local out_dir="${4:-$ACTUAL_DIR}" local file_name=$(basename "$sql_file" .sql) - - if [ ! -d "$out_dir" ]; then - mkdir -p "$out_dir" - fi + local log_file="$out_dir/$file_name.out" - if [ -f "$out_dir/$file_name.out" ]; then - rm -fr "$out_dir/$file_name.out" - fi - psql -d $db_name -p $port -e -a -f "$SQL_DIR/$sql_file" > "$out_dir/$file_name.out" 2>&1 + # Ensure output directory exists + mkdir -p "$out_dir" + + # Remove old log file if exists + [ -f "$log_file" ] && rm -f "$log_file" + + # Run SQL and capture output + psql -d "$db_name" -p "$port" -e -a -f "$SQL_DIR/$sql_file" > "$log_file" 2>&1 } -# Function to verify expected vs actual output +# Function to compare expected vs. 
actual SQL execution output with PASS/FAIL reporting verify_output() { local sql_file=$1 - local actualdir=$2 + local actual_dir=$2 local file_name=$(basename "$sql_file" .sql) local expected_file="$EXPECTED_DIR/$file_name.out" - local actual_file="$actualdir/$file_name.out" - local diff_file="$actualdir/$file_name.diff" - - if [ -f $diff_file ]; then - rm -fr $diff_file + local actual_file="$actual_dir/$file_name.out" + local diff_file="$actual_dir/$file_name.diff" + + #log_message "🔎 Verifying output for: $sql_file" + + # Ensure expected output file exists + if [ ! -f "$expected_file" ]; then + log_message "❌ Expected output file missing: $expected_file" + ((TESTS_FAILED++)) + return 1 fi + # Ensure actual output file exists + if [ ! -f "$actual_file" ]; then + log_message "❌ Actual output file missing: $actual_file" + ((TESTS_FAILED++)) + return 1 + fi + + # Remove old diff file if exists + [ -f "$diff_file" ] && rm -f "$diff_file" + + # Compare files if diff -q "$expected_file" "$actual_file" > /dev/null; then - log_message "$sql_file matches expected output. ✅" + log_message "✅ Output matches expected result." + ((TESTS_PASSED++)) else - log_message "$sql_file output mismatch. ❌" - diff "$expected_file" "$actual_file" > $diff_file - log_message "See diff file. $diff_file " + diff "$expected_file" "$actual_file" > "$diff_file" + log_message "❌ Output mismatch. 
See diff file: $diff_file" + ((TESTS_FAILED++)) fi } -# Verify Data Encryption at Rest -verify_encrypted_data_at_rest() { - local table_name="${1:-$TABLE_NAME}" - local search_text="${2:-$SEARCHED_TEXT}" - local pg_port="${3:-$MASTER_PORT}" - local db_name="${4:-$DB_NAME}" - # Get Data File Path - pg_relation_filepath=$(psql -p $pg_port -d "$db_name" -t -c "SELECT pg_relation_filepath('$table_name');" | xargs) - data_dir_path=$( psql -p $pg_port -d "$db_name" -t -c "SHOW data_directory" | xargs) - file_name="$data_dir_path/$pg_relation_filepath" +# Function to configure the primary PostgreSQL server +configure_primary_server() { + log_message "Configuring Primary PostgreSQL Server..." - log_message "Verifying data encryption at rest for table: $table_name in database: $db_name on port: $pg_port" - log_message "Data file path: $file_name" + # Run TDE configuration script + source "$SCRIPT_DIR/configure-tde-server.sh" "$MASTER_DATA" "$MASTER_PORT" >> $LOGFILE 2>&1 - # Extract first 10 lines of raw data - raw_data=$(sudo hexdump -C "$file_name" | head -n 10 || true) - log_message "$raw_data" + # Update postgresql.conf with replication settings + cat >> "$MASTER_DATA/postgresql.conf" <> $MASTER_DATA/postgresql.conf <> $MASTER_DATA/pg_hba.conf <> "$MASTER_DATA/pg_hba.conf" <> $LOGFILE 2>&1 + + # Create replication user + psql -p "$MASTER_PORT" -c "CREATE USER replication WITH REPLICATION;" >> $LOGFILE 2>&1 + + # Create TDE-enabled database + createdb -p "$MASTER_PORT" "$DB_NAME" >> $LOGFILE 2>&1 + + # Enable pg_tde extension + psql -p "$MASTER_PORT" -d "$DB_NAME" -c "CREATE EXTENSION pg_tde;" >> $LOGFILE 2>&1 + + # Set TDE principal key + psql -p "$MASTER_PORT" -d "$DB_NAME" -c "SELECT pg_tde_set_default_principal_key('default-principal-key','reg_file-global',false);" >> $LOGFILE 2>&1 - # Create tde_db database - createdb -p $MASTER_PORT $DB_NAME - psql -p $MASTER_PORT -d $DB_NAME -c "CREATE EXTENSION pg_tde" - psql -p $MASTER_PORT -d $DB_NAME -c "SELECT 
pg_tde_set_default_principal_key('default-principal-key','reg_file-global',false)" + # Load sample data into the database + psql -p "$MASTER_PORT" -d "$DB_NAME" -f "$SQL_DIR/sample_data.sql" >> $LOGFILE 2>&1 - # Create separate database for TDE functionality - psql -p $MASTER_PORT -d $DB_NAME -f $SQL_DIR/sample_data.sql > /dev/null + echo "Primary Server Configuration Completed! " >> $LOGFILE 2>&1 } -#===================================================== +# Function to configure the standby PostgreSQL server configure_standby() { local standby_data=$1 local standby_port=$2 - local standby_log=$standby_data/standby.log + local standby_log="$standby_data/standby.log" + + log_message "Configuring Standby Server on Port: $standby_port..." - # Make sure $standby_data is empty + # Ensure the standby data directory is clean if [ -d "$standby_data" ]; then - if pg_ctl -D "$standby_data" status -o "-p $standby_port" >/dev/null; then - pg_ctl -D "$standby_data" stop -o "-p $standby_port" + if pg_ctl -D "$standby_data" status -o "-p $standby_port" >/dev/null 2>&1; then + pg_ctl -D "$standby_data" stop -o "-p $standby_port" >> $LOGFILE 2>&1 fi rm -rf "$standby_data" fi - - log_message "Creating pg_basebackup $standby_data..." - pg_basebackup -D $standby_data -U replication -p $MASTER_PORT -Xs -R -P - # Update the postgresql.conf file with the port $standby_port - log_message "Updating $standby_data/postgresql.conf" + # Create a fresh base backup from the primary + pg_basebackup -D "$standby_data" -U replication -p "$MASTER_PORT" -Xs -R -P >> $LOGFILE + + # Update the postgresql.conf file with the correct port and log settings cat >> $standby_data/postgresql.conf <> $LOGFILE 2>&1 + + # Give some time for the standby to initialize sleep 5 + + # Verify that the standby is running and connected to the primary + psql -h "localhost" -p "$standby_port" -d postgres -c "SELECT pg_is_in_recovery();" | grep -q "t" + if [ $? 
-eq 0 ]; then
+        log_message "✅ Standby Server is in Recovery Mode (Replication Active)"
+        ((TESTS_PASSED++))
+    else
+        log_message "❌ Standby Server is NOT in recovery mode! Replication may have failed."
+        ((TESTS_FAILED++))
+    fi
+}
+insert_data(){
+    local sql_file="${1:-sample_data.sql}"
+    local db_name="${2:-$DB_NAME}"
+    local port="${3:-$MASTER_PORT}"
+
+    psql -p "$port" -d "$db_name" -f "$SQL_DIR/$sql_file" >> "$LOGFILE" 2>&1
+}
+
+# Function to verify data consistency between Master and Standby nodes
+verify_database_data() {
+    local sql_file="${1:-verify_sample_data.sql}"
+    local standby_ports=("$STANDBY1_PORT" "$STANDBY2_PORT")
+    local standby_dirs=("${ACTUAL_DIR}/standby1" "${ACTUAL_DIR}/standby2")
+
+    # Verify sample data on all standby nodes
+    for i in "${!standby_ports[@]}"; do
+        log_message "🔎 Verifying $sql_file data on standby (Port: ${standby_ports[$i]})..."
+        run_sql "$sql_file" "$DB_NAME" "${standby_ports[$i]}" "${standby_dirs[$i]}"
+        verify_output "$sql_file" "${standby_dirs[$i]}"
+    done
+}
+
+# Function to verify that data is encrypted at rest
+verify_encrypted_data_at_rest() {
+    local table_name="${1:-$TABLE_NAME}"
+    local search_text="${2:-$SEARCHED_TEXT}"
+    local pg_port="${3:-$MASTER_PORT}"
+    local db_name="${4:-$DB_NAME}"
+
+    # Retrieve the data file path
+    local pg_relation_filepath
+    local data_dir_path
+    local file_name
+
+    pg_relation_filepath=$(psql -p "$pg_port" -d "$db_name" -t -c "SELECT pg_relation_filepath('$table_name');" | xargs)
+    data_dir_path=$(psql -p "$pg_port" -d "$db_name" -t -c "SHOW data_directory;" | xargs)
+    file_name="$data_dir_path/$pg_relation_filepath"
+
+    # Check if the file exists
+    if [[ ! 
-f "$file_name" ]]; then + log_message "❌ Data file not found: $file_name" + return 1 + fi + + # Extract first 10 lines of raw data for reference + local raw_data + raw_data=$(sudo hexdump -C "$file_name" | head -n 10 || true) + echo "$raw_data" >> "$LOGFILE" + + # Check for readable text in the file (unencrypted data detection) + if sudo strings "$file_name" | grep -q "$search_text"; then + log_message "❌ Readable text detected! Data appears UNENCRYPTED." + ((TESTS_FAILED++)) + else + log_message "✅ Data appears to be ENCRYPTED!" + ((TESTS_PASSED++)) + fi } -data_verification() { - # Verify the data on the standby1 - run_sql verify_sample_data.sql $DB_NAME $STANDBY1_PORT "${ACTUAL_DIR}/standby1" - log_message "Verifying sample data on master and standby1" - verify_output verify_sample_data.sql "${ACTUAL_DIR}/standby1" +# Function to verify data consistency between Master and Standby nodes +verify_data_ondisk() { + local standby_ports=("$MASTER_PORT" "$STANDBY1_PORT" "$STANDBY2_PORT") + local standby_labels=("Master" "Standby1" "Standby2") - # Verify the data on the standby2 - run_sql verify_sample_data.sql $DB_NAME $STANDBY2_PORT "${ACTUAL_DIR}/standby2" - log_message "Verifying sample data on master and standby2" - verify_output verify_sample_data.sql "${ACTUAL_DIR}/standby2" + for i in "${!standby_ports[@]}"; do + log_message "🔎 Verifying encryption on ${standby_labels[$i]} (Port: ${standby_ports[$i]})..." + verify_encrypted_data_at_rest "$TABLE_NAME" "$SEARCHED_TEXT" "${standby_ports[$i]}" "$DB_NAME" + done +} - # Add some more data after replication setup into the master - psql -p $MASTER_PORT -d $DB_NAME -f $SQL_DIR/incremental_data.sql > /dev/null +# Initialize pgbench on Master +initialize_pgbench() { + log_message "Initializing pgbench with scale factor $SCALE on database: $DB_NAME and (port: $MASTER_PORT)..." + pgbench -U postgres -i -s $SCALE -d $DB_NAME -p $MASTER_PORT >> $LOGFILE 2>&1 + if [ $? 
-eq 0 ]; then + log_message "✅ Pgbench Initialization done..." + else + log_message "❌ Pgbench Initialization failed..." + fi +} - # Verify the data on the standby1 - run_sql verify_incremental_data.sql $DB_NAME $STANDBY1_PORT "${ACTUAL_DIR}/standby1" - log_message "Verifying incremental data on master and standby1" - verify_output verify_incremental_data.sql "${ACTUAL_DIR}/standby1" +# Run pgbench Transactions +run_pgbench() { + sync && sudo sh -c 'echo 3 > /proc/sys/vm/drop_caches' + log_message "Running pgbench with $CLIENTS clients and $THREADS threads for $DURATION seconds..." + pgbench -T $DURATION -c $CLIENTS -j $THREADS -M prepared -d $DB_NAME -p $MASTER_PORT >> $LOGFILE 2>&1 + if [ $? -eq 0 ]; then + log_message "✅ Pgbench Run Completed..." + else + log_message "❌ Pgbench Run failed..." + fi +} - # Verify the data on the standby2 - run_sql verify_incremental_data.sql $DB_NAME $STANDBY2_PORT "${ACTUAL_DIR}/standby2" - log_message "Verifying incremental data on master and standby2" - verify_output verify_incremental_data.sql "${ACTUAL_DIR}/standby2" +# Check replication lag before and after running pgbench +check_replication_lag() { + log_message "🔍 Checking Replication Lag on Master..." + psql -p "$MASTER_PORT" -d "$DB_NAME" -c "SELECT * FROM pg_stat_replication;" >> "$LOGFILE" 2>&1 + if [ $? -eq 0 ]; then + log_message "✅ Replication Lag Check passed on Master..." + ((TESTS_PASSED++)) + else + log_message "❌ Replication Lag Check failed on Master..." + ((TESTS_FAILED++)) + fi } +check_replication_wal_stats() { + log_message "🔍 Checking WAL Statistics on Master..." + psql -p "$MASTER_PORT" -d "$DB_NAME" -c "SELECT * FROM pg_stat_wal;" >> "$LOGFILE" 2>&1 -verify_data_ondisk(){ - verify_encrypted_data_at_rest $TABLE_NAME $SEARCHED_TEXT $MASTER_PORT $DB_NAME - verify_encrypted_data_at_rest $TABLE_NAME $SEARCHED_TEXT $STANDBY1_PORT $DB_NAME - verify_encrypted_data_at_rest $TABLE_NAME $SEARCHED_TEXT $STANDBY2_PORT $DB_NAME + if [ $? 
-eq 0 ]; then + log_message "✅ Replication WAL Statistics Passed on Master..." + ((TESTS_PASSED++)) + else + log_message "❌ Replication WAL Statistics Failed on Master..." + ((TESTS_FAILED++)) + fi } +# Analyze PostgreSQL logs for replication errors +analyze_logs() { + local port="${1:-$MASTER_PORT}" + local data_dir_path=$(psql -p "$port" -d "$DB_NAME" -t -c "SHOW data_directory;" | xargs) + local log_dir_path=$(psql -p "$port" -d "$DB_NAME" -t -c "SHOW log_directory;" | xargs) + local log_file="$data_dir_path/$log_dir_path/postgresql.log" + + # Ensure the log file exists before searching + if [[ ! -f "$log_file" ]]; then + log_message "PostgreSQL log file not found: $log_file" + return 1 + fi + + temp_log=$(mktemp) + + # Extract errors related to replication and WAL for Master and Standby nodes + grep -Ei 'replication|error|fatal|wal' "$log_file" | tail -20 | tee -a "$LOGFILE" > "$temp_log" + + # Check if errors exist in extracted logs + if grep -Ei 'fatal|error' "$temp_log"; then + log_message "❌ Errors detected in PostgreSQL logs! Check $log_file for details." 
+        ((TESTS_FAILED++))
+    else
+        log_message "✅ No Error message in Server log: ($port)"
+        ((TESTS_PASSED++))
+    fi
+
+    # Clean up temporary file
+    rm -f "$temp_log"
+}
+
+# Function to run the pgbench test suite
+pgbench_test_suite() {
+    echo "=== 🚀 Running pgbench Replication Tests ====" |tee -a $LOGFILE
+
+    # Check replication lag before running pgbench
+    check_replication_lag
+
+    # Check WAL statistics before running pgbench
+    check_replication_wal_stats
+
+    # Initialize pgbench on the master node
+    initialize_pgbench
+
+    # Run pgbench on the master node
+    run_pgbench
+
+    # Check replication lag after running pgbench
+    check_replication_lag
+
+    # Check WAL statistics after running pgbench
+    check_replication_wal_stats
+
+    # Verify that pgbench data was replicated correctly
+    #pgbench_verification
+    verify_database_data verify_pgbench_data.sql
+
+    # Verify log files on master node for errors
+    analyze_logs $MASTER_PORT
+    # Verify log files on standby1 node for errors
+    analyze_logs $STANDBY1_PORT
+    # Verify log files on standby2 node for errors
+    analyze_logs $STANDBY2_PORT
+
+    echo "== 🚀 Pgbench Tests completed==" |tee -a $LOGFILE
+}
+
+# Function to promote the standby node to master
 promote_standby() {
     local standby_data=$1
     local standby_log=$standby_data/standby.log
     pg_ctl -D $standby_data promote -l $standby_log
 }
 
+# Function to rewind the master node using pg_rewind
 pg_rewind() {
     pg_rewind --target-pgdata=$MASTER_DATA --source-server="port=$STANDBY1_PORT user=postgres dbname=$DB_NAME" > $ACTUAL_DIR/pg_rewind.log 2>&1
 }
+
+# Function to summarize test results
+summarize_results() {
+    echo "=============================================" |tee -a $LOGFILE
+    echo "======== Test Suite Summary =======" |tee -a $LOGFILE
+    echo "=============================================" |tee -a $LOGFILE
+    log_message "✅ Tests Passed: $TESTS_PASSED"
+    log_message "❌ Tests Failed: $TESTS_FAILED"
+
+    if [ "$TESTS_FAILED" -eq 0 ]; then
+        log_message "✅ ALL TESTS PASSED! 
Replication is working correctly."
+        status=0
+    else
+        log_message "❌ SOME TESTS FAILED! Check logs for details."
+        status=1
+    fi
+    # Print the closing banner BEFORE exiting; previously these echoes sat
+    # after exit 0/exit 1 and were unreachable.
+    echo "=============================================" |tee -a $LOGFILE
+    echo "======== Test Suite Completed ==============" |tee -a $LOGFILE
+    echo "=============================================" |tee -a $LOGFILE
+    exit $status
+}
+
 #======================================================
-# Main Script Execution
-main() {
-    echo "=== Starting replication Test Automation ==="
+# Main Function to Run All Tests
+run_tests() {
+    log_message "=== Starting PostgreSQL Replication Test Suite ==="
     configure_primary_server
     configure_standby $STANDBY1_DATA $STANDBY1_PORT
     configure_standby $STANDBY2_DATA $STANDBY2_PORT
-    data_verification
+    verify_database_data verify_sample_data.sql
+    insert_data incremental_data.sql $DB_NAME $MASTER_PORT
+    verify_database_data verify_incremental_data.sql
     verify_data_ondisk
+    pgbench_test_suite
     #promote_standby
-    #pg_rewind
-    echo "=== replication Test Automation Completed! 
=== 🚀" -} + #pg_rewind_master -# Run Main Function -main + summarize_results +} +# Run All Tests +run_tests diff --git a/ci_scripts/backup/sql/verify_pgbench_data.sql b/ci_scripts/backup/sql/verify_pgbench_data.sql new file mode 100644 index 0000000000000..812b158a787ef --- /dev/null +++ b/ci_scripts/backup/sql/verify_pgbench_data.sql @@ -0,0 +1,29 @@ +-- Verify pgbench accounts data is replicated correctly +SELECT COUNT(*) AS total_accounts FROM pgbench_accounts; + +-- Check if pgbench history table matches expected transactions +SELECT COUNT(*) AS total_history FROM pgbench_history; + +-- Check last transaction timestamps to ensure no delay +SELECT MAX(tid) AS last_transaction FROM pgbench_history; + +-- Check data integrity by comparing last 5 accounts +SELECT aid, bid FROM pgbench_accounts ORDER BY aid DESC LIMIT 5; + +-- Check last account ID and balance for consistency +SELECT MAX(aid) AS last_account_id FROM pgbench_accounts; + +-- Detect if the query is running on a Replica (Standby) +SELECT pg_is_in_recovery() AS is_replica; + +-- Only check WAL replay status on standby nodes +--DO $$ +--BEGIN +-- IF pg_is_in_recovery() THEN +-- RAISE NOTICE 'Checking WAL replay status...'; +-- PERFORM pg_last_wal_replay_lsn(), pg_last_wal_receive_lsn(); +-- ELSE +-- RAISE NOTICE 'Skipping WAL check on Master.'; +-- END IF; +--END $$; + diff --git a/ci_scripts/configure-tde-server.sh b/ci_scripts/configure-tde-server.sh index d79e6c1cad7e6..3b6658032af6b 100644 --- a/ci_scripts/configure-tde-server.sh +++ b/ci_scripts/configure-tde-server.sh @@ -16,7 +16,7 @@ export PGPORT="${2:-5432}" if [ -d "$PGDATA" ]; then if pg_ctl -D "$PGDATA" status -o "-p $PGPORT" >/dev/null; then - pg_ctl -D "$PGDATA" stop -o "-p $PGPORT" + pg_ctl -D "$PGDATA" stop -o "-p $PGPORT" >/dev/null; fi rm -rf "$PGDATA" @@ -24,8 +24,8 @@ fi initdb -D "$PGDATA" --set shared_preload_libraries=pg_tde -pg_ctl -D "$PGDATA" start -o "-p $PGPORT" +pg_ctl -D "$PGDATA" start -o "-p $PGPORT" -l "$PGDATA/logfile" 
psql postgres -f "$SCRIPT_DIR/tde_setup_global.sql" -pg_ctl -D "$PGDATA" restart -o "-p $PGPORT" +pg_ctl -D "$PGDATA" restart -o "-p $PGPORT" -l "$PGDATA/logfile"