diff --git a/.github/actions/save_logs_and_results/action.yml b/.github/actions/save_logs_and_results/action.yml index 0f238835d19..b344c68f2ef 100644 --- a/.github/actions/save_logs_and_results/action.yml +++ b/.github/actions/save_logs_and_results/action.yml @@ -6,7 +6,7 @@ inputs: runs: using: composite steps: - - uses: actions/upload-artifact@v3.1.1 + - uses: actions/upload-artifact@v4.6.0 name: Upload logs with: name: ${{ inputs.folder }} diff --git a/.github/actions/setup_extension/action.yml b/.github/actions/setup_extension/action.yml index 96b408e7e43..33129f17de6 100644 --- a/.github/actions/setup_extension/action.yml +++ b/.github/actions/setup_extension/action.yml @@ -17,7 +17,7 @@ runs: echo "PG_MAJOR=${{ inputs.pg_major }}" >> $GITHUB_ENV fi shell: bash - - uses: actions/download-artifact@v3.0.1 + - uses: actions/download-artifact@v4.1.8 with: name: build-${{ env.PG_MAJOR }} - name: Install Extension diff --git a/.github/actions/upload_coverage/action.yml b/.github/actions/upload_coverage/action.yml index 0b5f581a6a4..ba80ba63afa 100644 --- a/.github/actions/upload_coverage/action.yml +++ b/.github/actions/upload_coverage/action.yml @@ -21,7 +21,7 @@ runs: mkdir -p /tmp/codeclimate cc-test-reporter format-coverage -t lcov -o /tmp/codeclimate/${{ inputs.flags }}.json lcov.info shell: bash - - uses: actions/upload-artifact@v3.1.1 + - uses: actions/upload-artifact@v4.6.0 with: path: "/tmp/codeclimate/*.json" - name: codeclimate + name: codeclimate-${{ inputs.flags }} diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index 155ae596e54..a31c74d7367 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -40,8 +40,21 @@ jobs: runs-on: ubuntu-20.04 container: image: ${{ needs.params.outputs.build_image_name }}:latest + volumes: + # see https://github.com/actions/checkout/issues/1474#issuecomment-2604308106 + - /tmp:/__e/node20 options: --user root steps: + # see https://github.com/actions/checkout/issues/1474#issuecomment-2604308106 + - name: Install node.js for GitHub Actions + run: | + apt-get install -y curl ca-certificates && + # Install a Node.js version that works in older Ubuntu containers (read: does not require very recent glibc) + NODE_VERSION=v20.18.1 && + NODE_TAR_FILE=node-$NODE_VERSION-linux-x64-glibc-217.tar.gz && + NODE_URL=https://unofficial-builds.nodejs.org/download/release/$NODE_VERSION/$NODE_TAR_FILE && + curl -Lo /tmp/$NODE_TAR_FILE $NODE_URL && + tar -C /__e/node20 -x --strip-components=1 -f /tmp/$NODE_TAR_FILE - uses: actions/checkout@v3.5.0 - name: Check Snapshots run: | @@ -56,7 +69,7 @@ jobs: - name: Check Snapshots run: | git config --global --add safe.directory ${GITHUB_WORKSPACE} - - uses: actions/checkout@v3.5.0 + - uses: actions/checkout@v4 with: fetch-depth: 0 - name: Check C Style @@ -110,14 +123,14 @@ jobs: image: "${{ matrix.image_name }}:${{ fromJson(matrix.pg_version).full }}${{ matrix.image_suffix }}" options: --user root steps: - - uses: actions/checkout@v3.5.0 + - uses: actions/checkout@v4 - name: Expose $PG_MAJOR to Github Env run: echo "PG_MAJOR=${PG_MAJOR}" >> $GITHUB_ENV shell: bash - name: Build run: "./ci/build-citus.sh" shell: bash - - uses: actions/upload-artifact@v3.1.1 + - uses: actions/upload-artifact@v4.6.0 with: name: build-${{ env.PG_MAJOR }} path: |- @@ -133,9 +146,9 @@ jobs: image_name: - ${{ needs.params.outputs.test_image_name }} pg_version: - - ${{ needs.params.outputs.pg13_version }} - ${{ needs.params.outputs.pg13_version }} - ${{ 
needs.params.outputs.pg14_version }} + - ${{ needs.params.outputs.pg15_version }} make: - check-split - check-multi @@ -204,7 +217,7 @@ jobs: - params - build steps: - - uses: actions/checkout@v3.5.0 + - uses: actions/checkout@v4 - uses: "./.github/actions/setup_extension" - name: Run Test run: gosu circleci make -C src/test/${{ matrix.suite }} ${{ matrix.make }} @@ -238,7 +251,7 @@ jobs: - ${{ needs.params.outputs.pg15_version }} parallel: [0,1,2,3,4,5] # workaround for running 6 parallel jobs steps: - - uses: actions/checkout@v3.5.0 + - uses: actions/checkout@v4 - uses: "./.github/actions/setup_extension" - name: Test arbitrary configs run: |- @@ -260,10 +273,12 @@ jobs: check-arbitrary-configs parallel=4 CONFIGS=$TESTS - uses: "./.github/actions/save_logs_and_results" if: always() + with: + folder: ${{ env.PG_MAJOR }}_arbitrary_configs_${{ matrix.parallel }} - uses: "./.github/actions/upload_coverage" if: always() with: - flags: ${{ env.pg_major }}_upgrade + flags: ${{ env.PG_MAJOR }}_arbitrary_configs_${{ matrix.parallel }} codecov_token: ${{ secrets.CODECOV_TOKEN }} test-pg-upgrade: name: PG${{ matrix.old_pg_major }}-PG${{ matrix.new_pg_major }} - check-pg-upgrade @@ -286,7 +301,7 @@ jobs: old_pg_major: ${{ matrix.old_pg_major }} new_pg_major: ${{ matrix.new_pg_major }} steps: - - uses: actions/checkout@v3.5.0 + - uses: actions/checkout@v4 - uses: "./.github/actions/setup_extension" with: pg_major: "${{ env.old_pg_major }}" @@ -309,6 +324,8 @@ jobs: if: failure() - uses: "./.github/actions/save_logs_and_results" if: always() + with: + folder: ${{ env.old_pg_major }}_${{ env.new_pg_major }}_upgrade - uses: "./.github/actions/upload_coverage" if: always() with: @@ -324,7 +341,7 @@ jobs: - params - build steps: - - uses: actions/checkout@v3.5.0 + - uses: actions/checkout@v4 - uses: "./.github/actions/setup_extension" with: skip_installation: true @@ -354,10 +371,12 @@ jobs: done; - uses: "./.github/actions/save_logs_and_results" if: always() + with: + folder: ${{ env.PG_MAJOR }}_citus_upgrade - uses: "./.github/actions/upload_coverage" if: always() with: - flags: ${{ env.pg_major }}_upgrade + flags: ${{ env.PG_MAJOR }}_citus_upgrade codecov_token: ${{ secrets.CODECOV_TOKEN }} upload-coverage: if: always() @@ -373,10 +392,11 @@ jobs: - test-citus-upgrade - test-pg-upgrade steps: - - uses: actions/download-artifact@v3.0.1 + - uses: actions/download-artifact@v4.1.8 with: - name: "codeclimate" - path: "codeclimate" + pattern: codeclimate* + path: codeclimate + merge-multiple: true - name: Upload coverage results to Code Climate run: |- cc-test-reporter sum-coverage codeclimate/*.json -o total.json @@ -388,7 +408,7 @@ jobs: needs: - build steps: - - uses: actions/checkout@v3.5.0 + - uses: actions/checkout@v4 - uses: azure/login@v1 with: creds: ${{ secrets.AZURE_CREDENTIALS }} @@ -406,7 +426,7 @@ jobs: needs: - build steps: - - uses: actions/checkout@v3.5.0 + - uses: actions/checkout@v4 - uses: azure/login@v1 with: creds: ${{ secrets.AZURE_CREDENTIALS }} @@ -425,7 +445,7 @@ jobs: outputs: json: ${{ steps.parallelization.outputs.json }} steps: - - uses: actions/checkout@v3.5.0 + - uses: actions/checkout@v4 - uses: "./.github/actions/parallelization" id: parallelization with: @@ -438,7 +458,7 @@ jobs: outputs: tests: ${{ steps.detect-regression-tests.outputs.tests }} steps: - - uses: actions/checkout@v3.5.0 + - uses: actions/checkout@v4 with: fetch-depth: 0 - name: Detect regression tests need to be ran @@ -473,8 +493,8 @@ jobs: fail-fast: false matrix: ${{ 
fromJson(needs.prepare_parallelization_matrix_32.outputs.json) }} steps: - - uses: actions/checkout@v3.5.0 - - uses: actions/download-artifact@v3.0.1 + - uses: actions/checkout@v4 + - uses: actions/download-artifact@v4.1.8 - uses: "./.github/actions/setup_extension" - name: Run minimal tests run: |- diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 6478abf4b69..027f5a0487d 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -21,7 +21,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Initialize CodeQL uses: github/codeql-action/init@v2 diff --git a/.github/workflows/flaky_test_debugging.yml b/.github/workflows/flaky_test_debugging.yml index a666c1cd5df..04ec1837e7a 100644 --- a/.github/workflows/flaky_test_debugging.yml +++ b/.github/workflows/flaky_test_debugging.yml @@ -28,13 +28,13 @@ jobs: image: ${{ vars.build_image_name }}:${{ vars.pg15_version }}${{ vars.image_suffix }} options: --user root steps: - - uses: actions/checkout@v3.5.0 + - uses: actions/checkout@v4 - name: Configure, Build, and Install run: | echo "PG_MAJOR=${PG_MAJOR}" >> $GITHUB_ENV ./ci/build-citus.sh shell: bash - - uses: actions/upload-artifact@v3.1.1 + - uses: actions/upload-artifact@v4.6.0 with: name: build-${{ env.PG_MAJOR }} path: |- @@ -46,7 +46,7 @@ jobs: outputs: json: ${{ steps.parallelization.outputs.json }} steps: - - uses: actions/checkout@v3.5.0 + - uses: actions/checkout@v4 - uses: "./.github/actions/parallelization" id: parallelization with: @@ -67,7 +67,7 @@ jobs: fail-fast: false matrix: ${{ fromJson(needs.prepare_parallelization_matrix.outputs.json) }} steps: - - uses: actions/checkout@v3.5.0 + - uses: actions/checkout@v4 - uses: "./.github/actions/setup_extension" - name: Run minimal tests run: |- diff --git a/.github/workflows/packaging-test-pipelines.yml b/.github/workflows/packaging-test-pipelines.yml index 56cfc3f2f7f..d191957c726 100644 --- a/.github/workflows/packaging-test-pipelines.yml +++ b/.github/workflows/packaging-test-pipelines.yml @@ -6,6 +6,10 @@ on: workflow_dispatch: +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + jobs: get_postgres_versions_from_file: @@ -14,7 +18,7 @@ jobs: pg_versions: ${{ steps.get-postgres-versions.outputs.pg_versions }} steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 2 - name: Get Postgres Versions @@ -42,9 +46,7 @@ jobs: # For this reason, we need to use a "matrix" to generate names of # rpm images, e.g. 
citus/packaging:centos-7-pg12 packaging_docker_image: - - oraclelinux-7 - oraclelinux-8 - - centos-7 - almalinux-8 - almalinux-9 POSTGRES_VERSION: ${{ fromJson(needs.get_postgres_versions_from_file.outputs.pg_versions) }} @@ -55,7 +57,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Set Postgres and python parameters for rpm based distros run: | @@ -111,7 +113,6 @@ jobs: - debian-buster-all - debian-bookworm-all - debian-bullseye-all - - ubuntu-bionic-all - ubuntu-focal-all - ubuntu-jammy-all @@ -123,7 +124,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Set pg_config path and python parameters for deb based distros run: | diff --git a/src/backend/distributed/utils/background_jobs.c b/src/backend/distributed/utils/background_jobs.c index 789732d212b..84ef4229fbd 100644 --- a/src/backend/distributed/utils/background_jobs.c +++ b/src/backend/distributed/utils/background_jobs.c @@ -395,7 +395,7 @@ citus_task_wait_internal(int64 taskid, BackgroundTaskStatus *desiredStatus) /* sleep for a while, before rechecking the task status */ CHECK_FOR_INTERRUPTS(); - const long delay_ms = 1000; + const long delay_ms = 100; (void) WaitLatch(MyLatch, WL_LATCH_SET | WL_TIMEOUT | WL_EXIT_ON_PM_DEATH, delay_ms, diff --git a/src/test/regress/citus_tests/upgrade/citus_upgrade_test.py b/src/test/regress/citus_tests/upgrade/citus_upgrade_test.py index 3ea51d5d935..6dfc51e512b 100755 --- a/src/test/regress/citus_tests/upgrade/citus_upgrade_test.py +++ b/src/test/regress/citus_tests/upgrade/citus_upgrade_test.py @@ -102,9 +102,9 @@ def remove_tar_files(tar_path): def restart_databases(pg_path, rel_data_path, mixed_mode, config): for node_name in config.node_name_to_ports.keys(): - if ( - mixed_mode - and config.node_name_to_ports[node_name] == config.chosen_random_worker_port + if mixed_mode and config.node_name_to_ports[node_name] in ( + config.chosen_random_worker_port, + config.coordinator_port(), ): continue abs_data_path = os.path.abspath(os.path.join(rel_data_path, node_name)) @@ -135,7 +135,10 @@ def restart_database(pg_path, abs_data_path, node_name, node_ports, logfile_pref def run_alter_citus(pg_path, mixed_mode, config): for port in config.node_name_to_ports.values(): - if mixed_mode and port == config.chosen_random_worker_port: + if mixed_mode and port in ( + config.chosen_random_worker_port, + config.coordinator_port(), + ): continue utils.psql(pg_path, port, "ALTER EXTENSION citus UPDATE;") @@ -145,7 +148,8 @@ def verify_upgrade(config, mixed_mode, node_ports): actual_citus_version = get_actual_citus_version(config.bindir, port) expected_citus_version = MASTER_VERSION if expected_citus_version != actual_citus_version and not ( - mixed_mode and port == config.chosen_random_worker_port + mixed_mode + and port in (config.chosen_random_worker_port, config.coordinator_port()) ): print( "port: {} citus version {} expected {}".format( diff --git a/src/test/regress/expected/background_rebalance_parallel.out b/src/test/regress/expected/background_rebalance_parallel.out index 187f709e447..dbdd963a97b 100644 --- a/src/test/regress/expected/background_rebalance_parallel.out +++ b/src/test/regress/expected/background_rebalance_parallel.out @@ -513,6 +513,12 @@ FROM pg_dist_background_task WHERE job_id in (:job_id) ORDER BY task_id; (8 rows) -- increase citus.max_background_task_executors_per_node +SELECT citus_task_wait(1013, desired_status => 'done'); + citus_task_wait 
+--------------------------------------------------------------------- + +(1 row) + ALTER SYSTEM SET citus.max_background_task_executors_per_node = 2; SELECT pg_reload_conf(); pg_reload_conf @@ -520,13 +526,13 @@ SELECT pg_reload_conf(); t (1 row) -SELECT citus_task_wait(1015, desired_status => 'running'); +SELECT citus_task_wait(1014, desired_status => 'running'); citus_task_wait --------------------------------------------------------------------- (1 row) -SELECT citus_task_wait(1013, desired_status => 'done'); +SELECT citus_task_wait(1015, desired_status => 'running'); citus_task_wait --------------------------------------------------------------------- diff --git a/src/test/regress/expected/citus_non_blocking_split_shard_cleanup.out b/src/test/regress/expected/citus_non_blocking_split_shard_cleanup.out index e2685c2d792..a559ec4428f 100644 --- a/src/test/regress/expected/citus_non_blocking_split_shard_cleanup.out +++ b/src/test/regress/expected/citus_non_blocking_split_shard_cleanup.out @@ -107,6 +107,12 @@ SELECT pg_catalog.citus_split_shard_by_split_points( (1 row) +SELECT public.wait_for_resource_cleanup(); + wait_for_resource_cleanup +--------------------------------------------------------------------- + +(1 row) + \c - - - :worker_2_port SET search_path TO "citus_split_test_schema"; -- Replication slots should be cleaned up diff --git a/src/test/regress/expected/foreign_key_to_reference_shard_rebalance.out b/src/test/regress/expected/foreign_key_to_reference_shard_rebalance.out index bae1895f94c..38b82ed6809 100644 --- a/src/test/regress/expected/foreign_key_to_reference_shard_rebalance.out +++ b/src/test/regress/expected/foreign_key_to_reference_shard_rebalance.out @@ -210,7 +210,7 @@ select create_distributed_table('partitioned_tbl_with_fkey','x'); create table partition_1_with_fkey partition of partitioned_tbl_with_fkey for values from ('2022-01-01') to ('2022-12-31'); create table partition_2_with_fkey partition of partitioned_tbl_with_fkey for values from ('2023-01-01') to ('2023-12-31'); -create table partition_3_with_fkey partition of partitioned_tbl_with_fkey for values from ('2024-01-01') to ('2024-12-31'); +create table partition_3_with_fkey partition of partitioned_tbl_with_fkey DEFAULT; insert into partitioned_tbl_with_fkey (x,y) select s,s%10 from generate_series(1,100) s; ALTER TABLE partitioned_tbl_with_fkey ADD CONSTRAINT fkey_to_ref_tbl FOREIGN KEY (y) REFERENCES ref_table_with_fkey(id); WITH shardid AS (SELECT shardid FROM pg_dist_shard where logicalrelid = 'partitioned_tbl_with_fkey'::regclass ORDER BY shardid LIMIT 1) diff --git a/src/test/regress/expected/multi_move_mx_0.out b/src/test/regress/expected/multi_move_mx_0.out new file mode 100644 index 00000000000..77f904dcad7 --- /dev/null +++ b/src/test/regress/expected/multi_move_mx_0.out @@ -0,0 +1,277 @@ +-- +-- MULTI_MOVE_MX +-- +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1550000; +-- Create mx test tables +SET citus.shard_count TO 4; +SET citus.shard_replication_factor TO 1; +CREATE TABLE mx_table_1 (a int); +SELECT create_distributed_table('mx_table_1', 'a'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +CREATE TABLE mx_table_2 (a int); +SELECT create_distributed_table('mx_table_2', 'a'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +CREATE TABLE mx_table_3 (a text); +SELECT create_distributed_table('mx_table_3', 'a'); + create_distributed_table 
+--------------------------------------------------------------------- + +(1 row) + +-- Check that the first two tables are colocated +SELECT + logicalrelid, repmodel +FROM + pg_dist_partition +WHERE + logicalrelid = 'mx_table_1'::regclass + OR logicalrelid = 'mx_table_2'::regclass + OR logicalrelid = 'mx_table_3'::regclass +ORDER BY + logicalrelid; + logicalrelid | repmodel +--------------------------------------------------------------------- + mx_table_1 | s + mx_table_2 | s + mx_table_3 | s +(3 rows) + +-- Check the list of shards +SELECT + logicalrelid, shardid, nodename, nodeport +FROM + pg_dist_shard NATURAL JOIN pg_dist_shard_placement +WHERE + logicalrelid = 'mx_table_1'::regclass + OR logicalrelid = 'mx_table_2'::regclass + OR logicalrelid = 'mx_table_3'::regclass +ORDER BY + logicalrelid, shardid; + logicalrelid | shardid | nodename | nodeport +--------------------------------------------------------------------- + mx_table_1 | 1550000 | localhost | 57637 + mx_table_1 | 1550001 | localhost | 57638 + mx_table_1 | 1550002 | localhost | 57637 + mx_table_1 | 1550003 | localhost | 57638 + mx_table_2 | 1550004 | localhost | 57637 + mx_table_2 | 1550005 | localhost | 57638 + mx_table_2 | 1550006 | localhost | 57637 + mx_table_2 | 1550007 | localhost | 57638 + mx_table_3 | 1550008 | localhost | 57637 + mx_table_3 | 1550009 | localhost | 57638 + mx_table_3 | 1550010 | localhost | 57637 + mx_table_3 | 1550011 | localhost | 57638 +(12 rows) + +-- Check the data on the worker +\c - - - :worker_2_port +SELECT + logicalrelid, shardid, nodename, nodeport +FROM + pg_dist_shard NATURAL JOIN pg_dist_shard_placement +WHERE + logicalrelid = 'mx_table_1'::regclass + OR logicalrelid = 'mx_table_2'::regclass + OR logicalrelid = 'mx_table_3'::regclass +ORDER BY + logicalrelid, shardid; + logicalrelid | shardid | nodename | nodeport +--------------------------------------------------------------------- + mx_table_1 | 1550000 | localhost | 57637 + mx_table_1 | 1550001 | localhost | 57638 + mx_table_1 | 1550002 | localhost | 57637 + mx_table_1 | 1550003 | localhost | 57638 + mx_table_2 | 1550004 | localhost | 57637 + mx_table_2 | 1550005 | localhost | 57638 + mx_table_2 | 1550006 | localhost | 57637 + mx_table_2 | 1550007 | localhost | 57638 + mx_table_3 | 1550008 | localhost | 57637 + mx_table_3 | 1550009 | localhost | 57638 + mx_table_3 | 1550010 | localhost | 57637 + mx_table_3 | 1550011 | localhost | 57638 +(12 rows) + +\c - - - :master_port +-- Check that citus_copy_shard_placement cannot be run with MX tables +SELECT + citus_copy_shard_placement(shardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port, 'force_logical') +FROM + pg_dist_shard NATURAL JOIN pg_dist_shard_placement +WHERE + logicalrelid = 'mx_table_1'::regclass + AND nodeport = :worker_1_port +ORDER BY + shardid +LIMIT 1; +ERROR: Table 'mx_table_1' is streaming replicated. 
Shards of streaming replicated tables cannot be copied +-- Move a shard from worker 1 to worker 2 +SELECT + master_move_shard_placement(shardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port, 'force_logical') +FROM + pg_dist_shard NATURAL JOIN pg_dist_shard_placement +WHERE + logicalrelid = 'mx_table_1'::regclass + AND nodeport = :worker_1_port +ORDER BY + shardid +LIMIT 1; + master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +-- Test changing citus.node_conninfo on the target node affects the +-- CREATE SUBSCRIPTION command for shard move +\c - - - :worker_2_port +ALTER SYSTEM SET citus.node_conninfo TO 'sslrootcert=/non/existing/certificate.crt sslmode=verify-full'; +SELECT pg_reload_conf(); + pg_reload_conf +--------------------------------------------------------------------- + t +(1 row) + +\c - - - :worker_2_port +-- before reseting citus.node_conninfo, check that CREATE SUBSCRIPTION +-- with citus_use_authinfo takes into account node_conninfo even when +-- one of host, port, or user parameters are not specified. +-- +-- We need to specify host and port to not get an hba error, so we test +-- only with ommitting user. +CREATE SUBSCRIPTION subs_01 CONNECTION 'host=''localhost'' port=57637' +PUBLICATION pub_01 WITH (citus_use_authinfo=true); +ERROR: could not connect to the publisher: connection to server at "localhost" (::1), port 57637 failed: root certificate file "/non/existing/certificate.crt" does not exist +Either provide the file or change sslmode to disable server certificate verification. +ALTER SYSTEM RESET citus.node_conninfo; +SELECT pg_reload_conf(); + pg_reload_conf +--------------------------------------------------------------------- + t +(1 row) + +\c - - - :master_port +-- Check that the shard and its colocated shard is moved, but not the other shards +SELECT + logicalrelid, shardid, nodename, nodeport +FROM + pg_dist_shard NATURAL JOIN pg_dist_shard_placement +WHERE + (logicalrelid = 'mx_table_1'::regclass + OR logicalrelid = 'mx_table_2'::regclass + OR logicalrelid = 'mx_table_3'::regclass) + AND shardstate != 4 +ORDER BY + logicalrelid, shardid; + logicalrelid | shardid | nodename | nodeport +--------------------------------------------------------------------- + mx_table_1 | 1550000 | localhost | 57638 + mx_table_1 | 1550001 | localhost | 57638 + mx_table_1 | 1550002 | localhost | 57637 + mx_table_1 | 1550003 | localhost | 57638 + mx_table_2 | 1550004 | localhost | 57638 + mx_table_2 | 1550005 | localhost | 57638 + mx_table_2 | 1550006 | localhost | 57637 + mx_table_2 | 1550007 | localhost | 57638 + mx_table_3 | 1550008 | localhost | 57637 + mx_table_3 | 1550009 | localhost | 57638 + mx_table_3 | 1550010 | localhost | 57637 + mx_table_3 | 1550011 | localhost | 57638 +(12 rows) + +-- Check that the changes are made in the worker as well +\c - - - :worker_2_port +SELECT + logicalrelid, shardid, nodename, nodeport +FROM + pg_dist_shard NATURAL JOIN pg_dist_shard_placement +WHERE + logicalrelid = 'mx_table_1'::regclass + OR logicalrelid = 'mx_table_2'::regclass + OR logicalrelid = 'mx_table_3'::regclass +ORDER BY + logicalrelid, shardid; + logicalrelid | shardid | nodename | nodeport +--------------------------------------------------------------------- + mx_table_1 | 1550000 | localhost | 57638 + mx_table_1 | 1550001 | localhost | 57638 + mx_table_1 | 1550002 | localhost | 57637 + mx_table_1 | 1550003 | localhost | 57638 + mx_table_2 | 1550004 | localhost | 57638 + mx_table_2 | 1550005 | 
localhost | 57638 + mx_table_2 | 1550006 | localhost | 57637 + mx_table_2 | 1550007 | localhost | 57638 + mx_table_3 | 1550008 | localhost | 57637 + mx_table_3 | 1550009 | localhost | 57638 + mx_table_3 | 1550010 | localhost | 57637 + mx_table_3 | 1550011 | localhost | 57638 +(12 rows) + +-- Check that the UDFs cannot be called from the workers +SELECT + citus_copy_shard_placement(shardid, 'localhost', :worker_2_port, 'localhost', :worker_1_port, 'force_logical') +FROM + pg_dist_shard NATURAL JOIN pg_dist_shard_placement +WHERE + logicalrelid = 'mx_table_1'::regclass + AND nodeport = :worker_2_port +ORDER BY + shardid +LIMIT 1 OFFSET 1; +ERROR: operation is not allowed on this node +HINT: Connect to the coordinator and run it again. +SELECT + master_move_shard_placement(shardid, 'localhost', :worker_2_port, 'localhost', :worker_1_port, 'force_logical') +FROM + pg_dist_shard NATURAL JOIN pg_dist_shard_placement +WHERE + logicalrelid = 'mx_table_1'::regclass + AND nodeport = :worker_2_port +ORDER BY + shardid +LIMIT 1 OFFSET 1; +ERROR: operation is not allowed on this node +HINT: Connect to the coordinator and run it again. +-- Check that shards of a table with GENERATED columns can be moved. +\c - - - :master_port +SET citus.shard_count TO 4; +SET citus.shard_replication_factor TO 1; +CREATE TABLE mx_table_with_generated_column (a int, b int GENERATED ALWAYS AS ( a + 3 ) STORED, c int); +SELECT create_distributed_table('mx_table_with_generated_column', 'a'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +-- Check that dropped columns are handled properly in a move. +ALTER TABLE mx_table_with_generated_column DROP COLUMN c; +-- Move a shard from worker 1 to worker 2 +SELECT + citus_move_shard_placement(shardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port, 'force_logical') +FROM + pg_dist_shard NATURAL JOIN pg_dist_shard_placement +WHERE + logicalrelid = 'mx_table_with_generated_column'::regclass + AND nodeport = :worker_1_port +ORDER BY + shardid +LIMIT 1; + citus_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +-- Cleanup +\c - - - :master_port +SET client_min_messages TO WARNING; +CALL citus_cleanup_orphaned_resources(); +DROP TABLE mx_table_with_generated_column; +DROP TABLE mx_table_1; +DROP TABLE mx_table_2; +DROP TABLE mx_table_3; diff --git a/src/test/regress/expected/multi_mx_insert_select_repartition.out b/src/test/regress/expected/multi_mx_insert_select_repartition.out index 62f197c30bb..a3912ec8e90 100644 --- a/src/test/regress/expected/multi_mx_insert_select_repartition.out +++ b/src/test/regress/expected/multi_mx_insert_select_repartition.out @@ -103,10 +103,11 @@ NOTICE: executing the command locally: SELECT count(*) AS count FROM multi_mx_i 4 (1 row) + -- we omit the "SELECT bytes FROM fetch_intermediate_results..." 
line since it is flaky + SET LOCAL citus.grep_remote_commands TO '%multi_mx_insert_select_repartition%'; insert into target_table SELECT a*2 FROM source_table RETURNING a; NOTICE: executing the command locally: SELECT partition_index, 'repartitioned_results_xxxxx_from_4213581_to' || '_' || partition_index::text , rows_written FROM worker_partition_query_result('repartitioned_results_xxxxx_from_4213581_to','SELECT (a OPERATOR(pg_catalog.*) 2) AS a FROM multi_mx_insert_select_repartition.source_table_4213581 source_table WHERE true',0,'hash','{-2147483648,-715827883,715827882}'::text[],'{-715827884,715827881,2147483647}'::text[],true) WHERE rows_written > 0 NOTICE: executing the command locally: SELECT partition_index, 'repartitioned_results_xxxxx_from_4213583_to' || '_' || partition_index::text , rows_written FROM worker_partition_query_result('repartitioned_results_xxxxx_from_4213583_to','SELECT (a OPERATOR(pg_catalog.*) 2) AS a FROM multi_mx_insert_select_repartition.source_table_4213583 source_table WHERE true',0,'hash','{-2147483648,-715827883,715827882}'::text[],'{-715827884,715827881,2147483647}'::text[],true) WHERE rows_written > 0 -NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartitioned_results_xxxxx_from_4213582_to_0','repartitioned_results_xxxxx_from_4213584_to_0']::text[],'localhost',57638) bytes NOTICE: executing the command locally: INSERT INTO multi_mx_insert_select_repartition.target_table_4213585 AS citus_table_alias (a) SELECT intermediate_result.a FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213581_to_0,repartitioned_results_xxxxx_from_4213582_to_0,repartitioned_results_xxxxx_from_4213584_to_0}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer) RETURNING citus_table_alias.a NOTICE: executing the command locally: INSERT INTO multi_mx_insert_select_repartition.target_table_4213587 AS citus_table_alias (a) SELECT intermediate_result.a FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213581_to_2}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer) RETURNING citus_table_alias.a a diff --git a/src/test/regress/expected/multi_mx_insert_select_repartition_0.out b/src/test/regress/expected/multi_mx_insert_select_repartition_0.out index 15deba0c0af..62271f9a743 100644 --- a/src/test/regress/expected/multi_mx_insert_select_repartition_0.out +++ b/src/test/regress/expected/multi_mx_insert_select_repartition_0.out @@ -103,10 +103,11 @@ NOTICE: executing the command locally: SELECT count(*) AS count FROM multi_mx_i 4 (1 row) + -- we omit the "SELECT bytes FROM fetch_intermediate_results..." 
line since it is flaky + SET LOCAL citus.grep_remote_commands TO '%multi_mx_insert_select_repartition%'; insert into target_table SELECT a*2 FROM source_table RETURNING a; NOTICE: executing the command locally: SELECT partition_index, 'repartitioned_results_xxxxx_from_4213581_to' || '_' || partition_index::text , rows_written FROM worker_partition_query_result('repartitioned_results_xxxxx_from_4213581_to','SELECT (a OPERATOR(pg_catalog.*) 2) AS a FROM multi_mx_insert_select_repartition.source_table_4213581 source_table WHERE true',0,'hash','{-2147483648,-715827883,715827882}'::text[],'{-715827884,715827881,2147483647}'::text[],true) WHERE rows_written > 0 NOTICE: executing the command locally: SELECT partition_index, 'repartitioned_results_xxxxx_from_4213583_to' || '_' || partition_index::text , rows_written FROM worker_partition_query_result('repartitioned_results_xxxxx_from_4213583_to','SELECT (a OPERATOR(pg_catalog.*) 2) AS a FROM multi_mx_insert_select_repartition.source_table_4213583 source_table WHERE true',0,'hash','{-2147483648,-715827883,715827882}'::text[],'{-715827884,715827881,2147483647}'::text[],true) WHERE rows_written > 0 -NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartitioned_results_xxxxx_from_4213582_to_0','repartitioned_results_xxxxx_from_4213584_to_0']::text[],'localhost',57638) bytes NOTICE: executing the command locally: INSERT INTO multi_mx_insert_select_repartition.target_table_4213585 AS citus_table_alias (a) SELECT a FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213581_to_0,repartitioned_results_xxxxx_from_4213582_to_0,repartitioned_results_xxxxx_from_4213584_to_0}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer) RETURNING citus_table_alias.a NOTICE: executing the command locally: INSERT INTO multi_mx_insert_select_repartition.target_table_4213587 AS citus_table_alias (a) SELECT a FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213581_to_2}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer) RETURNING citus_table_alias.a a diff --git a/src/test/regress/pg_regress_multi.pl b/src/test/regress/pg_regress_multi.pl index 7b02ca5cc2f..34d2110252a 100755 --- a/src/test/regress/pg_regress_multi.pl +++ b/src/test/regress/pg_regress_multi.pl @@ -297,10 +297,12 @@ sub generate_hba open(my $fh, ">", catfile($TMP_CHECKDIR, $nodename, "data", "pg_hba.conf")) or die "could not open pg_hba.conf"; - print $fh "host all alice,bob localhost md5\n"; + print $fh "host all alice,bob 127.0.0.1/32 md5\n"; + print $fh "host all alice,bob ::1/128 md5\n"; print $fh "host all all 127.0.0.1/32 trust\n"; print $fh "host all all ::1/128 trust\n"; - print $fh "host replication postgres localhost trust\n"; + print $fh "host replication postgres 127.0.0.1/32 trust\n"; + print $fh "host replication postgres ::1/128 trust\n"; close $fh; } diff --git a/src/test/regress/spec/isolation_metadata_sync_deadlock.spec b/src/test/regress/spec/isolation_metadata_sync_deadlock.spec index 67c20a2b213..411faf8893f 100644 --- a/src/test/regress/spec/isolation_metadata_sync_deadlock.spec +++ b/src/test/regress/spec/isolation_metadata_sync_deadlock.spec @@ -22,6 +22,7 @@ setup teardown { + SELECT wait_until_metadata_sync(); DROP FUNCTION trigger_metadata_sync(); DROP TABLE deadlock_detection_test; DROP TABLE t2; diff --git a/src/test/regress/sql/background_rebalance_parallel.sql b/src/test/regress/sql/background_rebalance_parallel.sql index e55fd93bb0a..2eb952b671a 100644 --- 
a/src/test/regress/sql/background_rebalance_parallel.sql +++ b/src/test/regress/sql/background_rebalance_parallel.sql @@ -221,10 +221,12 @@ SELECT job_id, task_id, status, nodes_involved FROM pg_dist_background_task WHERE job_id in (:job_id) ORDER BY task_id; -- increase citus.max_background_task_executors_per_node +SELECT citus_task_wait(1013, desired_status => 'done'); ALTER SYSTEM SET citus.max_background_task_executors_per_node = 2; SELECT pg_reload_conf(); + +SELECT citus_task_wait(1014, desired_status => 'running'); SELECT citus_task_wait(1015, desired_status => 'running'); -SELECT citus_task_wait(1013, desired_status => 'done'); -- show that at most 2 tasks per node are running -- among the tasks that are not blocked diff --git a/src/test/regress/sql/citus_non_blocking_split_shard_cleanup.sql b/src/test/regress/sql/citus_non_blocking_split_shard_cleanup.sql index ba3f952154f..480d81b88b3 100644 --- a/src/test/regress/sql/citus_non_blocking_split_shard_cleanup.sql +++ b/src/test/regress/sql/citus_non_blocking_split_shard_cleanup.sql @@ -79,6 +79,8 @@ SELECT pg_catalog.citus_split_shard_by_split_points( ARRAY[:worker_2_node, :worker_2_node, :worker_2_node], 'force_logical'); +SELECT public.wait_for_resource_cleanup(); + \c - - - :worker_2_port SET search_path TO "citus_split_test_schema"; -- Replication slots should be cleaned up diff --git a/src/test/regress/sql/foreign_key_to_reference_shard_rebalance.sql b/src/test/regress/sql/foreign_key_to_reference_shard_rebalance.sql index 0b9826cb749..b1017768d6b 100644 --- a/src/test/regress/sql/foreign_key_to_reference_shard_rebalance.sql +++ b/src/test/regress/sql/foreign_key_to_reference_shard_rebalance.sql @@ -84,7 +84,7 @@ create table partitioned_tbl_with_fkey (x int, y int, t timestamptz default now( select create_distributed_table('partitioned_tbl_with_fkey','x'); create table partition_1_with_fkey partition of partitioned_tbl_with_fkey for values from ('2022-01-01') to ('2022-12-31'); create table partition_2_with_fkey partition of partitioned_tbl_with_fkey for values from ('2023-01-01') to ('2023-12-31'); -create table partition_3_with_fkey partition of partitioned_tbl_with_fkey for values from ('2024-01-01') to ('2024-12-31'); +create table partition_3_with_fkey partition of partitioned_tbl_with_fkey DEFAULT; insert into partitioned_tbl_with_fkey (x,y) select s,s%10 from generate_series(1,100) s; ALTER TABLE partitioned_tbl_with_fkey ADD CONSTRAINT fkey_to_ref_tbl FOREIGN KEY (y) REFERENCES ref_table_with_fkey(id); diff --git a/src/test/regress/sql/multi_mx_insert_select_repartition.sql b/src/test/regress/sql/multi_mx_insert_select_repartition.sql index 4a9c8c96fa9..b206c6e4e77 100644 --- a/src/test/regress/sql/multi_mx_insert_select_repartition.sql +++ b/src/test/regress/sql/multi_mx_insert_select_repartition.sql @@ -55,6 +55,8 @@ SET citus.log_local_commands to on; -- INSERT .. SELECT via repartitioning with local execution BEGIN; select count(*) from source_table WHERE a = 1; + -- we omit the "SELECT bytes FROM fetch_intermediate_results..." line since it is flaky + SET LOCAL citus.grep_remote_commands TO '%multi_mx_insert_select_repartition%'; insert into target_table SELECT a*2 FROM source_table RETURNING a; ROLLBACK;
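
For context on the coverage-artifact renaming in the hunks above, here is a minimal sketch (not part of the patch) of the upload/download pairing the workflow now relies on; the names and options are taken directly from the diff, and the rationale is that upload-artifact v4 artifacts are immutable, so each job has to upload under a unique name and the aggregating job collects them by pattern:

    # upload side (per test job), .github/actions/upload_coverage/action.yml
    - uses: actions/upload-artifact@v4.6.0
      with:
        name: codeclimate-${{ inputs.flags }}   # unique per job/flag
        path: "/tmp/codeclimate/*.json"

    # download side (upload-coverage job), .github/workflows/build_and_test.yml
    - uses: actions/download-artifact@v4.1.8
      with:
        pattern: codeclimate*        # gather every per-job coverage artifact
        path: codeclimate
        merge-multiple: true         # merge them into one directory for cc-test-reporter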