From 7afca3e83e14f9d991998a0ee88cb9107b04e528 Mon Sep 17 00:00:00 2001 From: Onur Tirtir Date: Fri, 31 Jan 2025 14:23:01 +0300 Subject: [PATCH 01/18] Upgrade upload-artifacts action to 4.6.0 (cherry picked from commit b886cfa22316e1047f5c725f0f6b96b08c607ab2) --- .github/actions/save_logs_and_results/action.yml | 2 +- .github/actions/upload_coverage/action.yml | 2 +- .github/workflows/build_and_test.yml | 2 +- .github/workflows/flaky_test_debugging.yml | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/actions/save_logs_and_results/action.yml b/.github/actions/save_logs_and_results/action.yml index 0f238835d19..b344c68f2ef 100644 --- a/.github/actions/save_logs_and_results/action.yml +++ b/.github/actions/save_logs_and_results/action.yml @@ -6,7 +6,7 @@ inputs: runs: using: composite steps: - - uses: actions/upload-artifact@v3.1.1 + - uses: actions/upload-artifact@v4.6.0 name: Upload logs with: name: ${{ inputs.folder }} diff --git a/.github/actions/upload_coverage/action.yml b/.github/actions/upload_coverage/action.yml index 0b5f581a6a4..abffa784a03 100644 --- a/.github/actions/upload_coverage/action.yml +++ b/.github/actions/upload_coverage/action.yml @@ -21,7 +21,7 @@ runs: mkdir -p /tmp/codeclimate cc-test-reporter format-coverage -t lcov -o /tmp/codeclimate/${{ inputs.flags }}.json lcov.info shell: bash - - uses: actions/upload-artifact@v3.1.1 + - uses: actions/upload-artifact@v4.6.0 with: path: "/tmp/codeclimate/*.json" name: codeclimate diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index 155ae596e54..409377f5a61 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -117,7 +117,7 @@ jobs: - name: Build run: "./ci/build-citus.sh" shell: bash - - uses: actions/upload-artifact@v3.1.1 + - uses: actions/upload-artifact@v4.6.0 with: name: build-${{ env.PG_MAJOR }} path: |- diff --git a/.github/workflows/flaky_test_debugging.yml b/.github/workflows/flaky_test_debugging.yml index a666c1cd5df..574cb7813ff 100644 --- a/.github/workflows/flaky_test_debugging.yml +++ b/.github/workflows/flaky_test_debugging.yml @@ -34,7 +34,7 @@ jobs: echo "PG_MAJOR=${PG_MAJOR}" >> $GITHUB_ENV ./ci/build-citus.sh shell: bash - - uses: actions/upload-artifact@v3.1.1 + - uses: actions/upload-artifact@v4.6.0 with: name: build-${{ env.PG_MAJOR }} path: |- From 518414f0d04e3ba80639c23a4adc6e8731ffe5ac Mon Sep 17 00:00:00 2001 From: Onur Tirtir Date: Fri, 31 Jan 2025 14:39:55 +0300 Subject: [PATCH 02/18] Upgrade download-artifacts action to 4.1.8 (cherry picked from commit cbe0de33a67772b21a9984b57f6133ca7316d2dc) --- .github/actions/setup_extension/action.yml | 2 +- .github/workflows/build_and_test.yml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/actions/setup_extension/action.yml b/.github/actions/setup_extension/action.yml index 96b408e7e43..33129f17de6 100644 --- a/.github/actions/setup_extension/action.yml +++ b/.github/actions/setup_extension/action.yml @@ -17,7 +17,7 @@ runs: echo "PG_MAJOR=${{ inputs.pg_major }}" >> $GITHUB_ENV fi shell: bash - - uses: actions/download-artifact@v3.0.1 + - uses: actions/download-artifact@v4.1.8 with: name: build-${{ env.PG_MAJOR }} - name: Install Extension diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index 409377f5a61..8b4f241890d 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -373,7 +373,7 @@ jobs: - test-citus-upgrade - test-pg-upgrade 
     steps:
-      - uses: actions/download-artifact@v3.0.1
+      - uses: actions/download-artifact@v4.1.8
         with:
           name: "codeclimate"
           path: "codeclimate"
@@ -474,7 +474,7 @@
       matrix: ${{ fromJson(needs.prepare_parallelization_matrix_32.outputs.json) }}
     steps:
       - uses: actions/checkout@v3.5.0
-      - uses: actions/download-artifact@v3.0.1
+      - uses: actions/download-artifact@v4.1.8
       - uses: "./.github/actions/setup_extension"
       - name: Run minimal tests
         run: |-

From 3a6f6c07aa16c24d8cd3982e86b04f8ef6673fcc Mon Sep 17 00:00:00 2001
From: Onur Tirtir
Date: Fri, 31 Jan 2025 15:46:42 +0300
Subject: [PATCH 03/18] Avoid publishing artifacts with conflicting names

As documented in actions/upload-artifact#480.

(cherry picked from commit 26f16a76542eca2caca7e25848663286c0c8c6d5)
---
 .github/actions/upload_coverage/action.yml |  2 +-
 .github/workflows/build_and_test.yml       | 15 +++++++++++----
 2 files changed, 12 insertions(+), 5 deletions(-)

diff --git a/.github/actions/upload_coverage/action.yml b/.github/actions/upload_coverage/action.yml
index abffa784a03..ba80ba63afa 100644
--- a/.github/actions/upload_coverage/action.yml
+++ b/.github/actions/upload_coverage/action.yml
@@ -24,4 +24,4 @@ runs:
   - uses: actions/upload-artifact@v4.6.0
     with:
       path: "/tmp/codeclimate/*.json"
-      name: codeclimate
+      name: codeclimate-${{ inputs.flags }}
diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml
index 8b4f241890d..85229220334 100644
--- a/.github/workflows/build_and_test.yml
+++ b/.github/workflows/build_and_test.yml
@@ -260,10 +260,12 @@
           check-arbitrary-configs parallel=4 CONFIGS=$TESTS
       - uses: "./.github/actions/save_logs_and_results"
         if: always()
+        with:
+          folder: ${{ env.PG_MAJOR }}_arbitrary_configs_${{ matrix.parallel }}
       - uses: "./.github/actions/upload_coverage"
         if: always()
         with:
-          flags: ${{ env.pg_major }}_upgrade
+          flags: ${{ env.PG_MAJOR }}_arbitrary_configs_${{ matrix.parallel }}
           codecov_token: ${{ secrets.CODECOV_TOKEN }}
   test-pg-upgrade:
     name: PG${{ matrix.old_pg_major }}-PG${{ matrix.new_pg_major }} - check-pg-upgrade
@@ -309,6 +311,8 @@
           if: failure()
       - uses: "./.github/actions/save_logs_and_results"
         if: always()
+        with:
+          folder: ${{ env.old_pg_major }}_${{ env.new_pg_major }}_upgrade
       - uses: "./.github/actions/upload_coverage"
         if: always()
         with:
@@ -354,10 +358,12 @@
           done;
       - uses: "./.github/actions/save_logs_and_results"
         if: always()
+        with:
+          folder: ${{ env.PG_MAJOR }}_citus_upgrade
       - uses: "./.github/actions/upload_coverage"
         if: always()
         with:
-          flags: ${{ env.pg_major }}_upgrade
+          flags: ${{ env.PG_MAJOR }}_citus_upgrade
           codecov_token: ${{ secrets.CODECOV_TOKEN }}
   upload-coverage:
     if: always()
@@ -375,8 +381,9 @@
     steps:
       - uses: actions/download-artifact@v4.1.8
         with:
-          name: "codeclimate"
-          path: "codeclimate"
+          pattern: codeclimate*
+          path: codeclimate
+          merge-multiple: true
       - name: Upload coverage results to Code Climate
         run: |-
           cc-test-reporter sum-coverage codeclimate/*.json -o total.json

From 7950b3a2ee2f6f11e0e1c87b755fa376c5f9e4c8 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?G=C3=BCrkan=20=C4=B0ndibay?=
Date: Fri, 12 Jul 2024 12:25:12 +0300
Subject: [PATCH 04/18] Removes el/7 and ol/7 as runners (#7650)

Removes el/7 and ol/7 as runners and updates the checkout action to v4.

We use EL/7 and OL/7 runners to test packaging for these distributions.
However, for the past two weeks, we've encountered errors during the
checkout step in the pipelines.
The error message is as follows:
```
/__e/node20/bin/node: /lib64/libm.so.6: version `GLIBC_2.27' not found (required by /__e/node20/bin/node)
/__e/node20/bin/node: /lib64/libstdc++.so.6: version `GLIBCXX_3.4.20' not found (required by /__e/node20/bin/node)
/__e/node20/bin/node: /lib64/libstdc++.so.6: version `CXXABI_1.3.9' not found (required by /__e/node20/bin/node)
/__e/node20/bin/node: /lib64/libstdc++.so.6: version `GLIBCXX_3.4.21' not found (required by /__e/node20/bin/node)
/__e/node20/bin/node: /lib64/libc.so.6: version `GLIBC_2.28' not found (required by /__e/node20/bin/node)
/__e/node20/bin/node: /lib64/libc.so.6: version `GLIBC_2.25' not found (required by /__e/node20/bin/node)
```
The glibc version within the EL/7 and OL/7 Docker images is 2.17, and we
cannot upgrade it. Therefore, we need to remove these images from the
packaging test pipelines. Consequently, we will no longer verify that the
code builds for EL/7 and OL/7.

However, since we do not use these packaging images as runners within the
packaging infrastructure itself, we can continue to use them for building
packages.

Additional info: I learned that the Marlin team has fully dropped el/7
support, so we will drop it in future releases as well.

(cherry picked from commit c603c3ed7446b2618f48537111a0bd5bf70fbc2b)
---
 .github/workflows/packaging-test-pipelines.yml | 2 --
 1 file changed, 2 deletions(-)

diff --git a/.github/workflows/packaging-test-pipelines.yml b/.github/workflows/packaging-test-pipelines.yml
index 56cfc3f2f7f..21e35c47205 100644
--- a/.github/workflows/packaging-test-pipelines.yml
+++ b/.github/workflows/packaging-test-pipelines.yml
@@ -42,9 +42,7 @@
       # For this reason, we need to use a "matrix" to generate names of
       # rpm images, e.g. citus/packaging:centos-7-pg12
       packaging_docker_image:
-        - oraclelinux-7
         - oraclelinux-8
-        - centos-7
         - almalinux-8
         - almalinux-9
       POSTGRES_VERSION: ${{ fromJson(needs.get_postgres_versions_from_file.outputs.pg_versions) }}

From 9e50c45dbe585de1272fa42cc4439720e3aa95d1 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?G=C3=BCrkan=20=C4=B0ndibay?=
Date: Thu, 24 Aug 2023 10:30:33 +0300
Subject: [PATCH 05/18] Removes ubuntu/bionic from packaging pipelines (#7142)

DESCRIPTION: Removes ubuntu/bionic from packaging pipelines

Since the pg16 beta is not available for ubuntu/bionic and ubuntu/bionic
support is EOL, I need to remove this OS from the pipeline:
https://ubuntu.com/blog/ubuntu-18-04-eol-for-devices

Additionally, this adds concurrency support for the GH Actions packaging
pipeline.

(cherry picked from commit 553780e3f17e0dcd4a7194f5d77da02bb64241ef)
---
 .github/workflows/packaging-test-pipelines.yml | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/.github/workflows/packaging-test-pipelines.yml b/.github/workflows/packaging-test-pipelines.yml
index 21e35c47205..a8f8a3e718e 100644
--- a/.github/workflows/packaging-test-pipelines.yml
+++ b/.github/workflows/packaging-test-pipelines.yml
@@ -6,6 +6,10 @@ on:

   workflow_dispatch:

+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: true
+
 jobs:

   get_postgres_versions_from_file:
@@ -109,7 +113,6 @@
           - debian-buster-all
           - debian-bookworm-all
           - debian-bullseye-all
-          - ubuntu-bionic-all
           - ubuntu-focal-all
           - ubuntu-jammy-all

From 73e469553a59c12ce8f99f2c651ad8bbac180ea9 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?G=C3=BCrkan=20=C4=B0ndibay?=
Date: Fri, 31 May 2024 20:52:17 +0300
Subject: [PATCH 06/18] Updates github checkout actions to v4 (#7611)

(cherry picked from commit 3fe22406e62fb40da12a0d91f3ecc0cba81cdb24)
---
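Note: actions/checkout@v4 runs on the node20 runtime, which is what later
forces the glibc workaround for older containers in patch 07. As a
reference, a minimal sketch of the step these workflows converge on
(`fetch-depth: 0` is only passed where a workflow needs full git history):

```yaml
steps:
  # pin the major version; v4 runs on the node20 runtime
  - uses: actions/checkout@v4
    with:
      # fetch all history, e.g. to diff tests against the base branch
      fetch-depth: 0
```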
.github/workflows/build_and_test.yml | 22 +++++++++---------- .github/workflows/codeql.yml | 2 +- .github/workflows/flaky_test_debugging.yml | 6 ++--- .../workflows/packaging-test-pipelines.yml | 6 ++--- 4 files changed, 18 insertions(+), 18 deletions(-) diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index 85229220334..48d087bb2f7 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -56,7 +56,7 @@ jobs: - name: Check Snapshots run: | git config --global --add safe.directory ${GITHUB_WORKSPACE} - - uses: actions/checkout@v3.5.0 + - uses: actions/checkout@v4 with: fetch-depth: 0 - name: Check C Style @@ -110,7 +110,7 @@ jobs: image: "${{ matrix.image_name }}:${{ fromJson(matrix.pg_version).full }}${{ matrix.image_suffix }}" options: --user root steps: - - uses: actions/checkout@v3.5.0 + - uses: actions/checkout@v4 - name: Expose $PG_MAJOR to Github Env run: echo "PG_MAJOR=${PG_MAJOR}" >> $GITHUB_ENV shell: bash @@ -204,7 +204,7 @@ jobs: - params - build steps: - - uses: actions/checkout@v3.5.0 + - uses: actions/checkout@v4 - uses: "./.github/actions/setup_extension" - name: Run Test run: gosu circleci make -C src/test/${{ matrix.suite }} ${{ matrix.make }} @@ -238,7 +238,7 @@ jobs: - ${{ needs.params.outputs.pg15_version }} parallel: [0,1,2,3,4,5] # workaround for running 6 parallel jobs steps: - - uses: actions/checkout@v3.5.0 + - uses: actions/checkout@v4 - uses: "./.github/actions/setup_extension" - name: Test arbitrary configs run: |- @@ -288,7 +288,7 @@ jobs: old_pg_major: ${{ matrix.old_pg_major }} new_pg_major: ${{ matrix.new_pg_major }} steps: - - uses: actions/checkout@v3.5.0 + - uses: actions/checkout@v4 - uses: "./.github/actions/setup_extension" with: pg_major: "${{ env.old_pg_major }}" @@ -328,7 +328,7 @@ jobs: - params - build steps: - - uses: actions/checkout@v3.5.0 + - uses: actions/checkout@v4 - uses: "./.github/actions/setup_extension" with: skip_installation: true @@ -395,7 +395,7 @@ jobs: needs: - build steps: - - uses: actions/checkout@v3.5.0 + - uses: actions/checkout@v4 - uses: azure/login@v1 with: creds: ${{ secrets.AZURE_CREDENTIALS }} @@ -413,7 +413,7 @@ jobs: needs: - build steps: - - uses: actions/checkout@v3.5.0 + - uses: actions/checkout@v4 - uses: azure/login@v1 with: creds: ${{ secrets.AZURE_CREDENTIALS }} @@ -432,7 +432,7 @@ jobs: outputs: json: ${{ steps.parallelization.outputs.json }} steps: - - uses: actions/checkout@v3.5.0 + - uses: actions/checkout@v4 - uses: "./.github/actions/parallelization" id: parallelization with: @@ -445,7 +445,7 @@ jobs: outputs: tests: ${{ steps.detect-regression-tests.outputs.tests }} steps: - - uses: actions/checkout@v3.5.0 + - uses: actions/checkout@v4 with: fetch-depth: 0 - name: Detect regression tests need to be ran @@ -480,7 +480,7 @@ jobs: fail-fast: false matrix: ${{ fromJson(needs.prepare_parallelization_matrix_32.outputs.json) }} steps: - - uses: actions/checkout@v3.5.0 + - uses: actions/checkout@v4 - uses: actions/download-artifact@v4.1.8 - uses: "./.github/actions/setup_extension" - name: Run minimal tests diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 6478abf4b69..027f5a0487d 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -21,7 +21,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Initialize CodeQL uses: github/codeql-action/init@v2 diff --git a/.github/workflows/flaky_test_debugging.yml 
b/.github/workflows/flaky_test_debugging.yml
index 574cb7813ff..04ec1837e7a 100644
--- a/.github/workflows/flaky_test_debugging.yml
+++ b/.github/workflows/flaky_test_debugging.yml
@@ -28,7 +28,7 @@
       image: ${{ vars.build_image_name }}:${{ vars.pg15_version }}${{ vars.image_suffix }}
       options: --user root
     steps:
-      - uses: actions/checkout@v3.5.0
+      - uses: actions/checkout@v4
       - name: Configure, Build, and Install
         run: |
           echo "PG_MAJOR=${PG_MAJOR}" >> $GITHUB_ENV
@@ -46,7 +46,7 @@
     outputs:
       json: ${{ steps.parallelization.outputs.json }}
     steps:
-      - uses: actions/checkout@v3.5.0
+      - uses: actions/checkout@v4
      - uses: "./.github/actions/parallelization"
         id: parallelization
         with:
@@ -67,7 +67,7 @@
       fail-fast: false
       matrix: ${{ fromJson(needs.prepare_parallelization_matrix.outputs.json) }}
     steps:
-      - uses: actions/checkout@v3.5.0
+      - uses: actions/checkout@v4
       - uses: "./.github/actions/setup_extension"
       - name: Run minimal tests
         run: |-
diff --git a/.github/workflows/packaging-test-pipelines.yml b/.github/workflows/packaging-test-pipelines.yml
index a8f8a3e718e..d191957c726 100644
--- a/.github/workflows/packaging-test-pipelines.yml
+++ b/.github/workflows/packaging-test-pipelines.yml
@@ -18,7 +18,7 @@
       pg_versions: ${{ steps.get-postgres-versions.outputs.pg_versions }}
     steps:
       - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
         with:
           fetch-depth: 2
       - name: Get Postgres Versions
@@ -57,7 +57,7 @@

     steps:
       - name: Checkout repository
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4

       - name: Set Postgres and python parameters for rpm based distros
         run: |
@@ -124,7 +124,7 @@

     steps:
       - name: Checkout repository
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4

       - name: Set pg_config path and python parameters for deb based distros
         run: |

From b0992b922b4993780cec4ab30ce98571f3e9bb14 Mon Sep 17 00:00:00 2001
From: Onur Tirtir
Date: Tue, 4 Feb 2025 01:00:08 +0300
Subject: [PATCH 07/18] Work around the runner issue in check-sql-snapshots

(cherry picked from commit 4bf9a7b7f030bea4fc3b3f8a8709f2de92f60c73)
---
 .github/workflows/build_and_test.yml | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml
index 48d087bb2f7..03ea649e8bd 100644
--- a/.github/workflows/build_and_test.yml
+++ b/.github/workflows/build_and_test.yml
@@ -40,8 +40,21 @@ jobs:
     runs-on: ubuntu-20.04
     container:
       image: ${{ needs.params.outputs.build_image_name }}:latest
+      volumes:
+        # see https://github.com/actions/checkout/issues/1474#issuecomment-2604308106
+        - /tmp:/__e/node20
       options: --user root
     steps:
+      # see https://github.com/actions/checkout/issues/1474#issuecomment-2604308106
+      - name: Install node.js for GitHub Actions
+        run: |
+          apt-get install -y curl ca-certificates &&
+          # Install a Node.js version that works in older Ubuntu containers (read: does not require very recent glibc)
+          NODE_VERSION=v20.18.1 &&
+          NODE_TAR_FILE=node-$NODE_VERSION-linux-x64-glibc-217.tar.gz &&
+          NODE_URL=https://unofficial-builds.nodejs.org/download/release/$NODE_VERSION/$NODE_TAR_FILE &&
+          curl -Lo /tmp/$NODE_TAR_FILE $NODE_URL &&
+          tar -C /__e/node20 -x --strip-components=1 -f /tmp/$NODE_TAR_FILE
       - uses: actions/checkout@v3.5.0
       - name: Check Snapshots
         run: |

From df3727be4e0030f8a6061977c48d8b29a6f78efa Mon Sep 17 00:00:00 2001
From: Onur Tirtir
Date: Thu, 4 Jan 2024 15:16:45 +0300
Subject: [PATCH 08/18] Fix foreign_key_to_reference_shard_rebalance test (#7400)

foreign_key_to_reference_shard_rebalance
failed because the partition for year 2024 does not exist; fixed by
adding a default partition.

Co-authored-by: chuhx <148182736+cstarc1@users.noreply.github.com>
(cherry picked from commit 968ac74cdef96b76f77fcd533c6464fad536d9b0)
---
 .../expected/foreign_key_to_reference_shard_rebalance.out    | 2 +-
 .../regress/sql/foreign_key_to_reference_shard_rebalance.sql | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/test/regress/expected/foreign_key_to_reference_shard_rebalance.out b/src/test/regress/expected/foreign_key_to_reference_shard_rebalance.out
index bae1895f94c..38b82ed6809 100644
--- a/src/test/regress/expected/foreign_key_to_reference_shard_rebalance.out
+++ b/src/test/regress/expected/foreign_key_to_reference_shard_rebalance.out
@@ -210,7 +210,7 @@ select create_distributed_table('partitioned_tbl_with_fkey','x');

 create table partition_1_with_fkey partition of partitioned_tbl_with_fkey for values from ('2022-01-01') to ('2022-12-31');
 create table partition_2_with_fkey partition of partitioned_tbl_with_fkey for values from ('2023-01-01') to ('2023-12-31');
-create table partition_3_with_fkey partition of partitioned_tbl_with_fkey for values from ('2024-01-01') to ('2024-12-31');
+create table partition_3_with_fkey partition of partitioned_tbl_with_fkey DEFAULT;
 insert into partitioned_tbl_with_fkey (x,y) select s,s%10 from generate_series(1,100) s;
 ALTER TABLE partitioned_tbl_with_fkey ADD CONSTRAINT fkey_to_ref_tbl FOREIGN KEY (y) REFERENCES ref_table_with_fkey(id);
 WITH shardid AS (SELECT shardid FROM pg_dist_shard where logicalrelid = 'partitioned_tbl_with_fkey'::regclass ORDER BY shardid LIMIT 1)
diff --git a/src/test/regress/sql/foreign_key_to_reference_shard_rebalance.sql b/src/test/regress/sql/foreign_key_to_reference_shard_rebalance.sql
index 0b9826cb749..b1017768d6b 100644
--- a/src/test/regress/sql/foreign_key_to_reference_shard_rebalance.sql
+++ b/src/test/regress/sql/foreign_key_to_reference_shard_rebalance.sql
@@ -84,7 +84,7 @@ create table partitioned_tbl_with_fkey (x int, y int, t timestamptz default now(
 select create_distributed_table('partitioned_tbl_with_fkey','x');
 create table partition_1_with_fkey partition of partitioned_tbl_with_fkey for values from ('2022-01-01') to ('2022-12-31');
 create table partition_2_with_fkey partition of partitioned_tbl_with_fkey for values from ('2023-01-01') to ('2023-12-31');
-create table partition_3_with_fkey partition of partitioned_tbl_with_fkey for values from ('2024-01-01') to ('2024-12-31');
+create table partition_3_with_fkey partition of partitioned_tbl_with_fkey DEFAULT;
 insert into partitioned_tbl_with_fkey (x,y) select s,s%10 from generate_series(1,100) s;
 ALTER TABLE partitioned_tbl_with_fkey ADD CONSTRAINT fkey_to_ref_tbl FOREIGN KEY (y) REFERENCES ref_table_with_fkey(id);

From 1b39f4ba7c83aff42a2ab14cd390ce3ee3c8d351 Mon Sep 17 00:00:00 2001
From: Jelte Fennema-Nio
Date: Fri, 14 Jun 2024 16:20:23 +0200
Subject: [PATCH 09/18] Fix CI issues after Github Actions networking changes (#7624)

For some reason using localhost in our hba file doesn't have the
intended effect anymore in our Github Actions runners. Probably because
of some networking change (IPv6 maybe) or some change in the
`/etc/hosts` file.

Replacing localhost with the equivalent loopback IPv4 and IPv6 addresses
resolved this issue.
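For illustration, the relevant part of the generated pg_hba.conf should now
come out roughly as follows (a sketch assembled from the diff below, not
captured test output):

```
host all         alice,bob 127.0.0.1/32 md5
host all         alice,bob ::1/128      md5
host all         all       127.0.0.1/32 trust
host all         all       ::1/128      trust
host replication postgres  127.0.0.1/32 trust
host replication postgres  ::1/128      trust
```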
(cherry picked from commit 8c9de08b76332308deb9fd082d0d00f4afba8cd3) --- src/test/regress/pg_regress_multi.pl | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/test/regress/pg_regress_multi.pl b/src/test/regress/pg_regress_multi.pl index 7b02ca5cc2f..34d2110252a 100755 --- a/src/test/regress/pg_regress_multi.pl +++ b/src/test/regress/pg_regress_multi.pl @@ -297,10 +297,12 @@ sub generate_hba open(my $fh, ">", catfile($TMP_CHECKDIR, $nodename, "data", "pg_hba.conf")) or die "could not open pg_hba.conf"; - print $fh "host all alice,bob localhost md5\n"; + print $fh "host all alice,bob 127.0.0.1/32 md5\n"; + print $fh "host all alice,bob ::1/128 md5\n"; print $fh "host all all 127.0.0.1/32 trust\n"; print $fh "host all all ::1/128 trust\n"; - print $fh "host replication postgres localhost trust\n"; + print $fh "host replication postgres 127.0.0.1/32 trust\n"; + print $fh "host replication postgres ::1/128 trust\n"; close $fh; } From 8fdeb2a6c50d2d702edd1fc713c94d45eeeaaaf0 Mon Sep 17 00:00:00 2001 From: Onur Tirtir Date: Tue, 4 Feb 2025 13:08:06 +0300 Subject: [PATCH 10/18] Actually test PG15 in CI instead of trying to test PG13 twice --- .github/workflows/build_and_test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index 03ea649e8bd..a31c74d7367 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -146,9 +146,9 @@ jobs: image_name: - ${{ needs.params.outputs.test_image_name }} pg_version: - - ${{ needs.params.outputs.pg13_version }} - ${{ needs.params.outputs.pg13_version }} - ${{ needs.params.outputs.pg14_version }} + - ${{ needs.params.outputs.pg15_version }} make: - check-split - check-multi From 832960b690d3702e2526bf1af9acde27277d445f Mon Sep 17 00:00:00 2001 From: Naisila Puka <37271756+naisila@users.noreply.github.com> Date: Fri, 5 May 2023 16:47:01 +0300 Subject: [PATCH 11/18] Fixes flakiness in background_rebalance_parallel test (#6910) Fixes the following flaky outputs by decreasing citus_task_wait loop interval, and changing the order of wait commands. 
https://app.circleci.com/pipelines/github/citusdata/citus/32102/workflows/19958297-6c7e-49ef-9bc2-8efe8aacb96f/jobs/1089589 ``` diff SELECT job_id, task_id, status, nodes_involved FROM pg_dist_background_task WHERE job_id in (:job_id) ORDER BY task_id; job_id | task_id | status | nodes_involved --------+---------+----------+---------------- 17779 | 1013 | done | {50,56} 17779 | 1014 | running | {50,57} - 17779 | 1015 | running | {50,56} - 17779 | 1016 | blocked | {50,57} + 17779 | 1015 | done | {50,56} + 17779 | 1016 | running | {50,57} 17779 | 1017 | runnable | {50,56} 17779 | 1018 | blocked | {50,57} 17779 | 1019 | runnable | {50,56} 17779 | 1020 | blocked | {50,57} (8 rows) ``` https://github.com/citusdata/citus/pull/6893#issuecomment-1525661408 ```diff SELECT job_id, task_id, status, nodes_involved FROM pg_dist_background_task WHERE job_id in (:job_id) ORDER BY task_id; job_id | task_id | status | nodes_involved --------+---------+----------+---------------- 17779 | 1013 | done | {50,56} - 17779 | 1014 | running | {50,57} + 17779 | 1014 | runnable | {50,57} 17779 | 1015 | running | {50,56} 17779 | 1016 | blocked | {50,57} 17779 | 1017 | runnable | {50,56} 17779 | 1018 | blocked | {50,57} 17779 | 1019 | runnable | {50,56} 17779 | 1020 | blocked | {50,57} (8 rows) ``` (cherry picked from commit 905fd46410544c98aac2be92d188c52cc0bb2802) --- src/backend/distributed/utils/background_jobs.c | 2 +- .../regress/expected/background_rebalance_parallel.out | 10 ++++++++-- src/test/regress/sql/background_rebalance_parallel.sql | 4 +++- 3 files changed, 12 insertions(+), 4 deletions(-) diff --git a/src/backend/distributed/utils/background_jobs.c b/src/backend/distributed/utils/background_jobs.c index 789732d212b..84ef4229fbd 100644 --- a/src/backend/distributed/utils/background_jobs.c +++ b/src/backend/distributed/utils/background_jobs.c @@ -395,7 +395,7 @@ citus_task_wait_internal(int64 taskid, BackgroundTaskStatus *desiredStatus) /* sleep for a while, before rechecking the task status */ CHECK_FOR_INTERRUPTS(); - const long delay_ms = 1000; + const long delay_ms = 100; (void) WaitLatch(MyLatch, WL_LATCH_SET | WL_TIMEOUT | WL_EXIT_ON_PM_DEATH, delay_ms, diff --git a/src/test/regress/expected/background_rebalance_parallel.out b/src/test/regress/expected/background_rebalance_parallel.out index 187f709e447..dbdd963a97b 100644 --- a/src/test/regress/expected/background_rebalance_parallel.out +++ b/src/test/regress/expected/background_rebalance_parallel.out @@ -513,6 +513,12 @@ FROM pg_dist_background_task WHERE job_id in (:job_id) ORDER BY task_id; (8 rows) -- increase citus.max_background_task_executors_per_node +SELECT citus_task_wait(1013, desired_status => 'done'); + citus_task_wait +--------------------------------------------------------------------- + +(1 row) + ALTER SYSTEM SET citus.max_background_task_executors_per_node = 2; SELECT pg_reload_conf(); pg_reload_conf @@ -520,13 +526,13 @@ SELECT pg_reload_conf(); t (1 row) -SELECT citus_task_wait(1015, desired_status => 'running'); +SELECT citus_task_wait(1014, desired_status => 'running'); citus_task_wait --------------------------------------------------------------------- (1 row) -SELECT citus_task_wait(1013, desired_status => 'done'); +SELECT citus_task_wait(1015, desired_status => 'running'); citus_task_wait --------------------------------------------------------------------- diff --git a/src/test/regress/sql/background_rebalance_parallel.sql b/src/test/regress/sql/background_rebalance_parallel.sql index e55fd93bb0a..2eb952b671a 100644 
--- a/src/test/regress/sql/background_rebalance_parallel.sql +++ b/src/test/regress/sql/background_rebalance_parallel.sql @@ -221,10 +221,12 @@ SELECT job_id, task_id, status, nodes_involved FROM pg_dist_background_task WHERE job_id in (:job_id) ORDER BY task_id; -- increase citus.max_background_task_executors_per_node +SELECT citus_task_wait(1013, desired_status => 'done'); ALTER SYSTEM SET citus.max_background_task_executors_per_node = 2; SELECT pg_reload_conf(); + +SELECT citus_task_wait(1014, desired_status => 'running'); SELECT citus_task_wait(1015, desired_status => 'running'); -SELECT citus_task_wait(1013, desired_status => 'done'); -- show that at most 2 tasks per node are running -- among the tasks that are not blocked From 7a4676b62dd1ab666112e2f4cac33a219f213300 Mon Sep 17 00:00:00 2001 From: Naisila Puka <37271756+naisila@users.noreply.github.com> Date: Wed, 8 Nov 2023 15:15:33 +0300 Subject: [PATCH 12/18] Fix flaky multi_mx_insert_select_repartition test (#7331) https://github.com/citusdata/citus/actions/runs/6745019678/attempts/1#summary-18336188930 ```diff insert into target_table SELECT a*2 FROM source_table RETURNING a; -NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartitioned_results_xxxxx_from_4213582_to_0','repartitioned_results_xxxxx_from_4213584_to_0']::text[],'localhost',57638) bytes +NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartitioned_results_3940758121873413_from_4213584_to_0','repartitioned_results_3940758121873413_from_4213582_to_0']::text[],'localhost',57638) bytes ``` The elements in the array passed to `fetch_intermediate_results` are the same, but in the opposite order than expected. To fix this flakiness, we can omit the `"SELECT bytes FROM fetch_intermediate_results..."` line. From the following logs, it is understandable that the intermediate results have been fetched. (cherry picked from commit 0dc41ee5a07a6fad9e35fc7af9f279d627e159b2) --- .../regress/expected/multi_mx_insert_select_repartition.out | 3 ++- .../regress/expected/multi_mx_insert_select_repartition_0.out | 3 ++- src/test/regress/sql/multi_mx_insert_select_repartition.sql | 2 ++ 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/src/test/regress/expected/multi_mx_insert_select_repartition.out b/src/test/regress/expected/multi_mx_insert_select_repartition.out index 62f197c30bb..a3912ec8e90 100644 --- a/src/test/regress/expected/multi_mx_insert_select_repartition.out +++ b/src/test/regress/expected/multi_mx_insert_select_repartition.out @@ -103,10 +103,11 @@ NOTICE: executing the command locally: SELECT count(*) AS count FROM multi_mx_i 4 (1 row) + -- we omit the "SELECT bytes FROM fetch_intermediate_results..." 
line since it is flaky + SET LOCAL citus.grep_remote_commands TO '%multi_mx_insert_select_repartition%'; insert into target_table SELECT a*2 FROM source_table RETURNING a; NOTICE: executing the command locally: SELECT partition_index, 'repartitioned_results_xxxxx_from_4213581_to' || '_' || partition_index::text , rows_written FROM worker_partition_query_result('repartitioned_results_xxxxx_from_4213581_to','SELECT (a OPERATOR(pg_catalog.*) 2) AS a FROM multi_mx_insert_select_repartition.source_table_4213581 source_table WHERE true',0,'hash','{-2147483648,-715827883,715827882}'::text[],'{-715827884,715827881,2147483647}'::text[],true) WHERE rows_written > 0 NOTICE: executing the command locally: SELECT partition_index, 'repartitioned_results_xxxxx_from_4213583_to' || '_' || partition_index::text , rows_written FROM worker_partition_query_result('repartitioned_results_xxxxx_from_4213583_to','SELECT (a OPERATOR(pg_catalog.*) 2) AS a FROM multi_mx_insert_select_repartition.source_table_4213583 source_table WHERE true',0,'hash','{-2147483648,-715827883,715827882}'::text[],'{-715827884,715827881,2147483647}'::text[],true) WHERE rows_written > 0 -NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartitioned_results_xxxxx_from_4213582_to_0','repartitioned_results_xxxxx_from_4213584_to_0']::text[],'localhost',57638) bytes NOTICE: executing the command locally: INSERT INTO multi_mx_insert_select_repartition.target_table_4213585 AS citus_table_alias (a) SELECT intermediate_result.a FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213581_to_0,repartitioned_results_xxxxx_from_4213582_to_0,repartitioned_results_xxxxx_from_4213584_to_0}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer) RETURNING citus_table_alias.a NOTICE: executing the command locally: INSERT INTO multi_mx_insert_select_repartition.target_table_4213587 AS citus_table_alias (a) SELECT intermediate_result.a FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213581_to_2}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer) RETURNING citus_table_alias.a a diff --git a/src/test/regress/expected/multi_mx_insert_select_repartition_0.out b/src/test/regress/expected/multi_mx_insert_select_repartition_0.out index 15deba0c0af..62271f9a743 100644 --- a/src/test/regress/expected/multi_mx_insert_select_repartition_0.out +++ b/src/test/regress/expected/multi_mx_insert_select_repartition_0.out @@ -103,10 +103,11 @@ NOTICE: executing the command locally: SELECT count(*) AS count FROM multi_mx_i 4 (1 row) + -- we omit the "SELECT bytes FROM fetch_intermediate_results..." 
line since it is flaky + SET LOCAL citus.grep_remote_commands TO '%multi_mx_insert_select_repartition%'; insert into target_table SELECT a*2 FROM source_table RETURNING a; NOTICE: executing the command locally: SELECT partition_index, 'repartitioned_results_xxxxx_from_4213581_to' || '_' || partition_index::text , rows_written FROM worker_partition_query_result('repartitioned_results_xxxxx_from_4213581_to','SELECT (a OPERATOR(pg_catalog.*) 2) AS a FROM multi_mx_insert_select_repartition.source_table_4213581 source_table WHERE true',0,'hash','{-2147483648,-715827883,715827882}'::text[],'{-715827884,715827881,2147483647}'::text[],true) WHERE rows_written > 0 NOTICE: executing the command locally: SELECT partition_index, 'repartitioned_results_xxxxx_from_4213583_to' || '_' || partition_index::text , rows_written FROM worker_partition_query_result('repartitioned_results_xxxxx_from_4213583_to','SELECT (a OPERATOR(pg_catalog.*) 2) AS a FROM multi_mx_insert_select_repartition.source_table_4213583 source_table WHERE true',0,'hash','{-2147483648,-715827883,715827882}'::text[],'{-715827884,715827881,2147483647}'::text[],true) WHERE rows_written > 0 -NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartitioned_results_xxxxx_from_4213582_to_0','repartitioned_results_xxxxx_from_4213584_to_0']::text[],'localhost',57638) bytes NOTICE: executing the command locally: INSERT INTO multi_mx_insert_select_repartition.target_table_4213585 AS citus_table_alias (a) SELECT a FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213581_to_0,repartitioned_results_xxxxx_from_4213582_to_0,repartitioned_results_xxxxx_from_4213584_to_0}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer) RETURNING citus_table_alias.a NOTICE: executing the command locally: INSERT INTO multi_mx_insert_select_repartition.target_table_4213587 AS citus_table_alias (a) SELECT a FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213581_to_2}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer) RETURNING citus_table_alias.a a diff --git a/src/test/regress/sql/multi_mx_insert_select_repartition.sql b/src/test/regress/sql/multi_mx_insert_select_repartition.sql index 4a9c8c96fa9..b206c6e4e77 100644 --- a/src/test/regress/sql/multi_mx_insert_select_repartition.sql +++ b/src/test/regress/sql/multi_mx_insert_select_repartition.sql @@ -55,6 +55,8 @@ SET citus.log_local_commands to on; -- INSERT .. SELECT via repartitioning with local execution BEGIN; select count(*) from source_table WHERE a = 1; + -- we omit the "SELECT bytes FROM fetch_intermediate_results..." 
line since it is flaky + SET LOCAL citus.grep_remote_commands TO '%multi_mx_insert_select_repartition%'; insert into target_table SELECT a*2 FROM source_table RETURNING a; ROLLBACK; From 1a31fb1fb220e768905255162e488a9e5c893457 Mon Sep 17 00:00:00 2001 From: Jelte Fennema-Nio Date: Wed, 1 Nov 2023 17:21:12 +0100 Subject: [PATCH 13/18] Fix flaky citus_non_blocking_split_shard_cleanup (#7311) Sometimes in CI citus_non_blocking_split_shard_cleanup failed like this: ```diff --- /__w/citus/citus/src/test/regress/expected/citus_non_blocking_split_shard_cleanup.out.modified 2023-11-01 15:07:14.280551207 +0000 +++ /__w/citus/citus/src/test/regress/results/citus_non_blocking_split_shard_cleanup.out.modified 2023-11-01 15:07:14.292551358 +0000 @@ -106,21 +106,22 @@ ----------------------------------- (1 row) \c - - - :worker_2_port SET search_path TO "citus_split_test_schema"; -- Replication slots should be cleaned up SELECT slot_name FROM pg_replication_slots; slot_name --------------------------------- -(0 rows) + citus_shard_split_slot_19_10_17 +(1 row) -- Publications should be cleanedup SELECT count(*) FROM pg_publication; count ``` It's expected that the replication slot is sometimes not cleaned up if we don't wait until resource cleanup completes. This PR starts doing that here. (cherry picked from commit e3c93c303dec623f6e196ca8f2ca1d1a20c51e6c) --- .../expected/citus_non_blocking_split_shard_cleanup.out | 6 ++++++ .../regress/sql/citus_non_blocking_split_shard_cleanup.sql | 2 ++ 2 files changed, 8 insertions(+) diff --git a/src/test/regress/expected/citus_non_blocking_split_shard_cleanup.out b/src/test/regress/expected/citus_non_blocking_split_shard_cleanup.out index e2685c2d792..a559ec4428f 100644 --- a/src/test/regress/expected/citus_non_blocking_split_shard_cleanup.out +++ b/src/test/regress/expected/citus_non_blocking_split_shard_cleanup.out @@ -107,6 +107,12 @@ SELECT pg_catalog.citus_split_shard_by_split_points( (1 row) +SELECT public.wait_for_resource_cleanup(); + wait_for_resource_cleanup +--------------------------------------------------------------------- + +(1 row) + \c - - - :worker_2_port SET search_path TO "citus_split_test_schema"; -- Replication slots should be cleaned up diff --git a/src/test/regress/sql/citus_non_blocking_split_shard_cleanup.sql b/src/test/regress/sql/citus_non_blocking_split_shard_cleanup.sql index ba3f952154f..480d81b88b3 100644 --- a/src/test/regress/sql/citus_non_blocking_split_shard_cleanup.sql +++ b/src/test/regress/sql/citus_non_blocking_split_shard_cleanup.sql @@ -79,6 +79,8 @@ SELECT pg_catalog.citus_split_shard_by_split_points( ARRAY[:worker_2_node, :worker_2_node, :worker_2_node], 'force_logical'); +SELECT public.wait_for_resource_cleanup(); + \c - - - :worker_2_port SET search_path TO "citus_split_test_schema"; -- Replication slots should be cleaned up From aeaf19d27c8e02ce85c108cb7e494c8efcc2b63b Mon Sep 17 00:00:00 2001 From: Jelte Fennema-Nio Date: Thu, 2 Nov 2023 10:39:05 +0100 Subject: [PATCH 14/18] Fix flaky isolation_metadata_sync_deadlock (#7312) Sometimes isolation_metadata_sync_deadlock fails in CI like this: ```diff diff -dU10 -w /__w/citus/citus/src/test/regress/expected/isolation_metadata_sync_deadlock.out /__w/citus/citus/src/test/regress/results/isolation_metadata_sync_deadlock.out --- /__w/citus/citus/src/test/regress/expected/isolation_metadata_sync_deadlock.out.modified 2023-11-01 16:03:15.090199229 +0000 +++ /__w/citus/citus/src/test/regress/results/isolation_metadata_sync_deadlock.out.modified 2023-11-01 16:03:15.098199312 
+0000 @@ -110,10 +110,14 @@ t (1 row) step s2-stop-connection: SELECT stop_session_level_connection_to_node(); stop_session_level_connection_to_node ------------------------------------- (1 row) + +teardown failed: ERROR: localhost:57638 is a metadata node, but is out of sync +HINT: If the node is up, wait until metadata gets synced to it and try again. +CONTEXT: SQL statement "SELECT master_remove_distributed_table_metadata_from_workers(v_obj.objid, v_obj.schema_name, v_obj.object_name)" ``` Source: https://github.com/citusdata/citus/actions/runs/6721938040/attempts/1#summary-18268946448 To fix this we now wait for the metadata to be fully synced to all nodes at the start of the teardown steps. (cherry picked from commit a6e86884f6aa526476b6a0586848c801db29cc66) --- src/test/regress/spec/isolation_metadata_sync_deadlock.spec | 1 + 1 file changed, 1 insertion(+) diff --git a/src/test/regress/spec/isolation_metadata_sync_deadlock.spec b/src/test/regress/spec/isolation_metadata_sync_deadlock.spec index 67c20a2b213..411faf8893f 100644 --- a/src/test/regress/spec/isolation_metadata_sync_deadlock.spec +++ b/src/test/regress/spec/isolation_metadata_sync_deadlock.spec @@ -22,6 +22,7 @@ setup teardown { + SELECT wait_until_metadata_sync(); DROP FUNCTION trigger_metadata_sync(); DROP TABLE deadlock_detection_test; DROP TABLE t2; From 3953fe4b3226aa90a4d2bcd87d2084b4fc2bea59 Mon Sep 17 00:00:00 2001 From: Onur Tirtir Date: Tue, 4 Feb 2025 14:16:24 +0300 Subject: [PATCH 15/18] Add an alternative output for multi_move_mx --- src/test/regress/expected/multi_move_mx_0.out | 277 ++++++++++++++++++ 1 file changed, 277 insertions(+) create mode 100644 src/test/regress/expected/multi_move_mx_0.out diff --git a/src/test/regress/expected/multi_move_mx_0.out b/src/test/regress/expected/multi_move_mx_0.out new file mode 100644 index 00000000000..77f904dcad7 --- /dev/null +++ b/src/test/regress/expected/multi_move_mx_0.out @@ -0,0 +1,277 @@ +-- +-- MULTI_MOVE_MX +-- +ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1550000; +-- Create mx test tables +SET citus.shard_count TO 4; +SET citus.shard_replication_factor TO 1; +CREATE TABLE mx_table_1 (a int); +SELECT create_distributed_table('mx_table_1', 'a'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +CREATE TABLE mx_table_2 (a int); +SELECT create_distributed_table('mx_table_2', 'a'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +CREATE TABLE mx_table_3 (a text); +SELECT create_distributed_table('mx_table_3', 'a'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +-- Check that the first two tables are colocated +SELECT + logicalrelid, repmodel +FROM + pg_dist_partition +WHERE + logicalrelid = 'mx_table_1'::regclass + OR logicalrelid = 'mx_table_2'::regclass + OR logicalrelid = 'mx_table_3'::regclass +ORDER BY + logicalrelid; + logicalrelid | repmodel +--------------------------------------------------------------------- + mx_table_1 | s + mx_table_2 | s + mx_table_3 | s +(3 rows) + +-- Check the list of shards +SELECT + logicalrelid, shardid, nodename, nodeport +FROM + pg_dist_shard NATURAL JOIN pg_dist_shard_placement +WHERE + logicalrelid = 'mx_table_1'::regclass + OR logicalrelid = 'mx_table_2'::regclass + OR logicalrelid = 'mx_table_3'::regclass +ORDER BY + logicalrelid, shardid; + logicalrelid | shardid | nodename | nodeport 
+--------------------------------------------------------------------- + mx_table_1 | 1550000 | localhost | 57637 + mx_table_1 | 1550001 | localhost | 57638 + mx_table_1 | 1550002 | localhost | 57637 + mx_table_1 | 1550003 | localhost | 57638 + mx_table_2 | 1550004 | localhost | 57637 + mx_table_2 | 1550005 | localhost | 57638 + mx_table_2 | 1550006 | localhost | 57637 + mx_table_2 | 1550007 | localhost | 57638 + mx_table_3 | 1550008 | localhost | 57637 + mx_table_3 | 1550009 | localhost | 57638 + mx_table_3 | 1550010 | localhost | 57637 + mx_table_3 | 1550011 | localhost | 57638 +(12 rows) + +-- Check the data on the worker +\c - - - :worker_2_port +SELECT + logicalrelid, shardid, nodename, nodeport +FROM + pg_dist_shard NATURAL JOIN pg_dist_shard_placement +WHERE + logicalrelid = 'mx_table_1'::regclass + OR logicalrelid = 'mx_table_2'::regclass + OR logicalrelid = 'mx_table_3'::regclass +ORDER BY + logicalrelid, shardid; + logicalrelid | shardid | nodename | nodeport +--------------------------------------------------------------------- + mx_table_1 | 1550000 | localhost | 57637 + mx_table_1 | 1550001 | localhost | 57638 + mx_table_1 | 1550002 | localhost | 57637 + mx_table_1 | 1550003 | localhost | 57638 + mx_table_2 | 1550004 | localhost | 57637 + mx_table_2 | 1550005 | localhost | 57638 + mx_table_2 | 1550006 | localhost | 57637 + mx_table_2 | 1550007 | localhost | 57638 + mx_table_3 | 1550008 | localhost | 57637 + mx_table_3 | 1550009 | localhost | 57638 + mx_table_3 | 1550010 | localhost | 57637 + mx_table_3 | 1550011 | localhost | 57638 +(12 rows) + +\c - - - :master_port +-- Check that citus_copy_shard_placement cannot be run with MX tables +SELECT + citus_copy_shard_placement(shardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port, 'force_logical') +FROM + pg_dist_shard NATURAL JOIN pg_dist_shard_placement +WHERE + logicalrelid = 'mx_table_1'::regclass + AND nodeport = :worker_1_port +ORDER BY + shardid +LIMIT 1; +ERROR: Table 'mx_table_1' is streaming replicated. Shards of streaming replicated tables cannot be copied +-- Move a shard from worker 1 to worker 2 +SELECT + master_move_shard_placement(shardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port, 'force_logical') +FROM + pg_dist_shard NATURAL JOIN pg_dist_shard_placement +WHERE + logicalrelid = 'mx_table_1'::regclass + AND nodeport = :worker_1_port +ORDER BY + shardid +LIMIT 1; + master_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +-- Test changing citus.node_conninfo on the target node affects the +-- CREATE SUBSCRIPTION command for shard move +\c - - - :worker_2_port +ALTER SYSTEM SET citus.node_conninfo TO 'sslrootcert=/non/existing/certificate.crt sslmode=verify-full'; +SELECT pg_reload_conf(); + pg_reload_conf +--------------------------------------------------------------------- + t +(1 row) + +\c - - - :worker_2_port +-- before reseting citus.node_conninfo, check that CREATE SUBSCRIPTION +-- with citus_use_authinfo takes into account node_conninfo even when +-- one of host, port, or user parameters are not specified. +-- +-- We need to specify host and port to not get an hba error, so we test +-- only with ommitting user. 
+CREATE SUBSCRIPTION subs_01 CONNECTION 'host=''localhost'' port=57637' +PUBLICATION pub_01 WITH (citus_use_authinfo=true); +ERROR: could not connect to the publisher: connection to server at "localhost" (::1), port 57637 failed: root certificate file "/non/existing/certificate.crt" does not exist +Either provide the file or change sslmode to disable server certificate verification. +ALTER SYSTEM RESET citus.node_conninfo; +SELECT pg_reload_conf(); + pg_reload_conf +--------------------------------------------------------------------- + t +(1 row) + +\c - - - :master_port +-- Check that the shard and its colocated shard is moved, but not the other shards +SELECT + logicalrelid, shardid, nodename, nodeport +FROM + pg_dist_shard NATURAL JOIN pg_dist_shard_placement +WHERE + (logicalrelid = 'mx_table_1'::regclass + OR logicalrelid = 'mx_table_2'::regclass + OR logicalrelid = 'mx_table_3'::regclass) + AND shardstate != 4 +ORDER BY + logicalrelid, shardid; + logicalrelid | shardid | nodename | nodeport +--------------------------------------------------------------------- + mx_table_1 | 1550000 | localhost | 57638 + mx_table_1 | 1550001 | localhost | 57638 + mx_table_1 | 1550002 | localhost | 57637 + mx_table_1 | 1550003 | localhost | 57638 + mx_table_2 | 1550004 | localhost | 57638 + mx_table_2 | 1550005 | localhost | 57638 + mx_table_2 | 1550006 | localhost | 57637 + mx_table_2 | 1550007 | localhost | 57638 + mx_table_3 | 1550008 | localhost | 57637 + mx_table_3 | 1550009 | localhost | 57638 + mx_table_3 | 1550010 | localhost | 57637 + mx_table_3 | 1550011 | localhost | 57638 +(12 rows) + +-- Check that the changes are made in the worker as well +\c - - - :worker_2_port +SELECT + logicalrelid, shardid, nodename, nodeport +FROM + pg_dist_shard NATURAL JOIN pg_dist_shard_placement +WHERE + logicalrelid = 'mx_table_1'::regclass + OR logicalrelid = 'mx_table_2'::regclass + OR logicalrelid = 'mx_table_3'::regclass +ORDER BY + logicalrelid, shardid; + logicalrelid | shardid | nodename | nodeport +--------------------------------------------------------------------- + mx_table_1 | 1550000 | localhost | 57638 + mx_table_1 | 1550001 | localhost | 57638 + mx_table_1 | 1550002 | localhost | 57637 + mx_table_1 | 1550003 | localhost | 57638 + mx_table_2 | 1550004 | localhost | 57638 + mx_table_2 | 1550005 | localhost | 57638 + mx_table_2 | 1550006 | localhost | 57637 + mx_table_2 | 1550007 | localhost | 57638 + mx_table_3 | 1550008 | localhost | 57637 + mx_table_3 | 1550009 | localhost | 57638 + mx_table_3 | 1550010 | localhost | 57637 + mx_table_3 | 1550011 | localhost | 57638 +(12 rows) + +-- Check that the UDFs cannot be called from the workers +SELECT + citus_copy_shard_placement(shardid, 'localhost', :worker_2_port, 'localhost', :worker_1_port, 'force_logical') +FROM + pg_dist_shard NATURAL JOIN pg_dist_shard_placement +WHERE + logicalrelid = 'mx_table_1'::regclass + AND nodeport = :worker_2_port +ORDER BY + shardid +LIMIT 1 OFFSET 1; +ERROR: operation is not allowed on this node +HINT: Connect to the coordinator and run it again. +SELECT + master_move_shard_placement(shardid, 'localhost', :worker_2_port, 'localhost', :worker_1_port, 'force_logical') +FROM + pg_dist_shard NATURAL JOIN pg_dist_shard_placement +WHERE + logicalrelid = 'mx_table_1'::regclass + AND nodeport = :worker_2_port +ORDER BY + shardid +LIMIT 1 OFFSET 1; +ERROR: operation is not allowed on this node +HINT: Connect to the coordinator and run it again. +-- Check that shards of a table with GENERATED columns can be moved. 
+\c - - - :master_port +SET citus.shard_count TO 4; +SET citus.shard_replication_factor TO 1; +CREATE TABLE mx_table_with_generated_column (a int, b int GENERATED ALWAYS AS ( a + 3 ) STORED, c int); +SELECT create_distributed_table('mx_table_with_generated_column', 'a'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +-- Check that dropped columns are handled properly in a move. +ALTER TABLE mx_table_with_generated_column DROP COLUMN c; +-- Move a shard from worker 1 to worker 2 +SELECT + citus_move_shard_placement(shardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port, 'force_logical') +FROM + pg_dist_shard NATURAL JOIN pg_dist_shard_placement +WHERE + logicalrelid = 'mx_table_with_generated_column'::regclass + AND nodeport = :worker_1_port +ORDER BY + shardid +LIMIT 1; + citus_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +-- Cleanup +\c - - - :master_port +SET client_min_messages TO WARNING; +CALL citus_cleanup_orphaned_resources(); +DROP TABLE mx_table_with_generated_column; +DROP TABLE mx_table_1; +DROP TABLE mx_table_2; +DROP TABLE mx_table_3; From 3bc9c751e03cafa7ae20887b9c2880c5d5b4b6c2 Mon Sep 17 00:00:00 2001 From: Onur Tirtir Date: Tue, 26 Sep 2023 17:52:52 +0300 Subject: [PATCH 16/18] Fix mixed Citus upgrade tests (#7218) When testing rolling Citus upgrades, coordinator should not be upgraded until we upgrade all the workers. --------- Co-authored-by: Jelte Fennema-Nio (-partially- cherry picked from commit 27ac44eb2a6be1c12c06f193d8d8511509a54bca) --- .../citus_tests/upgrade/citus_upgrade_test.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/src/test/regress/citus_tests/upgrade/citus_upgrade_test.py b/src/test/regress/citus_tests/upgrade/citus_upgrade_test.py index 3ea51d5d935..6dfc51e512b 100755 --- a/src/test/regress/citus_tests/upgrade/citus_upgrade_test.py +++ b/src/test/regress/citus_tests/upgrade/citus_upgrade_test.py @@ -102,9 +102,9 @@ def remove_tar_files(tar_path): def restart_databases(pg_path, rel_data_path, mixed_mode, config): for node_name in config.node_name_to_ports.keys(): - if ( - mixed_mode - and config.node_name_to_ports[node_name] == config.chosen_random_worker_port + if mixed_mode and config.node_name_to_ports[node_name] in ( + config.chosen_random_worker_port, + config.coordinator_port(), ): continue abs_data_path = os.path.abspath(os.path.join(rel_data_path, node_name)) @@ -135,7 +135,10 @@ def restart_database(pg_path, abs_data_path, node_name, node_ports, logfile_pref def run_alter_citus(pg_path, mixed_mode, config): for port in config.node_name_to_ports.values(): - if mixed_mode and port == config.chosen_random_worker_port: + if mixed_mode and port in ( + config.chosen_random_worker_port, + config.coordinator_port(), + ): continue utils.psql(pg_path, port, "ALTER EXTENSION citus UPDATE;") @@ -145,7 +148,8 @@ def verify_upgrade(config, mixed_mode, node_ports): actual_citus_version = get_actual_citus_version(config.bindir, port) expected_citus_version = MASTER_VERSION if expected_citus_version != actual_citus_version and not ( - mixed_mode and port == config.chosen_random_worker_port + mixed_mode + and port in (config.chosen_random_worker_port, config.coordinator_port()) ): print( "port: {} citus version {} expected {}".format( From 117184c8bc68af7ca25571cbeb75bd00a6d7db8f Mon Sep 17 00:00:00 2001 From: Onur Tirtir Date: Tue, 4 Feb 2025 16:15:08 +0300 Subject: [PATCH 17/18] Revert "Fix 
mixed Citus upgrade tests (#7218)" This reverts commit 3bc9c751e03cafa7ae20887b9c2880c5d5b4b6c2. --- .../citus_tests/upgrade/citus_upgrade_test.py | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/src/test/regress/citus_tests/upgrade/citus_upgrade_test.py b/src/test/regress/citus_tests/upgrade/citus_upgrade_test.py index 6dfc51e512b..3ea51d5d935 100755 --- a/src/test/regress/citus_tests/upgrade/citus_upgrade_test.py +++ b/src/test/regress/citus_tests/upgrade/citus_upgrade_test.py @@ -102,9 +102,9 @@ def remove_tar_files(tar_path): def restart_databases(pg_path, rel_data_path, mixed_mode, config): for node_name in config.node_name_to_ports.keys(): - if mixed_mode and config.node_name_to_ports[node_name] in ( - config.chosen_random_worker_port, - config.coordinator_port(), + if ( + mixed_mode + and config.node_name_to_ports[node_name] == config.chosen_random_worker_port ): continue abs_data_path = os.path.abspath(os.path.join(rel_data_path, node_name)) @@ -135,10 +135,7 @@ def restart_database(pg_path, abs_data_path, node_name, node_ports, logfile_pref def run_alter_citus(pg_path, mixed_mode, config): for port in config.node_name_to_ports.values(): - if mixed_mode and port in ( - config.chosen_random_worker_port, - config.coordinator_port(), - ): + if mixed_mode and port == config.chosen_random_worker_port: continue utils.psql(pg_path, port, "ALTER EXTENSION citus UPDATE;") @@ -148,8 +145,7 @@ def verify_upgrade(config, mixed_mode, node_ports): actual_citus_version = get_actual_citus_version(config.bindir, port) expected_citus_version = MASTER_VERSION if expected_citus_version != actual_citus_version and not ( - mixed_mode - and port in (config.chosen_random_worker_port, config.coordinator_port()) + mixed_mode and port == config.chosen_random_worker_port ): print( "port: {} citus version {} expected {}".format( From 149671415b7ad660d38ec36e5b616c14ec788596 Mon Sep 17 00:00:00 2001 From: Onur Tirtir Date: Tue, 26 Sep 2023 17:52:52 +0300 Subject: [PATCH 18/18] Fix mixed Citus upgrade tests (#7218) When testing rolling Citus upgrades, coordinator should not be upgraded until we upgrade all the workers. 
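The gist of the fix is the skip condition shared by restart_databases,
run_alter_citus and verify_upgrade. A condensed sketch (the helper name is
hypothetical; the attribute and method names are the ones used in
citus_upgrade_test.py):

```python
def stays_on_old_version(port, mixed_mode, config):
    # In a mixed-mode upgrade test, both the randomly chosen worker and
    # the coordinator must keep running the old Citus version until all
    # other workers have been upgraded.
    return mixed_mode and port in (
        config.chosen_random_worker_port,
        config.coordinator_port(),
    )
```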
--------- Co-authored-by: Jelte Fennema-Nio (cherry picked from commit 27ac44eb2a6be1c12c06f193d8d8511509a54bca) --- .../after_citus_upgrade_coord_schedule | 1 + src/test/regress/after_pg_upgrade_schedule | 2 +- .../citus_tests/upgrade/citus_upgrade_test.py | 14 ++- .../regress/expected/upgrade_basic_after.out | 94 ------------------- .../upgrade_basic_after_non_mixed.out | 94 +++++++++++++++++++ src/test/regress/sql/upgrade_basic_after.sql | 42 --------- .../sql/upgrade_basic_after_non_mixed.sql | 42 +++++++++ 7 files changed, 147 insertions(+), 142 deletions(-) create mode 100644 src/test/regress/expected/upgrade_basic_after_non_mixed.out create mode 100644 src/test/regress/sql/upgrade_basic_after_non_mixed.sql diff --git a/src/test/regress/after_citus_upgrade_coord_schedule b/src/test/regress/after_citus_upgrade_coord_schedule index 6a2a5255a11..c5468616ebb 100644 --- a/src/test/regress/after_citus_upgrade_coord_schedule +++ b/src/test/regress/after_citus_upgrade_coord_schedule @@ -3,6 +3,7 @@ test: upgrade_citus_finish_citus_upgrade test: upgrade_pg_dist_cleanup_after test: upgrade_basic_after +test: upgrade_basic_after_non_mixed test: upgrade_partition_constraints_after test: upgrade_pg_dist_object_test_after test: upgrade_columnar_metapage_after diff --git a/src/test/regress/after_pg_upgrade_schedule b/src/test/regress/after_pg_upgrade_schedule index 6b8c3973f59..5928b173608 100644 --- a/src/test/regress/after_pg_upgrade_schedule +++ b/src/test/regress/after_pg_upgrade_schedule @@ -1,4 +1,4 @@ -test: upgrade_basic_after upgrade_type_after upgrade_ref2ref_after upgrade_distributed_function_after upgrade_rebalance_strategy_after upgrade_list_citus_objects upgrade_autoconverted_after upgrade_citus_stat_activity upgrade_citus_locks upgrade_distributed_triggers_after +test: upgrade_basic_after upgrade_ref2ref_after upgrade_type_after upgrade_distributed_function_after upgrade_rebalance_strategy_after upgrade_list_citus_objects upgrade_autoconverted_after upgrade_citus_stat_activity upgrade_citus_locks upgrade_distributed_triggers_after upgrade_basic_after_non_mixed # This attempts dropping citus extension (and rollbacks), so please do # not run in parallel with any other tests. 
diff --git a/src/test/regress/citus_tests/upgrade/citus_upgrade_test.py b/src/test/regress/citus_tests/upgrade/citus_upgrade_test.py index 3ea51d5d935..6dfc51e512b 100755 --- a/src/test/regress/citus_tests/upgrade/citus_upgrade_test.py +++ b/src/test/regress/citus_tests/upgrade/citus_upgrade_test.py @@ -102,9 +102,9 @@ def remove_tar_files(tar_path): def restart_databases(pg_path, rel_data_path, mixed_mode, config): for node_name in config.node_name_to_ports.keys(): - if ( - mixed_mode - and config.node_name_to_ports[node_name] == config.chosen_random_worker_port + if mixed_mode and config.node_name_to_ports[node_name] in ( + config.chosen_random_worker_port, + config.coordinator_port(), ): continue abs_data_path = os.path.abspath(os.path.join(rel_data_path, node_name)) @@ -135,7 +135,10 @@ def restart_database(pg_path, abs_data_path, node_name, node_ports, logfile_pref def run_alter_citus(pg_path, mixed_mode, config): for port in config.node_name_to_ports.values(): - if mixed_mode and port == config.chosen_random_worker_port: + if mixed_mode and port in ( + config.chosen_random_worker_port, + config.coordinator_port(), + ): continue utils.psql(pg_path, port, "ALTER EXTENSION citus UPDATE;") @@ -145,7 +148,8 @@ def verify_upgrade(config, mixed_mode, node_ports): actual_citus_version = get_actual_citus_version(config.bindir, port) expected_citus_version = MASTER_VERSION if expected_citus_version != actual_citus_version and not ( - mixed_mode and port == config.chosen_random_worker_port + mixed_mode + and port in (config.chosen_random_worker_port, config.coordinator_port()) ): print( "port: {} citus version {} expected {}".format( diff --git a/src/test/regress/expected/upgrade_basic_after.out b/src/test/regress/expected/upgrade_basic_after.out index ff118c593f5..cbe3efc7402 100644 --- a/src/test/regress/expected/upgrade_basic_after.out +++ b/src/test/regress/expected/upgrade_basic_after.out @@ -9,100 +9,6 @@ SELECT * FROM pg_indexes WHERE schemaname = 'upgrade_basic' and tablename NOT LI upgrade_basic | tp | tp_pkey | | CREATE UNIQUE INDEX tp_pkey ON upgrade_basic.tp USING btree (a) (3 rows) -SELECT nextval('pg_dist_shardid_seq') = MAX(shardid)+1 FROM pg_dist_shard; - ?column? ---------------------------------------------------------------------- - t -(1 row) - -SELECT nextval('pg_dist_placement_placementid_seq') = MAX(placementid)+1 FROM pg_dist_placement; - ?column? ---------------------------------------------------------------------- - t -(1 row) - -SELECT nextval('pg_dist_groupid_seq') = MAX(groupid)+1 FROM pg_dist_node; - ?column? ---------------------------------------------------------------------- - t -(1 row) - -SELECT nextval('pg_dist_node_nodeid_seq') = MAX(nodeid)+1 FROM pg_dist_node; - ?column? ---------------------------------------------------------------------- - t -(1 row) - -SELECT nextval('pg_dist_colocationid_seq') = MAX(colocationid)+1 FROM pg_dist_colocation; - ?column? 
----------------------------------------------------------------------
- t
-(1 row)
-
--- while testing sequences on pg_dist_cleanup, they return null in pg upgrade schedule
--- but return a valid value in citus upgrade schedule
--- that's why we accept both NULL and MAX()+1 here
-SELECT
-    CASE WHEN MAX(operation_id) IS NULL
-    THEN true
-    ELSE nextval('pg_dist_operationid_seq') = MAX(operation_id)+1
-    END AS check_operationid
-    FROM pg_dist_cleanup;
- check_operationid
----------------------------------------------------------------------
- t
-(1 row)
-
-SELECT
-    CASE WHEN MAX(record_id) IS NULL
-    THEN true
-    ELSE nextval('pg_dist_cleanup_recordid_seq') = MAX(record_id)+1
-    END AS check_recordid
-    FROM pg_dist_cleanup;
- check_recordid
----------------------------------------------------------------------
- t
-(1 row)
-
-SELECT nextval('pg_dist_background_job_job_id_seq') > COALESCE(MAX(job_id), 0) FROM pg_dist_background_job;
- ?column?
----------------------------------------------------------------------
- t
-(1 row)
-
-SELECT nextval('pg_dist_background_task_task_id_seq') > COALESCE(MAX(task_id), 0) FROM pg_dist_background_task;
- ?column?
----------------------------------------------------------------------
- t
-(1 row)
-
-SELECT last_value > 0 FROM pg_dist_clock_logical_seq;
- ?column?
----------------------------------------------------------------------
- t
-(1 row)
-
--- If this query gives output it means we've added a new sequence that should
--- possibly be restored after upgrades.
-SELECT sequence_name FROM information_schema.sequences
-  WHERE sequence_name LIKE 'pg_dist_%'
-  AND sequence_name NOT IN (
-    -- these ones are restored above
-    'pg_dist_shardid_seq',
-    'pg_dist_placement_placementid_seq',
-    'pg_dist_groupid_seq',
-    'pg_dist_node_nodeid_seq',
-    'pg_dist_colocationid_seq',
-    'pg_dist_operationid_seq',
-    'pg_dist_cleanup_recordid_seq',
-    'pg_dist_background_job_job_id_seq',
-    'pg_dist_background_task_task_id_seq',
-    'pg_dist_clock_logical_seq'
-  );
- sequence_name
----------------------------------------------------------------------
-(0 rows)
-
 SELECT logicalrelid FROM pg_dist_partition
   JOIN pg_depend ON logicalrelid=objid
   JOIN pg_catalog.pg_class ON logicalrelid=oid
diff --git a/src/test/regress/expected/upgrade_basic_after_non_mixed.out b/src/test/regress/expected/upgrade_basic_after_non_mixed.out
new file mode 100644
index 00000000000..8dbc13babf8
--- /dev/null
+++ b/src/test/regress/expected/upgrade_basic_after_non_mixed.out
@@ -0,0 +1,94 @@
+SELECT nextval('pg_dist_shardid_seq') > MAX(shardid) FROM pg_dist_shard;
+ ?column?
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SELECT nextval('pg_dist_placement_placementid_seq') > MAX(placementid) FROM pg_dist_placement;
+ ?column?
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SELECT nextval('pg_dist_groupid_seq') > MAX(groupid) FROM pg_dist_node;
+ ?column?
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SELECT nextval('pg_dist_node_nodeid_seq') > MAX(nodeid) FROM pg_dist_node;
+ ?column?
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SELECT nextval('pg_dist_colocationid_seq') > MAX(colocationid) FROM pg_dist_colocation;
+ ?column?
+---------------------------------------------------------------------
+ t
+(1 row)
+
+-- The sequence checks on pg_dist_cleanup return NULL in the pg upgrade schedule
+-- but a valid value in the citus upgrade schedule; that's why we accept both
+-- NULL and a value greater than MAX() here.
+SELECT
+    CASE WHEN MAX(operation_id) IS NULL
+    THEN true
+    ELSE nextval('pg_dist_operationid_seq') > MAX(operation_id)
+    END AS check_operationid
+    FROM pg_dist_cleanup;
+ check_operationid
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SELECT
+    CASE WHEN MAX(record_id) IS NULL
+    THEN true
+    ELSE nextval('pg_dist_cleanup_recordid_seq') > MAX(record_id)
+    END AS check_recordid
+    FROM pg_dist_cleanup;
+ check_recordid
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SELECT nextval('pg_dist_background_job_job_id_seq') > COALESCE(MAX(job_id), 0) FROM pg_dist_background_job;
+ ?column?
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SELECT nextval('pg_dist_background_task_task_id_seq') > COALESCE(MAX(task_id), 0) FROM pg_dist_background_task;
+ ?column?
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SELECT last_value > 0 FROM pg_dist_clock_logical_seq;
+ ?column?
+---------------------------------------------------------------------
+ t
+(1 row)
+
+-- If this query gives output it means we've added a new sequence that should
+-- possibly be restored after upgrades.
+SELECT sequence_name FROM information_schema.sequences
+  WHERE sequence_name LIKE 'pg_dist_%'
+  AND sequence_name NOT IN (
+    -- these ones are restored above
+    'pg_dist_shardid_seq',
+    'pg_dist_placement_placementid_seq',
+    'pg_dist_groupid_seq',
+    'pg_dist_node_nodeid_seq',
+    'pg_dist_colocationid_seq',
+    'pg_dist_operationid_seq',
+    'pg_dist_cleanup_recordid_seq',
+    'pg_dist_background_job_job_id_seq',
+    'pg_dist_background_task_task_id_seq',
+    'pg_dist_clock_logical_seq'
+  );
+ sequence_name
+---------------------------------------------------------------------
+(0 rows)
+
diff --git a/src/test/regress/sql/upgrade_basic_after.sql b/src/test/regress/sql/upgrade_basic_after.sql
index 03c218a0657..96712acf4ce 100644
--- a/src/test/regress/sql/upgrade_basic_after.sql
+++ b/src/test/regress/sql/upgrade_basic_after.sql
@@ -3,48 +3,6 @@ BEGIN;
 -- We have the tablename filter to avoid adding an alternative output for when the coordinator is in metadata vs when not
 SELECT * FROM pg_indexes WHERE schemaname = 'upgrade_basic' and tablename NOT LIKE 'r_%' ORDER BY tablename;

-SELECT nextval('pg_dist_shardid_seq') = MAX(shardid)+1 FROM pg_dist_shard;
-SELECT nextval('pg_dist_placement_placementid_seq') = MAX(placementid)+1 FROM pg_dist_placement;
-SELECT nextval('pg_dist_groupid_seq') = MAX(groupid)+1 FROM pg_dist_node;
-SELECT nextval('pg_dist_node_nodeid_seq') = MAX(nodeid)+1 FROM pg_dist_node;
-SELECT nextval('pg_dist_colocationid_seq') = MAX(colocationid)+1 FROM pg_dist_colocation;
--- while testing sequences on pg_dist_cleanup, they return null in pg upgrade schedule
--- but return a valid value in citus upgrade schedule
--- that's why we accept both NULL and MAX()+1 here
-SELECT
-    CASE WHEN MAX(operation_id) IS NULL
-    THEN true
-    ELSE nextval('pg_dist_operationid_seq') = MAX(operation_id)+1
-    END AS check_operationid
-    FROM pg_dist_cleanup;
-SELECT
-    CASE WHEN MAX(record_id) IS NULL
-    THEN true
-    ELSE nextval('pg_dist_cleanup_recordid_seq') = MAX(record_id)+1
-    END AS check_recordid
-    FROM pg_dist_cleanup;
-SELECT nextval('pg_dist_background_job_job_id_seq') > COALESCE(MAX(job_id), 0) FROM pg_dist_background_job;
-SELECT nextval('pg_dist_background_task_task_id_seq') > COALESCE(MAX(task_id), 0) FROM pg_dist_background_task;
-SELECT last_value > 0 FROM pg_dist_clock_logical_seq;
-
--- If this query gives output it means we've added a new sequence that should
--- possibly be restored after upgrades.
-SELECT sequence_name FROM information_schema.sequences
-  WHERE sequence_name LIKE 'pg_dist_%'
-  AND sequence_name NOT IN (
-    -- these ones are restored above
-    'pg_dist_shardid_seq',
-    'pg_dist_placement_placementid_seq',
-    'pg_dist_groupid_seq',
-    'pg_dist_node_nodeid_seq',
-    'pg_dist_colocationid_seq',
-    'pg_dist_operationid_seq',
-    'pg_dist_cleanup_recordid_seq',
-    'pg_dist_background_job_job_id_seq',
-    'pg_dist_background_task_task_id_seq',
-    'pg_dist_clock_logical_seq'
-  );
-
 SELECT logicalrelid FROM pg_dist_partition
   JOIN pg_depend ON logicalrelid=objid
   JOIN pg_catalog.pg_class ON logicalrelid=oid
diff --git a/src/test/regress/sql/upgrade_basic_after_non_mixed.sql b/src/test/regress/sql/upgrade_basic_after_non_mixed.sql
new file mode 100644
index 00000000000..17b8367fbbd
--- /dev/null
+++ b/src/test/regress/sql/upgrade_basic_after_non_mixed.sql
@@ -0,0 +1,42 @@
+SELECT nextval('pg_dist_shardid_seq') > MAX(shardid) FROM pg_dist_shard;
+SELECT nextval('pg_dist_placement_placementid_seq') > MAX(placementid) FROM pg_dist_placement;
+SELECT nextval('pg_dist_groupid_seq') > MAX(groupid) FROM pg_dist_node;
+SELECT nextval('pg_dist_node_nodeid_seq') > MAX(nodeid) FROM pg_dist_node;
+SELECT nextval('pg_dist_colocationid_seq') > MAX(colocationid) FROM pg_dist_colocation;
+
+-- The sequence checks on pg_dist_cleanup return NULL in the pg upgrade schedule
+-- but a valid value in the citus upgrade schedule; that's why we accept both
+-- NULL and a value greater than MAX() here.
+SELECT
+    CASE WHEN MAX(operation_id) IS NULL
+    THEN true
+    ELSE nextval('pg_dist_operationid_seq') > MAX(operation_id)
+    END AS check_operationid
+    FROM pg_dist_cleanup;
+SELECT
+    CASE WHEN MAX(record_id) IS NULL
+    THEN true
+    ELSE nextval('pg_dist_cleanup_recordid_seq') > MAX(record_id)
+    END AS check_recordid
+    FROM pg_dist_cleanup;
+SELECT nextval('pg_dist_background_job_job_id_seq') > COALESCE(MAX(job_id), 0) FROM pg_dist_background_job;
+SELECT nextval('pg_dist_background_task_task_id_seq') > COALESCE(MAX(task_id), 0) FROM pg_dist_background_task;
+SELECT last_value > 0 FROM pg_dist_clock_logical_seq;
+
+-- If this query gives output it means we've added a new sequence that should
+-- possibly be restored after upgrades.
+SELECT sequence_name FROM information_schema.sequences
+  WHERE sequence_name LIKE 'pg_dist_%'
+  AND sequence_name NOT IN (
+    -- these ones are restored above
+    'pg_dist_shardid_seq',
+    'pg_dist_placement_placementid_seq',
+    'pg_dist_groupid_seq',
+    'pg_dist_node_nodeid_seq',
+    'pg_dist_colocationid_seq',
+    'pg_dist_operationid_seq',
+    'pg_dist_cleanup_recordid_seq',
+    'pg_dist_background_job_job_id_seq',
+    'pg_dist_background_task_task_id_seq',
+    'pg_dist_clock_logical_seq'
+  );
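A note on the relaxed invariant used in the new file: nextval() consumes a value on every call, so a strict "nextval = MAX()+1" check only holds if nothing else has touched the sequence since it was restored, whereas "nextval > MAX()" survives any number of extra calls. Below is a minimal psql sketch of the difference against a scratch sequence and table; demo_seq and demo_tab are illustrative names, not part of the regression suite.

    -- scratch objects, safe to create and drop in any database
    CREATE SEQUENCE demo_seq;
    CREATE TABLE demo_tab (id bigint);
    -- consume values 1..3, mimicking a sequence restored in step with MAX(id)
    INSERT INTO demo_tab SELECT nextval('demo_seq') FROM generate_series(1, 3);
    SELECT nextval('demo_seq') = MAX(id) + 1 FROM demo_tab; -- t: nextval returns 4, and 4 = 3 + 1
    SELECT nextval('demo_seq') > MAX(id) FROM demo_tab;     -- t: nextval returns 5, and 5 > 3
    SELECT nextval('demo_seq') = MAX(id) + 1 FROM demo_tab; -- f: nextval returns 6, equality no longer holds
    DROP TABLE demo_tab;
    DROP SEQUENCE demo_seq;

The inequality stays true however many times the sequence is advanced in between, so it is robust to incidental nextval() calls during the upgrade flow.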